1 //===- SCCP.cpp - Sparse Conditional Constant Propagation -----------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements sparse conditional constant propagation and merging:
11 // Specifically, this:
12 // * Assumes values are constant unless proven otherwise
13 // * Assumes BasicBlocks are dead unless proven otherwise
14 // * Proves values to be constant, and replaces them with constants
15 // * Proves conditional branches to be unconditional
17 //===----------------------------------------------------------------------===//
19 #include "llvm/Transforms/Scalar/SCCP.h"
20 #include "llvm/ADT/ArrayRef.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/ADT/DenseSet.h"
23 #include "llvm/ADT/MapVector.h"
24 #include "llvm/ADT/PointerIntPair.h"
25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/Analysis/ConstantFolding.h"
30 #include "llvm/Analysis/GlobalsModRef.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 #include "llvm/Analysis/ValueLattice.h"
34 #include "llvm/Analysis/ValueLatticeUtils.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CallSite.h"
37 #include "llvm/IR/Constant.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DerivedTypes.h"
41 #include "llvm/IR/Function.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/InstVisitor.h"
44 #include "llvm/IR/InstrTypes.h"
45 #include "llvm/IR/Instruction.h"
46 #include "llvm/IR/Instructions.h"
47 #include "llvm/IR/Module.h"
48 #include "llvm/IR/PassManager.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/User.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/ErrorHandling.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include "llvm/Transforms/Scalar.h"
58 #include "llvm/Transforms/Utils/PredicateInfo.h"
// Debug category for LLVM_DEBUG output and the STATISTIC counters below
// (enable with -debug-only=sccp).
65 #define DEBUG_TYPE "sccp"
// Counters reported under -stats.  Per their description strings, the IP*
// counters are bumped by the interprocedural (IPSCCP) driver.
67 STATISTIC(NumInstRemoved, "Number of instructions removed");
68 STATISTIC(NumDeadBlocks , "Number of basic blocks unreachable");
70 STATISTIC(IPNumInstRemoved, "Number of instructions removed by IPSCCP");
71 STATISTIC(IPNumArgsElimed ,"Number of arguments constant propagated by IPSCCP");
72 STATISTIC(IPNumGlobalConst, "Number of globals found to be constant by IPSCCP");
76 /// LatticeVal class - This class represents the different lattice values that
77 /// an LLVM value may occupy. It is a simple class with value semantics.
// NOTE(review): this excerpt is elided — the `class LatticeVal {` line, the
// enum declaration that introduces the four state tags referenced below, and
// several closing braces / return statements are not visible here.  Confirm
// against the original file before editing.
//
// The four lattice states (values only move "down": unknown ->
// constant/forcedconstant -> overdefined; markConstant asserts that a move
// from overdefined back to constant is impossible):
81 /// unknown - This LLVM Value has no known value yet.
84 /// constant - This LLVM Value has a specific constant value.
87 /// forcedconstant - This LLVM Value was thought to be undef until
88 /// ResolvedUndefsIn. This is treated just like 'constant', but if merged
89 /// with another (different) constant, it goes to overdefined, instead of
93 /// overdefined - This instruction is not known to be constant, and we know
98 /// Val: This stores the current lattice value along with the Constant* for
99 /// the constant if this is a 'constant' or 'forcedconstant' value.
// PointerIntPair packs the 2-bit state tag into the low bits of the
// Constant*, keeping LatticeVal pointer-sized.
100 PointerIntPair<Constant *, 2, LatticeValueTy> Val;
102 LatticeValueTy getLatticeValue() const {
// Default-constructed values start at the top of the lattice: unknown.
107 LatticeVal() : Val(nullptr, unknown) {}
109 bool isUnknown() const { return getLatticeValue() == unknown; }
// Note: forcedconstant is deliberately reported as "constant" here.
111 bool isConstant() const {
112 return getLatticeValue() == constant || getLatticeValue() == forcedconstant;
115 bool isOverdefined() const { return getLatticeValue() == overdefined; }
// Precondition: isConstant() (asserted) — also valid for forcedconstant.
117 Constant *getConstant() const {
118 assert(isConstant() && "Cannot get the constant of a non-constant!");
119 return Val.getPointer();
122 /// markOverdefined - Return true if this is a change in status.
123 bool markOverdefined() {
127 Val.setInt(overdefined);
131 /// markConstant - Return true if this is a change in status.
132 bool markConstant(Constant *V) {
133 if (getLatticeValue() == constant) { // Constant but not forcedconstant.
134 assert(getConstant() == V && "Marking constant with different value");
139 Val.setInt(constant);
140 assert(V && "Marking constant with NULL");
// From here on the current state must be forcedconstant: merging a second,
// different constant into a forced constant lowers the value to overdefined.
143 assert(getLatticeValue() == forcedconstant &&
144 "Cannot move from overdefined to constant!");
145 // Stay at forcedconstant if the constant is the same.
146 if (V == getConstant()) return false;
148 // Otherwise, we go to overdefined. Assumptions made based on the
149 // forced value are possibly wrong. Assuming this is another constant
150 // could expose a contradiction.
151 Val.setInt(overdefined);
156 /// getConstantInt - If this is a constant with a ConstantInt value, return it
157 /// otherwise return null.
158 ConstantInt *getConstantInt() const {
160 return dyn_cast<ConstantInt>(getConstant());
164 /// getBlockAddress - If this is a constant with a BlockAddress value, return
165 /// it, otherwise return null.
166 BlockAddress *getBlockAddress() const {
168 return dyn_cast<BlockAddress>(getConstant());
// Force an unknown value directly to the forcedconstant state (used by
// ResolvedUndefsIn to pick a concrete value for an undef).
172 void markForcedConstant(Constant *V) {
173 assert(isUnknown() && "Can't force a defined value!");
174 Val.setInt(forcedconstant);
// Convert this LatticeVal into the richer ValueLatticeElement representation
// (overdefined / constant / unknown), used for ParamState below.
178 ValueLatticeElement toValueLattice() const {
180 return ValueLatticeElement::getOverdefined();
182 return ValueLatticeElement::get(getConstant());
183 return ValueLatticeElement();
187 //===----------------------------------------------------------------------===//
189 /// SCCPSolver - This class is a general purpose solver for Sparse Conditional
190 /// Constant Propagation.
// SCCPSolver visits instructions via InstVisitor (CRTP) as values change
// state or blocks become executable.
192 class SCCPSolver : public InstVisitor<SCCPSolver> {
193 const DataLayout &DL;
194 const TargetLibraryInfo *TLI;
195 SmallPtrSet<BasicBlock *, 8> BBExecutable; // The BBs that are executable.
196 DenseMap<Value *, LatticeVal> ValueState; // The state each value is in.
197 // The state each parameter is in.
198 DenseMap<Value *, ValueLatticeElement> ParamState;
200 /// StructValueState - This maintains ValueState for values that have
201 /// StructType, for example for formal arguments, calls, insertelement, etc.
// Keyed by (struct value, element index) — one lattice value per field.
202 DenseMap<std::pair<Value *, unsigned>, LatticeVal> StructValueState;
204 /// GlobalValue - If we are tracking any values for the contents of a global
205 /// variable, we keep a mapping from the constant accessor to the element of
206 /// the global, to the currently known value. If the value becomes
207 /// overdefined, it's entry is simply removed from this map.
208 DenseMap<GlobalVariable *, LatticeVal> TrackedGlobals;
210 /// TrackedRetVals - If we are tracking arguments into and the return
211 /// value out of a function, it will have an entry in this map, indicating
212 /// what the known return value for the function is.
213 MapVector<Function *, LatticeVal> TrackedRetVals;
215 /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
216 /// that return multiple values.
217 MapVector<std::pair<Function *, unsigned>, LatticeVal> TrackedMultipleRetVals;
219 /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
220 /// represented here for efficient lookup.
221 SmallPtrSet<Function *, 16> MRVFunctionsTracked;
223 /// MustTailFunctions - Each function here is a callee of non-removable
224 /// musttail call site.
225 SmallPtrSet<Function *, 16> MustTailCallees;
227 /// TrackingIncomingArguments - This is the set of functions for whose
228 /// arguments we make optimistic assumptions about and try to prove as
230 SmallPtrSet<Function *, 16> TrackingIncomingArguments;
232 /// The reason for two worklists is that overdefined is the lowest state
233 /// on the lattice, and moving things to overdefined as fast as possible
234 /// makes SCCP converge much faster.
236 /// By having a separate worklist, we accomplish this because everything
237 /// possibly overdefined will become overdefined at the soonest possible
239 SmallVector<Value *, 64> OverdefinedInstWorkList;
240 SmallVector<Value *, 64> InstWorkList;
242 // The BasicBlock work list
243 SmallVector<BasicBlock *, 64> BBWorkList;
245 /// KnownFeasibleEdges - Entries in this set are edges which have already had
246 /// PHI nodes retriggered.
247 using Edge = std::pair<BasicBlock *, BasicBlock *>;
248 DenseSet<Edge> KnownFeasibleEdges;
// Per-function analysis results (PredicateInfo, DT/PDT) registered by the
// driver via addAnalysis, plus extra def-use edges beyond IR use-lists.
250 DenseMap<Function *, AnalysisResultsForFn> AnalysisResults;
251 DenseMap<Value *, SmallPtrSet<User *, 2>> AdditionalUsers;
254 void addAnalysis(Function &F, AnalysisResultsForFn A) {
255 AnalysisResults.insert({&F, std::move(A)});
// Look up PredicateInfo for I's enclosing function.  (The early-return body
// for a missing entry is elided in this excerpt — presumably nullptr;
// confirm against the original.)
258 const PredicateBase *getPredicateInfoFor(Instruction *I) {
259 auto A = AnalysisResults.find(I->getParent()->getParent());
260 if (A == AnalysisResults.end())
262 return A->second.PredInfo->getPredicateInfoFor(I);
// Build a lazily-applied DomTreeUpdater over the registered DT/PDT; requires
// addAnalysis to have been called for F (asserted).
265 DomTreeUpdater getDTU(Function &F) {
266 auto A = AnalysisResults.find(&F);
267 assert(A != AnalysisResults.end() && "Need analysis results for function.");
268 return {A->second.DT, A->second.PDT, DomTreeUpdater::UpdateStrategy::Lazy};
// The solver borrows DL and TLI; both must outlive the solver.
271 SCCPSolver(const DataLayout &DL, const TargetLibraryInfo *tli)
272 : DL(DL), TLI(tli) {}
274 /// MarkBlockExecutable - This method can be used by clients to mark all of
275 /// the blocks that are known to be intrinsically live in the processed unit.
277 /// This returns true if the block was not considered live before.
278 bool MarkBlockExecutable(BasicBlock *BB) {
// insert().second is false when BB was already in the set — nothing to do.
279 if (!BBExecutable.insert(BB).second)
281 LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
282 BBWorkList.push_back(BB); // Add the block to the work list!
286 /// TrackValueOfGlobalVariable - Clients can use this method to
287 /// inform the SCCPSolver that it should track loads and stores to the
288 /// specified global variable if it can. This is only legal to call if
289 /// performing Interprocedural SCCP.
290 void TrackValueOfGlobalVariable(GlobalVariable *GV) {
291 // We only track the contents of scalar globals.
292 if (GV->getValueType()->isSingleValueType()) {
// Seed the tracked state from the initializer; an undef initializer leaves
// the entry in the default (unknown) state.
293 LatticeVal &IV = TrackedGlobals[GV];
294 if (!isa<UndefValue>(GV->getInitializer()))
295 IV.markConstant(GV->getInitializer());
299 /// AddTrackedFunction - If the SCCP solver is supposed to track calls into
300 /// and out of the specified function (which cannot have its address taken),
301 /// this method must be called.
302 void AddTrackedFunction(Function *F) {
303 // Add an entry, F -> undef.
// Struct-returning functions get one tracked entry per return element, plus
// membership in MRVFunctionsTracked for quick lookup.
304 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
305 MRVFunctionsTracked.insert(F);
306 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
307 TrackedMultipleRetVals.insert(std::make_pair(std::make_pair(F, i),
310 TrackedRetVals.insert(std::make_pair(F, LatticeVal()));
313 /// AddMustTailCallee - If the SCCP solver finds that this function is called
314 /// from non-removable musttail call site.
315 void AddMustTailCallee(Function *F) {
316 MustTailCallees.insert(F);
319 /// Returns true if the given function is called from non-removable musttail
321 bool isMustTailCallee(Function *F) {
322 return MustTailCallees.count(F);
// Register F so the solver optimistically tracks its incoming arguments.
325 void AddArgumentTrackedFunction(Function *F) {
326 TrackingIncomingArguments.insert(F);
329 /// Returns true if the given function is in the solver's set of
330 /// argument-tracked functions.
331 bool isArgumentTrackedFunction(Function *F) {
332 return TrackingIncomingArguments.count(F);
335 /// Solve - Solve for constants and executable blocks.
338 /// ResolvedUndefsIn - While solving the dataflow for a function, we assume
339 /// that branches on undef values cannot reach any of their successors.
340 /// However, this is not a safe assumption. After we solve dataflow, this
341 /// method should be use to handle this. If this returns true, the solver
343 bool ResolvedUndefsIn(Function &F);
// Query whether solving proved BB reachable.
345 bool isBlockExecutable(BasicBlock *BB) const {
346 return BBExecutable.count(BB);
349 // isEdgeFeasible - Return true if the control flow edge from the 'From' basic
350 // block to the 'To' basic block is currently feasible.
351 bool isEdgeFeasible(BasicBlock *From, BasicBlock *To);
// Collect the per-element lattice values of a struct-typed value; every
// element must already be present in StructValueState (asserted).
353 std::vector<LatticeVal> getStructLatticeValueFor(Value *V) const {
354 std::vector<LatticeVal> StructValues;
355 auto *STy = dyn_cast<StructType>(V->getType());
356 assert(STy && "getStructLatticeValueFor() can be called only on structs");
357 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
358 auto I = StructValueState.find(std::make_pair(V, i));
359 assert(I != StructValueState.end() && "Value not in valuemap!");
360 StructValues.push_back(I->second);
// Const lookup variant for scalars; V must already have a state (asserted).
365 const LatticeVal &getLatticeValueFor(Value *V) const {
366 assert(!V->getType()->isStructTy() &&
367 "Should use getStructLatticeValueFor");
368 DenseMap<Value *, LatticeVal>::const_iterator I = ValueState.find(V);
369 assert(I != ValueState.end() &&
370 "V not found in ValueState nor Paramstate map!");
374 /// getTrackedRetVals - Get the inferred return value map.
375 const MapVector<Function*, LatticeVal> &getTrackedRetVals() {
376 return TrackedRetVals;
379 /// getTrackedGlobals - Get and return the set of inferred initializers for
380 /// global variables.
381 const DenseMap<GlobalVariable*, LatticeVal> &getTrackedGlobals() {
382 return TrackedGlobals;
385 /// getMRVFunctionsTracked - Get the set of functions which return multiple
386 /// values tracked by the pass.
// NOTE(review): returns `const SmallPtrSet` BY VALUE (copies the set),
// unlike the by-reference accessors above — likely intended to be
// `const SmallPtrSet<...> &`; same for getMustTailCallees below.
387 const SmallPtrSet<Function *, 16> getMRVFunctionsTracked() {
388 return MRVFunctionsTracked;
391 /// getMustTailCallees - Get the set of functions which are called
392 /// from non-removable musttail call sites.
393 const SmallPtrSet<Function *, 16> getMustTailCallees() {
394 return MustTailCallees;
397 /// markOverdefined - Mark the specified value overdefined. This
398 /// works with both scalars and structs.
399 void markOverdefined(Value *V) {
// Structs: lower every element; scalars fall through to the single-state
// overload (the `else` keyword on the elided line 403 is not visible here).
400 if (auto *STy = dyn_cast<StructType>(V->getType()))
401 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
402 markOverdefined(getStructValueState(V, i), V);
404 markOverdefined(ValueState[V], V);
407 // isStructLatticeConstant - Return true if all the lattice values
408 // corresponding to elements of the structure are not overdefined,
410 bool isStructLatticeConstant(Function *F, StructType *STy) {
411 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
412 const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
413 assert(It != TrackedMultipleRetVals.end());
414 LatticeVal LV = It->second;
415 if (LV.isOverdefined())
422 // pushToWorkList - Helper for markConstant/markForcedConstant/markOverdefined
// Overdefined values go on their own worklist so they are propagated first
// (see the worklist comment above: this speeds convergence).
423 void pushToWorkList(LatticeVal &IV, Value *V) {
424 if (IV.isOverdefined())
425 return OverdefinedInstWorkList.push_back(V);
426 InstWorkList.push_back(V);
429 // markConstant - Make a value be marked as "constant". If the value
430 // is not already a constant, add it to the instruction work list so that
431 // the users of the instruction are updated later.
// Returns true iff the lattice state actually changed.
432 bool markConstant(LatticeVal &IV, Value *V, Constant *C) {
433 if (!IV.markConstant(C)) return false;
434 LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
435 pushToWorkList(IV, V);
// Convenience overload: look up (or create) V's state first.
439 bool markConstant(Value *V, Constant *C) {
440 assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
441 return markConstant(ValueState[V], V, C);
// Force an unknown value to a chosen constant (ResolvedUndefsIn support)
// and requeue its users.
444 void markForcedConstant(Value *V, Constant *C) {
445 assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
446 LatticeVal &IV = ValueState[V];
447 IV.markForcedConstant(C);
448 LLVM_DEBUG(dbgs() << "markForcedConstant: " << *C << ": " << *V << '\n');
449 pushToWorkList(IV, V);
452 // markOverdefined - Make a value be marked as "overdefined". If the
453 // value is not already overdefined, add it to the overdefined instruction
454 // work list so that the users of the instruction are updated later.
455 bool markOverdefined(LatticeVal &IV, Value *V) {
456 if (!IV.markOverdefined()) return false;
458 LLVM_DEBUG(dbgs() << "markOverdefined: ";
459 if (auto *F = dyn_cast<Function>(V)) dbgs()
460 << "Function '" << F->getName() << "'\n";
461 else dbgs() << *V << '\n');
462 // Only instructions go on the work list
463 pushToWorkList(IV, V);
// Lattice meet: lower IV toward MergeWithV.  Unknown merges are no-ops;
// overdefined wins; two different constants collapse to overdefined.
467 bool mergeInValue(LatticeVal &IV, Value *V, LatticeVal MergeWithV) {
468 if (IV.isOverdefined() || MergeWithV.isUnknown())
469 return false; // Noop.
470 if (MergeWithV.isOverdefined())
471 return markOverdefined(IV, V);
473 return markConstant(IV, V, MergeWithV.getConstant());
474 if (IV.getConstant() != MergeWithV.getConstant())
475 return markOverdefined(IV, V);
479 bool mergeInValue(Value *V, LatticeVal MergeWithV) {
480 assert(!V->getType()->isStructTy() &&
481 "non-structs should use markConstant");
482 return mergeInValue(ValueState[V], V, MergeWithV);
485 /// getValueState - Return the LatticeVal object that corresponds to the
486 /// value. This function handles the case when the value hasn't been seen yet
487 /// by properly seeding constants etc.
488 LatticeVal &getValueState(Value *V) {
489 assert(!V->getType()->isStructTy() && "Should use getStructValueState");
// insert() either finds the existing entry or creates a default (unknown)
// one; seeding below only runs for freshly inserted entries.
491 std::pair<DenseMap<Value*, LatticeVal>::iterator, bool> I =
492 ValueState.insert(std::make_pair(V, LatticeVal()));
493 LatticeVal &LV = I.first->second;
496 return LV; // Common case, already in the map.
498 if (auto *C = dyn_cast<Constant>(V)) {
499 // Undef values remain unknown.
500 if (!isa<UndefValue>(V))
501 LV.markConstant(C); // Constants are constant
504 // All others are underdefined by default.
// Lazily materialize the richer ValueLatticeElement view of V's state,
// derived from getValueState on first access.
508 ValueLatticeElement &getParamState(Value *V) {
509 assert(!V->getType()->isStructTy() && "Should use getStructValueState");
511 std::pair<DenseMap<Value*, ValueLatticeElement>::iterator, bool>
512 PI = ParamState.insert(std::make_pair(V, ValueLatticeElement()));
513 ValueLatticeElement &LV = PI.first->second;
515 LV = getValueState(V).toValueLattice();
520 /// getStructValueState - Return the LatticeVal object that corresponds to the
521 /// value/field pair. This function handles the case when the value hasn't
522 /// been seen yet by properly seeding constants etc.
523 LatticeVal &getStructValueState(Value *V, unsigned i) {
524 assert(V->getType()->isStructTy() && "Should use getValueState");
525 assert(i < cast<StructType>(V->getType())->getNumElements() &&
526 "Invalid element #");
528 std::pair<DenseMap<std::pair<Value*, unsigned>, LatticeVal>::iterator,
529 bool> I = StructValueState.insert(
530 std::make_pair(std::make_pair(V, i), LatticeVal()));
531 LatticeVal &LV = I.first->second;
534 return LV; // Common case, already in the map.
// Seed a new entry for a constant aggregate from its i'th element:
// no element -> overdefined, undef element -> unknown, else constant.
536 if (auto *C = dyn_cast<Constant>(V)) {
537 Constant *Elt = C->getAggregateElement(i);
540 LV.markOverdefined(); // Unknown sort of constant.
541 else if (isa<UndefValue>(Elt))
542 ; // Undef values remain unknown.
544 LV.markConstant(Elt); // Constants are constant.
547 // All others are underdefined by default.
551 /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
552 /// work list if it is not already executable.
// Returns false when the Source->Dest edge was already known feasible.
553 bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
554 if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
555 return false; // This edge is already known to be executable!
557 if (!MarkBlockExecutable(Dest)) {
558 // If the destination is already executable, we just made an *edge*
559 // feasible that wasn't before. Revisit the PHI nodes in the block
560 // because they have potentially new operands.
561 LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
562 << " -> " << Dest->getName() << '\n');
564 for (PHINode &PN : Dest->phis())
570 // getFeasibleSuccessors - Return a vector of booleans to indicate which
571 // successors are reachable from a given terminator instruction.
572 void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);
574 // OperandChangedState - This method is invoked on all of the users of an
575 // instruction that was just changed state somehow. Based on this
576 // information, we need to update the specified user of this instruction.
// Re-visits I only if its block has been proven executable; dead code is
// never simulated.
577 void OperandChangedState(Instruction *I) {
578 if (BBExecutable.count(I->getParent())) // Inst is executable?
582 // Add U as additional user of V.
// AdditionalUsers records def-use edges not present in the IR use-lists
// (e.g. via PredicateInfo copies) so state changes still requeue U.
583 void addAdditionalUser(Value *V, User *U) {
584 auto Iter = AdditionalUsers.insert({V, {}});
585 Iter.first->second.insert(U);
588 // Mark I's users as changed, including AdditionalUsers.
589 void markUsersAsChanged(Value *I) {
590 for (User *U : I->users())
591 if (auto *UI = dyn_cast<Instruction>(U))
592 OperandChangedState(UI);
594 auto Iter = AdditionalUsers.find(I);
595 if (Iter != AdditionalUsers.end()) {
596 for (User *U : Iter->second)
597 if (auto *UI = dyn_cast<Instruction>(U))
598 OperandChangedState(UI);
// InstVisitor dispatches to the private visit* members below.
603 friend class InstVisitor<SCCPSolver>;
605 // visit implementations - Something changed in this instruction. Either an
606 // operand made a transition, or the instruction is newly executable. Change
607 // the value type of I to reflect these changes if appropriate.
608 void visitPHINode(PHINode &I);
// Terminators.
612 void visitReturnInst(ReturnInst &I);
613 void visitTerminator(Instruction &TI);
615 void visitCastInst(CastInst &I);
616 void visitSelectInst(SelectInst &I);
617 void visitUnaryOperator(Instruction &I);
618 void visitBinaryOperator(Instruction &I);
619 void visitCmpInst(CmpInst &I);
620 void visitExtractValueInst(ExtractValueInst &EVI);
621 void visitInsertValueInst(InsertValueInst &IVI);
// CatchSwitch is both a value and a terminator: its result is never
// foldable, but its successors still need feasibility processing.
623 void visitCatchSwitchInst(CatchSwitchInst &CPI) {
624 markOverdefined(&CPI);
625 visitTerminator(CPI);
628 // Instructions that cannot be folded away.
630 void visitStoreInst (StoreInst &I);
631 void visitLoadInst (LoadInst &I);
632 void visitGetElementPtrInst(GetElementPtrInst &I);
// Call-like instructions funnel into the common visitCallSite handler;
// the invoke/callbr variants also process their successor edges.
634 void visitCallInst (CallInst &I) {
638 void visitInvokeInst (InvokeInst &II) {
643 void visitCallBrInst (CallBrInst &CBI) {
645 visitTerminator(CBI);
648 void visitCallSite (CallSite CS);
649 void visitResumeInst (ResumeInst &I) { /*returns void*/ }
650 void visitUnreachableInst(UnreachableInst &I) { /*returns void*/ }
651 void visitFenceInst (FenceInst &I) { /*returns void*/ }
// Fallback: anything without a dedicated handler is conservatively lowered
// straight to overdefined.
653 void visitInstruction(Instruction &I) {
654 // All the instructions we don't do any special handling for just
655 // go to overdefined.
656 LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
661 } // end anonymous namespace
663 // getFeasibleSuccessors - Return a vector of booleans to indicate which
664 // successors are reachable from a given terminator instruction.
// Fills Succs (indexed like TI's successors) with true for every successor
// that is currently feasible given the lattice state of TI's operands.  An
// unknown (not-yet-resolved) condition leaves all successors infeasible.
665 void SCCPSolver::getFeasibleSuccessors(Instruction &TI,
666 SmallVectorImpl<bool> &Succs) {
667 Succs.resize(TI.getNumSuccessors());
668 if (auto *BI = dyn_cast<BranchInst>(&TI)) {
669 if (BI->isUnconditional()) {
674 LatticeVal BCValue = getValueState(BI->getCondition());
675 ConstantInt *CI = BCValue.getConstantInt();
677 // Overdefined condition variables, and branches on unfoldable constant
678 // conditions, mean the branch could go either way.
679 if (!BCValue.isUnknown())
680 Succs[0] = Succs[1] = true;
684 // Constant condition variables mean the branch can only go a single way.
// Successor 0 is the true edge, successor 1 the false edge, hence the
// isZero() index.
685 Succs[CI->isZero()] = true;
689 // Unwinding instructions successors are always executable.
690 if (TI.isExceptionalTerminator()) {
691 Succs.assign(TI.getNumSuccessors(), true);
695 if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
// A switch with no cases only has the default destination.
696 if (!SI->getNumCases()) {
700 LatticeVal SCValue = getValueState(SI->getCondition());
701 ConstantInt *CI = SCValue.getConstantInt();
703 if (!CI) { // Overdefined or unknown condition?
704 // All destinations are executable!
705 if (!SCValue.isUnknown())
706 Succs.assign(TI.getNumSuccessors(), true);
// Known-constant condition: only the matching case (or default) is live.
710 Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
714 // In case of indirect branch and its address is a blockaddress, we mark
715 // the target as executable.
716 if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
717 // Casts are folded by visitCastInst.
718 LatticeVal IBRValue = getValueState(IBR->getAddress());
719 BlockAddress *Addr = IBRValue.getBlockAddress();
720 if (!Addr) { // Overdefined or unknown condition?
721 // All destinations are executable!
722 if (!IBRValue.isUnknown())
723 Succs.assign(TI.getNumSuccessors(), true);
727 BasicBlock* T = Addr->getBasicBlock();
728 assert(Addr->getFunction() == T->getParent() &&
729 "Block address of a different function ?");
730 for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
731 // This is the target.
732 if (IBR->getDestination(i) == T) {
738 // If we didn't find our destination in the IBR successor list, then we
739 // have undefined behavior. Its ok to assume no successor is executable.
743 // In case of callbr, we pessimistically assume that all successors are
745 if (isa<CallBrInst>(&TI)) {
746 Succs.assign(TI.getNumSuccessors(), true);
// Any terminator not handled above is a solver bug.
750 LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
751 llvm_unreachable("SCCP: Don't know how to handle this terminator!");
754 // isEdgeFeasible - Return true if the control flow edge from the 'From' basic
755 // block to the 'To' basic block is currently feasible.
// Pure set lookup: an edge is feasible exactly when markEdgeExecutable has
// recorded it in KnownFeasibleEdges.
756 bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
757 // Check if we've called markEdgeExecutable on the edge yet. (We could
758 // be more aggressive and try to consider edges which haven't been marked
759 // yet, but there isn't any need.)
760 return KnownFeasibleEdges.count(Edge(From, To));
763 // visit Implementations - Something changed in this instruction, either an
764 // operand made a transition, or the instruction is newly executable. Change
765 // the value type of I to reflect these changes if appropriate. This method
766 // makes sure to do the following actions:
768 // 1. If a phi node merges two constants in, and has conflicting value coming
769 // from different branches, or if the PHI node merges in an overdefined
770 // value, then the PHI node becomes overdefined.
771 // 2. If a phi node merges only constants in, and they all agree on value, the
772 // PHI node becomes a constant value equal to that.
773 // 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
774 // 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
775 // 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
776 // 6. If a conditional branch has a value that is constant, make the selected
777 // destination executable
778 // 7. If a conditional branch has a value that is overdefined, make all
779 // successors executable.
// Meet over all incoming values that arrive along currently-feasible edges;
// see the rule list in the comment block above this function.
780 void SCCPSolver::visitPHINode(PHINode &PN) {
781 // If this PN returns a struct, just mark the result overdefined.
782 // TODO: We could do a lot better than this if code actually uses this.
783 if (PN.getType()->isStructTy())
784 return (void)markOverdefined(&PN);
786 if (getValueState(&PN).isOverdefined())
787 return; // Quick exit
789 // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
790 // and slow us down a lot. Just mark them overdefined.
791 if (PN.getNumIncomingValues() > 64)
792 return (void)markOverdefined(&PN);
794 // Look at all of the executable operands of the PHI node. If any of them
795 // are overdefined, the PHI becomes overdefined as well. If they are all
796 // constant, and they agree with each other, the PHI becomes the identical
797 // constant. If they are constant and don't agree, the PHI is overdefined.
798 // If there are no executable operands, the PHI remains unknown.
799 Constant *OperandVal = nullptr;
800 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
801 LatticeVal IV = getValueState(PN.getIncomingValue(i));
802 if (IV.isUnknown()) continue; // Doesn't influence PHI node.
// Values flowing in along infeasible edges are ignored entirely.
804 if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
807 if (IV.isOverdefined()) // PHI node becomes overdefined!
808 return (void)markOverdefined(&PN);
810 if (!OperandVal) { // Grab the first value.
811 OperandVal = IV.getConstant();
815 // There is already a reachable operand. If we conflict with it,
816 // then the PHI node becomes overdefined. If we agree with it, we
819 // Check to see if there are two different constants merging, if so, the PHI
820 // node is overdefined.
821 if (IV.getConstant() != OperandVal)
822 return (void)markOverdefined(&PN);
825 // If we exited the loop, this means that the PHI node only has constant
826 // arguments that agree with each other(and OperandVal is the constant) or
827 // OperandVal is null because there are no defined incoming arguments. If
828 // this is the case, the PHI remains unknown.
// NOTE(review): per the comment above, markConstant must only run when
// OperandVal is non-null; the `if (OperandVal)` guard (original line 829)
// appears to have been dropped from this excerpt — confirm upstream.
830 markConstant(&PN, OperandVal); // Acquire operand value
// Merge the returned value into the tracked return state of the enclosing
// function (scalar path via TrackedRetVals, struct path via
// TrackedMultipleRetVals).  Returns themselves produce no value.
833 void SCCPSolver::visitReturnInst(ReturnInst &I) {
834 if (I.getNumOperands() == 0) return; // ret void
836 Function *F = I.getParent()->getParent();
837 Value *ResultOp = I.getOperand(0);
839 // If we are tracking the return value of this function, merge it in.
840 if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
841 MapVector<Function*, LatticeVal>::iterator TFRVI =
842 TrackedRetVals.find(F);
843 if (TFRVI != TrackedRetVals.end()) {
844 mergeInValue(TFRVI->second, F, getValueState(ResultOp));
849 // Handle functions that return multiple values.
// One mergeInValue per struct element, mirroring AddTrackedFunction's
// per-element entries.
850 if (!TrackedMultipleRetVals.empty()) {
851 if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
852 if (MRVFunctionsTracked.count(F))
853 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
854 mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
855 getStructValueState(ResultOp, i));
// Recompute which successor edges of TI are feasible and mark them
// executable, queueing newly-live blocks / re-triggering PHIs.
859 void SCCPSolver::visitTerminator(Instruction &TI) {
860 SmallVector<bool, 16> SuccFeasible;
861 getFeasibleSuccessors(TI, SuccFeasible);
863 BasicBlock *BB = TI.getParent();
865 // Mark all feasible successors executable.
866 for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
// NOTE(review): as dumped, the loop body marks EVERY successor and never
// consults SuccFeasible — the guard `if (SuccFeasible[i])` (original line
// 867) appears to have been dropped from this excerpt; confirm upstream.
868 markEdgeExecutable(BB, TI.getSuccessor(i));
// Propagate through a cast: an overdefined operand makes the cast
// overdefined; a constant operand is constant-folded through the cast
// opcode.  Unknown operands leave the cast unknown (wait for resolution).
871 void SCCPSolver::visitCastInst(CastInst &I) {
872 LatticeVal OpSt = getValueState(I.getOperand(0));
873 if (OpSt.isOverdefined()) // Inherit overdefinedness of operand
875 else if (OpSt.isConstant()) {
876 // Fold the constant as we build.
877 Constant *C = ConstantFoldCastOperand(I.getOpcode(), OpSt.getConstant(),
// Folds to undef are left for ResolvedUndefsIn rather than marked here.
879 if (isa<UndefValue>(C))
881 // Propagate constant value
// Forward the tracked per-element state of a struct aggregate into the
// extracted scalar; anything deeper/other (nested structs, multiple
// indices, arrays) is conservatively overdefined.
886 void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) {
887 // If this returns a struct, mark all elements over defined, we don't track
888 // structs in structs.
889 if (EVI.getType()->isStructTy())
890 return (void)markOverdefined(&EVI);
892 // If this is extracting from more than one level of struct, we don't know.
893 if (EVI.getNumIndices() != 1)
894 return (void)markOverdefined(&EVI);
896 Value *AggVal = EVI.getAggregateOperand();
897 if (AggVal->getType()->isStructTy()) {
898 unsigned i = *EVI.idx_begin();
899 LatticeVal EltVal = getStructValueState(AggVal, i);
900 mergeInValue(getValueState(&EVI), &EVI, EltVal);
902 // Otherwise, must be extracting from an array.
903 return (void)markOverdefined(&EVI);
// Model insertvalue on a single-level struct: every element of the result
// inherits the aggregate's element state, except the inserted slot, which
// takes the state of the inserted scalar (or overdefined for a nested
// struct).  Anything else is conservatively overdefined.
907 void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) {
908 auto *STy = dyn_cast<StructType>(IVI.getType());
910 return (void)markOverdefined(&IVI);
912 // If this has more than one index, we can't handle it, drive all results to
914 if (IVI.getNumIndices() != 1)
915 return (void)markOverdefined(&IVI);
917 Value *Aggr = IVI.getAggregateOperand();
918 unsigned Idx = *IVI.idx_begin();
920 // Compute the result based on what we're inserting.
921 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
922 // This passes through all values that aren't the inserted element.
924 LatticeVal EltVal = getStructValueState(Aggr, i);
925 mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
// i == Idx from here on: handle the inserted element itself.
929 Value *Val = IVI.getInsertedValueOperand();
930 if (Val->getType()->isStructTy())
931 // We don't track structs in structs.
932 markOverdefined(getStructValueState(&IVI, i), &IVI);
934 LatticeVal InVal = getValueState(Val);
935 mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
// Model a select: a known-constant condition forwards the state of the
// chosen arm; otherwise fall back to the identical-arms and undef-arm
// simplifications before going overdefined.
940 void SCCPSolver::visitSelectInst(SelectInst &I) {
941 // If this select returns a struct, just mark the result overdefined.
942 // TODO: We could do a lot better than this if code actually uses this.
943 if (I.getType()->isStructTy())
944 return (void)markOverdefined(&I);
// An unknown condition means we can't pick an arm yet — stay unknown.
946 LatticeVal CondValue = getValueState(I.getCondition());
947 if (CondValue.isUnknown())
950 if (ConstantInt *CondCB = CondValue.getConstantInt()) {
951 Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
952 mergeInValue(&I, getValueState(OpVal));
956 // Otherwise, the condition is overdefined or a constant we can't evaluate.
957 // See if we can produce something better than overdefined based on the T/F
959 LatticeVal TVal = getValueState(I.getTrueValue());
960 LatticeVal FVal = getValueState(I.getFalseValue());
962 // select ?, C, C -> C.
963 if (TVal.isConstant() && FVal.isConstant() &&
964 TVal.getConstant() == FVal.getConstant())
965 return (void)markConstant(&I, FVal.getConstant());
967 if (TVal.isUnknown()) // select ?, undef, X -> X.
968 return (void)mergeInValue(&I, FVal);
969 if (FVal.isUnknown()) // select ?, X, undef -> X.
970 return (void)mergeInValue(&I, TVal);
974 // Handle Unary Operators.
// Transfer function for unary operators (e.g. fneg): constant-fold a
// constant operand; if the operand is still unknown (possibly undef),
// wait rather than commit to a state.
// NOTE(review): embedded line numbers jump; the undef-result handling and
// the final markOverdefined fallthrough are elided in this chunk.
975 void SCCPSolver::visitUnaryOperator(Instruction &I) {
976 LatticeVal V0State = getValueState(I.getOperand(0));
978 LatticeVal &IV = ValueState[&I];
// Already at the lattice bottom; nothing can change.
979 if (IV.isOverdefined()) return;
981 if (V0State.isConstant()) {
982 Constant *C = ConstantExpr::get(I.getOpcode(), V0State.getConstant());
985 if (isa<UndefValue>(C))
987 return (void)markConstant(IV, &I, C);
990 // If something is undef, wait for it to resolve.
991 if (!V0State.isOverdefined())
997 // Handle Binary Operators.
// Transfer function for binary operators. Two constant operands fold to a
// constant; otherwise, absorbing-element tricks (0/Y for div, 0 for and/mul,
// -1 for or) can still yield a constant even with one overdefined operand.
// NOTE(review): embedded line numbers jump; several elided lines (undef
// handling, some closing braces) are not visible in this chunk.
998 void SCCPSolver::visitBinaryOperator(Instruction &I) {
999 LatticeVal V1State = getValueState(I.getOperand(0));
1000 LatticeVal V2State = getValueState(I.getOperand(1));
1002 LatticeVal &IV = ValueState[&I];
1003 if (IV.isOverdefined()) return;
1005 if (V1State.isConstant() && V2State.isConstant()) {
1006 Constant *C = ConstantExpr::get(I.getOpcode(), V1State.getConstant(),
1007 V2State.getConstant());
1009 if (isa<UndefValue>(C))
1011 return (void)markConstant(IV, &I, C);
1014 // If something is undef, wait for it to resolve.
1015 if (!V1State.isOverdefined() && !V2State.isOverdefined())
1018 // Otherwise, one of our operands is overdefined. Try to produce something
1019 // better than overdefined with some tricks.
1020 // If this is 0 / Y, it doesn't matter that the second operand is
1021 // overdefined, and we can replace it with zero.
1022 if (I.getOpcode() == Instruction::UDiv || I.getOpcode() == Instruction::SDiv)
1023 if (V1State.isConstant() && V1State.getConstant()->isNullValue())
1024 return (void)markConstant(IV, &I, V1State.getConstant());
1027 // -> AND/MUL with 0
1029 // it doesn't matter that the other operand is overdefined.
1030 if (I.getOpcode() == Instruction::And || I.getOpcode() == Instruction::Mul ||
1031 I.getOpcode() == Instruction::Or) {
1032 LatticeVal *NonOverdefVal = nullptr;
1033 if (!V1State.isOverdefined())
1034 NonOverdefVal = &V1State;
1035 else if (!V2State.isOverdefined())
1036 NonOverdefVal = &V2State;
1038 if (NonOverdefVal) {
// An unknown operand might later become the absorbing element; wait.
1039 if (NonOverdefVal->isUnknown())
1042 if (I.getOpcode() == Instruction::And ||
1043 I.getOpcode() == Instruction::Mul) {
// X & 0 == 0 and X * 0 == 0 regardless of the overdefined operand.
1046 if (NonOverdefVal->getConstant()->isNullValue())
1047 return (void)markConstant(IV, &I, NonOverdefVal->getConstant());
// X | -1 == -1 regardless of the overdefined operand.
1050 if (ConstantInt *CI = NonOverdefVal->getConstantInt())
1051 if (CI->isMinusOne())
1052 return (void)markConstant(IV, &I, NonOverdefVal->getConstant());
1057 markOverdefined(&I);
1060 // Handle ICmpInst instruction.
// Transfer function for icmp/fcmp. Operand states are lifted into
// ValueLatticeElement (using ParamState for tracked arguments, which can
// carry constant-range info) and folded via getCompare(); if that fails and
// some operand is still unknown, resolution is deferred.
// NOTE(review): embedded line numbers jump; the ParamState-hit branches of
// the two conditional expressions are elided in this chunk.
1061 void SCCPSolver::visitCmpInst(CmpInst &I) {
1062 // Do not cache this lookup, getValueState calls later in the function might
1063 // invalidate the reference.
1064 if (ValueState[&I].isOverdefined()) return;
1066 Value *Op1 = I.getOperand(0);
1067 Value *Op2 = I.getOperand(1);
1069 // For parameters, use ParamState which includes constant range info if
1071 auto V1Param = ParamState.find(Op1);
1072 ValueLatticeElement V1State = (V1Param != ParamState.end())
1074 : getValueState(Op1).toValueLattice();
1076 auto V2Param = ParamState.find(Op2);
1077 ValueLatticeElement V2State = V2Param != ParamState.end()
1079 : getValueState(Op2).toValueLattice();
1081 Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State);
1083 if (isa<UndefValue>(C))
1087 mergeInValue(&I, CV);
1091 // If operands are still unknown, wait for it to resolve.
1092 if (!V1State.isOverdefined() && !V2State.isOverdefined() &&
1093 !ValueState[&I].isConstant())
1096 markOverdefined(&I);
1099 // Handle getelementptr instructions. If all operands are constants then we
1100 // can turn this into a getelementptr ConstantExpr.
// If any operand is still unknown, defer; any overdefined operand forces the
// whole GEP overdefined.
1101 void SCCPSolver::visitGetElementPtrInst(GetElementPtrInst &I) {
1102 if (ValueState[&I].isOverdefined()) return;
1104 SmallVector<Constant*, 8> Operands;
1105 Operands.reserve(I.getNumOperands());
1107 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
1108 LatticeVal State = getValueState(I.getOperand(i));
1109 if (State.isUnknown())
1110 return; // Operands are not resolved yet.
1112 if (State.isOverdefined())
1113 return (void)markOverdefined(&I);
1115 assert(State.isConstant() && "Unknown state!");
1116 Operands.push_back(State.getConstant());
// All operands constant: build the equivalent constant-expression GEP.
1119 Constant *Ptr = Operands[0];
1120 auto Indices = makeArrayRef(Operands.begin() + 1, Operands.end());
1122 ConstantExpr::getGetElementPtr(I.getSourceElementType(), Ptr, Indices);
1123 if (isa<UndefValue>(C))
1125 markConstant(&I, C);
// Transfer function for store. Only stores to tracked module-local globals
// matter: the stored value's state is merged into the global's entry, and a
// global that reaches overdefined is dropped from the tracking map for good.
1128 void SCCPSolver::visitStoreInst(StoreInst &SI) {
1129 // If this store is of a struct, ignore it.
1130 if (SI.getOperand(0)->getType()->isStructTy())
1133 if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
1136 GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
1137 DenseMap<GlobalVariable*, LatticeVal>::iterator I = TrackedGlobals.find(GV);
1138 if (I == TrackedGlobals.end() || I->second.isOverdefined()) return;
1140 // Get the value we are storing into the global, then merge it.
1141 mergeInValue(I->second, GV, getValueState(SI.getOperand(0)));
1142 if (I->second.isOverdefined())
1143 TrackedGlobals.erase(I); // No need to keep tracking this!
1146 // Handle load instructions. If the operand is a constant pointer to a constant
1147 // global, we can replace the load with the loaded constant value!
// Also consults TrackedGlobals so loads of tracked globals adopt the state
// accumulated from the stores seen so far.
// NOTE(review): embedded line numbers jump; the load-of-null constant case
// and some closing braces are elided in this chunk.
1148 void SCCPSolver::visitLoadInst(LoadInst &I) {
1149 // If this load is of a struct, just mark the result overdefined.
1150 if (I.getType()->isStructTy())
1151 return (void)markOverdefined(&I);
1153 LatticeVal PtrVal = getValueState(I.getOperand(0));
1154 if (PtrVal.isUnknown()) return; // The pointer is not resolved yet!
1156 LatticeVal &IV = ValueState[&I];
1157 if (IV.isOverdefined()) return;
// Volatile loads and non-constant pointers can produce anything.
1159 if (!PtrVal.isConstant() || I.isVolatile())
1160 return (void)markOverdefined(IV, &I);
1162 Constant *Ptr = PtrVal.getConstant();
1164 // load null is undefined.
1165 if (isa<ConstantPointerNull>(Ptr)) {
1166 if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
1167 return (void)markOverdefined(IV, &I);
1172 // Transform load (constant global) into the value loaded.
1173 if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
1174 if (!TrackedGlobals.empty()) {
1175 // If we are tracking this global, merge in the known value for it.
1176 DenseMap<GlobalVariable*, LatticeVal>::iterator It =
1177 TrackedGlobals.find(GV);
1178 if (It != TrackedGlobals.end()) {
1179 mergeInValue(IV, &I, It->second);
1185 // Transform load from a constant into a constant if possible.
1186 if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL)) {
1187 if (isa<UndefValue>(C))
1189 return (void)markConstant(IV, &I, C);
1192 // Otherwise we cannot say for certain what value this load will produce.
1194 markOverdefined(IV, &I);
// Transfer function for calls/invokes. Four cases, in order:
//  1. ssa_copy intrinsics: refine the copied value using PredicateInfo from
//     a dominating branch (x == C on the true edge / x != C on the false
//     edge pins the copy to C's state).
//  2. Untracked or external callees: constant-fold when possible, otherwise
//     overdefine the result.
//  3. Argument-tracked local functions: mark the entry executable and merge
//     the actual argument states into the formals (both lattices).
//  4. Return-tracked callees: merge the tracked (possibly multi-element)
//     return state into this call's result.
// NOTE(review): embedded line numbers jump; early returns, the
// CallOverdefined label, and several braces are elided in this chunk.
1197 void SCCPSolver::visitCallSite(CallSite CS) {
1198 Function *F = CS.getCalledFunction();
1199 Instruction *I = CS.getInstruction();
1201 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1202 if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
1203 if (ValueState[I].isOverdefined())
1206 auto *PI = getPredicateInfoFor(I);
1210 Value *CopyOf = I->getOperand(0);
1211 auto *PBranch = dyn_cast<PredicateBranch>(PI);
// Without branch predicate info, the copy just mirrors its operand.
1213 mergeInValue(ValueState[I], I, getValueState(CopyOf));
1217 Value *Cond = PBranch->Condition;
1219 // Everything below relies on the condition being a comparison.
1220 auto *Cmp = dyn_cast<CmpInst>(Cond);
1222 mergeInValue(ValueState[I], I, getValueState(CopyOf));
1226 Value *CmpOp0 = Cmp->getOperand(0);
1227 Value *CmpOp1 = Cmp->getOperand(1);
1228 if (CopyOf != CmpOp0 && CopyOf != CmpOp1) {
1229 mergeInValue(ValueState[I], I, getValueState(CopyOf));
// Canonicalize so CmpOp0 is the copied value and CmpOp1 the other side.
1233 if (CmpOp0 != CopyOf)
1234 std::swap(CmpOp0, CmpOp1);
1236 LatticeVal OriginalVal = getValueState(CopyOf);
1237 LatticeVal EqVal = getValueState(CmpOp1);
1238 LatticeVal &IV = ValueState[I];
// True edge of (x == C): the copy equals C here.
1239 if (PBranch->TrueEdge && Cmp->getPredicate() == CmpInst::ICMP_EQ) {
1240 addAdditionalUser(CmpOp1, I);
1241 if (OriginalVal.isConstant())
1242 mergeInValue(IV, I, OriginalVal);
1244 mergeInValue(IV, I, EqVal);
// False edge of (x != C): likewise the copy equals C here.
1247 if (!PBranch->TrueEdge && Cmp->getPredicate() == CmpInst::ICMP_NE) {
1248 addAdditionalUser(CmpOp1, I);
1249 if (OriginalVal.isConstant())
1250 mergeInValue(IV, I, OriginalVal);
1252 mergeInValue(IV, I, EqVal);
1256 return (void)mergeInValue(IV, I, getValueState(CopyOf));
1260 // The common case is that we aren't tracking the callee, either because we
1261 // are not doing interprocedural analysis or the callee is indirect, or is
1262 // external. Handle these cases first.
1263 if (!F || F->isDeclaration()) {
1265 // Void return and not tracking callee, just bail.
1266 if (I->getType()->isVoidTy()) return;
1268 // Otherwise, if we have a single return value case, and if the function is
1269 // a declaration, maybe we can constant fold it.
1270 if (F && F->isDeclaration() && !I->getType()->isStructTy() &&
1271 canConstantFoldCallTo(cast<CallBase>(CS.getInstruction()), F)) {
1272 SmallVector<Constant*, 8> Operands;
1273 for (CallSite::arg_iterator AI = CS.arg_begin(), E = CS.arg_end();
1275 if (AI->get()->getType()->isStructTy())
1276 return markOverdefined(I); // Can't handle struct args.
1277 LatticeVal State = getValueState(*AI);
1279 if (State.isUnknown())
1280 return; // Operands are not resolved yet.
1281 if (State.isOverdefined())
1282 return (void)markOverdefined(I);
1283 assert(State.isConstant() && "Unknown state!");
1284 Operands.push_back(State.getConstant());
1287 if (getValueState(I).isOverdefined())
1290 // If we can constant fold this, mark the result of the call as a
1292 if (Constant *C = ConstantFoldCall(cast<CallBase>(CS.getInstruction()), F,
1295 if (isa<UndefValue>(C))
1297 return (void)markConstant(I, C);
1301 // Otherwise, we don't know anything about this call, mark it overdefined.
1302 return (void)markOverdefined(I);
1305 // If this is a local function that doesn't have its address taken, mark its
1306 // entry block executable and merge in the actual arguments to the call into
1307 // the formal arguments of the function.
1308 if (!TrackingIncomingArguments.empty() && TrackingIncomingArguments.count(F)){
1309 MarkBlockExecutable(&F->front());
1311 // Propagate information from this call site into the callee.
1312 CallSite::arg_iterator CAI = CS.arg_begin();
1313 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
1314 AI != E; ++AI, ++CAI) {
1315 // If this argument is byval, and if the function is not readonly, there
1316 // will be an implicit copy formed of the input aggregate.
1317 if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
1318 markOverdefined(&*AI);
1322 if (auto *STy = dyn_cast<StructType>(AI->getType())) {
1323 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1324 LatticeVal CallArg = getStructValueState(*CAI, i);
1325 mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg);
1328 // Most other parts of the Solver still only use the simpler value
1329 // lattice, so we propagate changes for parameters to both lattices.
1330 LatticeVal ConcreteArgument = getValueState(*CAI);
1332 getParamState(&*AI).mergeIn(ConcreteArgument.toValueLattice(), DL);
1333 bool ValueChanged = mergeInValue(&*AI, ConcreteArgument);
1334 // Add argument to work list, if the state of a parameter changes but
1335 // ValueState does not change (because it is already overdefined there),
1336 // We have to take changes in ParamState into account, as it is used
1337 // when evaluating Cmp instructions.
1338 if (!ValueChanged && ParamChanged)
1339 pushToWorkList(ValueState[&*AI], &*AI);
1344 // If this is a single/zero retval case, see if we're tracking the function.
1345 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
1346 if (!MRVFunctionsTracked.count(F))
1347 goto CallOverdefined; // Not tracking this callee.
1349 // If we are tracking this callee, propagate the result of the function
1350 // into this call site.
1351 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1352 mergeInValue(getStructValueState(I, i), I,
1353 TrackedMultipleRetVals[std::make_pair(F, i)]);
1355 MapVector<Function*, LatticeVal>::iterator TFRVI = TrackedRetVals.find(F);
1356 if (TFRVI == TrackedRetVals.end())
1357 goto CallOverdefined; // Not tracking this callee.
1359 // If so, propagate the return value of the callee into this call result.
1360 mergeInValue(I, TFRVI->second);
// Main fixed-point loop: drain the three worklists (overdefined values
// first, since they monotonically drag users down fastest; then ordinary
// value changes; then newly-executable blocks) until quiescent.
// NOTE(review): embedded line numbers jump; the per-block instruction visit
// at the end of the BB loop is elided in this chunk.
1364 void SCCPSolver::Solve() {
1365 // Process the work lists until they are empty!
1366 while (!BBWorkList.empty() || !InstWorkList.empty() ||
1367 !OverdefinedInstWorkList.empty()) {
1368 // Process the overdefined instruction's work list first, which drives other
1369 // things to overdefined more quickly.
1370 while (!OverdefinedInstWorkList.empty()) {
1371 Value *I = OverdefinedInstWorkList.pop_back_val();
1373 LLVM_DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n');
1375 // "I" got into the work list because it either made the transition from
1376 // bottom to constant, or to overdefined.
1378 // Anything on this worklist that is overdefined need not be visited
1379 // since all of its users will have already been marked as overdefined
1380 // Update all of the users of this instruction's value.
1382 markUsersAsChanged(I);
1385 // Process the instruction work list.
1386 while (!InstWorkList.empty()) {
1387 Value *I = InstWorkList.pop_back_val();
1389 LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');
1391 // "I" got into the work list because it made the transition from undef to
1394 // Anything on this worklist that is overdefined need not be visited
1395 // since all of its users will have already been marked as overdefined.
1396 // Update all of the users of this instruction's value.
// Struct values are tracked per-element, so they are revisited regardless.
1398 if (I->getType()->isStructTy() || !getValueState(I).isOverdefined())
1399 markUsersAsChanged(I);
1402 // Process the basic block work list.
1403 while (!BBWorkList.empty()) {
1404 BasicBlock *BB = BBWorkList.back();
1405 BBWorkList.pop_back();
1407 LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');
1409 // Notify all instructions in this basic block that they are newly
1416 /// ResolvedUndefsIn - While solving the dataflow for a function, we assume
1417 /// that branches on undef values cannot reach any of their successors.
1418 /// However, this is not a safe assumption. After we solve dataflow, this
1419 /// method should be used to handle this. If this returns true, the solver
1420 /// should be rerun.
1422 /// This method handles this by finding an unresolved branch and marking it one
1423 /// of the edges from the block as being feasible, even though the condition
1424 /// doesn't say it would otherwise be. This allows SCCP to find the rest of the
1425 /// CFG and only slightly pessimizes the analysis results (by marking one,
1426 /// potentially infeasible, edge feasible). This cannot usefully modify the
1427 /// constraints on the condition of the branch, as that would impact other users
1430 /// This scan also checks for values that use undefs, whose results are actually
1431 /// defined. For example, 'zext i8 undef to i32' should produce all zeros
1432 /// conservatively, as "(zext i8 X -> i32) & 0xFF00" must always return zero,
1433 /// even if X isn't defined.
// NOTE(review): embedded line numbers jump throughout this function; many
// `break`/`return true` statements and closing braces are elided in this
// chunk, so comments below describe only the visible code.
1434 bool SCCPSolver::ResolvedUndefsIn(Function &F) {
1435 for (BasicBlock &BB : F) {
// Dead blocks never execute; their instructions need no resolution.
1436 if (!BBExecutable.count(&BB))
1439 for (Instruction &I : BB) {
1440 // Look for instructions which produce undef values.
1441 if (I.getType()->isVoidTy()) continue;
1443 if (auto *STy = dyn_cast<StructType>(I.getType())) {
1444 // Only a few things that can be structs matter for undef.
1446 // Tracked calls must never be marked overdefined in ResolvedUndefsIn.
1447 if (CallSite CS = CallSite(&I))
1448 if (Function *F = CS.getCalledFunction())
1449 if (MRVFunctionsTracked.count(F))
1452 // extractvalue and insertvalue don't need to be marked; they are
1453 // tracked as precisely as their operands.
1454 if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I))
1457 // Send the results of everything else to overdefined. We could be
1458 // more precise than this but it isn't worth bothering.
1459 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1460 LatticeVal &LV = getStructValueState(&I, i);
1462 markOverdefined(LV, &I);
1467 LatticeVal &LV = getValueState(&I);
// Only still-unknown (possibly-undef) values need forcing.
1468 if (!LV.isUnknown()) continue;
1470 // extractvalue is safe; check here because the argument is a struct.
1471 if (isa<ExtractValueInst>(I))
1474 // Compute the operand LatticeVals, for convenience below.
1475 // Anything taking a struct is conservatively assumed to require
1476 // overdefined markings.
1477 if (I.getOperand(0)->getType()->isStructTy()) {
1478 markOverdefined(&I);
1481 LatticeVal Op0LV = getValueState(I.getOperand(0));
1483 if (I.getNumOperands() == 2) {
1484 if (I.getOperand(1)->getType()->isStructTy()) {
1485 markOverdefined(&I);
1489 Op1LV = getValueState(I.getOperand(1));
1491 // If this is an instructions whose result is defined even if the input is
1492 // not fully defined, propagate the information.
1493 Type *ITy = I.getType();
1494 switch (I.getOpcode()) {
1495 case Instruction::Add:
1496 case Instruction::Sub:
1497 case Instruction::Trunc:
1498 case Instruction::FPTrunc:
1499 case Instruction::BitCast:
1500 break; // Any undef -> undef
1501 case Instruction::FSub:
1502 case Instruction::FAdd:
1503 case Instruction::FMul:
1504 case Instruction::FDiv:
1505 case Instruction::FRem:
1506 // Floating-point binary operation: be conservative.
1507 if (Op0LV.isUnknown() && Op1LV.isUnknown())
1508 markForcedConstant(&I, Constant::getNullValue(ITy));
1510 markOverdefined(&I);
1512 case Instruction::FNeg:
1513 break; // fneg undef -> undef
1514 case Instruction::ZExt:
1515 case Instruction::SExt:
1516 case Instruction::FPToUI:
1517 case Instruction::FPToSI:
1518 case Instruction::FPExt:
1519 case Instruction::PtrToInt:
1520 case Instruction::IntToPtr:
1521 case Instruction::SIToFP:
1522 case Instruction::UIToFP:
1523 // undef -> 0; some outputs are impossible
1524 markForcedConstant(&I, Constant::getNullValue(ITy));
1526 case Instruction::Mul:
1527 case Instruction::And:
1528 // Both operands undef -> undef
1529 if (Op0LV.isUnknown() && Op1LV.isUnknown())
1531 // undef * X -> 0. X could be zero.
1532 // undef & X -> 0. X could be zero.
1533 markForcedConstant(&I, Constant::getNullValue(ITy));
1535 case Instruction::Or:
1536 // Both operands undef -> undef
1537 if (Op0LV.isUnknown() && Op1LV.isUnknown())
1539 // undef | X -> -1. X could be -1.
1540 markForcedConstant(&I, Constant::getAllOnesValue(ITy));
1542 case Instruction::Xor:
1543 // undef ^ undef -> 0; strictly speaking, this is not strictly
1544 // necessary, but we try to be nice to people who expect this
1545 // behavior in simple cases
1546 if (Op0LV.isUnknown() && Op1LV.isUnknown()) {
1547 markForcedConstant(&I, Constant::getNullValue(ITy));
1550 // undef ^ X -> undef
1552 case Instruction::SDiv:
1553 case Instruction::UDiv:
1554 case Instruction::SRem:
1555 case Instruction::URem:
1556 // X / undef -> undef. No change.
1557 // X % undef -> undef. No change.
1558 if (Op1LV.isUnknown()) break;
1560 // X / 0 -> undef. No change.
1561 // X % 0 -> undef. No change.
1562 if (Op1LV.isConstant() && Op1LV.getConstant()->isZeroValue())
1565 // undef / X -> 0. X could be maxint.
1566 // undef % X -> 0. X could be 1.
1567 markForcedConstant(&I, Constant::getNullValue(ITy));
1569 case Instruction::AShr:
1570 // X >>a undef -> undef.
1571 if (Op1LV.isUnknown()) break;
1573 // Shifting by the bitwidth or more is undefined.
1574 if (Op1LV.isConstant()) {
1575 if (auto *ShiftAmt = Op1LV.getConstantInt())
1576 if (ShiftAmt->getLimitedValue() >=
1577 ShiftAmt->getType()->getScalarSizeInBits())
// undef >>a X -> 0 is a safe conservative choice.
1582 markForcedConstant(&I, Constant::getNullValue(ITy));
1584 case Instruction::LShr:
1585 case Instruction::Shl:
1586 // X << undef -> undef.
1587 // X >> undef -> undef.
1588 if (Op1LV.isUnknown()) break;
1590 // Shifting by the bitwidth or more is undefined.
1591 if (Op1LV.isConstant()) {
1592 if (auto *ShiftAmt = Op1LV.getConstantInt())
1593 if (ShiftAmt->getLimitedValue() >=
1594 ShiftAmt->getType()->getScalarSizeInBits())
// undef << X -> 0 and undef >> X -> 0 are safe conservative choices.
1600 markForcedConstant(&I, Constant::getNullValue(ITy));
1602 case Instruction::Select:
1603 Op1LV = getValueState(I.getOperand(1));
1604 // undef ? X : Y -> X or Y. There could be commonality between X/Y.
1605 if (Op0LV.isUnknown()) {
1606 if (!Op1LV.isConstant()) // Pick the constant one if there is any.
1607 Op1LV = getValueState(I.getOperand(2));
1608 } else if (Op1LV.isUnknown()) {
1609 // c ? undef : undef -> undef. No change.
1610 Op1LV = getValueState(I.getOperand(2));
1611 if (Op1LV.isUnknown())
1613 // Otherwise, c ? undef : x -> x.
1615 // Leave Op1LV as Operand(1)'s LatticeValue.
1618 if (Op1LV.isConstant())
1619 markForcedConstant(&I, Op1LV.getConstant());
1621 markOverdefined(&I);
1623 case Instruction::Load:
1624 // A load here means one of two things: a load of undef from a global,
1625 // a load from an unknown pointer. Either way, having it return undef
1628 case Instruction::ICmp:
1629 // X == undef -> undef. Other comparisons get more complicated.
1630 Op0LV = getValueState(I.getOperand(0));
1631 Op1LV = getValueState(I.getOperand(1));
1633 if ((Op0LV.isUnknown() || Op1LV.isUnknown()) &&
1634 cast<ICmpInst>(&I)->isEquality())
1636 markOverdefined(&I);
1638 case Instruction::Call:
1639 case Instruction::Invoke:
1640 case Instruction::CallBr:
1641 // There are two reasons a call can have an undef result
1642 // 1. It could be tracked.
1643 // 2. It could be constant-foldable.
1644 // Because of the way we solve return values, tracked calls must
1645 // never be marked overdefined in ResolvedUndefsIn.
1646 if (Function *F = CallSite(&I).getCalledFunction())
1647 if (TrackedRetVals.count(F))
1650 // If the call is constant-foldable, we mark it overdefined because
1651 // we do not know what return values are valid.
1652 markOverdefined(&I);
1655 // If we don't know what should happen here, conservatively mark it
1657 markOverdefined(&I);
1662 // Check to see if we have a branch or switch on an undefined value. If so
1663 // we force the branch to go one way or the other to make the successor
1664 // values live. It doesn't really matter which way we force it.
1665 Instruction *TI = BB.getTerminator();
1666 if (auto *BI = dyn_cast<BranchInst>(TI)) {
1667 if (!BI->isConditional()) continue;
1668 if (!getValueState(BI->getCondition()).isUnknown())
1671 // If the input to SCCP is actually branch on undef, fix the undef to
1673 if (isa<UndefValue>(BI->getCondition())) {
1674 BI->setCondition(ConstantInt::getFalse(BI->getContext()));
1675 markEdgeExecutable(&BB, TI->getSuccessor(1));
1679 // Otherwise, it is a branch on a symbolic value which is currently
1680 // considered to be undef. Make sure some edge is executable, so a
1681 // branch on "undef" always flows somewhere.
1682 // FIXME: Distinguish between dead code and an LLVM "undef" value.
1683 BasicBlock *DefaultSuccessor = TI->getSuccessor(1);
1684 if (markEdgeExecutable(&BB, DefaultSuccessor))
1690 if (auto *IBR = dyn_cast<IndirectBrInst>(TI)) {
1691 // Indirect branch with no successors? It's ok to assume it branches
1693 if (IBR->getNumSuccessors() < 1)
1696 if (!getValueState(IBR->getAddress()).isUnknown())
1699 // If the input to SCCP is actually branch on undef, fix the undef to
1700 // the first successor of the indirect branch.
1701 if (isa<UndefValue>(IBR->getAddress())) {
1702 IBR->setAddress(BlockAddress::get(IBR->getSuccessor(0)));
1703 markEdgeExecutable(&BB, IBR->getSuccessor(0));
1707 // Otherwise, it is a branch on a symbolic value which is currently
1708 // considered to be undef. Make sure some edge is executable, so a
1709 // branch on "undef" always flows somewhere.
1710 // FIXME: IndirectBr on "undef" doesn't actually need to go anywhere:
1711 // we can assume the branch has undefined behavior instead.
1712 BasicBlock *DefaultSuccessor = IBR->getSuccessor(0);
1713 if (markEdgeExecutable(&BB, DefaultSuccessor))
1719 if (auto *SI = dyn_cast<SwitchInst>(TI)) {
1720 if (!SI->getNumCases() || !getValueState(SI->getCondition()).isUnknown())
1723 // If the input to SCCP is actually switch on undef, fix the undef to
1724 // the first constant.
1725 if (isa<UndefValue>(SI->getCondition())) {
1726 SI->setCondition(SI->case_begin()->getCaseValue());
1727 markEdgeExecutable(&BB, SI->case_begin()->getCaseSuccessor());
1731 // Otherwise, it is a branch on a symbolic value which is currently
1732 // considered to be undef. Make sure some edge is executable, so a
1733 // branch on "undef" always flows somewhere.
1734 // FIXME: Distinguish between dead code and an LLVM "undef" value.
1735 BasicBlock *DefaultSuccessor = SI->case_begin()->getCaseSuccessor();
1736 if (markEdgeExecutable(&BB, DefaultSuccessor))
// Replace V with the constant (or undef) its solved lattice state implies.
// Struct values are rebuilt element-wise as a ConstantStruct. Bails out when
// any (element) state is overdefined, and refuses to fold non-removable
// musttail calls, registering the callee so its returns aren't zapped.
// NOTE(review): embedded line numbers jump; early `return false` statements
// and some braces are elided in this chunk.
1746 static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) {
1747 Constant *Const = nullptr;
1748 if (V->getType()->isStructTy()) {
1749 std::vector<LatticeVal> IVs = Solver.getStructLatticeValueFor(V);
1750 if (llvm::any_of(IVs,
1751 [](const LatticeVal &LV) { return LV.isOverdefined(); }))
1753 std::vector<Constant *> ConstVals;
1754 auto *ST = dyn_cast<StructType>(V->getType());
1755 for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
1756 LatticeVal V = IVs[i];
1757 ConstVals.push_back(V.isConstant()
1759 : UndefValue::get(ST->getElementType(i)));
1761 Const = ConstantStruct::get(ST, ConstVals);
1763 const LatticeVal &IV = Solver.getLatticeValueFor(V);
1764 if (IV.isOverdefined())
// A still-unknown value may legally be folded to undef.
1767 Const = IV.isConstant() ? IV.getConstant() : UndefValue::get(V->getType());
1769 assert(Const && "Constant is nullptr here!");
1771 // Replacing `musttail` instructions with constant breaks `musttail` invariant
1772 // unless the call itself can be removed
1773 CallInst *CI = dyn_cast<CallInst>(V);
1774 if (CI && CI->isMustTailCall() && !CI->isSafeToRemove()) {
1776 Function *F = CS.getCalledFunction();
1778 // Don't zap returns of the callee
1780 Solver.AddMustTailCallee(F);
1782 LLVM_DEBUG(dbgs() << " Can\'t treat the result of musttail call : " << *CI
1783 << " as a constant\n");
1787 LLVM_DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n');
1789 // Replaces all of the uses of a variable with uses of the constant.
1790 V->replaceAllUsesWith(Const);
1794 // runSCCP() - Run the Sparse Conditional Constant Propagation algorithm,
1795 // and return true if the function was modified.
// Function-local (intraprocedural) driver: arguments are pessimistically
// overdefined, the solver is iterated with ResolvedUndefsIn until stable,
// then dead-block contents are removed and provably-constant instructions
// are replaced via tryToReplaceWithConstant.
// NOTE(review): embedded line numbers jump; the Solver.Solve() call and
// several closing braces are elided in this chunk.
1796 static bool runSCCP(Function &F, const DataLayout &DL,
1797 const TargetLibraryInfo *TLI) {
1798 LLVM_DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
1799 SCCPSolver Solver(DL, TLI);
1801 // Mark the first block of the function as being executable.
1802 Solver.MarkBlockExecutable(&F.front());
1804 // Mark all arguments to the function as being overdefined.
1805 for (Argument &AI : F.args())
1806 Solver.markOverdefined(&AI);
1808 // Solve for constants.
1809 bool ResolvedUndefs = true;
1810 while (ResolvedUndefs) {
1812 LLVM_DEBUG(dbgs() << "RESOLVING UNDEFs\n");
1813 ResolvedUndefs = Solver.ResolvedUndefsIn(F);
1816 bool MadeChanges = false;
1818 // If we decided that there are basic blocks that are dead in this function,
1819 // delete their contents now. Note that we cannot actually delete the blocks,
1820 // as we cannot modify the CFG of the function.
1822 for (BasicBlock &BB : F) {
1823 if (!Solver.isBlockExecutable(&BB)) {
1824 LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << BB);
1827 NumInstRemoved += removeAllNonTerminatorAndEHPadInstructions(&BB);
1833 // Iterate over all of the instructions in a function, replacing them with
1834 // constants if we have found them to be of constant values.
1835 for (BasicBlock::iterator BI = BB.begin(), E = BB.end(); BI != E;) {
1836 Instruction *Inst = &*BI++;
1837 if (Inst->getType()->isVoidTy() || Inst->isTerminator())
1840 if (tryToReplaceWithConstant(Solver, Inst)) {
1841 if (isInstructionTriviallyDead(Inst))
1842 Inst->eraseFromParent();
1843 // Hey, we just changed something!
// New pass manager entry point: run intraprocedural SCCP on F; on change,
// report that GlobalsAA and all CFG analyses are preserved (SCCP does not
// modify the CFG).
1853 PreservedAnalyses SCCPPass::run(Function &F, FunctionAnalysisManager &AM) {
1854 const DataLayout &DL = F.getParent()->getDataLayout();
1855 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1856 if (!runSCCP(F, DL, &TLI))
1857 return PreservedAnalyses::all();
1859 auto PA = PreservedAnalyses();
1860 PA.preserve<GlobalsAA>();
1861 PA.preserveSet<CFGAnalyses>();
1867 //===--------------------------------------------------------------------===//
1869 /// SCCP Class - This class uses the SCCPSolver to implement a per-function
1870 /// Sparse Conditional Constant Propagator.
// Legacy pass manager wrapper around runSCCP(). Requires TLI, preserves
// GlobalsAA, and declares the CFG preserved.
1872 class SCCPLegacyPass : public FunctionPass {
1874 // Pass identification, replacement for typeid
1877 SCCPLegacyPass() : FunctionPass(ID) {
1878 initializeSCCPLegacyPassPass(*PassRegistry::getPassRegistry());
1881 void getAnalysisUsage(AnalysisUsage &AU) const override {
1882 AU.addRequired<TargetLibraryInfoWrapperPass>();
1883 AU.addPreserved<GlobalsAAWrapperPass>();
1884 AU.setPreservesCFG();
1887 // runOnFunction - Run the Sparse Conditional Constant Propagation
1888 // algorithm, and return true if the function was modified.
1889 bool runOnFunction(Function &F) override {
// Honor opt-bisect / optnone skipping before doing any work.
1890 if (skipFunction(F))
1892 const DataLayout &DL = F.getParent()->getDataLayout();
1893 const TargetLibraryInfo *TLI =
1894 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1895 return runSCCP(F, DL, TLI);
1899 } // end anonymous namespace
// Static pass registration for the legacy pass manager ("-sccp"), plus the
// public factory function.
1901 char SCCPLegacyPass::ID = 0;
1903 INITIALIZE_PASS_BEGIN(SCCPLegacyPass, "sccp",
1904 "Sparse Conditional Constant Propagation", false, false)
1905 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1906 INITIALIZE_PASS_END(SCCPLegacyPass, "sccp",
1907 "Sparse Conditional Constant Propagation", false, false)
1909 // createSCCPPass - This is the public interface to this file.
1910 FunctionPass *llvm::createSCCPPass() { return new SCCPLegacyPass(); }
// Collect the non-undef return instructions of F whose operands IPSCCP may
// rewrite to undef ("zap"). Only legal when F is argument-tracked (so no
// unseen external callers exist) and F is not the target of a non-removable
// musttail call; blocks ending in a musttail call are likewise skipped.
// NOTE(review): embedded line numbers jump; early `return`/`continue`
// statements are elided in this chunk.
1912 static void findReturnsToZap(Function &F,
1913 SmallVector<ReturnInst *, 8> &ReturnsToZap,
1914 SCCPSolver &Solver) {
1915 // We can only do this if we know that nothing else can call the function.
1916 if (!Solver.isArgumentTrackedFunction(&F))
1919 // There is a non-removable musttail call site of this function. Zapping
1920 // returns is not allowed.
1921 if (Solver.isMustTailCallee(&F)) {
1922 LLVM_DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName()
1923 << " due to present musttail call of it\n");
1927 for (BasicBlock &BB : F) {
1928 if (CallInst *CI = BB.getTerminatingMustTailCall()) {
1929 LLVM_DEBUG(dbgs() << "Can't zap return of the block due to present "
1930 << "musttail call : " << *CI << "\n");
1935 if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
1936 if (!isa<UndefValue>(RI->getOperand(0)))
1937 ReturnsToZap.push_back(RI);
1941 // Update the condition for terminators that are branching on indeterminate
1942 // values, forcing them to use a specific edge.
// Rewrites the terminator's condition/address operand to a constant that
// selects one concrete successor (first case for switch, false edge for
// br, successor 0 for indirectbr) and asserts that edge is feasible.
// NOTE(review): embedded line numbers jump; the already-determinate early
// exits are elided in this chunk.
1943 static void forceIndeterminateEdge(Instruction* I, SCCPSolver &Solver) {
1944 BasicBlock *Dest = nullptr;
1945 Constant *C = nullptr;
1946 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
1947 if (!isa<ConstantInt>(SI->getCondition())) {
1948 // Indeterminate switch; use first case value.
1949 Dest = SI->case_begin()->getCaseSuccessor();
1950 C = SI->case_begin()->getCaseValue();
1952 } else if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
1953 if (!isa<ConstantInt>(BI->getCondition())) {
1954 // Indeterminate branch; use false.
1955 Dest = BI->getSuccessor(1);
1956 C = ConstantInt::getFalse(BI->getContext());
1958 } else if (IndirectBrInst *IBR = dyn_cast<IndirectBrInst>(I)) {
1959 if (!isa<BlockAddress>(IBR->getAddress()->stripPointerCasts())) {
1960 // Indeterminate indirectbr; use successor 0.
1961 Dest = IBR->getSuccessor(0);
1962 C = BlockAddress::get(IBR->getSuccessor(0));
1965 llvm_unreachable("Unexpected terminator instruction");
1968 assert(Solver.isEdgeFeasible(I->getParent(), Dest) &&
1969 "Didn't find feasible edge?");
// Operand 0 is the condition (br/switch) or address (indirectbr).
1972 I->setOperand(0, C);
1976 bool llvm::runIPSCCP(
1977 Module &M, const DataLayout &DL, const TargetLibraryInfo *TLI,
1978 function_ref<AnalysisResultsForFn(Function &)> getAnalysis) {
1979 SCCPSolver Solver(DL, TLI);
1981 // Loop over all functions, marking arguments to those with their addresses
1982 // taken or that are external as overdefined.
1983 for (Function &F : M) {
1984 if (F.isDeclaration())
1987 Solver.addAnalysis(F, getAnalysis(F));
1989 // Determine if we can track the function's return values. If so, add the
1990 // function to the solver's set of return-tracked functions.
1991 if (canTrackReturnsInterprocedurally(&F))
1992 Solver.AddTrackedFunction(&F);
1994 // Determine if we can track the function's arguments. If so, add the
1995 // function to the solver's set of argument-tracked functions.
1996 if (canTrackArgumentsInterprocedurally(&F)) {
1997 Solver.AddArgumentTrackedFunction(&F);
2001 // Assume the function is called.
2002 Solver.MarkBlockExecutable(&F.front());
2004 // Assume nothing about the incoming arguments.
2005 for (Argument &AI : F.args())
2006 Solver.markOverdefined(&AI);
2009 // Determine if we can track any of the module's global variables. If so, add
2010 // the global variables we can track to the solver's set of tracked global
2012 for (GlobalVariable &G : M.globals()) {
2013 G.removeDeadConstantUsers();
2014 if (canTrackGlobalVariableInterprocedurally(&G))
2015 Solver.TrackValueOfGlobalVariable(&G);
2018 // Solve for constants.
2019 bool ResolvedUndefs = true;
2021 while (ResolvedUndefs) {
2022 LLVM_DEBUG(dbgs() << "RESOLVING UNDEFS\n");
2023 ResolvedUndefs = false;
2024 for (Function &F : M)
2025 if (Solver.ResolvedUndefsIn(F)) {
2026 // We run Solve() after we resolved an undef in a function, because
2027 // we might deduce a fact that eliminates an undef in another function.
2029 ResolvedUndefs = true;
2033 bool MadeChanges = false;
2035 // Iterate over all of the instructions in the module, replacing them with
2036 // constants if we have found them to be of constant values.
2038 for (Function &F : M) {
2039 if (F.isDeclaration())
2042 SmallVector<BasicBlock *, 512> BlocksToErase;
2044 if (Solver.isBlockExecutable(&F.front()))
2045 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); AI != E;
2047 if (!AI->use_empty() && tryToReplaceWithConstant(Solver, &*AI)) {
2053 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
2054 if (!Solver.isBlockExecutable(&*BB)) {
2055 LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB);
2060 if (&*BB != &F.front())
2061 BlocksToErase.push_back(&*BB);
2065 for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
2066 Instruction *Inst = &*BI++;
2067 if (Inst->getType()->isVoidTy())
2069 if (tryToReplaceWithConstant(Solver, Inst)) {
2070 if (Inst->isSafeToRemove())
2071 Inst->eraseFromParent();
2072 // Hey, we just changed something!
2079 DomTreeUpdater DTU = Solver.getDTU(F);
2080 // Change dead blocks to unreachable. We do it after replacing constants
2081 // in all executable blocks, because changeToUnreachable may remove PHI
2082 // nodes in executable blocks we found values for. The function's entry
2083 // block is not part of BlocksToErase, so we have to handle it separately.
2084 for (BasicBlock *BB : BlocksToErase) {
2086 changeToUnreachable(BB->getFirstNonPHI(), /*UseLLVMTrap=*/false,
2087 /*PreserveLCSSA=*/false, &DTU);
2089 if (!Solver.isBlockExecutable(&F.front()))
2090 NumInstRemoved += changeToUnreachable(F.front().getFirstNonPHI(),
2091 /*UseLLVMTrap=*/false,
2092 /*PreserveLCSSA=*/false, &DTU);
2094 // Now that all instructions in the function are constant folded,
2095 // use ConstantFoldTerminator to get rid of in-edges, record DT updates and
2097 for (BasicBlock *DeadBB : BlocksToErase) {
2098 // If there are any PHI nodes in this successor, drop entries for BB now.
2099 for (Value::user_iterator UI = DeadBB->user_begin(),
2100 UE = DeadBB->user_end();
2102 // Grab the user and then increment the iterator early, as the user
2103 // will be deleted. Step past all adjacent uses from the same user.
2104 auto *I = dyn_cast<Instruction>(*UI);
2105 do { ++UI; } while (UI != UE && *UI == I);
2107 // Ignore blockaddress users; BasicBlock's dtor will handle them.
2110 // If we have forced an edge for an indeterminate value, then force the
2111 // terminator to fold to that edge.
2112 forceIndeterminateEdge(I, Solver);
2113 BasicBlock *InstBB = I->getParent();
2114 bool Folded = ConstantFoldTerminator(InstBB,
2115 /*DeleteDeadConditions=*/false,
2116 /*TLI=*/nullptr, &DTU);
2118 "Expect TermInst on constantint or blockaddress to be folded");
2120 // If we folded the terminator to an unconditional branch to another
2121 // dead block, replace it with Unreachable, to avoid trying to fold that
2123 BranchInst *BI = cast<BranchInst>(InstBB->getTerminator());
2124 if (BI && BI->isUnconditional() &&
2125 !Solver.isBlockExecutable(BI->getSuccessor(0))) {
2126 InstBB->getTerminator()->eraseFromParent();
2127 new UnreachableInst(InstBB->getContext(), InstBB);
2130 // Mark dead BB for deletion.
2131 DTU.deleteBB(DeadBB);
2134 for (BasicBlock &BB : F) {
2135 for (BasicBlock::iterator BI = BB.begin(), E = BB.end(); BI != E;) {
2136 Instruction *Inst = &*BI++;
2137 if (Solver.getPredicateInfoFor(Inst)) {
2138 if (auto *II = dyn_cast<IntrinsicInst>(Inst)) {
2139 if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
2140 Value *Op = II->getOperand(0);
2141 Inst->replaceAllUsesWith(Op);
2142 Inst->eraseFromParent();
2150 // If we inferred constant or undef return values for a function, we replaced
2151 // all call uses with the inferred value. This means we don't need to bother
2152 // actually returning anything from the function. Replace all return
2153 // instructions with return undef.
2155 // Do this in two stages: first identify the functions we should process, then
2156 // actually zap their returns. This is important because we can only do this
2157 // if the address of the function isn't taken. In cases where a return is the
2158 // last use of a function, the order of processing functions would affect
2159 // whether other functions are optimizable.
2160 SmallVector<ReturnInst*, 8> ReturnsToZap;
2162 const MapVector<Function*, LatticeVal> &RV = Solver.getTrackedRetVals();
2163 for (const auto &I : RV) {
2164 Function *F = I.first;
2165 if (I.second.isOverdefined() || F->getReturnType()->isVoidTy())
2167 findReturnsToZap(*F, ReturnsToZap, Solver);
2170 for (const auto &F : Solver.getMRVFunctionsTracked()) {
2171 assert(F->getReturnType()->isStructTy() &&
2172 "The return type should be a struct");
2173 StructType *STy = cast<StructType>(F->getReturnType());
2174 if (Solver.isStructLatticeConstant(F, STy))
2175 findReturnsToZap(*F, ReturnsToZap, Solver);
2178 // Zap all returns which we've identified as zap to change.
2179 for (unsigned i = 0, e = ReturnsToZap.size(); i != e; ++i) {
2180 Function *F = ReturnsToZap[i]->getParent()->getParent();
2181 ReturnsToZap[i]->setOperand(0, UndefValue::get(F->getReturnType()));
2184 // If we inferred constant or undef values for globals variables, we can
2185 // delete the global and any stores that remain to it.
2186 const DenseMap<GlobalVariable*, LatticeVal> &TG = Solver.getTrackedGlobals();
2187 for (DenseMap<GlobalVariable*, LatticeVal>::const_iterator I = TG.begin(),
2188 E = TG.end(); I != E; ++I) {
2189 GlobalVariable *GV = I->first;
2190 assert(!I->second.isOverdefined() &&
2191 "Overdefined values should have been taken out of the map!");
2192 LLVM_DEBUG(dbgs() << "Found that GV '" << GV->getName()
2193 << "' is constant!\n");
2194 while (!GV->use_empty()) {
2195 StoreInst *SI = cast<StoreInst>(GV->user_back());
2196 SI->eraseFromParent();
2198 M.getGlobalList().erase(GV);