1 //===---- NewGVN.cpp - Global Value Numbering Pass --------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file implements LLVM's new Global Value Numbering pass.
11 /// GVN partitions values computed by a function into congruence classes.
12 /// Values ending up in the same congruence class are guaranteed to be the same
13 /// for every execution of the program. In that respect, congruency is a
14 /// compile-time approximation of equivalence of values at runtime.
15 /// The algorithm implemented here uses a sparse formulation and it's based
16 /// on the ideas described in the paper:
17 /// "A Sparse Algorithm for Predicated Global Value Numbering" from
20 /// A brief overview of the algorithm: The algorithm is essentially the same as
21 /// the standard RPO value numbering algorithm (a good reference is the paper
22 /// "SCC based value numbering" by L. Taylor Simpson) with one major difference:
23 /// The RPO algorithm proceeds, on every iteration, to process every reachable
24 /// block and every instruction in that block. This is because the standard RPO
25 /// algorithm does not track what things have the same value number, it only
26 /// tracks what the value number of a given operation is (the mapping is
27 /// operation -> value number). Thus, when a value number of an operation
28 /// changes, it must reprocess everything to ensure all uses of a value number
29 /// get updated properly. In contrast, the sparse algorithm we use *also*
30 /// tracks what operations have a given value number (IE it also tracks the
31 /// reverse mapping from value number -> operations with that value number), so
32 /// that it only needs to reprocess the instructions that are affected when
33 /// something's value number changes. The vast majority of complexity and code
34 /// in this file is devoted to tracking what value numbers could change for what
35 /// instructions when various things happen. The rest of the algorithm is
36 /// devoted to performing symbolic evaluation, forward propagation, and
37 /// simplification of operations based on the value numbers deduced so far
39 /// In order to make the GVN mostly-complete, we use a technique derived from
40 /// "Detection of Redundant Expressions: A Complete and Polynomial-time
41 /// Algorithm in SSA" by R.R. Pai. The source of incompleteness in most SSA
42 /// based GVN algorithms is related to their inability to detect equivalence
43 /// between phi of ops (IE phi(a+b, c+d)) and op of phis (phi(a,c) + phi(b, d)).
44 /// We resolve this issue by generating the equivalent "phi of ops" form for
45 /// each op of phis we see, in a way that only takes polynomial time to resolve.
47 /// We also do not perform elimination by using any published algorithm. All
48 /// published algorithms are O(Instructions). Instead, we use a technique that
49 /// is O(number of operations with the same value number), enabling us to skip
50 /// trying to eliminate things that have unique value numbers.
51 //===----------------------------------------------------------------------===//
53 #include "llvm/Transforms/Scalar/NewGVN.h"
54 #include "llvm/ADT/BitVector.h"
55 #include "llvm/ADT/DenseMap.h"
56 #include "llvm/ADT/DenseSet.h"
57 #include "llvm/ADT/DepthFirstIterator.h"
58 #include "llvm/ADT/Hashing.h"
59 #include "llvm/ADT/MapVector.h"
60 #include "llvm/ADT/PostOrderIterator.h"
61 #include "llvm/ADT/STLExtras.h"
62 #include "llvm/ADT/SmallPtrSet.h"
63 #include "llvm/ADT/SmallSet.h"
64 #include "llvm/ADT/Statistic.h"
65 #include "llvm/ADT/TinyPtrVector.h"
66 #include "llvm/Analysis/AliasAnalysis.h"
67 #include "llvm/Analysis/AssumptionCache.h"
68 #include "llvm/Analysis/CFG.h"
69 #include "llvm/Analysis/CFGPrinter.h"
70 #include "llvm/Analysis/ConstantFolding.h"
71 #include "llvm/Analysis/GlobalsModRef.h"
72 #include "llvm/Analysis/InstructionSimplify.h"
73 #include "llvm/Analysis/MemoryBuiltins.h"
74 #include "llvm/Analysis/MemoryLocation.h"
75 #include "llvm/Analysis/MemorySSA.h"
76 #include "llvm/Analysis/TargetLibraryInfo.h"
77 #include "llvm/IR/DataLayout.h"
78 #include "llvm/IR/Dominators.h"
79 #include "llvm/IR/GlobalVariable.h"
80 #include "llvm/IR/IRBuilder.h"
81 #include "llvm/IR/IntrinsicInst.h"
82 #include "llvm/IR/LLVMContext.h"
83 #include "llvm/IR/Metadata.h"
84 #include "llvm/IR/PatternMatch.h"
85 #include "llvm/IR/Type.h"
86 #include "llvm/Support/Allocator.h"
87 #include "llvm/Support/CommandLine.h"
88 #include "llvm/Support/Debug.h"
89 #include "llvm/Support/DebugCounter.h"
90 #include "llvm/Transforms/Scalar.h"
91 #include "llvm/Transforms/Scalar/GVNExpression.h"
92 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
93 #include "llvm/Transforms/Utils/Local.h"
94 #include "llvm/Transforms/Utils/PredicateInfo.h"
95 #include "llvm/Transforms/Utils/VNCoercion.h"
97 #include <unordered_map>
100 using namespace llvm;
101 using namespace PatternMatch;
102 using namespace llvm::GVNExpression;
103 using namespace llvm::VNCoercion;
104 #define DEBUG_TYPE "newgvn"
106 STATISTIC(NumGVNInstrDeleted, "Number of instructions deleted");
107 STATISTIC(NumGVNBlocksDeleted, "Number of blocks deleted");
108 STATISTIC(NumGVNOpsSimplified, "Number of Expressions simplified");
109 STATISTIC(NumGVNPhisAllSame, "Number of PHIs whos arguments are all the same");
110 STATISTIC(NumGVNMaxIterations,
111 "Maximum Number of iterations it took to converge GVN");
112 STATISTIC(NumGVNLeaderChanges, "Number of leader changes");
113 STATISTIC(NumGVNSortedLeaderChanges, "Number of sorted leader changes");
114 STATISTIC(NumGVNAvoidedSortedLeaderChanges,
115 "Number of avoided sorted leader changes");
116 STATISTIC(NumGVNDeadStores, "Number of redundant/dead stores eliminated");
117 STATISTIC(NumGVNPHIOfOpsCreated, "Number of PHI of ops created");
118 STATISTIC(NumGVNPHIOfOpsEliminations,
119 "Number of things eliminated using PHI of ops");
120 DEBUG_COUNTER(VNCounter, "newgvn-vn",
121 "Controls which instructions are value numbered")
122 DEBUG_COUNTER(PHIOfOpsCounter, "newgvn-phi",
123 "Controls which instructions we create phi of ops for")
124 // Currently store defining access refinement is too slow due to basicaa being
125 // egregiously slow. This flag lets us keep it working while we work on this issue.
127 static cl::opt<bool> EnableStoreRefinement("enable-store-refinement",
128 cl::init(false), cl::Hidden);
130 //===----------------------------------------------------------------------===//
132 //===----------------------------------------------------------------------===//
136 namespace GVNExpression {
// Out-of-line defaulted destructors for the GVNExpression class hierarchy.
// NOTE(review): presumably defined in this .cpp (rather than in
// GVNExpression.h) so that each class's vtable is anchored in this
// translation unit ("key function" idiom) — confirm against the header.
137 Expression::~Expression() = default;
138 BasicExpression::~BasicExpression() = default;
139 CallExpression::~CallExpression() = default;
140 LoadExpression::~LoadExpression() = default;
141 StoreExpression::~StoreExpression() = default;
142 AggregateValueExpression::~AggregateValueExpression() = default;
143 PHIExpression::~PHIExpression() = default;
147 // Tarjan's SCC finding algorithm with Nuutila's improvements
148 // SCCIterator is actually fairly complex for the simple thing we want.
149 // It also wants to hand us SCC's that are unrelated to the phi node we ask
150 // about, and have us process them there or risk redoing work.
151 // Graph traits over a filter iterator also doesn't work that well here.
152 // This SCC finder is specialized to walk use-def chains, and only follows
154 // not generic values (arguments, etc).
157 TarjanSCC() : Components(1) {}
159 void Start(const Instruction *Start) {
160 if (Root.lookup(Start) == 0)
164 const SmallPtrSetImpl<const Value *> &getComponentFor(const Value *V) const {
165 unsigned ComponentID = ValueToComponent.lookup(V);
167 assert(ComponentID > 0 &&
168 "Asking for a component for a value we never processed");
169 return Components[ComponentID];
173 void FindSCC(const Instruction *I) {
175 // Store the DFS Number we had before it possibly gets incremented.
176 unsigned int OurDFS = DFSNum;
177 for (auto &Op : I->operands()) {
178 if (auto *InstOp = dyn_cast<Instruction>(Op)) {
179 if (Root.lookup(Op) == 0)
181 if (!InComponent.count(Op))
182 Root[I] = std::min(Root.lookup(I), Root.lookup(Op));
185 // See if we really were the root of a component, by seeing if we still have
186 // our DFSNumber. If we do, we are the root of the component, and we have
187 // completed a component. If we do not, we are not the root of a component,
188 // and belong on the component stack.
189 if (Root.lookup(I) == OurDFS) {
190 unsigned ComponentID = Components.size();
191 Components.resize(Components.size() + 1);
192 auto &Component = Components.back();
194 DEBUG(dbgs() << "Component root is " << *I << "\n");
195 InComponent.insert(I);
196 ValueToComponent[I] = ComponentID;
197 // Pop a component off the stack and label it.
198 while (!Stack.empty() && Root.lookup(Stack.back()) >= OurDFS) {
199 auto *Member = Stack.back();
200 DEBUG(dbgs() << "Component member is " << *Member << "\n");
201 Component.insert(Member);
202 InComponent.insert(Member);
203 ValueToComponent[Member] = ComponentID;
207 // Part of a component, push to stack
211 unsigned int DFSNum = 1;
212 SmallPtrSet<const Value *, 8> InComponent;
213 DenseMap<const Value *, unsigned int> Root;
214 SmallVector<const Value *, 8> Stack;
215 // Store the components as vector of ptr sets, because we need the topo order
216 // of SCC's, but not individual member order
217 SmallVector<SmallPtrSet<const Value *, 8>, 8> Components;
218 DenseMap<const Value *, unsigned> ValueToComponent;
220 // Congruence classes represent the set of expressions/instructions
221 // that are all the same *during some scope in the function*.
222 // That is, because of the way we perform equality propagation, and
223 // because of memory value numbering, it is not correct to assume
224 // you can willy-nilly replace any member with any other at any
225 // point in the function.
227 // For any Value in the Member set, it is valid to replace any dominated member
230 // Every congruence class has a leader, and the leader is used to symbolize
231 // instructions in a canonical way (IE every operand of an instruction that is a
232 // member of the same congruence class will always be replaced with leader
233 // during symbolization). To simplify symbolization, we keep the leader as a
234 // constant if class can be proved to be a constant value. Otherwise, the
235 // leader is the member of the value set with the smallest DFS number. Each
236 // congruence class also has a defining expression, though the expression may be
237 // null. If it exists, it can be used for forward propagation and reassociation
240 // For memory, we also track a representative MemoryAccess, and a set of memory
241 // members for MemoryPhis (which have no real instructions). Note that for
242 // memory, it seems tempting to try to split the memory members into a
243 // MemoryCongruenceClass or something. Unfortunately, this does not work
244 // easily. The value numbering of a given memory expression depends on the
245 // leader of the memory congruence class, and the leader of memory congruence
246 // class depends on the value numbering of a given memory expression. This
247 // leads to wasted propagation, and in some cases, missed optimization. For
248 // example: If we had value numbered two stores together before, but now do not,
249 // we move them to a new value congruence class. This in turn will move at least one
250 // of the memorydefs to a new memory congruence class. Which in turn, affects
251 // the value numbering of the stores we just value numbered (because the memory
252 // congruence class is part of the value number). So while theoretically
253 // possible to split them up, it turns out to be *incredibly* complicated to get
254 // it to work right, because of the interdependency. While structurally
255 // slightly messier, it is algorithmically much simpler and faster to do what we
256 // do here, and track them both at once in the same class.
257 // Note: The default iterators for this class iterate over values
258 class CongruenceClass {
260 using MemberType = Value;
261 using MemberSet = SmallPtrSet<MemberType *, 4>;
262 using MemoryMemberType = MemoryPhi;
263 using MemoryMemberSet = SmallPtrSet<const MemoryMemberType *, 2>;
265 explicit CongruenceClass(unsigned ID) : ID(ID) {}
266 CongruenceClass(unsigned ID, Value *Leader, const Expression *E)
267 : ID(ID), RepLeader(Leader), DefiningExpr(E) {}
268 unsigned getID() const { return ID; }
269 // True if this class has no members left. This is mainly used for assertion
270 // purposes, and for skipping empty classes.
271 bool isDead() const {
272 // If it's both dead from a value perspective, and dead from a memory
273 // perspective, it's really dead.
274 return empty() && memory_empty();
277 Value *getLeader() const { return RepLeader; }
278 void setLeader(Value *Leader) { RepLeader = Leader; }
279 const std::pair<Value *, unsigned int> &getNextLeader() const {
282 void resetNextLeader() { NextLeader = {nullptr, ~0}; }
284 void addPossibleNextLeader(std::pair<Value *, unsigned int> LeaderPair) {
285 if (LeaderPair.second < NextLeader.second)
286 NextLeader = LeaderPair;
289 Value *getStoredValue() const { return RepStoredValue; }
290 void setStoredValue(Value *Leader) { RepStoredValue = Leader; }
291 const MemoryAccess *getMemoryLeader() const { return RepMemoryAccess; }
292 void setMemoryLeader(const MemoryAccess *Leader) { RepMemoryAccess = Leader; }
294 // Forward propagation info
295 const Expression *getDefiningExpr() const { return DefiningExpr; }
298 bool empty() const { return Members.empty(); }
299 unsigned size() const { return Members.size(); }
300 MemberSet::const_iterator begin() const { return Members.begin(); }
301 MemberSet::const_iterator end() const { return Members.end(); }
302 void insert(MemberType *M) { Members.insert(M); }
303 void erase(MemberType *M) { Members.erase(M); }
304 void swap(MemberSet &Other) { Members.swap(Other); }
307 bool memory_empty() const { return MemoryMembers.empty(); }
308 unsigned memory_size() const { return MemoryMembers.size(); }
309 MemoryMemberSet::const_iterator memory_begin() const {
310 return MemoryMembers.begin();
312 MemoryMemberSet::const_iterator memory_end() const {
313 return MemoryMembers.end();
315 iterator_range<MemoryMemberSet::const_iterator> memory() const {
316 return make_range(memory_begin(), memory_end());
318 void memory_insert(const MemoryMemberType *M) { MemoryMembers.insert(M); }
319 void memory_erase(const MemoryMemberType *M) { MemoryMembers.erase(M); }
322 unsigned getStoreCount() const { return StoreCount; }
323 void incStoreCount() { ++StoreCount; }
324 void decStoreCount() {
325 assert(StoreCount != 0 && "Store count went negative");
329 // True if this class has no memory members.
330 bool definesNoMemory() const { return StoreCount == 0 && memory_empty(); }
332 // Return true if two congruence classes are equivalent to each other. This
334 // that every field but the ID number and the dead field are equivalent.
335 bool isEquivalentTo(const CongruenceClass *Other) const {
341 if (std::tie(StoreCount, RepLeader, RepStoredValue, RepMemoryAccess) !=
342 std::tie(Other->StoreCount, Other->RepLeader, Other->RepStoredValue,
343 Other->RepMemoryAccess))
345 if (DefiningExpr != Other->DefiningExpr)
346 if (!DefiningExpr || !Other->DefiningExpr ||
347 *DefiningExpr != *Other->DefiningExpr)
349 // We need some ordered set
350 std::set<Value *> AMembers(Members.begin(), Members.end());
351 std::set<Value *> BMembers(Members.begin(), Members.end());
352 return AMembers == BMembers;
357 // Representative leader.
358 Value *RepLeader = nullptr;
359 // The most dominating leader after our current leader, because the member set
360 // is not sorted and is expensive to keep sorted all the time.
361 std::pair<Value *, unsigned int> NextLeader = {nullptr, ~0U};
362 // If this is represented by a store, the value of the store.
363 Value *RepStoredValue = nullptr;
364 // If this class contains MemoryDefs or MemoryPhis, this is the leading memory
366 const MemoryAccess *RepMemoryAccess = nullptr;
367 // Defining Expression.
368 const Expression *DefiningExpr = nullptr;
369 // Actual members of this class.
371 // This is the set of MemoryPhis that exist in the class. MemoryDefs and
372 // MemoryUses have real instructions representing them, so we only need to
373 // track MemoryPhis here.
374 MemoryMemberSet MemoryMembers;
375 // Number of stores in this congruence class.
376 // This is used so we can detect store equivalence changes properly.
381 struct ExactEqualsExpression {
383 explicit ExactEqualsExpression(const Expression &E) : E(E) {}
384 hash_code getComputedHash() const { return E.getComputedHash(); }
385 bool operator==(const Expression &Other) const {
386 return E.exactlyEquals(Other);
390 template <> struct DenseMapInfo<const Expression *> {
391 static const Expression *getEmptyKey() {
392 auto Val = static_cast<uintptr_t>(-1);
393 Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
394 return reinterpret_cast<const Expression *>(Val);
396 static const Expression *getTombstoneKey() {
397 auto Val = static_cast<uintptr_t>(~1U);
398 Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
399 return reinterpret_cast<const Expression *>(Val);
401 static unsigned getHashValue(const Expression *E) {
402 return E->getComputedHash();
404 static unsigned getHashValue(const ExactEqualsExpression &E) {
405 return E.getComputedHash();
407 static bool isEqual(const ExactEqualsExpression &LHS, const Expression *RHS) {
408 if (RHS == getTombstoneKey() || RHS == getEmptyKey())
413 static bool isEqual(const Expression *LHS, const Expression *RHS) {
416 if (LHS == getTombstoneKey() || RHS == getTombstoneKey() ||
417 LHS == getEmptyKey() || RHS == getEmptyKey())
419 // Compare hashes before equality. This is *not* what the hashtable does,
420 // since it is computing it modulo the number of buckets, whereas we are
421 // using the full hash keyspace. Since the hashes are precomputed, this
422 // check is *much* faster than equality.
423 if (LHS->getComputedHash() != RHS->getComputedHash())
428 } // end namespace llvm
434 const TargetLibraryInfo *TLI;
437 MemorySSAWalker *MSSAWalker;
438 const DataLayout &DL;
439 std::unique_ptr<PredicateInfo> PredInfo;
441 // These are the only two things the create* functions should have
442 // side-effects on due to allocating memory.
443 mutable BumpPtrAllocator ExpressionAllocator;
444 mutable ArrayRecycler<Value *> ArgRecycler;
445 mutable TarjanSCC SCCFinder;
446 const SimplifyQuery SQ;
448 // Number of function arguments, used by ranking
449 unsigned int NumFuncArgs;
451 // RPOOrdering of basic blocks
452 DenseMap<const DomTreeNode *, unsigned> RPOOrdering;
454 // Congruence class info.
456 // This class is called INITIAL in the paper. It is the class everything
457 // starts out in, and represents any value. Being an optimistic analysis,
458 // anything in the TOP class has the value TOP, which is indeterminate and
459 // equivalent to everything.
460 CongruenceClass *TOPClass;
461 std::vector<CongruenceClass *> CongruenceClasses;
462 unsigned NextCongruenceNum;
465 DenseMap<Value *, CongruenceClass *> ValueToClass;
466 DenseMap<Value *, const Expression *> ValueToExpression;
467 // Value PHI handling, used to make equivalence between phi(op, op) and
469 // These mappings just store various data that would normally be part of the
471 DenseSet<const Instruction *> PHINodeUses;
472 // Map a temporary instruction we created to a parent block.
473 DenseMap<const Value *, BasicBlock *> TempToBlock;
474 // Map between the temporary phis we created and the real instructions they
475 // are known equivalent to.
476 DenseMap<const Value *, PHINode *> RealToTemp;
477 // In order to know when we should re-process instructions that have
478 // phi-of-ops, we track the set of expressions that they needed as
479 // leaders. When we discover new leaders for those expressions, we process the
480 // associated phi-of-op instructions again in case they have changed. The
481 // other way they may change is if they had leaders, and those leaders
482 // disappear. However, at the point they have leaders, there are uses of the
483 // relevant operands in the created phi node, and so they will get reprocessed
484 // through the normal user marking we perform.
485 mutable DenseMap<const Value *, SmallPtrSet<Value *, 2>> AdditionalUsers;
486 DenseMap<const Expression *, SmallPtrSet<Instruction *, 2>>
487 ExpressionToPhiOfOps;
488 // Map from basic block to the temporary operations we created
489 DenseMap<const BasicBlock *, SmallVector<PHINode *, 8>> PHIOfOpsPHIs;
490 // Map from temporary operation to MemoryAccess.
491 DenseMap<const Instruction *, MemoryUseOrDef *> TempToMemory;
492 // Set of all temporary instructions we created.
493 DenseSet<Instruction *> AllTempInstructions;
495 // Mapping from predicate info we used to the instructions we used it with.
496 // In order to correctly ensure propagation, we must keep track of what
497 // comparisons we used, so that when the values of the comparisons change, we
498 // propagate the information to the places we used the comparison.
499 mutable DenseMap<const Value *, SmallPtrSet<Instruction *, 2>>
501 // the same reasoning as PredicateToUsers. When we skip MemoryAccesses for
502 // stores, we no longer can rely solely on the def-use chains of MemorySSA.
503 mutable DenseMap<const MemoryAccess *, SmallPtrSet<MemoryAccess *, 2>>
506 // A table storing which memorydefs/phis represent a memory state provably
507 // equivalent to another memory state.
508 // We could use the congruence class machinery, but the MemoryAccess's are
509 // abstract memory states, so they can only ever be equivalent to each other,
510 // and not to constants, etc.
511 DenseMap<const MemoryAccess *, CongruenceClass *> MemoryAccessToClass;
513 // We could, if we wanted, build MemoryPhiExpressions and
514 // MemoryVariableExpressions, etc, and value number them the same way we value
515 // number phi expressions. For the moment, this seems like overkill. They
516 // can only exist in one of three states: they can be TOP (equal to
517 // everything), Equivalent to something else, or unique. Because we do not
518 // create expressions for them, we need to simulate leader change not just
519 // when they change class, but when they change state. Note: We can do the
520 // same thing for phis, and avoid having phi expressions if we wanted, We
521 // should eventually unify in one direction or the other, so this is a little
522 // bit of an experiment in which turns out easier to maintain.
523 enum MemoryPhiState { MPS_Invalid, MPS_TOP, MPS_Equivalent, MPS_Unique };
524 DenseMap<const MemoryPhi *, MemoryPhiState> MemoryPhiState;
526 enum InstCycleState { ICS_Unknown, ICS_CycleFree, ICS_Cycle };
527 mutable DenseMap<const Instruction *, InstCycleState> InstCycleState;
528 // Expression to class mapping.
529 using ExpressionClassMap = DenseMap<const Expression *, CongruenceClass *>;
530 ExpressionClassMap ExpressionToClass;
532 // We have a single expression that represents currently DeadExpressions.
533 // For dead expressions we can prove will stay dead, we mark them with
534 // DFS number zero. However, it's possible in the case of phi nodes
535 // for us to assume/prove all arguments are dead during fixpointing.
536 // We use DeadExpression for that case.
537 DeadExpression *SingletonDeadExpression = nullptr;
539 // Which values have changed as a result of leader changes.
540 SmallPtrSet<Value *, 8> LeaderChanges;
542 // Reachability info.
543 using BlockEdge = BasicBlockEdge;
544 DenseSet<BlockEdge> ReachableEdges;
545 SmallPtrSet<const BasicBlock *, 8> ReachableBlocks;
547 // This is a bitvector because, on larger functions, we may have
548 // thousands of touched instructions at once (entire blocks,
549 // instructions with hundreds of uses, etc). Even with optimization
550 // for when we mark whole blocks as touched, when this was a
551 // SmallPtrSet or DenseSet, for some functions, we spent >20% of all
552 // the time in GVN just managing this list. The bitvector, on the
553 // other hand, efficiently supports test/set/clear of both
554 // individual and ranges, as well as "find next element" This
555 // enables us to use it as a worklist with essentially 0 cost.
556 BitVector TouchedInstructions;
558 DenseMap<const BasicBlock *, std::pair<unsigned, unsigned>> BlockInstRange;
561 // Debugging for how many times each block and instruction got processed.
562 DenseMap<const Value *, unsigned> ProcessedCount;
566 // This contains a mapping from Instructions to DFS numbers.
567 // The numbering starts at 1. An instruction with DFS number zero
568 // means that the instruction is dead.
569 DenseMap<const Value *, unsigned> InstrDFS;
571 // This contains the mapping DFS numbers to instructions.
572 SmallVector<Value *, 32> DFSToInstr;
575 SmallPtrSet<Instruction *, 8> InstructionsToErase;
578 NewGVN(Function &F, DominatorTree *DT, AssumptionCache *AC,
579 TargetLibraryInfo *TLI, AliasAnalysis *AA, MemorySSA *MSSA,
580 const DataLayout &DL)
581 : F(F), DT(DT), TLI(TLI), AA(AA), MSSA(MSSA), DL(DL),
582 PredInfo(make_unique<PredicateInfo>(F, *DT, *AC)), SQ(DL, TLI, DT, AC) {
587 // Expression handling.
588 const Expression *createExpression(Instruction *) const;
589 const Expression *createBinaryExpression(unsigned, Type *, Value *,
591 PHIExpression *createPHIExpression(Instruction *, bool &HasBackEdge,
592 bool &OriginalOpsConstant) const;
593 const DeadExpression *createDeadExpression() const;
594 const VariableExpression *createVariableExpression(Value *) const;
595 const ConstantExpression *createConstantExpression(Constant *) const;
596 const Expression *createVariableOrConstant(Value *V) const;
597 const UnknownExpression *createUnknownExpression(Instruction *) const;
598 const StoreExpression *createStoreExpression(StoreInst *,
599 const MemoryAccess *) const;
600 LoadExpression *createLoadExpression(Type *, Value *, LoadInst *,
601 const MemoryAccess *) const;
602 const CallExpression *createCallExpression(CallInst *,
603 const MemoryAccess *) const;
604 const AggregateValueExpression *
605 createAggregateValueExpression(Instruction *) const;
606 bool setBasicExpressionInfo(Instruction *, BasicExpression *) const;
608 // Congruence class handling.
609 CongruenceClass *createCongruenceClass(Value *Leader, const Expression *E) {
610 auto *result = new CongruenceClass(NextCongruenceNum++, Leader, E);
611 CongruenceClasses.emplace_back(result);
615 CongruenceClass *createMemoryClass(MemoryAccess *MA) {
616 auto *CC = createCongruenceClass(nullptr, nullptr);
617 CC->setMemoryLeader(MA);
620 CongruenceClass *ensureLeaderOfMemoryClass(MemoryAccess *MA) {
621 auto *CC = getMemoryClass(MA);
622 if (CC->getMemoryLeader() != MA)
623 CC = createMemoryClass(MA);
627 CongruenceClass *createSingletonCongruenceClass(Value *Member) {
628 CongruenceClass *CClass = createCongruenceClass(Member, nullptr);
629 CClass->insert(Member);
630 ValueToClass[Member] = CClass;
633 void initializeCongruenceClasses(Function &F);
634 const Expression *makePossiblePhiOfOps(Instruction *,
635 SmallPtrSetImpl<Value *> &);
636 void addPhiOfOps(PHINode *Op, BasicBlock *BB, Instruction *ExistingValue);
638 // Value number an Instruction or MemoryPhi.
639 void valueNumberMemoryPhi(MemoryPhi *);
640 void valueNumberInstruction(Instruction *);
642 // Symbolic evaluation.
643 const Expression *checkSimplificationResults(Expression *, Instruction *,
645 const Expression *performSymbolicEvaluation(Value *,
646 SmallPtrSetImpl<Value *> &) const;
647 const Expression *performSymbolicLoadCoercion(Type *, Value *, LoadInst *,
649 MemoryAccess *) const;
650 const Expression *performSymbolicLoadEvaluation(Instruction *) const;
651 const Expression *performSymbolicStoreEvaluation(Instruction *) const;
652 const Expression *performSymbolicCallEvaluation(Instruction *) const;
653 const Expression *performSymbolicPHIEvaluation(Instruction *) const;
654 const Expression *performSymbolicAggrValueEvaluation(Instruction *) const;
655 const Expression *performSymbolicCmpEvaluation(Instruction *) const;
656 const Expression *performSymbolicPredicateInfoEvaluation(Instruction *) const;
658 // Congruence finding.
659 bool someEquivalentDominates(const Instruction *, const Instruction *) const;
660 Value *lookupOperandLeader(Value *) const;
661 void performCongruenceFinding(Instruction *, const Expression *);
662 void moveValueToNewCongruenceClass(Instruction *, const Expression *,
663 CongruenceClass *, CongruenceClass *);
664 void moveMemoryToNewCongruenceClass(Instruction *, MemoryAccess *,
665 CongruenceClass *, CongruenceClass *);
666 Value *getNextValueLeader(CongruenceClass *) const;
667 const MemoryAccess *getNextMemoryLeader(CongruenceClass *) const;
668 bool setMemoryClass(const MemoryAccess *From, CongruenceClass *To);
669 CongruenceClass *getMemoryClass(const MemoryAccess *MA) const;
670 const MemoryAccess *lookupMemoryLeader(const MemoryAccess *) const;
671 bool isMemoryAccessTOP(const MemoryAccess *) const;
674 unsigned int getRank(const Value *) const;
675 bool shouldSwapOperands(const Value *, const Value *) const;
677 // Reachability handling.
678 void updateReachableEdge(BasicBlock *, BasicBlock *);
679 void processOutgoingEdges(TerminatorInst *, BasicBlock *);
680 Value *findConditionEquivalence(Value *) const;
684 void convertClassToDFSOrdered(const CongruenceClass &,
685 SmallVectorImpl<ValueDFS> &,
686 DenseMap<const Value *, unsigned int> &,
687 SmallPtrSetImpl<Instruction *> &) const;
688 void convertClassToLoadsAndStores(const CongruenceClass &,
689 SmallVectorImpl<ValueDFS> &) const;
  // --- Instruction elimination/replacement helpers (bodies elsewhere). ---
  bool eliminateInstructions(Function &);
  void replaceInstruction(Instruction *, Value *);
  void markInstructionForDeletion(Instruction *);
  void deleteInstructionsInBlock(BasicBlock *);
  // Find a leader for a phi-of-ops expression that is usable in block BB.
  Value *findPhiOfOpsLeader(const Expression *E, const BasicBlock *BB) const;

  // New instruction creation.
  // Intentionally an empty hook in this version.
  void handleNewInstruction(Instruction *){};

  // Various instruction touch utilities
  template <typename Map, typename KeyType, typename Func>
  void for_each_found(Map &, const KeyType &, Func);
  template <typename Map, typename KeyType>
  void touchAndErase(Map &, const KeyType &);
  void markUsersTouched(Value *);
  void markMemoryUsersTouched(const MemoryAccess *);
  void markMemoryDefTouched(const MemoryAccess *);
  void markPredicateUsersTouched(Instruction *);
  void markValueLeaderChangeTouched(CongruenceClass *CC);
  void markMemoryLeaderChangeTouched(CongruenceClass *CC);
  void markPhiOfOpsChanged(const Expression *E);
  void addPredicateUsers(const PredicateBase *, Instruction *) const;
  void addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const;
  void addAdditionalUsers(Value *To, Value *User) const;

  // Main loop of value numbering
  void iterateTouchedInstructions();

  // --- Utilities: table cleanup, DFS numbering, debug-time verification. ---
  void cleanupTables();
  std::pair<unsigned, unsigned> assignDFSNumbers(BasicBlock *, unsigned);
  void updateProcessedCount(const Value *V);
  void verifyMemoryCongruency() const;
  void verifyIterationSettled(Function &F);
  void verifyStoreExpressions() const;
  bool singleReachablePHIPath(SmallPtrSet<const MemoryAccess *, 8> &,
                              const MemoryAccess *, const MemoryAccess *) const;
  BasicBlock *getBlockForValue(Value *V) const;
  void deleteExpression(const Expression *E) const;
  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
  MemoryAccess *getDefiningAccess(const MemoryAccess *) const;
  MemoryPhi *getMemoryAccess(const BasicBlock *) const;
  template <class T, class Range> T *getMinDFSOfRange(const Range &) const;

  // Map an IR instruction to its DFS number; MemoryAccesses go through the
  // MemoryToDFSNum overloads below.
  // NOTE(review): the closing braces of the next three inline bodies are
  // missing from this listing -- code kept as-is; confirm against upstream.
  unsigned InstrToDFSNum(const Value *V) const {
    assert(isa<Instruction>(V) && "This should not be used for MemoryAccesses");
    return InstrDFS.lookup(V);

  unsigned InstrToDFSNum(const MemoryAccess *MA) const {
    return MemoryToDFSNum(MA);

  Value *InstrFromDFSNum(unsigned DFSNum) { return DFSToInstr[DFSNum]; }

  // Given a MemoryAccess, return the relevant instruction DFS number.  Note:
  // This deliberately takes a value so it can be used with Use's, which will
  // auto-convert to Value's but not to MemoryAccess's.
  unsigned MemoryToDFSNum(const Value *MA) const {
    assert(isa<MemoryAccess>(MA) &&
           "This should not be used with instructions");
    return isa<MemoryUseOrDef>(MA)
               ? InstrToDFSNum(cast<MemoryUseOrDef>(MA)->getMemoryInst())
               : InstrDFS.lookup(MA);

  bool isCycleFree(const Instruction *) const;
  bool isBackedge(BasicBlock *From, BasicBlock *To) const;

  // Debug counter info. When verifying, we have to reset the value numbering
  // debug counter to the same state it started in to get the same results.
  std::pair<int, int> StartingVNCounter;
759 } // end anonymous namespace
761 template <typename T>
762 static bool equalsLoadStoreHelper(const T &LHS, const Expression &RHS) {
763 if (!isa<LoadExpression>(RHS) && !isa<StoreExpression>(RHS))
765 return LHS.MemoryExpression::equals(RHS);
768 bool LoadExpression::equals(const Expression &Other) const {
769 return equalsLoadStoreHelper(*this, Other);
772 bool StoreExpression::equals(const Expression &Other) const {
773 if (!equalsLoadStoreHelper(*this, Other))
775 // Make sure that store vs store includes the value operand.
776 if (const auto *S = dyn_cast<StoreExpression>(&Other))
777 if (getStoredValue() != S->getStoredValue())
782 // Determine if the edge From->To is a backedge
783 bool NewGVN::isBackedge(BasicBlock *From, BasicBlock *To) const {
786 auto *FromDTN = DT->getNode(From);
787 auto *ToDTN = DT->getNode(To);
788 return RPOOrdering.lookup(FromDTN) >= RPOOrdering.lookup(ToDTN);
792 static std::string getBlockName(const BasicBlock *B) {
793 return DOTGraphTraits<const Function *>::getSimpleNodeLabel(B, nullptr);
797 // Get a MemoryAccess for an instruction, fake or real.
798 MemoryUseOrDef *NewGVN::getMemoryAccess(const Instruction *I) const {
799 auto *Result = MSSA->getMemoryAccess(I);
800 return Result ? Result : TempToMemory.lookup(I);
803 // Get a MemoryPhi for a basic block. These are all real.
804 MemoryPhi *NewGVN::getMemoryAccess(const BasicBlock *BB) const {
805 return MSSA->getMemoryAccess(BB);
808 // Get the basic block from an instruction/memory value.
809 BasicBlock *NewGVN::getBlockForValue(Value *V) const {
810 if (auto *I = dyn_cast<Instruction>(V)) {
811 auto *Parent = I->getParent();
814 Parent = TempToBlock.lookup(V);
815 assert(Parent && "Every fake instruction should have a block");
819 auto *MP = dyn_cast<MemoryPhi>(V);
820 assert(MP && "Should have been an instruction or a MemoryPhi");
821 return MP->getBlock();
824 // Delete a definitely dead expression, so it can be reused by the expression
825 // allocator. Some of these are not in creation functions, so we have to accept
827 void NewGVN::deleteExpression(const Expression *E) const {
828 assert(isa<BasicExpression>(E));
829 auto *BE = cast<BasicExpression>(E);
830 const_cast<BasicExpression *>(BE)->deallocateOperands(ArgRecycler);
831 ExpressionAllocator.Deallocate(E);
// Build a PHIExpression for phi instruction I. HasBackedge is OR-ed with
// whether any reachable incoming edge is a backedge; OriginalOpsConstant is
// AND-ed with whether every surviving original operand is a Constant.
// NOTE(review): this listing is missing interleaved source lines (e.g. the
// "auto *E =" line and the lambda/filter closers) -- code kept byte-for-byte;
// confirm against upstream NewGVN.cpp before compiling.
PHIExpression *NewGVN::createPHIExpression(Instruction *I, bool &HasBackedge,
                                           bool &OriginalOpsConstant) const {
  BasicBlock *PHIBlock = getBlockForValue(I);
  auto *PN = cast<PHINode>(I);
      new (ExpressionAllocator) PHIExpression(PN->getNumOperands(), PHIBlock);

  E->allocateOperands(ArgRecycler, ExpressionAllocator);
  E->setType(I->getType());
  E->setOpcode(I->getOpcode());

  // NewGVN assumes the operands of a PHI node are in a consistent order across
  // PHIs. LLVM doesn't seem to always guarantee this. While we need to fix
  // this in LLVM at some point we don't want GVN to find wrong congruences.
  // Therefore, here we sort uses in predecessor order.
  // We're sorting the values by pointer. In theory this might be cause of
  // non-determinism, but here we don't rely on the ordering for anything
  // significant, e.g. we don't create new instructions based on it so we're
  SmallVector<const Use *, 4> PHIOperands;
  for (const Use &U : PN->operands())
    PHIOperands.push_back(&U);
  std::sort(PHIOperands.begin(), PHIOperands.end(),
            [&](const Use *U1, const Use *U2) {
              return PN->getIncomingBlock(*U1) < PN->getIncomingBlock(*U2);

  // Filter out unreachable phi operands.
  auto Filtered = make_filter_range(PHIOperands, [&](const Use *U) {
    // Operands arriving along edges not proven reachable are ignored.
    if (!ReachableEdges.count({PN->getIncomingBlock(*U), PHIBlock}))
    // Things in TOPClass are equivalent to everything.
    if (ValueToClass.lookup(*U) == TOPClass)
    // A phi feeding itself through an operand adds no information.
    if (lookupOperandLeader(*U) == PN)
  std::transform(Filtered.begin(), Filtered.end(), op_inserter(E),
                 [&](const Use *U) -> Value * {
                   auto *BB = PN->getIncomingBlock(*U);
                   HasBackedge = HasBackedge || isBackedge(BB, PHIBlock);
                   OriginalOpsConstant =
                       OriginalOpsConstant && isa<Constant>(*U);
                   return lookupOperandLeader(*U);
884 // Set basic expression info (Arguments, type, opcode) for Expression
885 // E from Instruction I in block B.
886 bool NewGVN::setBasicExpressionInfo(Instruction *I, BasicExpression *E) const {
887 bool AllConstant = true;
888 if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
889 E->setType(GEP->getSourceElementType());
891 E->setType(I->getType());
892 E->setOpcode(I->getOpcode());
893 E->allocateOperands(ArgRecycler, ExpressionAllocator);
895 // Transform the operand array into an operand leader array, and keep track of
896 // whether all members are constant.
897 std::transform(I->op_begin(), I->op_end(), op_inserter(E), [&](Value *O) {
898 auto Operand = lookupOperandLeader(O);
899 AllConstant = AllConstant && isa<Constant>(Operand);
906 const Expression *NewGVN::createBinaryExpression(unsigned Opcode, Type *T,
909 auto *E = new (ExpressionAllocator) BasicExpression(2);
912 E->setOpcode(Opcode);
913 E->allocateOperands(ArgRecycler, ExpressionAllocator);
914 if (Instruction::isCommutative(Opcode)) {
915 // Ensure that commutative instructions that only differ by a permutation
916 // of their operands get the same value number by sorting the operand value
917 // numbers. Since all commutative instructions have two operands it is more
918 // efficient to sort by hand rather than using, say, std::sort.
919 if (shouldSwapOperands(Arg1, Arg2))
920 std::swap(Arg1, Arg2);
922 E->op_push_back(lookupOperandLeader(Arg1));
923 E->op_push_back(lookupOperandLeader(Arg2));
925 Value *V = SimplifyBinOp(Opcode, E->getOperand(0), E->getOperand(1), SQ);
926 if (const Expression *SimplifiedE = checkSimplificationResults(E, nullptr, V))
// Take a Value returned by simplification of Expression E/Instruction
// I, and see if it resulted in a simpler expression. If so, return
// TODO: Once finished, this should not take an Instruction, we only
// use it for printing.
// Returns the simpler expression (constant/variable/defining expression of
// V's class) or, implicitly, falls through when no simplification applies.
// NOTE(review): the signature continuation (`Instruction *I, Value *V`),
// several guards and `return` lines are missing from this listing -- code
// kept byte-for-byte; confirm against upstream NewGVN.cpp.
const Expression *NewGVN::checkSimplificationResults(Expression *E,
  if (auto *C = dyn_cast<Constant>(V)) {
      DEBUG(dbgs() << "Simplified " << *I << " to "
                   << " constant " << *C << "\n");
    NumGVNOpsSimplified++;
    assert(isa<BasicExpression>(E) &&
           "We should always have had a basic expression here");
    // The original expression is dead; recycle it.
    return createConstantExpression(C);
  } else if (isa<Argument>(V) || isa<GlobalVariable>(V)) {
      DEBUG(dbgs() << "Simplified " << *I << " to "
                   << " variable " << *V << "\n");
    return createVariableExpression(V);

  CongruenceClass *CC = ValueToClass.lookup(V);
  if (CC && CC->getDefiningExpr()) {
    // If we simplified to something else, we need to communicate
    // that we're users of the value we simplified to.
    // Don't add temporary instructions to the user lists.
    if (!AllTempInstructions.count(I))
      addAdditionalUsers(V, I);
      DEBUG(dbgs() << "Simplified " << *I << " to "
                   << " expression " << *CC->getDefiningExpr() << "\n");
    NumGVNOpsSimplified++;
    return CC->getDefiningExpr();
// Create a BasicExpression for instruction I: canonicalize commutative
// operand order, then attempt per-opcode InstSimplify/constant folding via
// checkSimplificationResults.
// NOTE(review): multiple `return SimplifiedE;` lines, `Value *V =` prefixes
// and closing braces are missing from this listing -- code kept byte-for-byte;
// confirm against upstream NewGVN.cpp.
const Expression *NewGVN::createExpression(Instruction *I) const {
  auto *E = new (ExpressionAllocator) BasicExpression(I->getNumOperands());

  bool AllConstant = setBasicExpressionInfo(I, E);

  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (shouldSwapOperands(E->getOperand(0), E->getOperand(1)))
      E->swapOperands(0, 1);

  // Perform simplificaiton
  // TODO: Right now we only check to see if we get a constant result.
  // We may get a less than constant, but still better, result for
  // We should handle this by simply rewriting the expression.
  if (auto *CI = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value
    CmpInst::Predicate Predicate = CI->getPredicate();
    if (shouldSwapOperands(E->getOperand(0), E->getOperand(1))) {
      E->swapOperands(0, 1);
      // Swapping operands requires swapping the predicate too.
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    // Cmp opcodes are keyed on (opcode, predicate) packed into one word.
    E->setOpcode((CI->getOpcode() << 8) | Predicate);
    // TODO: 25% of our time is spent in SimplifyCmpInst with pointer operands
    assert(I->getOperand(0)->getType() == I->getOperand(1)->getType() &&
           "Wrong types on cmp instruction");
    assert((E->getOperand(0)->getType() == I->getOperand(0)->getType() &&
            E->getOperand(1)->getType() == I->getOperand(1)->getType()));
        SimplifyCmpInst(Predicate, E->getOperand(0), E->getOperand(1), SQ);
    if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
  } else if (isa<SelectInst>(I)) {
    if (isa<Constant>(E->getOperand(0)) ||
        E->getOperand(0) == E->getOperand(1)) {
      assert(E->getOperand(1)->getType() == I->getOperand(1)->getType() &&
             E->getOperand(2)->getType() == I->getOperand(2)->getType());
      Value *V = SimplifySelectInst(E->getOperand(0), E->getOperand(1),
                                    E->getOperand(2), SQ);
      if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
  } else if (I->isBinaryOp()) {
      SimplifyBinOp(E->getOpcode(), E->getOperand(0), E->getOperand(1), SQ);
    if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
  } else if (auto *BI = dyn_cast<BitCastInst>(I)) {
      SimplifyCastInst(BI->getOpcode(), BI->getOperand(0), BI->getType(), SQ);
    if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
  } else if (isa<GetElementPtrInst>(I)) {
    Value *V = SimplifyGEPInst(
        E->getType(), ArrayRef<Value *>(E->op_begin(), E->op_end()), SQ);
    if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
  } else if (AllConstant) {
    // We don't bother trying to simplify unless all of the operands
    // TODO: There are a lot of Simplify*'s we could call here, if we
    // wanted to. The original motivating case for this code was a
    // zext i1 false to i8, which we don't have an interface to
    // simplify (IE there is no SimplifyZExt).

    SmallVector<Constant *, 8> C;
    for (Value *Arg : E->operands())
      C.emplace_back(cast<Constant>(Arg));

    if (Value *V = ConstantFoldInstOperands(I, C, DL, TLI))
      if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
1063 const AggregateValueExpression *
1064 NewGVN::createAggregateValueExpression(Instruction *I) const {
1065 if (auto *II = dyn_cast<InsertValueInst>(I)) {
1066 auto *E = new (ExpressionAllocator)
1067 AggregateValueExpression(I->getNumOperands(), II->getNumIndices());
1068 setBasicExpressionInfo(I, E);
1069 E->allocateIntOperands(ExpressionAllocator);
1070 std::copy(II->idx_begin(), II->idx_end(), int_op_inserter(E));
1072 } else if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
1073 auto *E = new (ExpressionAllocator)
1074 AggregateValueExpression(I->getNumOperands(), EI->getNumIndices());
1075 setBasicExpressionInfo(EI, E);
1076 E->allocateIntOperands(ExpressionAllocator);
1077 std::copy(EI->idx_begin(), EI->idx_end(), int_op_inserter(E));
1080 llvm_unreachable("Unhandled type of aggregate value operation");
1083 const DeadExpression *NewGVN::createDeadExpression() const {
1084 // DeadExpression has no arguments and all DeadExpression's are the same,
1085 // so we only need one of them.
1086 return SingletonDeadExpression;
1089 const VariableExpression *NewGVN::createVariableExpression(Value *V) const {
1090 auto *E = new (ExpressionAllocator) VariableExpression(V);
1091 E->setOpcode(V->getValueID());
1095 const Expression *NewGVN::createVariableOrConstant(Value *V) const {
1096 if (auto *C = dyn_cast<Constant>(V))
1097 return createConstantExpression(C);
1098 return createVariableExpression(V);
1101 const ConstantExpression *NewGVN::createConstantExpression(Constant *C) const {
1102 auto *E = new (ExpressionAllocator) ConstantExpression(C);
1103 E->setOpcode(C->getValueID());
1107 const UnknownExpression *NewGVN::createUnknownExpression(Instruction *I) const {
1108 auto *E = new (ExpressionAllocator) UnknownExpression(I);
1109 E->setOpcode(I->getOpcode());
1113 const CallExpression *
1114 NewGVN::createCallExpression(CallInst *CI, const MemoryAccess *MA) const {
1115 // FIXME: Add operand bundles for calls.
1117 new (ExpressionAllocator) CallExpression(CI->getNumOperands(), CI, MA);
1118 setBasicExpressionInfo(CI, E);
1122 // Return true if some equivalent of instruction Inst dominates instruction U.
1123 bool NewGVN::someEquivalentDominates(const Instruction *Inst,
1124 const Instruction *U) const {
1125 auto *CC = ValueToClass.lookup(Inst);
1126 // This must be an instruction because we are only called from phi nodes
1127 // in the case that the value it needs to check against is an instruction.
1129 // The most likely candiates for dominance are the leader and the next leader.
1130 // The leader or nextleader will dominate in all cases where there is an
1131 // equivalent that is higher up in the dom tree.
1132 // We can't *only* check them, however, because the
1133 // dominator tree could have an infinite number of non-dominating siblings
1134 // with instructions that are in the right congruence class.
1139 // Instruction U could be in H, with equivalents in every other sibling.
1140 // Depending on the rpo order picked, the leader could be the equivalent in
1141 // any of these siblings.
1144 if (DT->dominates(cast<Instruction>(CC->getLeader()), U))
1146 if (CC->getNextLeader().first &&
1147 DT->dominates(cast<Instruction>(CC->getNextLeader().first), U))
1149 return llvm::any_of(*CC, [&](const Value *Member) {
1150 return Member != CC->getLeader() &&
1151 DT->dominates(cast<Instruction>(Member), U);
1155 // See if we have a congruence class and leader for this operand, and if so,
1156 // return it. Otherwise, return the operand itself.
1157 Value *NewGVN::lookupOperandLeader(Value *V) const {
1158 CongruenceClass *CC = ValueToClass.lookup(V);
1160 // Everything in TOP is represented by undef, as it can be any value.
1161 // We do have to make sure we get the type right though, so we can't set the
1162 // RepLeader to undef.
1164 return UndefValue::get(V->getType());
1165 return CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
1171 const MemoryAccess *NewGVN::lookupMemoryLeader(const MemoryAccess *MA) const {
1172 auto *CC = getMemoryClass(MA);
1173 assert(CC->getMemoryLeader() &&
1174 "Every MemoryAccess should be mapped to a congruence class with a "
1175 "representative memory access");
1176 return CC->getMemoryLeader();
1179 // Return true if the MemoryAccess is really equivalent to everything. This is
1180 // equivalent to the lattice value "TOP" in most lattices. This is the initial
1181 // state of all MemoryAccesses.
1182 bool NewGVN::isMemoryAccessTOP(const MemoryAccess *MA) const {
1183 return getMemoryClass(MA) == TOPClass;
1186 LoadExpression *NewGVN::createLoadExpression(Type *LoadType, Value *PointerOp,
1188 const MemoryAccess *MA) const {
1190 new (ExpressionAllocator) LoadExpression(1, LI, lookupMemoryLeader(MA));
1191 E->allocateOperands(ArgRecycler, ExpressionAllocator);
1192 E->setType(LoadType);
1194 // Give store and loads same opcode so they value number together.
1196 E->op_push_back(PointerOp);
1198 E->setAlignment(LI->getAlignment());
1200 // TODO: Value number heap versions. We may be able to discover
1201 // things alias analysis can't on it's own (IE that a store and a
1202 // load have the same value, and thus, it isn't clobbering the load).
1206 const StoreExpression *
1207 NewGVN::createStoreExpression(StoreInst *SI, const MemoryAccess *MA) const {
1208 auto *StoredValueLeader = lookupOperandLeader(SI->getValueOperand());
1209 auto *E = new (ExpressionAllocator)
1210 StoreExpression(SI->getNumOperands(), SI, StoredValueLeader, MA);
1211 E->allocateOperands(ArgRecycler, ExpressionAllocator);
1212 E->setType(SI->getValueOperand()->getType());
1214 // Give store and loads same opcode so they value number together.
1216 E->op_push_back(lookupOperandLeader(SI->getPointerOperand()));
1218 // TODO: Value number heap versions. We may be able to discover
1219 // things alias analysis can't on it's own (IE that a store and a
1220 // load have the same value, and thus, it isn't clobbering the load).
// Symbolically evaluate a store: try to prove it redundant against a prior
// store of the same value, or against the load it stores back; otherwise give
// it a unique memory state.
// NOTE(review): several `return` lines and closing braces inside the
// conditions are missing from this listing -- code kept byte-for-byte;
// confirm against upstream NewGVN.cpp.
const Expression *NewGVN::performSymbolicStoreEvaluation(Instruction *I) const {
  // Unlike loads, we never try to eliminate stores, so we do not check if they
  // are simple and avoid value numbering them.
  auto *SI = cast<StoreInst>(I);
  auto *StoreAccess = getMemoryAccess(SI);
  // Get the expression, if any, for the RHS of the MemoryDef.
  const MemoryAccess *StoreRHS = StoreAccess->getDefiningAccess();
  if (EnableStoreRefinement)
    StoreRHS = MSSAWalker->getClobberingMemoryAccess(StoreAccess);
  // If we bypassed the use-def chains, make sure we add a use.
  if (StoreRHS != StoreAccess->getDefiningAccess())
    addMemoryUsers(StoreRHS, StoreAccess);
  StoreRHS = lookupMemoryLeader(StoreRHS);
  // If we are defined by ourselves, use the live on entry def.
  if (StoreRHS == StoreAccess)
    StoreRHS = MSSA->getLiveOnEntryDef();

  if (SI->isSimple()) {
    // See if we are defined by a previous store expression, it already has a
    // value, and it's the same value as our current store. FIXME: Right now, we
    // only do this for simple stores, we should expand to cover memcpys, etc.
    const auto *LastStore = createStoreExpression(SI, StoreRHS);
    const auto *LastCC = ExpressionToClass.lookup(LastStore);
    // We really want to check whether the expression we matched was a store. No
    // easy way to do that. However, we can check that the class we found has a
    // store, which, assuming the value numbering state is not corrupt, is
    // sufficient, because we must also be equivalent to that store's expression
    // for it to be in the same class as the load.
    if (LastCC && LastCC->getStoredValue() == LastStore->getStoredValue())
    // Also check if our value operand is defined by a load of the same memory
    // location, and the memory state is the same as it was then (otherwise, it
    // could have been overwritten later. See test32 in
    // transforms/DeadStoreElimination/simple.ll).
    if (auto *LI = dyn_cast<LoadInst>(LastStore->getStoredValue()))
      if ((lookupOperandLeader(LI->getPointerOperand()) ==
           LastStore->getOperand(0)) &&
          (lookupMemoryLeader(getMemoryAccess(LI)->getDefiningAccess()) ==
    // The speculatively-built expression was not a match; recycle it.
    deleteExpression(LastStore);

  // If the store is not equivalent to anything, value number it as a store that
  // produces a unique memory state (instead of using it's MemoryUse, we use
  return createStoreExpression(SI, StoreAccess);
// See if we can extract the value of a loaded pointer from a load, a store, or
// a memory instruction.
// Tries constant-forwarding from a clobbering store, load, or mem-intrinsic,
// then handles loads from fresh allocations (undef), lifetime.start (undef)
// and calloc-like functions (zero).
// NOTE(review): the `const Expression *` return-type line, several offset
// checks (`if (Offset >= 0)`) and closing braces are missing from this
// listing -- code kept byte-for-byte; confirm against upstream NewGVN.cpp.
NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
                                    LoadInst *LI, Instruction *DepInst,
                                    MemoryAccess *DefiningAccess) const {
  assert((!LI || LI->isSimple()) && "Not a simple load");
  if (auto *DepSI = dyn_cast<StoreInst>(DepInst)) {
    // Can't forward from non-atomic to atomic without violating memory model.
    // Also don't need to coerce if they are the same type, we will just
    if (LI->isAtomic() > DepSI->isAtomic() ||
        LoadType == DepSI->getValueOperand()->getType())
    int Offset = analyzeLoadFromClobberingStore(LoadType, LoadPtr, DepSI, DL);
      if (auto *C = dyn_cast<Constant>(
              lookupOperandLeader(DepSI->getValueOperand()))) {
        DEBUG(dbgs() << "Coercing load from store " << *DepSI << " to constant "
        return createConstantExpression(
            getConstantStoreValueForLoad(C, Offset, LoadType, DL));
  } else if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    // Can't forward from non-atomic to atomic without violating memory model.
    if (LI->isAtomic() > DepLI->isAtomic())
    int Offset = analyzeLoadFromClobberingLoad(LoadType, LoadPtr, DepLI, DL);
      // We can coerce a constant load into a load
      if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI)))
        if (auto *PossibleConstant =
                getConstantLoadValueForLoad(C, Offset, LoadType, DL)) {
          DEBUG(dbgs() << "Coercing load from load " << *LI << " to constant "
                       << *PossibleConstant << "\n");
          return createConstantExpression(PossibleConstant);
  } else if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
    int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
      if (auto *PossibleConstant =
              getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL)) {
        DEBUG(dbgs() << "Coercing load from meminst " << *DepMI
                     << " to constant " << *PossibleConstant << "\n");
        return createConstantExpression(PossibleConstant);

  // All of the below are only true if the loaded pointer is produced
  // by the dependent instruction.
  if (LoadPtr != lookupOperandLeader(DepInst) &&
      !AA->isMustAlias(LoadPtr, DepInst))
  // If this load really doesn't depend on anything, then we must be loading an
  // undef value. This can happen when loading for a fresh allocation with no
  // intervening stores, for example. Note that this is only true in the case
  // that the result of the allocation is pointer equal to the load ptr.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) {
    return createConstantExpression(UndefValue::get(LoadType));
  }
  // If this load occurs either right after a lifetime begin,
  // then the loaded value is undefined.
  else if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      return createConstantExpression(UndefValue::get(LoadType));
  }
  // If this load follows a calloc (which zero initializes memory),
  // then the loaded value is zero
  else if (isCallocLikeFn(DepInst, TLI)) {
    return createConstantExpression(Constant::getNullValue(LoadType));
  }
// Symbolically evaluate a load: resolve its pointer and clobbering memory
// access, try load coercion against the clobbering def, otherwise build a
// LoadExpression.
// NOTE(review): the early `return nullptr;` after the isSimple() check, the
// closing braces and the final return are missing from this listing -- code
// kept byte-for-byte; confirm against upstream NewGVN.cpp.
const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) const {
  auto *LI = cast<LoadInst>(I);

  // We can eliminate in favor of non-simple loads, but we won't be able to
  // eliminate the loads themselves.
  if (!LI->isSimple())

  Value *LoadAddressLeader = lookupOperandLeader(LI->getPointerOperand());
  // Load of undef is undef.
  if (isa<UndefValue>(LoadAddressLeader))
    return createConstantExpression(UndefValue::get(LI->getType()));
  MemoryAccess *OriginalAccess = getMemoryAccess(I);
  MemoryAccess *DefiningAccess =
      MSSAWalker->getClobberingMemoryAccess(OriginalAccess);

  if (!MSSA->isLiveOnEntryDef(DefiningAccess)) {
    if (auto *MD = dyn_cast<MemoryDef>(DefiningAccess)) {
      Instruction *DefiningInst = MD->getMemoryInst();
      // If the defining instruction is not reachable, replace with undef.
      if (!ReachableBlocks.count(DefiningInst->getParent()))
        return createConstantExpression(UndefValue::get(LI->getType()));
      // This will handle stores and memory insts. We only do if it the
      // defining access has a different type, or it is a pointer produced by
      // certain memory operations that cause the memory to have a fixed value
      // (IE things like calloc).
      if (const auto *CoercionResult =
              performSymbolicLoadCoercion(LI->getType(), LoadAddressLeader, LI,
                                          DefiningInst, DefiningAccess))
        return CoercionResult;

  const Expression *E = createLoadExpression(LI->getType(), LoadAddressLeader,
                                             LI, DefiningAccess);
// Evaluate an ssa_copy intrinsic using PredicateInfo: a copy of the branch
// or assume condition itself folds to true/false/case-value; a copy of a
// comparison operand may fold to the other operand when the predicate
// implies equality on the taken edge.
// NOTE(review): the `const Expression *` return-type line and several guard/
// `return nullptr;` lines are missing from this listing -- code kept
// byte-for-byte; confirm against upstream NewGVN.cpp.
NewGVN::performSymbolicPredicateInfoEvaluation(Instruction *I) const {
  auto *PI = PredInfo->getPredicateInfoFor(I);

  DEBUG(dbgs() << "Found predicate info from instruction !\n");

  auto *PWC = dyn_cast<PredicateWithCondition>(PI);

  auto *CopyOf = I->getOperand(0);
  auto *Cond = PWC->Condition;

  // If this a copy of the condition, it must be either true or false depending
  // on the predicate info type and edge
  if (CopyOf == Cond) {
    // We should not need to add predicate users because the predicate info is
    // already a use of this operand.
    if (isa<PredicateAssume>(PI))
      return createConstantExpression(ConstantInt::getTrue(Cond->getType()));
    if (auto *PBranch = dyn_cast<PredicateBranch>(PI)) {
      if (PBranch->TrueEdge)
        return createConstantExpression(ConstantInt::getTrue(Cond->getType()));
      return createConstantExpression(ConstantInt::getFalse(Cond->getType()));
    }
    if (auto *PSwitch = dyn_cast<PredicateSwitch>(PI))
      return createConstantExpression(cast<Constant>(PSwitch->CaseValue));

  // Not a copy of the condition, so see what the predicates tell us about this
  // value. First, though, we check to make sure the value is actually a copy
  // of one of the condition operands. It's possible, in certain cases, for it
  // to be a copy of a predicateinfo copy. In particular, if two branch
  // operations use the same condition, and one branch dominates the other, we
  // will end up with a copy of a copy. This is currently a small deficiency in
  // predicateinfo. What will end up happening here is that we will value
  // number both copies the same anyway.

  // Everything below relies on the condition being a comparison.
  auto *Cmp = dyn_cast<CmpInst>(Cond);

  if (CopyOf != Cmp->getOperand(0) && CopyOf != Cmp->getOperand(1)) {
    DEBUG(dbgs() << "Copy is not of any condition operands!\n");

  Value *FirstOp = lookupOperandLeader(Cmp->getOperand(0));
  Value *SecondOp = lookupOperandLeader(Cmp->getOperand(1));
  bool SwappedOps = false;
  // Canonicalize operand order for predicate lookup.
  if (shouldSwapOperands(FirstOp, SecondOp)) {
    std::swap(FirstOp, SecondOp);

  CmpInst::Predicate Predicate =
      SwappedOps ? Cmp->getSwappedPredicate() : Cmp->getPredicate();

  if (isa<PredicateAssume>(PI)) {
    // If the comparison is true when the operands are equal, then we know the
    // operands are equal, because assumes must always be true.
    if (CmpInst::isTrueWhenEqual(Predicate)) {
      addPredicateUsers(PI, I);
      addAdditionalUsers(Cmp->getOperand(0), I);
      return createVariableOrConstant(FirstOp);

  if (const auto *PBranch = dyn_cast<PredicateBranch>(PI)) {
    // If we are *not* a copy of the comparison, we may equal to the other
    // operand when the predicate implies something about equality of
    // operations. In particular, if the comparison is true/false when the
    // operands are equal, and we are on the right edge, we know this operation
    // is equal to something.
    if ((PBranch->TrueEdge && Predicate == CmpInst::ICMP_EQ) ||
        (!PBranch->TrueEdge && Predicate == CmpInst::ICMP_NE)) {
      addPredicateUsers(PI, I);
      addAdditionalUsers(Cmp->getOperand(0), I);
      return createVariableOrConstant(FirstOp);

    // Handle the special case of floating point.
    if (((PBranch->TrueEdge && Predicate == CmpInst::FCMP_OEQ) ||
         (!PBranch->TrueEdge && Predicate == CmpInst::FCMP_UNE)) &&
        isa<ConstantFP>(FirstOp) && !cast<ConstantFP>(FirstOp)->isZero()) {
      addPredicateUsers(PI, I);
      addAdditionalUsers(Cmp->getOperand(0), I);
      return createConstantExpression(cast<Constant>(FirstOp));
1484 // Evaluate read only and pure calls, and create an expression result.
1485 const Expression *NewGVN::performSymbolicCallEvaluation(Instruction *I) const {
1486 auto *CI = cast<CallInst>(I);
1487 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1488 // Instrinsics with the returned attribute are copies of arguments.
1489 if (auto *ReturnedValue = II->getReturnedArgOperand()) {
1490 if (II->getIntrinsicID() == Intrinsic::ssa_copy)
1491 if (const auto *Result = performSymbolicPredicateInfoEvaluation(I))
1493 return createVariableOrConstant(ReturnedValue);
1496 if (AA->doesNotAccessMemory(CI)) {
1497 return createCallExpression(CI, TOPClass->getMemoryLeader());
1498 } else if (AA->onlyReadsMemory(CI)) {
1499 MemoryAccess *DefiningAccess = MSSAWalker->getClobberingMemoryAccess(CI);
1500 return createCallExpression(CI, DefiningAccess);
1505 // Retrieve the memory class for a given MemoryAccess.
1506 CongruenceClass *NewGVN::getMemoryClass(const MemoryAccess *MA) const {
1508 auto *Result = MemoryAccessToClass.lookup(MA);
1509 assert(Result && "Should have found memory class");
// Update the MemoryAccess equivalence table to say that From is equal to To,
// and return true if this is different from what already existed in the table.
// NOTE(review): the leading assert line, the `} else {`, the `Changed = true;`
// updates and closing braces are missing from this listing -- code kept
// byte-for-byte; confirm against upstream NewGVN.cpp.
bool NewGVN::setMemoryClass(const MemoryAccess *From,
                            CongruenceClass *NewClass) {
         "Every MemoryAccess should be getting mapped to a non-null class");
  DEBUG(dbgs() << "Setting " << *From);
  DEBUG(dbgs() << " equivalent to congruence class ");
  DEBUG(dbgs() << NewClass->getID() << " with current MemoryAccess leader ");
  DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n");

  auto LookupResult = MemoryAccessToClass.find(From);
  bool Changed = false;
  // If it's already in the table, see if the value changed.
  if (LookupResult != MemoryAccessToClass.end()) {
    auto *OldClass = LookupResult->second;
    if (OldClass != NewClass) {
      // If this is a phi, we have to handle memory member updates.
      if (auto *MP = dyn_cast<MemoryPhi>(From)) {
        OldClass->memory_erase(MP);
        NewClass->memory_insert(MP);
        // This may have killed the class if it had no non-memory members
        if (OldClass->getMemoryLeader() == From) {
          if (OldClass->definesNoMemory()) {
            OldClass->setMemoryLeader(nullptr);
            // Otherwise, pick a replacement leader and retouch dependents.
            OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
            DEBUG(dbgs() << "Memory class leader change for class "
                         << OldClass->getID() << " to "
                         << *OldClass->getMemoryLeader()
                         << " due to removal of a memory member " << *From
            markMemoryLeaderChangeTouched(OldClass);
      // It wasn't equivalent before, and now it is.
      LookupResult->second = NewClass;
// Determine if a instruction is cycle-free. That means the values in the
// instruction don't depend on any expressions that can change value as a result
// of the instruction. For example, a non-cycle free instruction would be v =
// phi(v+1).
// NOTE(review): the SCC-finder start call, the `else {` branch structure and
// the final returns are missing from this listing -- code kept byte-for-byte;
// confirm against upstream NewGVN.cpp.
bool NewGVN::isCycleFree(const Instruction *I) const {
  // In order to compute cycle-freeness, we do SCC finding on the instruction,
  // and see what kind of SCC it ends up in. If it is a singleton, it is
  // cycle-free. If it is not in a singleton, it is only cycle free if the
  // other members are all phi nodes (as they do not compute anything, they are
  auto ICS = InstCycleState.lookup(I);
  if (ICS == ICS_Unknown) {
    auto &SCC = SCCFinder.getComponentFor(I);
    // It's cycle free if it's size 1 or or the SCC is *only* phi nodes.
    if (SCC.size() == 1)
      InstCycleState.insert({I, ICS_CycleFree});
        llvm::all_of(SCC, [](const Value *V) { return isa<PHINode>(V); });
    ICS = AllPhis ? ICS_CycleFree : ICS_Cycle;
    // Cache the state for every phi in the component.
    for (auto *Member : SCC)
      if (auto *MemberPhi = dyn_cast<PHINode>(Member))
        InstCycleState.insert({MemberPhi, ICS});

  if (ICS == ICS_Cycle)
// NOTE(review): several original lines are elided in this excerpt (e.g. 1598,
// 1605-1610, 1615, 1619-1620, 1627, 1630, 1632, 1642, 1648, 1651-1652,
// 1656-1657, 1663, 1666, 1669+); visible code is kept byte-identical.
1589 // Evaluate PHI nodes symbolically, and create an expression result.
1590 const Expression *NewGVN::performSymbolicPHIEvaluation(Instruction *I) const {
1591 // True if one of the incoming phi edges is a backedge.
1592 bool HasBackedge = false;
1593 // All constant tracks the state of whether all the *original* phi operands
1594 // This is really shorthand for "this phi cannot cycle due to forward
1595 // change in value of the phi is guaranteed not to later change the value of
1596 // the phi. IE it can't be v = phi(undef, v+1)
1597 bool AllConstant = true;
1599 cast<PHIExpression>(createPHIExpression(I, HasBackedge, AllConstant));
1600 // We match the semantics of SimplifyPhiNode from InstructionSimplify here.
1601 // See if all arguments are the same.
1602 // We track if any were undef because they need special handling.
1603 bool HasUndef = false;
// Filter the phi operands down to the interesting (non-undef, live) ones.
1604 auto Filtered = make_filter_range(E->operands(), [&](Value *Arg) {
1605 if (isa<UndefValue>(Arg)) {
1611 // If we are left with no operands, it's dead.
1612 if (Filtered.begin() == Filtered.end()) {
1613 // If it has undef at this point, it means there are no non-undef arguments,
1614 // and thus, the value of the phi node must be undef.
1616 DEBUG(dbgs() << "PHI Node " << *I
1617 << " has no non-undef arguments, valuing it as undef\n");
1618 return createConstantExpression(UndefValue::get(I->getType()));
1621 DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n");
1622 deleteExpression(E);
1623 return createDeadExpression();
1625 unsigned NumOps = 0;
1626 Value *AllSameValue = *(Filtered.begin());
1628 // Can't use std::equal here, sadly, because filter.begin moves.
1629 if (llvm::all_of(Filtered, [&](Value *Arg) {
1631 return Arg == AllSameValue;
1633 // In LLVM's non-standard representation of phi nodes, it's possible to have
1634 // phi nodes with cycles (IE dependent on other phis that are .... dependent
1635 // on the original phi node), especially in weird CFG's where some arguments
1636 // are unreachable, or uninitialized along certain paths. This can cause
1637 // infinite loops during evaluation. We work around this by not trying to
1638 // really evaluate them independently, but instead using a variable
1639 // expression to say if one is equivalent to the other.
1640 // We also special case undef, so that if we have an undef, we can't use the
1641 // common value unless it dominates the phi block.
1643 // If we have undef and at least one other value, this is really a
1644 // multivalued phi, and we need to know if it's cycle free in order to
1645 // evaluate whether we can ignore the undef. The other parts of this are
1646 // just shortcuts. If there is no backedge, or all operands are
1647 // constants, or all operands are ignored but the undef, it also must be
1649 if (!AllConstant && HasBackedge && NumOps > 0 &&
1650 !isa<UndefValue>(AllSameValue) && !isCycleFree(I))
1653 // Only have to check for instructions
1654 if (auto *AllSameInst = dyn_cast<Instruction>(AllSameValue))
1655 if (!someEquivalentDominates(AllSameInst, I))
1658 // Can't simplify to something that comes later in the iteration.
1659 // Otherwise, when and if it changes congruence class, we will never catch
1660 // up. We will always be a class behind it.
1661 if (isa<Instruction>(AllSameValue) &&
1662 InstrToDFSNum(AllSameValue) > InstrToDFSNum(I))
1664 NumGVNPhisAllSame++;
1665 DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue
1667 deleteExpression(E);
1668 return createVariableOrConstant(AllSameValue);
// Symbolically evaluate extractvalue/insertvalue. An extract of index 0 from a
// recognized with-overflow intrinsic is rewritten as the plain binary op.
// NOTE(review): the return-type line (original 1673) and some lines (1686,
// 1690, 1694-1699, 1701, 1706-1709) are elided in this excerpt.
1674 NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const {
1675 if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
1676 auto *II = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
1677 if (II && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
1678 unsigned Opcode = 0;
1679 // EI might be an extract from one of our recognised intrinsics. If it
1680 // is we'll synthesize a semantically equivalent expression instead on
1681 // an extract value expression.
1682 switch (II->getIntrinsicID()) {
1683 case Intrinsic::sadd_with_overflow:
1684 case Intrinsic::uadd_with_overflow:
1685 Opcode = Instruction::Add;
1687 case Intrinsic::ssub_with_overflow:
1688 case Intrinsic::usub_with_overflow:
1689 Opcode = Instruction::Sub;
1691 case Intrinsic::smul_with_overflow:
1692 case Intrinsic::umul_with_overflow:
1693 Opcode = Instruction::Mul;
1700 // Intrinsic recognized. Grab its args to finish building the
1702 assert(II->getNumArgOperands() == 2 &&
1703 "Expect two args for recognised intrinsics.");
1704 return createBinaryExpression(
1705 Opcode, EI->getType(), II->getArgOperand(0), II->getArgOperand(1));
// Fallback: treat it as an opaque aggregate-value expression.
1710 return createAggregateValueExpression(I);
// Symbolically evaluate a comparison, using PredicateInfo about the operands
// to fold it to a constant when an earlier dominating comparison implies it.
// NOTE(review): some original lines are elided in this excerpt (e.g. 1722-1723,
// 1727, 1731-1732, 1738-1739, 1746-1747, 1749, 1751, 1756, 1762, 1769-1771,
// 1776-1777, 1784, 1790, 1794-1795, 1797, 1801-1803, 1817-1821).
1712 const Expression *NewGVN::performSymbolicCmpEvaluation(Instruction *I) const {
1713 auto *CI = dyn_cast<CmpInst>(I);
1714 // See if our operands are equal to those of a previous predicate, and if so,
1715 // if it implies true or false.
1716 auto Op0 = lookupOperandLeader(CI->getOperand(0));
1717 auto Op1 = lookupOperandLeader(CI->getOperand(1));
1718 auto OurPredicate = CI->getPredicate();
// Canonicalize operand order so equivalent comparisons compare equal.
1719 if (shouldSwapOperands(Op0, Op1)) {
1720 std::swap(Op0, Op1);
1721 OurPredicate = CI->getSwappedPredicate();
1724 // Avoid processing the same info twice
1725 const PredicateBase *LastPredInfo = nullptr;
1726 // See if we know something about the comparison itself, like it is the target
// NOTE(review): result of dyn_cast_or_null is only used as a boolean here;
// isa_and_nonnull-style check — confirm this matches upstream intent.
1728 auto *CmpPI = PredInfo->getPredicateInfoFor(I);
1729 if (dyn_cast_or_null<PredicateAssume>(CmpPI))
1730 return createConstantExpression(ConstantInt::getTrue(CI->getType()));
1733 // This condition does not depend on predicates, no need to add users
1734 if (CI->isTrueWhenEqual())
1735 return createConstantExpression(ConstantInt::getTrue(CI->getType()));
1736 else if (CI->isFalseWhenEqual())
1737 return createConstantExpression(ConstantInt::getFalse(CI->getType()));
1740 // NOTE: Because we are comparing both operands here and below, and using
1741 // previous comparisons, we rely on fact that predicateinfo knows to mark
1742 // comparisons that use renamed operands as users of the earlier comparisons.
1743 // It is *not* enough to just mark predicateinfo renamed operands as users of
1744 // the earlier comparisons, because the *other* operand may have changed in a
1745 // previous iteration.
1748 // %b.0 = ssa.copy(%b)
1750 // icmp slt %c, %b.0
1752 // %c and %a may start out equal, and thus, the code below will say the second
1753 // %icmp is false. c may become equal to something else, and in that case the
1754 // %second icmp *must* be reexamined, but would not if only the renamed
1755 // %operands are considered users of the icmp.
1757 // *Currently* we only check one level of comparisons back, and only mark one
1758 // level back as touched when changes happen. If you modify this code to look
1759 // back farther through comparisons, you *must* mark the appropriate
1760 // comparisons as users in PredicateInfo.cpp, or you will cause bugs. See if
1761 // we know something just from the operands themselves
1763 // See if our operands have predicate info, so that we may be able to derive
1764 // something from a previous comparison.
1765 for (const auto &Op : CI->operands()) {
1766 auto *PI = PredInfo->getPredicateInfoFor(Op);
1767 if (const auto *PBranch = dyn_cast_or_null<PredicateBranch>(PI)) {
1768 if (PI == LastPredInfo)
1772 // TODO: Along the false edge, we may know more things too, like icmp of
1773 // same operands is false.
1774 // TODO: We only handle actual comparison conditions below, not and/or.
1775 auto *BranchCond = dyn_cast<CmpInst>(PBranch->Condition);
// Canonicalize the dominating branch comparison the same way as ours.
1778 auto *BranchOp0 = lookupOperandLeader(BranchCond->getOperand(0));
1779 auto *BranchOp1 = lookupOperandLeader(BranchCond->getOperand(1));
1780 auto BranchPredicate = BranchCond->getPredicate();
1781 if (shouldSwapOperands(BranchOp0, BranchOp1)) {
1782 std::swap(BranchOp0, BranchOp1);
1783 BranchPredicate = BranchCond->getSwappedPredicate();
1785 if (BranchOp0 == Op0 && BranchOp1 == Op1) {
1786 if (PBranch->TrueEdge) {
1787 // If we know the previous predicate is true and we are in the true
1788 // edge then we may be implied true or false.
1789 if (CmpInst::isImpliedTrueByMatchingCmp(BranchPredicate,
1791 addPredicateUsers(PI, I);
1792 return createConstantExpression(
1793 ConstantInt::getTrue(CI->getType()));
1796 if (CmpInst::isImpliedFalseByMatchingCmp(BranchPredicate,
1798 addPredicateUsers(PI, I);
1799 return createConstantExpression(
1800 ConstantInt::getFalse(CI->getType()));
1804 // Just handle the ne and eq cases, where if we have the same
1805 // operands, we may know something.
1806 if (BranchPredicate == OurPredicate) {
1807 addPredicateUsers(PI, I);
1808 // Same predicate, same ops, we know it was false, so this is false.
1809 return createConstantExpression(
1810 ConstantInt::getFalse(CI->getType()));
1811 } else if (BranchPredicate ==
1812 CmpInst::getInversePredicate(OurPredicate)) {
1813 addPredicateUsers(PI, I);
1814 // Inverse predicate, we know the other was false, so this is true.
1815 return createConstantExpression(
1816 ConstantInt::getTrue(CI->getType()));
1822 // Create expression will take care of simplifyCmpInst
1823 return createExpression(I);
1826 // Return true if V is a value that will always be available (IE can
1827 // be placed anywhere) in the function. We don't do globals here
1828 // because they are often worse to put in place.
1829 // TODO: Separate cost from availability
// Constants and function arguments dominate every point of the function.
1830 static bool alwaysAvailable(Value *V) {
1831 return isa<Constant>(V) || isa<Argument>(V);
// NOTE(review): several original lines are elided in this excerpt (e.g. 1835,
// 1843, the `break;` lines between switch cases 1852/1855/1858/1861/1864/1867/
// 1871, and 1907-1914); visible code is kept byte-identical.
1834 // Substitute and symbolize the value before value numbering.
1836 NewGVN::performSymbolicEvaluation(Value *V,
1837 SmallPtrSetImpl<Value *> &Visited) const {
1838 const Expression *E = nullptr;
// Constants and function-wide values get trivial expressions directly.
1839 if (auto *C = dyn_cast<Constant>(V))
1840 E = createConstantExpression(C);
1841 else if (isa<Argument>(V) || isa<GlobalVariable>(V)) {
1842 E = createVariableExpression(V);
1844 // TODO: memory intrinsics.
1845 // TODO: Some day, we should do the forward propagation and reassociation
1846 // parts of the algorithm.
// Otherwise dispatch on the opcode to the specialized symbolic evaluators.
1847 auto *I = cast<Instruction>(V);
1848 switch (I->getOpcode()) {
1849 case Instruction::ExtractValue:
1850 case Instruction::InsertValue:
1851 E = performSymbolicAggrValueEvaluation(I);
1853 case Instruction::PHI:
1854 E = performSymbolicPHIEvaluation(I);
1856 case Instruction::Call:
1857 E = performSymbolicCallEvaluation(I);
1859 case Instruction::Store:
1860 E = performSymbolicStoreEvaluation(I);
1862 case Instruction::Load:
1863 E = performSymbolicLoadEvaluation(I);
1865 case Instruction::BitCast: {
1866 E = createExpression(I);
1868 case Instruction::ICmp:
1869 case Instruction::FCmp: {
1870 E = performSymbolicCmpEvaluation(I);
// All remaining simple arithmetic, cast, and vector/GEP opcodes go through
// the generic expression creator.
1872 case Instruction::Add:
1873 case Instruction::FAdd:
1874 case Instruction::Sub:
1875 case Instruction::FSub:
1876 case Instruction::Mul:
1877 case Instruction::FMul:
1878 case Instruction::UDiv:
1879 case Instruction::SDiv:
1880 case Instruction::FDiv:
1881 case Instruction::URem:
1882 case Instruction::SRem:
1883 case Instruction::FRem:
1884 case Instruction::Shl:
1885 case Instruction::LShr:
1886 case Instruction::AShr:
1887 case Instruction::And:
1888 case Instruction::Or:
1889 case Instruction::Xor:
1890 case Instruction::Trunc:
1891 case Instruction::ZExt:
1892 case Instruction::SExt:
1893 case Instruction::FPToUI:
1894 case Instruction::FPToSI:
1895 case Instruction::UIToFP:
1896 case Instruction::SIToFP:
1897 case Instruction::FPTrunc:
1898 case Instruction::FPExt:
1899 case Instruction::PtrToInt:
1900 case Instruction::IntToPtr:
1901 case Instruction::Select:
1902 case Instruction::ExtractElement:
1903 case Instruction::InsertElement:
1904 case Instruction::ShuffleVector:
1905 case Instruction::GetElementPtr:
1906 E = createExpression(I);
1915 // Look up a container in a map, and then call a function for each thing in the
// found container, if any. (The loop body — presumably `F(Mapped);`, original
// line 1922 — is elided in this excerpt.)
1917 template <typename Map, typename KeyType, typename Func>
1918 void NewGVN::for_each_found(Map &M, const KeyType &Key, Func F) {
1919 const auto Result = M.find_as(Key);
1920 if (Result != M.end())
1921 for (typename Map::mapped_type::value_type Mapped : Result->second)
1925 // Look up a container of values/instructions in a map, and touch all the
1926 // instructions in the container. Then erase value from the map.
// (The erase call and closing braces, original lines 1933-1935, are elided.)
1927 template <typename Map, typename KeyType>
1928 void NewGVN::touchAndErase(Map &M, const KeyType &Key) {
1929 const auto Result = M.find_as(Key);
1930 if (Result != M.end()) {
1931 for (const typename Map::mapped_type::value_type Mapped : Result->second)
1932 TouchedInstructions.set(InstrToDFSNum(Mapped));
// Record User as an extra (non-IR use-list) user of To, so that
// markUsersTouched(To) will also re-touch User. Non-instructions are skipped.
1937 void NewGVN::addAdditionalUsers(Value *To, Value *User) const {
1938 if (isa<Instruction>(To))
1939 AdditionalUsers[To].insert(User);
// Mark every user of V (IR users plus recorded additional users) as touched,
// so they get re-value-numbered on the next iteration.
1942 void NewGVN::markUsersTouched(Value *V) {
1943 // Now mark the users as touched.
1944 for (auto *User : V->users()) {
1945 assert(isa<Instruction>(User) && "Use of value not within an instruction?");
1946 TouchedInstructions.set(InstrToDFSNum(User));
// Also touch (and clear) the additional users recorded for V.
1948 touchAndErase(AdditionalUsers, V);
// Record U as a user of MemoryAccess To, for later touching via
// markMemoryUsersTouched.
1951 void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const {
1952 DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n");
1953 MemoryToUsers[To].insert(U);
// Mark a memory def as needing reprocessing, via its DFS number.
1956 void NewGVN::markMemoryDefTouched(const MemoryAccess *MA) {
1957 TouchedInstructions.set(MemoryToDFSNum(MA));
// Touch all users of MemoryAccess MA (both MSSA users and recorded extra
// users). (Original line 1962 — presumably an early `return;` for MemoryUses —
// is elided in this excerpt; verify against upstream.)
1960 void NewGVN::markMemoryUsersTouched(const MemoryAccess *MA) {
1961 if (isa<MemoryUse>(MA))
1963 for (auto U : MA->users())
1964 TouchedInstructions.set(MemoryToDFSNum(U))
1965 touchAndErase(MemoryToUsers, MA);
1968 // Add I to the set of users of a given predicate.
1969 void NewGVN::addPredicateUsers(const PredicateBase *PB, Instruction *I) const {
1970 // Don't add temporary instructions to the user lists.
1971 if (AllTempInstructions.count(I))
1974 if (auto *PBranch = dyn_cast<PredicateBranch>(PB))
1975 PredicateToUsers[PBranch->Condition].insert(I);
1976 else if (auto *PAssume = dyn_cast<PredicateBranch>(PB))
1977 PredicateToUsers[PAssume->Condition].insert(I);
1980 // Touch all the predicates that depend on this instruction.
// Touches every recorded predicate user of I and clears the entry.
1981 void NewGVN::markPredicateUsersTouched(Instruction *I) {
1982 touchAndErase(PredicateToUsers, I);
1985 // Mark users affected by a memory leader change.
// Every memory member (MemoryPhi) of the class must be reprocessed.
1986 void NewGVN::markMemoryLeaderChangeTouched(CongruenceClass *CC) {
1987 for (auto M : CC->memory())
1988 markMemoryDefTouched(M);
1991 // Touch the instructions that need to be updated after a congruence class has a
1992 // leader change, and mark changed values.
1993 void NewGVN::markValueLeaderChangeTouched(CongruenceClass *CC) {
1994 for (auto M : *CC) {
1995 if (auto *I = dyn_cast<Instruction>(M))
1996 TouchedInstructions.set(InstrToDFSNum(I));
// Record the member as leader-changed so performCongruenceFinding notices.
1997 LeaderChanges.insert(M);
2001 // Given a range of things that have instruction DFS numbers, this will return
2002 // the member of the range with the smallest dfs number.
2003 template <class T, class Range>
2004 T *NewGVN::getMinDFSOfRange(const Range &R) const {
// Track the current minimum as a (member, dfsnum) pair; ~0U sorts last.
2005 std::pair<T *, unsigned> MinDFS = {nullptr, ~0U};
2006 for (const auto X : R) {
2007 auto DFSNum = InstrToDFSNum(X);
2008 if (DFSNum < MinDFS.second)
2009 MinDFS = {X, DFSNum};
2011 return MinDFS.first;
2014 // This function returns the MemoryAccess that should be the next leader of
2015 // congruence class CC, under the assumption that the current leader is going to
2017 const MemoryAccess *NewGVN::getNextMemoryLeader(CongruenceClass *CC) const {
2018 // TODO: If this ends up too slow, we can maintain a next memory leader like we
2019 // do for regular leaders.
2020 // Make sure there will be a leader to find
2021 assert(!CC->definesNoMemory() && "Can't get next leader if there is none");
// Prefer a store member: either the cached next leader, or the store with
// the minimum DFS number (most dominating).
2022 if (CC->getStoreCount() > 0) {
2023 if (auto *NL = dyn_cast_or_null<StoreInst>(CC->getNextLeader().first))
2024 return getMemoryAccess(NL);
2025 // Find the store with the minimum DFS number.
2026 auto *V = getMinDFSOfRange<Value>(make_filter_range(
2027 *CC, [&](const Value *V) { return isa<StoreInst>(V); }));
2028 return getMemoryAccess(cast<StoreInst>(V));
2030 assert(CC->getStoreCount() == 0);
2032 // Given our assertion, hitting this part must mean
2033 // !OldClass->memory_empty()
2034 if (CC->memory_size() == 1)
2035 return *CC->memory_begin();
2036 return getMinDFSOfRange<const MemoryPhi>(CC->memory());
2039 // This function returns the next value leader of a congruence class, under the
2040 // assumption that the current leader is going away. This should end up being
2041 // the next most dominating member.
2042 Value *NewGVN::getNextValueLeader(CongruenceClass *CC) const {
2043 // We don't need to sort members if there is only 1, and we don't care about
2044 // sorting the TOP class because everything either gets out of it or is
2047 if (CC->size() == 1 || CC == TOPClass) {
2048 return *(CC->begin());
2049 } else if (CC->getNextLeader().first) {
// Fast path: a next leader was cached while building the class.
2050 ++NumGVNAvoidedSortedLeaderChanges;
2051 return CC->getNextLeader().first;
2053 ++NumGVNSortedLeaderChanges;
2054 // NOTE: If this ends up too slow, we can maintain a dual structure for
2055 // member testing/insertion, or keep things mostly sorted, and sort only
2056 // here, or use SparseBitVector or ....
2057 return getMinDFSOfRange<Value>(*CC);
2061 // Move a MemoryAccess, currently in OldClass, to NewClass, including updates to
2062 // the memory members, etc for the move.
2064 // The invariants of this function are:
2066 // I must be moving to NewClass from OldClass The StoreCount of OldClass and
2067 // NewClass is expected to have been updated for I already if it is is a store.
2068 // The OldClass memory leader has not been updated yet if I was the leader.
2069 void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I,
2070 MemoryAccess *InstMA,
2071 CongruenceClass *OldClass,
2072 CongruenceClass *NewClass) {
2073 // If the leader is I, and we had a representative MemoryAccess, it should
2074 // be the MemoryAccess of OldClass.
2075 assert((!InstMA || !OldClass->getMemoryLeader() ||
2076 OldClass->getLeader() != I ||
2077 OldClass->getMemoryLeader() == InstMA) &&
2078 "Representative MemoryAccess mismatch");
2079 // First, see what happens to the new class
2080 if (!NewClass->getMemoryLeader()) {
2081 // Should be a new class, or a store becoming a leader of a new class.
2082 assert(NewClass->size() == 1 ||
2083 (isa<StoreInst>(I) && NewClass->getStoreCount() == 1));
2084 NewClass->setMemoryLeader(InstMA);
2085 // Mark it touched if we didn't just create a singleton
2086 DEBUG(dbgs() << "Memory class leader change for class " << NewClass->getID()
2087 << " due to new memory instruction becoming leader\n");
2088 markMemoryLeaderChangeTouched(NewClass);
2090 setMemoryClass(InstMA, NewClass);
2091 // Now, fixup the old class if necessary
2092 if (OldClass->getMemoryLeader() == InstMA) {
// If members remain, promote a new leader; otherwise clear it (the class
// no longer defines memory).
2093 if (!OldClass->definesNoMemory()) {
2094 OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
2095 DEBUG(dbgs() << "Memory class leader change for class "
2096 << OldClass->getID() << " to "
2097 << *OldClass->getMemoryLeader()
2098 << " due to removal of old leader " << *InstMA << "\n");
2099 markMemoryLeaderChangeTouched(OldClass);
2101 OldClass->setMemoryLeader(nullptr);
// NOTE(review): some original lines are elided in this excerpt (e.g. 2112-2113,
// 2115, 2127, 2141, 2143, 2145, 2148, 2151, 2160, 2166, 2169-2170, 2174, 2176,
// 2185, 2189-2190); visible code is kept byte-identical.
2105 // Move a value, currently in OldClass, to be part of NewClass
2106 // Update OldClass and NewClass for the move (including changing leaders, etc).
2107 void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E,
2108 CongruenceClass *OldClass,
2109 CongruenceClass *NewClass) {
2110 if (I == OldClass->getNextLeader().first)
2111 OldClass->resetNextLeader();
2114 NewClass->insert(I);
// Any non-leader member is a candidate next leader for NewClass.
2116 if (NewClass->getLeader() != I)
2117 NewClass->addPossibleNextLeader({I, InstrToDFSNum(I)});
2118 // Handle our special casing of stores.
2119 if (auto *SI = dyn_cast<StoreInst>(I)) {
2120 OldClass->decStoreCount();
2121 // Okay, so when do we want to make a store a leader of a class?
2122 // If we have a store defined by an earlier load, we want the earlier load
2123 // to lead the class.
2124 // If we have a store defined by something else, we want the store to lead
2125 // the class so everything else gets the "something else" as a value.
2126 // If we have a store as the single member of the class, we want the store
2128 if (NewClass->getStoreCount() == 0 && !NewClass->getStoredValue()) {
2129 // If it's a store expression we are using, it means we are not equivalent
2130 // to something earlier.
2131 if (auto *SE = dyn_cast<StoreExpression>(E)) {
2132 NewClass->setStoredValue(SE->getStoredValue());
2133 markValueLeaderChangeTouched(NewClass);
2134 // Shift the new class leader to be the store
2135 DEBUG(dbgs() << "Changing leader of congruence class "
2136 << NewClass->getID() << " from " << *NewClass->getLeader()
2137 << " to " << *SI << " because store joined class\n");
2138 // If we changed the leader, we have to mark it changed because we don't
2139 // know what it will do to symbolic evaluation.
2140 NewClass->setLeader(SI);
2142 // We rely on the code below handling the MemoryAccess change.
2144 NewClass->incStoreCount();
2146 // True if there is no memory instructions left in a class that had memory
2147 // instructions before.
2149 // If it's not a memory use, set the MemoryAccess equivalence
2150 auto *InstMA = dyn_cast_or_null<MemoryDef>(getMemoryAccess(I));
2152 moveMemoryToNewCongruenceClass(I, InstMA, OldClass, NewClass);
2153 ValueToClass[I] = NewClass;
2154 // See if we destroyed the class or need to swap leaders.
2155 if (OldClass->empty() && OldClass != TOPClass) {
2156 if (OldClass->getDefiningExpr()) {
2157 DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr()
2158 << " from table\n");
2159 // We erase it as an exact expression to make sure we don't just erase an
2161 auto Iter = ExpressionToClass.find_as(
2162 ExactEqualsExpression(*OldClass->getDefiningExpr()));
2163 if (Iter != ExpressionToClass.end())
2164 ExpressionToClass.erase(Iter);
2165 #ifdef EXPENSIVE_CHECKS
2167 (*OldClass->getDefiningExpr() != *E || ExpressionToClass.lookup(E)) &&
2168 "We erased the expression we just inserted, which should not happen");
2171 } else if (OldClass->getLeader() == I) {
2172 // When the leader changes, the value numbering of
2173 // everything may change due to symbolization changes, so we need to
2175 DEBUG(dbgs() << "Value class leader change for class " << OldClass->getID()
2177 ++NumGVNLeaderChanges;
2178 // Destroy the stored value if there are no more stores to represent it.
2179 // Note that this is basically clean up for the expression removal that
2180 // happens below. If we remove stores from a class, we may leave it as a
2181 // class of equivalent memory phis.
2182 if (OldClass->getStoreCount() == 0) {
2183 if (OldClass->getStoredValue())
2184 OldClass->setStoredValue(nullptr);
2186 OldClass->setLeader(getNextValueLeader(OldClass));
2187 OldClass->resetNextLeader();
2188 markValueLeaderChangeTouched(OldClass);
2192 // For a given expression, mark the phi of ops instructions that could have
2193 // changed as a result.
// Uses exact-equality lookup so only instructions keyed on this precise
// expression are touched (and the entry is erased).
2194 void NewGVN::markPhiOfOpsChanged(const Expression *E) {
2195 touchAndErase(ExpressionToPhiOfOps, ExactEqualsExpression(*E));
// NOTE(review): some original lines are elided in this excerpt (e.g. 2201-2202,
// 2207, 2212-2214, 2216, 2222, 2232, 2234, 2237-2238, 2245, 2252-2253, 2255,
// 2257-2258, 2263-2264, 2267-2268, 2274, 2289-2290, 2292); code kept as-is.
2198 // Perform congruence finding on a given value numbering expression.
2199 void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) {
2200 // This is guaranteed to return something, since it will at least find
2203 CongruenceClass *IClass = ValueToClass.lookup(I);
2204 assert(IClass && "Should have found a IClass");
2205 // Dead classes should have been eliminated from the mapping.
2206 assert(!IClass->isDead() && "Found a dead class");
2208 CongruenceClass *EClass = nullptr;
2209 if (const auto *VE = dyn_cast<VariableExpression>(E)) {
2210 EClass = ValueToClass.lookup(VE->getVariableValue());
2211 } else if (isa<DeadExpression>(E)) {
// Insert with a null class placeholder; if the insert wins, build the class.
2215 auto lookupResult = ExpressionToClass.insert({E, nullptr});
2217 // If it's not in the value table, create a new congruence class.
2218 if (lookupResult.second) {
2219 CongruenceClass *NewClass = createCongruenceClass(nullptr, E);
2220 auto place = lookupResult.first;
2221 place->second = NewClass;
2223 // Constants and variables should always be made the leader.
2224 if (const auto *CE = dyn_cast<ConstantExpression>(E)) {
2225 NewClass->setLeader(CE->getConstantValue());
2226 } else if (const auto *SE = dyn_cast<StoreExpression>(E)) {
2227 StoreInst *SI = SE->getStoreInst();
2228 NewClass->setLeader(SI);
2229 NewClass->setStoredValue(SE->getStoredValue());
2230 // The RepMemoryAccess field will be filled in properly by the
2231 // moveValueToNewCongruenceClass call.
2233 NewClass->setLeader(I);
2235 assert(!isa<VariableExpression>(E) &&
2236 "VariableExpression should have been handled already");
2239 DEBUG(dbgs() << "Created new congruence class for " << *I
2240 << " using expression " << *E << " at " << NewClass->getID()
2241 << " and leader " << *(NewClass->getLeader()));
2242 if (NewClass->getStoredValue())
2243 DEBUG(dbgs() << " and stored value " << *(NewClass->getStoredValue()));
2244 DEBUG(dbgs() << "\n");
2246 EClass = lookupResult.first->second;
2247 if (isa<ConstantExpression>(E))
2248 assert((isa<Constant>(EClass->getLeader()) ||
2249 (EClass->getStoredValue() &&
2250 isa<Constant>(EClass->getStoredValue()))) &&
2251 "Any class with a constant expression should have a "
2254 assert(EClass && "Somehow don't have an eclass");
2256 assert(!EClass->isDead() && "We accidentally looked up a dead class");
// Only do the move work when the class or the leader actually changed.
2259 bool ClassChanged = IClass != EClass;
2260 bool LeaderChanged = LeaderChanges.erase(I);
2261 if (ClassChanged || LeaderChanged) {
2262 DEBUG(dbgs() << "New class " << EClass->getID() << " for expression " << *E
2265 moveValueToNewCongruenceClass(I, E, IClass, EClass);
2266 markPhiOfOpsChanged(E);
2269 markUsersTouched(I);
2270 if (MemoryAccess *MA = getMemoryAccess(I))
2271 markMemoryUsersTouched(MA);
2272 if (auto *CI = dyn_cast<CmpInst>(I))
2273 markPredicateUsersTouched(CI);
2275 // If we changed the class of the store, we want to ensure nothing finds the
2276 // old store expression. In particular, loads do not compare against stored
2277 // value, so they will find old store expressions (and associated class
2278 // mappings) if we leave them in the table.
2279 if (ClassChanged && isa<StoreInst>(I)) {
2280 auto *OldE = ValueToExpression.lookup(I);
2281 // It could just be that the old class died. We don't want to erase it if we
2282 // just moved classes.
2283 if (OldE && isa<StoreExpression>(OldE) && *E != *OldE) {
2284 // Erase this as an exact expression to ensure we don't erase expressions
2285 // equivalent to it.
2286 auto Iter = ExpressionToClass.find_as(ExactEqualsExpression(*OldE));
2287 if (Iter != ExpressionToClass.end())
2288 ExpressionToClass.erase(Iter);
2291 ValueToExpression[I] = E;
2294 // Process the fact that Edge (from, to) is reachable, including marking
2295 // any newly reachable blocks and instructions for processing.
2296 void NewGVN::updateReachableEdge(BasicBlock *From, BasicBlock *To) {
2297 // Check if the Edge was reachable before.
2298 if (ReachableEdges.insert({From, To}).second) {
2299 // If this block wasn't reachable before, all instructions are touched.
2300 if (ReachableBlocks.insert(To).second) {
2301 DEBUG(dbgs() << "Block " << getBlockName(To) << " marked reachable\n");
2302 const auto &InstRange = BlockInstRange.lookup(To);
2303 TouchedInstructions.set(InstRange.first, InstRange.second);
2305 DEBUG(dbgs() << "Block " << getBlockName(To)
2306 << " was reachable, but new edge {" << getBlockName(From)
2307 << "," << getBlockName(To) << "} to it found\n");
2309 // We've made an edge reachable to an existing block, which may
2310 // impact predicates. Otherwise, only mark the phi nodes as touched, as
2311 // they are the only thing that depend on new edges. Anything using their
2312 // values will get propagated to if necessary.
2313 if (MemoryAccess *MemPhi = getMemoryAccess(To))
2314 TouchedInstructions.set(InstrToDFSNum(MemPhi));
// Touch the leading phi nodes of the (already-reachable) target block.
2316 auto BI = To->begin();
2317 while (isa<PHINode>(BI)) {
2318 TouchedInstructions.set(InstrToDFSNum(&*BI));
// Also touch any phi-of-ops phis we synthesized in this block.
2321 for_each_found(PHIOfOpsPHIs, To, [&](const PHINode *I) {
2322 TouchedInstructions.set(InstrToDFSNum(I));
2328 // Given a predicate condition (from a switch, cmp, or whatever) and a block,
2329 // see if we know some constant value for it already.
// Returns the condition's class leader if it is a Constant, else nullptr.
2330 Value *NewGVN::findConditionEquivalence(Value *Cond) const {
2331 auto Result = lookupOperandLeader(Cond);
2332 return isa<Constant>(Result) ? Result : nullptr;
// NOTE(review): some original lines are elided in this excerpt (e.g. 2338,
// 2347, 2350-2352, 2356, 2364-2365, 2368, 2371-2372, 2375, 2386, 2388-2389,
// 2393, 2398-2400, 2406-2407, 2410, 2416-2417); code kept byte-identical.
2335 // Process the outgoing edges of a block for reachability.
2336 void NewGVN::processOutgoingEdges(TerminatorInst *TI, BasicBlock *B) {
2337 // Evaluate reachability of terminator instruction.
2339 if ((BR = dyn_cast<BranchInst>(TI)) && BR->isConditional()) {
2340 Value *Cond = BR->getCondition();
2341 Value *CondEvaluated = findConditionEquivalence(Cond);
2342 if (!CondEvaluated) {
2343 if (auto *I = dyn_cast<Instruction>(Cond)) {
2344 const Expression *E = createExpression(I);
2345 if (const auto *CE = dyn_cast<ConstantExpression>(E)) {
2346 CondEvaluated = CE->getConstantValue();
2348 } else if (isa<ConstantInt>(Cond)) {
2349 CondEvaluated = Cond;
2353 BasicBlock *TrueSucc = BR->getSuccessor(0);
2354 BasicBlock *FalseSucc = BR->getSuccessor(1);
// Only the proven successor edge is marked reachable when the condition
// folds to a constant; otherwise both edges are reachable.
2355 if (CondEvaluated && (CI = dyn_cast<ConstantInt>(CondEvaluated))) {
2357 DEBUG(dbgs() << "Condition for Terminator " << *TI
2358 << " evaluated to true\n");
2359 updateReachableEdge(B, TrueSucc);
2360 } else if (CI->isZero()) {
2361 DEBUG(dbgs() << "Condition for Terminator " << *TI
2362 << " evaluated to false\n");
2363 updateReachableEdge(B, FalseSucc);
2366 updateReachableEdge(B, TrueSucc);
2367 updateReachableEdge(B, FalseSucc);
2369 } else if (auto *SI = dyn_cast<SwitchInst>(TI)) {
2370 // For switches, propagate the case values into the case
2373 // Remember how many outgoing edges there are to every successor.
2374 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2376 Value *SwitchCond = SI->getCondition();
2377 Value *CondEvaluated = findConditionEquivalence(SwitchCond);
2378 // See if we were able to turn this switch statement into a constant.
2379 if (CondEvaluated && isa<ConstantInt>(CondEvaluated)) {
2380 auto *CondVal = cast<ConstantInt>(CondEvaluated);
2381 // We should be able to get case value for this.
2382 auto Case = *SI->findCaseValue(CondVal);
2383 if (Case.getCaseSuccessor() == SI->getDefaultDest()) {
2384 // We proved the value is outside of the range of the case.
2385 // We can't do anything other than mark the default dest as reachable,
2387 updateReachableEdge(B, SI->getDefaultDest());
2390 // Now get where it goes and mark it reachable.
2391 BasicBlock *TargetBlock = Case.getCaseSuccessor();
2392 updateReachableEdge(B, TargetBlock);
2394 for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) {
2395 BasicBlock *TargetBlock = SI->getSuccessor(i);
2396 ++SwitchEdges[TargetBlock];
2397 updateReachableEdge(B, TargetBlock);
2401 // Otherwise this is either unconditional, or a type we have no
2402 // idea about. Just mark successors as reachable.
2403 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
2404 BasicBlock *TargetBlock = TI->getSuccessor(i);
2405 updateReachableEdge(B, TargetBlock);
2408 // This also may be a memory defining terminator, in which case, set it
2409 // equivalent only to itself.
2411 auto *MA = getMemoryAccess(TI);
2412 if (MA && !isa<MemoryUse>(MA)) {
2413 auto *CC = ensureLeaderOfMemoryClass(MA);
2414 if (setMemoryClass(MA, CC))
2415 markMemoryUsersTouched(MA);
// Record the temporary phi node Op (a "phi of ops" placed in block BB) as the
// stand-in for ExistingValue. The temporary inherits ExistingValue's DFS
// number so ordering queries see it at the same program point, and the
// Real->Temp mapping lets us find and reuse it later.
2420 void NewGVN::addPhiOfOps(PHINode *Op, BasicBlock *BB,
2421 Instruction *ExistingValue) {
// Share the DFS number of the instruction we stand in for.
2422 InstrDFS[Op] = InstrToDFSNum(ExistingValue);
// Mark as a temporary so cleanupTables() knows to delete it.
2423 AllTempInstructions.insert(Op);
2424 PHIOfOpsPHIs[BB].push_back(Op);
2425 TempToBlock[Op] = BB;
2427 RealToTemp[ExistingValue] = Op;
// Return true if I is a kind of instruction we are willing to turn into a
// phi of ops (binary operators, selects, compares, among others).
2430 static bool okayForPHIOfOps(const Instruction *I) {
2431 return isa<BinaryOperator>(I) || isa<SelectInst>(I) || isa<CmpInst>(I) ||
2435 // When we see an instruction that is an op of phis, generate the equivalent phi
// of ops. On success, returns the symbolic evaluation of the (new or reused)
// phi-of-ops node; bails out early when the transform does not apply.
2438 NewGVN::makePossiblePhiOfOps(Instruction *I,
2439 SmallPtrSetImpl<Value *> &Visited) {
2440 if (!okayForPHIOfOps(I))
// Avoid re-examining an instruction we are already evaluating.
2443 if (!Visited.insert(I).second)
2445 // For now, we require the instruction be cycle free because we don't
2446 // *always* create a phi of ops for instructions that could be done as phi
2447 // of ops, we only do it if we think it is useful. If we did do it all the
2448 // time, we could remove the cycle free check.
2449 if (!isCycleFree(I))
2452 unsigned IDFSNum = InstrToDFSNum(I);
2453 SmallPtrSet<const Value *, 8> ProcessedPHIs;
2454 // TODO: We don't do phi translation on memory accesses because it's
2455 // complicated. For a load, we'd need to be able to simulate a new memoryuse,
2456 // which we don't have a good way of doing ATM.
2457 auto *MemAccess = getMemoryAccess(I);
2458 // If the memory operation is defined by a memory operation this block that
2459 // isn't a MemoryPhi, transforming the pointer backwards through a scalar phi
2460 // can't help, as it would still be killed by that memory operation.
2461 if (MemAccess && !isa<MemoryPhi>(MemAccess->getDefiningAccess()) &&
2462 MemAccess->getDefiningAccess()->getBlock() == I->getParent())
2465 // Convert op of phis to phi of ops
2466 for (auto &Op : I->operands()) {
2467 // TODO: We can't handle expressions that must be recursively translated
2471 // g = f + phi of something
2472 // To properly make a phi of ops for g, we'd have to properly translate and
2473 // use the instruction for f. We should add this by splitting out the
2474 // instruction creation we do below.
2475 if (isa<Instruction>(Op) && PHINodeUses.count(cast<Instruction>(Op)))
2477 if (!isa<PHINode>(Op))
2479 auto *OpPHI = cast<PHINode>(Op);
2480 // No point in doing this for one-operand phis.
2481 if (OpPHI->getNumOperands() == 1)
2483 if (!DebugCounter::shouldExecute(PHIOfOpsCounter))
// The (value, block) pairs that will become the new phi's incoming entries.
2485 SmallVector<std::pair<Value *, BasicBlock *>, 4> Ops;
2486 auto *PHIBlock = getBlockForValue(OpPHI);
2487 for (auto PredBB : OpPHI->blocks()) {
2488 Value *FoundVal = nullptr;
2489 // We could just skip unreachable edges entirely but it's tricky to do
2490 // with rewriting existing phi nodes.
2491 if (ReachableEdges.count({PredBB, PHIBlock})) {
2492 // Clone the instruction, create an expression from it, and see if we
2494 Instruction *ValueOp = I->clone();
2496 TempToMemory.insert({ValueOp, MemAccess});
// Translate each operand of the clone back through the phi edge into
// the predecessor block.
2498 for (auto &Op : ValueOp->operands()) {
2499 Op = Op->DoPHITranslation(PHIBlock, PredBB);
2500 // When this operand changes, it could change whether there is a
2501 // leader for us or not.
2502 addAdditionalUsers(Op, I);
2504 // Make sure it's marked as a temporary instruction.
2505 AllTempInstructions.insert(ValueOp);
2506 // and make sure anything that tries to add its DFS number is
2507 // redirected to the instruction we are making a phi of ops
2509 InstrDFS.insert({ValueOp, IDFSNum});
2510 const Expression *E = performSymbolicEvaluation(ValueOp, Visited);
// The clone was only needed for evaluation; tear it back down.
2511 InstrDFS.erase(ValueOp);
2512 AllTempInstructions.erase(ValueOp);
2513 ValueOp->deleteValue();
2515 TempToMemory.erase(ValueOp);
2518 FoundVal = findPhiOfOpsLeader(E, PredBB);
// Remember that I depends on this expression so we retry the
// transform if a leader for E appears later.
2520 ExpressionToPhiOfOps[E].insert(I);
// Stores produce their stored value as the usable leader.
2523 if (auto *SI = dyn_cast<StoreInst>(FoundVal))
2524 FoundVal = SI->getValueOperand();
2526 DEBUG(dbgs() << "Skipping phi of ops operand for incoming block "
2527 << getBlockName(PredBB)
2528 << " because the block is unreachable\n");
// Unreachable incoming edges contribute undef.
2529 FoundVal = UndefValue::get(I->getType());
2532 Ops.push_back({FoundVal, PredBB});
2533 DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in "
2534 << getBlockName(PredBB) << "\n");
// Reuse an existing temporary phi for I if we made one before; otherwise
// create and register a fresh one.
2536 auto *ValuePHI = RealToTemp.lookup(I);
2537 bool NewPHI = false;
2539 ValuePHI = PHINode::Create(I->getType(), OpPHI->getNumOperands());
2540 addPhiOfOps(ValuePHI, PHIBlock, I);
2542 NumGVNPHIOfOpsCreated++;
2545 for (auto PHIOp : Ops)
2546 ValuePHI->addIncoming(PHIOp.first, PHIOp.second);
// Existing phi: overwrite its incoming entries in place.
2549 for (auto PHIOp : Ops) {
2550 ValuePHI->setIncomingValue(i, PHIOp.first);
2551 ValuePHI->setIncomingBlock(i, PHIOp.second);
2556 DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I
2558 return performSymbolicEvaluation(ValuePHI, Visited);
2563 // The algorithm initially places the values of the routine in the TOP
2564 // congruence class. The leader of TOP is the undetermined value `undef`.
2565 // When the algorithm has finished, values still in TOP are unreachable.
2566 void NewGVN::initializeCongruenceClasses(Function &F) {
2567 NextCongruenceNum = 0;
2569 // Note that even though we use the live on entry def as a representative
2570 // MemoryAccess, it is *not* the same as the actual live on entry def. We
2571 // have no real equivalent to undef for MemoryAccesses, and so we really
2572 // should be checking whether the MemoryAccess is top if we want to know if it
2573 // is equivalent to everything. Otherwise, what this really signifies is that
2574 // the access "it reaches all the way back to the beginning of the function"
2576 // Initialize all other instructions to be in TOP class.
2577 TOPClass = createCongruenceClass(nullptr, nullptr);
2578 TOPClass->setMemoryLeader(MSSA->getLiveOnEntryDef());
2579 // The live on entry def gets put into its own class.
2580 MemoryAccessToClass[MSSA->getLiveOnEntryDef()] =
2581 createMemoryClass(MSSA->getLiveOnEntryDef());
// Walk every block in the dominator tree, placing memory defs and
// instructions into TOP.
2583 for (auto DTN : nodes(DT)) {
2584 BasicBlock *BB = DTN->getBlock();
2585 // All MemoryAccesses are equivalent to live on entry to start. They must
2586 // be initialized to something so that initial changes are noticed. For
2587 // the maximal answer, we initialize them all to be the same as
2589 auto *MemoryBlockDefs = MSSA->getBlockDefs(BB);
2590 if (MemoryBlockDefs)
2591 for (const auto &Def : *MemoryBlockDefs) {
2592 MemoryAccessToClass[&Def] = TOPClass;
2593 auto *MD = dyn_cast<MemoryDef>(&Def);
2594 // Insert the memory phis into the member list.
2596 const MemoryPhi *MP = cast<MemoryPhi>(&Def);
2597 TOPClass->memory_insert(MP);
2598 MemoryPhiState.insert({MP, MPS_TOP});
// Track store count so the class knows when it contains stores.
2601 if (MD && isa<StoreInst>(MD->getMemoryInst()))
2602 TOPClass->incStoreCount();
2604 for (auto &I : *BB) {
2605 // TODO: Move to helper
// Prepopulate PHINodeUses: non-dead users of phi nodes that are
// candidates for the phi-of-ops transform.
2606 if (isa<PHINode>(&I))
2607 for (auto *U : I.users())
2608 if (auto *UInst = dyn_cast<Instruction>(U))
2609 if (InstrToDFSNum(UInst) != 0 && okayForPHIOfOps(UInst))
2610 PHINodeUses.insert(UInst);
2611 // Don't insert void terminators into the class. We don't value number
2612 // them, and they just end up sitting in TOP.
2613 if (isa<TerminatorInst>(I) && I.getType()->isVoidTy())
2615 TOPClass->insert(&I);
2616 ValueToClass[&I] = TOPClass;
2620 // Initialize arguments to be in their own unique congruence classes
2621 for (auto &FA : F.args())
2622 createSingletonCongruenceClass(&FA);
// Free all per-run state (congruence classes, temporary instructions, and
// every side table) so the pass object can be reused for another function.
2625 void NewGVN::cleanupTables() {
2626 for (unsigned i = 0, e = CongruenceClasses.size(); i != e; ++i) {
2627 DEBUG(dbgs() << "Congruence class " << CongruenceClasses[i]->getID()
2628 << " has " << CongruenceClasses[i]->size() << " members\n");
2629 // Make sure we delete the congruence class (probably worth switching to
2630 // a unique_ptr at some point).
2631 delete CongruenceClasses[i];
2632 CongruenceClasses[i] = nullptr;
2635 // Destroy the value expressions
2636 SmallVector<Instruction *, 8> TempInst(AllTempInstructions.begin(),
2637 AllTempInstructions.end());
2638 AllTempInstructions.clear();
2640 // We have to drop all references for everything first, so there are no uses
2641 // left as we delete them.
2642 for (auto *I : TempInst) {
2643 I->dropAllReferences();
// Now that no temporary references another, actually delete them.
2646 while (!TempInst.empty()) {
2647 auto *I = TempInst.back();
2648 TempInst.pop_back();
// Clear every side table used during value numbering.
2652 ValueToClass.clear();
2653 ArgRecycler.clear(ExpressionAllocator);
2654 ExpressionAllocator.Reset();
2655 CongruenceClasses.clear();
2656 ExpressionToClass.clear();
2657 ValueToExpression.clear();
2659 AdditionalUsers.clear();
2660 ExpressionToPhiOfOps.clear();
2661 TempToBlock.clear();
2662 TempToMemory.clear();
2663 PHIOfOpsPHIs.clear();
2664 ReachableBlocks.clear();
2665 ReachableEdges.clear();
2667 ProcessedCount.clear();
2670 InstructionsToErase.clear();
2672 BlockInstRange.clear();
2673 TouchedInstructions.clear();
2674 MemoryAccessToClass.clear();
2675 PredicateToUsers.clear();
2676 MemoryToUsers.clear();
2679 // Assign local DFS number mapping to instructions, and leave space for Value
// Returns the half-open [Start, End) range of DFS numbers used by block B.
// A MemoryPhi for B, if present, gets the first number in the range.
2681 std::pair<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock *B,
2683 unsigned End = Start;
2684 if (MemoryAccess *MemPhi = getMemoryAccess(B)) {
2685 InstrDFS[MemPhi] = End++;
2686 DFSToInstr.emplace_back(MemPhi);
2689 // Then the real block goes next.
2690 for (auto &I : *B) {
2691 // There's no need to call isInstructionTriviallyDead more than once on
2692 // an instruction. Therefore, once we know that an instruction is dead
2693 // we change its DFS number so that it doesn't get value numbered.
2694 if (isInstructionTriviallyDead(&I, TLI)) {
2696 DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n");
2697 markInstructionForDeletion(&I);
2700 InstrDFS[&I] = End++;
2701 DFSToInstr.emplace_back(&I);
2704 // All of the range functions take half-open ranges (open on the end side).
2705 // So we do not subtract one from count, because at this point it is one
2706 // greater than the last instruction.
2707 return std::make_pair(Start, End);
// Debug-only sanity counter: track how many times each Value is processed and
// assert if any single Value is processed suspiciously often (which would
// indicate the fixpoint iteration is not converging).
2710 void NewGVN::updateProcessedCount(const Value *V) {
2712 if (ProcessedCount.count(V) == 0) {
2713 ProcessedCount.insert({V, 1});
2715 ++ProcessedCount[V];
// 100 is an arbitrary "way too many" threshold, not a semantic limit.
2716 assert(ProcessedCount[V] < 100 &&
2717 "Seem to have processed the same Value a lot");
2721 // Evaluate MemoryPhi nodes symbolically, just like PHI nodes
2722 void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) {
2723 // If all the arguments are the same, the MemoryPhi has the same value as the
2724 // argument. Filter out unreachable blocks and self phis from our operands.
2725 // TODO: We could do cycle-checking on the memory phis to allow valueizing for
2726 // self-phi checking.
2727 const BasicBlock *PHIBlock = MP->getBlock();
// Keep only operands that are not the phi itself, not TOP, and whose
// incoming edge is reachable.
2728 auto Filtered = make_filter_range(MP->operands(), [&](const Use &U) {
2729 return cast<MemoryAccess>(U) != MP &&
2730 !isMemoryAccessTOP(cast<MemoryAccess>(U)) &&
2731 ReachableEdges.count({MP->getIncomingBlock(U), PHIBlock});
2733 // If all that is left is nothing, our memoryphi is undef. We keep it as
2734 // InitialClass. Note: The only case this should happen is if we have at
2735 // least one self-argument.
2736 if (Filtered.begin() == Filtered.end()) {
2737 if (setMemoryClass(MP, TOPClass))
2738 markMemoryUsersTouched(MP);
2742 // Transform the remaining operands into operand leaders.
2743 // FIXME: mapped_iterator should have a range version.
2744 auto LookupFunc = [&](const Use &U) {
2745 return lookupMemoryLeader(cast<MemoryAccess>(U));
2747 auto MappedBegin = map_iterator(Filtered.begin(), LookupFunc);
2748 auto MappedEnd = map_iterator(Filtered.end(), LookupFunc);
2750 // and now check if all the elements are equal.
2751 // Sadly, we can't use std::equals since these are random access iterators.
2752 const auto *AllSameValue = *MappedBegin;
2754 bool AllEqual = std::all_of(
2755 MappedBegin, MappedEnd,
2756 [&AllSameValue](const MemoryAccess *V) { return V == AllSameValue; });
2759 DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue << "\n");
2761 DEBUG(dbgs() << "Memory Phi value numbered to itself\n");
2762 // If it's equal to something, it's in that class. Otherwise, it has to be in
2763 // a class where it is the leader (other things may be equivalent to it, but
2764 // it needs to start off in its own class, which means it must have been the
2765 // leader, and it can't have stopped being the leader because it was never
2767 CongruenceClass *CC =
2768 AllEqual ? getMemoryClass(AllSameValue) : ensureLeaderOfMemoryClass(MP);
2769 auto OldState = MemoryPhiState.lookup(MP);
2770 assert(OldState != MPS_Invalid && "Invalid memory phi state");
2771 auto NewState = AllEqual ? MPS_Equivalent : MPS_Unique;
2772 MemoryPhiState[MP] = NewState;
// Propagate to users if either the class or the unique/equivalent state
// changed.
2773 if (setMemoryClass(MP, CC) || OldState != NewState)
2774 markMemoryUsersTouched(MP);
2777 // Value number a single instruction, symbolically evaluating, performing
2778 // congruence finding, and updating mappings.
2779 void NewGVN::valueNumberInstruction(Instruction *I) {
2780 DEBUG(dbgs() << "Processing instruction " << *I << "\n");
2781 if (!I->isTerminator()) {
2782 const Expression *Symbolized = nullptr;
2783 SmallPtrSet<Value *, 2> Visited;
2784 if (DebugCounter::shouldExecute(VNCounter)) {
2785 Symbolized = performSymbolicEvaluation(I, Visited);
2786 // Make a phi of ops if necessary
// Only worth trying when the result isn't already a constant/variable
// and I is a known user of some phi node.
2787 if (Symbolized && !isa<ConstantExpression>(Symbolized) &&
2788 !isa<VariableExpression>(Symbolized) && PHINodeUses.count(I)) {
2789 auto *PHIE = makePossiblePhiOfOps(I, Visited);
2795 // Mark the instruction as unused so we don't value number it again.
2798 // If we couldn't come up with a symbolic expression, use the unknown
2800 if (Symbolized == nullptr)
2801 Symbolized = createUnknownExpression(I);
2802 performCongruenceFinding(I, Symbolized);
2804 // Handle terminators that return values. All of them produce values we
2805 // don't currently understand. We don't place non-value producing
2806 // terminators in a class.
2807 if (!I->getType()->isVoidTy()) {
2808 auto *Symbolized = createUnknownExpression(I);
2809 performCongruenceFinding(I, Symbolized);
// Terminators also drive reachability of their successor edges.
2811 processOutgoingEdges(dyn_cast<TerminatorInst>(I), I->getParent());
2815 // Check if there is a path, using single or equal argument phi nodes, from
// First to Second through the MemorySSA def chains. Used only by the
// verification code; Visited guards against cycles.
2817 bool NewGVN::singleReachablePHIPath(
2818 SmallPtrSet<const MemoryAccess *, 8> &Visited, const MemoryAccess *First,
2819 const MemoryAccess *Second) const {
2820 if (First == Second)
2822 if (MSSA->isLiveOnEntryDef(First))
2825 // This is not perfect, but as we're just verifying here, we can live with
2826 // the loss of precision. The real solution would be that of doing strongly
2827 // connected component finding in this routine, and it's probably not worth
2828 // the complexity for the time being. So, we just keep a set of visited
2829 // MemoryAccess and return true when we hit a cycle.
2830 if (Visited.count(First))
2832 Visited.insert(First);
// Walk the optimized def chain from First looking for Second or a phi.
2834 const auto *EndDef = First;
2835 for (auto *ChainDef : optimized_def_chain(First)) {
2836 if (ChainDef == Second)
2838 if (MSSA->isLiveOnEntryDef(ChainDef))
2842 auto *MP = cast<MemoryPhi>(EndDef);
// Consider only phi operands whose incoming edge is reachable.
2843 auto ReachableOperandPred = [&](const Use &U) {
2844 return ReachableEdges.count({MP->getIncomingBlock(U), MP->getBlock()});
2846 auto FilteredPhiArgs =
2847 make_filter_range(MP->operands(), ReachableOperandPred);
2848 SmallVector<const Value *, 32> OperandList;
2849 std::copy(FilteredPhiArgs.begin(), FilteredPhiArgs.end(),
2850 std::back_inserter(OperandList));
// A phi is "single-argument-like" if it has one reachable operand, or
// all reachable operands are identical.
2851 bool Okay = OperandList.size() == 1;
2854 std::equal(OperandList.begin(), OperandList.end(), OperandList.begin());
2856 return singleReachablePHIPath(Visited, cast<MemoryAccess>(OperandList[0]),
2861 // Verify that the memory equivalence table makes sense relative to the
2862 // congruence classes. Note that this checking is not perfect, and is currently
2863 // subject to very rare false negatives. It is only useful for
2864 // testing/debugging.
2865 void NewGVN::verifyMemoryCongruency() const {
2867 // Verify that the memory table equivalence and memory member set match
2868 for (const auto *CC : CongruenceClasses) {
// TOP and dead classes are never consistent; skip them.
2869 if (CC == TOPClass || CC->isDead())
2871 if (CC->getStoreCount() != 0) {
2872 assert((CC->getStoredValue() || !isa<StoreInst>(CC->getLeader())) &&
2873 "Any class with a store as a leader should have a "
2874 "representative stored value");
2875 assert(CC->getMemoryLeader() &&
2876 "Any congruence class with a store should have a "
2877 "representative access");
// The memory leader and each memory member must map back to this class.
2880 if (CC->getMemoryLeader())
2881 assert(MemoryAccessToClass.lookup(CC->getMemoryLeader()) == CC &&
2882 "Representative MemoryAccess does not appear to be reverse "
2884 for (auto M : CC->memory())
2885 assert(MemoryAccessToClass.lookup(M) == CC &&
2886 "Memory member does not appear to be reverse mapped properly");
2889 // Anything equivalent in the MemoryAccess table should be in the same
2890 // congruence class.
2892 // Filter out the unreachable and trivially dead entries, because they may
2893 // never have been updated if the instructions were not processed.
2894 auto ReachableAccessPred =
2895 [&](const std::pair<const MemoryAccess *, CongruenceClass *> Pair) {
2896 bool Result = ReachableBlocks.count(Pair.first->getBlock());
2897 if (!Result || MSSA->isLiveOnEntryDef(Pair.first) ||
2898 MemoryToDFSNum(Pair.first) == 0)
2900 if (auto *MemDef = dyn_cast<MemoryDef>(Pair.first))
2901 return !isInstructionTriviallyDead(MemDef->getMemoryInst());
2903 // We could have phi nodes which operands are all trivially dead,
2904 // so we don't process them.
2905 if (auto *MemPHI = dyn_cast<MemoryPhi>(Pair.first)) {
2906 for (auto &U : MemPHI->incoming_values()) {
2907 if (Instruction *I = dyn_cast<Instruction>(U.get())) {
2908 if (!isInstructionTriviallyDead(I))
2918 auto Filtered = make_filter_range(MemoryAccessToClass, ReachableAccessPred);
2919 for (auto KV : Filtered) {
2920 if (auto *FirstMUD = dyn_cast<MemoryUseOrDef>(KV.first)) {
2921 auto *SecondMUD = dyn_cast<MemoryUseOrDef>(KV.second->getMemoryLeader());
2922 if (FirstMUD && SecondMUD) {
2923 SmallPtrSet<const MemoryAccess *, 8> VisitedMAS;
2924 assert((singleReachablePHIPath(VisitedMAS, FirstMUD, SecondMUD) ||
2925 ValueToClass.lookup(FirstMUD->getMemoryInst()) ==
2926 ValueToClass.lookup(SecondMUD->getMemoryInst())) &&
2927 "The instructions for these memory operations should have "
2928 "been in the same congruence class or reachable through"
2929 "a single argument phi");
2931 } else if (auto *FirstMP = dyn_cast<MemoryPhi>(KV.first)) {
2932 // We can only sanely verify that MemoryDefs in the operand list all have
2934 auto ReachableOperandPred = [&](const Use &U) {
2935 return ReachableEdges.count(
2936 {FirstMP->getIncomingBlock(U), FirstMP->getBlock()}) &&
2940 // All arguments should in the same class, ignoring unreachable arguments
2941 auto FilteredPhiArgs =
2942 make_filter_range(FirstMP->operands(), ReachableOperandPred);
2943 SmallVector<const CongruenceClass *, 16> PhiOpClasses;
2944 std::transform(FilteredPhiArgs.begin(), FilteredPhiArgs.end(),
2945 std::back_inserter(PhiOpClasses), [&](const Use &U) {
2946 const MemoryDef *MD = cast<MemoryDef>(U);
2947 return ValueToClass.lookup(MD->getMemoryInst());
2949 assert(std::equal(PhiOpClasses.begin(), PhiOpClasses.end(),
2950 PhiOpClasses.begin()) &&
2951 "All MemoryPhi arguments should be in the same class");
2957 // Verify that the sparse propagation we did actually found the maximal fixpoint
2958 // We do this by storing the value to class mapping, touching all instructions,
2959 // and redoing the iteration to see if anything changed.
2960 void NewGVN::verifyIterationSettled(Function &F) {
2962 DEBUG(dbgs() << "Beginning iteration verification\n");
// Replay with the same debug-counter position so the rerun matches.
2963 if (DebugCounter::isCounterSet(VNCounter))
2964 DebugCounter::setCounterValue(VNCounter, StartingVNCounter);
2966 // Note that we have to store the actual classes, as we may change existing
2967 // classes during iteration. This is because our memory iteration propagation
2968 // is not perfect, and so may waste a little work. But it should generate
2969 // exactly the same congruence classes we have now, with different IDs.
2970 std::map<const Value *, CongruenceClass> BeforeIteration;
2972 for (auto &KV : ValueToClass) {
2973 if (auto *I = dyn_cast<Instruction>(KV.first))
2974 // Skip unused/dead instructions.
2975 if (InstrToDFSNum(I) == 0)
// Snapshot by value: class contents may be mutated by the rerun.
2977 BeforeIteration.insert({KV.first, *KV.second});
// Touch everything (except slot 0, the dead/unused sentinel) and rerun.
2980 TouchedInstructions.set();
2981 TouchedInstructions.reset(0);
2982 iterateTouchedInstructions();
2983 DenseSet<std::pair<const CongruenceClass *, const CongruenceClass *>>
2985 for (const auto &KV : ValueToClass) {
2986 if (auto *I = dyn_cast<Instruction>(KV.first))
2987 // Skip unused/dead instructions.
2988 if (InstrToDFSNum(I) == 0)
2990 // We could sink these uses, but i think this adds a bit of clarity here as
2991 // to what we are comparing.
2992 auto *BeforeCC = &BeforeIteration.find(KV.first)->second;
2993 auto *AfterCC = KV.second;
2994 // Note that the classes can't change at this point, so we memoize the set
2996 if (!EqualClasses.count({BeforeCC, AfterCC})) {
2997 assert(BeforeCC->isEquivalentTo(AfterCC) &&
2998 "Value number changed after main loop completed!");
2999 EqualClasses.insert({BeforeCC, AfterCC});
3005 // Verify that for each store expression in the expression to class mapping,
3006 // only the latest appears, and multiple ones do not appear.
3007 // Because loads do not use the stored value when doing equality with stores,
3008 // if we don't erase the old store expressions from the table, a load can find
3009 // a no-longer valid StoreExpression.
3010 void NewGVN::verifyStoreExpressions() const {
3012 // This is the only use of this, and it's not worth defining a complicated
3013 // densemapinfo hash/equality function for it.
3015 std::pair<const Value *,
3016 std::tuple<const Value *, const CongruenceClass *, Value *>>>
3018 for (const auto &KV : ExpressionToClass) {
3019 if (auto *SE = dyn_cast<StoreExpression>(KV.first)) {
3020 // Make sure a version that will conflict with loads is not already there
// Key on the store's pointer operand; the payload records where the
// expression points (memory leader, class, stored value).
3021 auto Res = StoreExpressionSet.insert(
3022 {SE->getOperand(0), std::make_tuple(SE->getMemoryLeader(), KV.second,
3023 SE->getStoredValue())});
3024 bool Okay = Res.second;
3025 // It's okay to have the same expression already in there if it is
3026 // identical in nature.
3027 // This can happen when the leader of the stored value changes over time.
3029 Okay = (std::get<1>(Res.first->second) == KV.second) &&
3030 (lookupOperandLeader(std::get<2>(Res.first->second)) ==
3031 lookupOperandLeader(SE->getStoredValue()));
3032 assert(Okay && "Stored expression conflict exists in expression table");
// The table entry must also be the latest expression for the store.
3033 auto *ValueExpr = ValueToExpression.lookup(SE->getStoreInst());
3034 assert(ValueExpr && ValueExpr->equals(*SE) &&
3035 "StoreExpression in ExpressionToClass is not latest "
3036 "StoreExpression for value");
3042 // This is the main value numbering loop, it iterates over the initial touched
3043 // instruction set, propagating value numbers, marking things touched, etc,
3044 // until the set of touched instructions is completely empty.
3045 void NewGVN::iterateTouchedInstructions() {
3046 unsigned int Iterations = 0;
3047 // Figure out where touchedinstructions starts
3048 int FirstInstr = TouchedInstructions.find_first();
3049 // Nothing set, nothing to iterate, just return.
3050 if (FirstInstr == -1)
3052 const BasicBlock *LastBlock = getBlockForValue(InstrFromDFSNum(FirstInstr));
// Outer fixpoint loop: keep sweeping until no instruction is touched.
3053 while (TouchedInstructions.any()) {
3055 // Walk through all the instructions in all the blocks in RPO.
3056 // TODO: As we hit a new block, we should push and pop equalities into a
3057 // table lookupOperandLeader can use, to catch things PredicateInfo
3058 // might miss, like edge-only equivalences.
3059 for (unsigned InstrNum : TouchedInstructions.set_bits()) {
3061 // This instruction was found to be dead. We don't bother looking
// DFS number 0 is the dead/unused sentinel; just clear and skip it.
3063 if (InstrNum == 0) {
3064 TouchedInstructions.reset(InstrNum);
3068 Value *V = InstrFromDFSNum(InstrNum);
3069 const BasicBlock *CurrBlock = getBlockForValue(V);
3071 // If we hit a new block, do reachability processing.
3072 if (CurrBlock != LastBlock) {
3073 LastBlock = CurrBlock;
3074 bool BlockReachable = ReachableBlocks.count(CurrBlock);
3075 const auto &CurrInstRange = BlockInstRange.lookup(CurrBlock);
3077 // If it's not reachable, erase any touched instructions and move on.
3078 if (!BlockReachable) {
3079 TouchedInstructions.reset(CurrInstRange.first, CurrInstRange.second);
3080 DEBUG(dbgs() << "Skipping instructions in block "
3081 << getBlockName(CurrBlock)
3082 << " because it is unreachable\n");
3085 updateProcessedCount(CurrBlock);
3087 // Reset after processing (because we may mark ourselves as touched when
3088 // we propagate equalities).
3089 TouchedInstructions.reset(InstrNum);
// Dispatch on what kind of thing this DFS number denotes.
3091 if (auto *MP = dyn_cast<MemoryPhi>(V)) {
3092 DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n");
3093 valueNumberMemoryPhi(MP);
3094 } else if (auto *I = dyn_cast<Instruction>(V)) {
3095 valueNumberInstruction(I);
3097 llvm_unreachable("Should have been a MemoryPhi or Instruction");
3099 updateProcessedCount(V);
// Record the deepest iteration count seen across all functions (stat).
3102 NumGVNMaxIterations = std::max(NumGVNMaxIterations.getValue(), Iterations);
3105 // This is the main transformation entry point.
// Sets up DFS numbering and RPO-sorted dominator-tree order, runs the
// fixpoint value-numbering loop, then eliminates redundancies and deletes
// dead instructions/blocks. Returns true if the function was changed.
3106 bool NewGVN::runGVN() {
3107 if (DebugCounter::isCounterSet(VNCounter))
3108 StartingVNCounter = DebugCounter::getCounterValue(VNCounter);
3109 bool Changed = false;
3110 NumFuncArgs = F.arg_size();
3111 MSSAWalker = MSSA->getWalker();
3112 SingletonDeadExpression = new (ExpressionAllocator) DeadExpression();
3114 // Count number of instructions for sizing of hash tables, and come
3115 // up with a global dfs numbering for instructions.
3116 unsigned ICount = 1;
3117 // Add an empty instruction to account for the fact that we start at 1
3118 DFSToInstr.emplace_back(nullptr);
3119 // Note: We want ideal RPO traversal of the blocks, which is not quite the
3120 // same as dominator tree order, particularly with regard whether backedges
3121 // get visited first or second, given a block with multiple successors.
3122 // If we visit in the wrong order, we will end up performing N times as many
3124 // The dominator tree does guarantee that, for a given dom tree node, it's
3125 // parent must occur before it in the RPO ordering. Thus, we only need to sort
3127 ReversePostOrderTraversal<Function *> RPOT(&F);
3128 unsigned Counter = 0;
// First pass: give every block an RPO index.
3129 for (auto &B : RPOT) {
3130 auto *Node = DT->getNode(B);
3131 assert(Node && "RPO and Dominator tree should have same reachability");
3132 RPOOrdering[Node] = ++Counter;
3134 // Sort dominator tree children arrays into RPO.
3135 for (auto &B : RPOT) {
3136 auto *Node = DT->getNode(B);
3137 if (Node->getChildren().size() > 1)
3138 std::sort(Node->begin(), Node->end(),
3139 [&](const DomTreeNode *A, const DomTreeNode *B) {
3140 return RPOOrdering[A] < RPOOrdering[B];
3144 // Now a standard depth first ordering of the domtree is equivalent to RPO.
3145 for (auto DTN : depth_first(DT->getRootNode())) {
3146 BasicBlock *B = DTN->getBlock();
3147 const auto &BlockRange = assignDFSNumbers(B, ICount);
3148 BlockInstRange.insert({B, BlockRange});
3149 ICount += BlockRange.second - BlockRange.first;
3151 initializeCongruenceClasses(F);
3153 TouchedInstructions.resize(ICount);
3154 // Ensure we don't end up resizing the expressionToClass map, as
3155 // that can be quite expensive. At most, we have one expression per
3157 ExpressionToClass.reserve(ICount);
3159 // Initialize the touched instructions to include the entry block.
3160 const auto &InstRange = BlockInstRange.lookup(&F.getEntryBlock());
3161 TouchedInstructions.set(InstRange.first, InstRange.second);
3162 DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock())
3163 << " marked reachable\n");
3164 ReachableBlocks.insert(&F.getEntryBlock());
// Run the main fixpoint, then the (debug-only in practice) verifiers.
3166 iterateTouchedInstructions();
3167 verifyMemoryCongruency();
3168 verifyIterationSettled(F);
3169 verifyStoreExpressions();
3171 Changed |= eliminateInstructions(F);
3173 // Delete all instructions marked for deletion.
3174 for (Instruction *ToErase : InstructionsToErase) {
// RAUW with undef first so remaining users stay valid.
3175 if (!ToErase->use_empty())
3176 ToErase->replaceAllUsesWith(UndefValue::get(ToErase->getType()));
3178 if (ToErase->getParent())
3179 ToErase->eraseFromParent();
3182 // Delete all unreachable blocks.
3183 auto UnreachableBlockPred = [&](const BasicBlock &BB) {
3184 return !ReachableBlocks.count(&BB);
3187 for (auto &BB : make_filter_range(F, UnreachableBlockPred)) {
3188 DEBUG(dbgs() << "We believe block " << getBlockName(&BB)
3189 << " is unreachable\n");
3190 deleteInstructionsInBlock(&BB);
// Sort key used during elimination: orders defs and uses by dominator-tree
// DFS-in/DFS-out interval, then by local (per-block) instruction number.
3198 struct NewGVN::ValueDFS {
3202 // Only one of Def and U will be set.
3203 // The bool in the Def tells us whether the Def is the stored value of a
3205 PointerIntPair<Value *, 1, bool> Def;
3207 bool operator<(const ValueDFS &Other) const {
3208 // It's not enough that any given field be less than - we have sets
3209 // of fields that need to be evaluated together to give a proper ordering.
3210 // For example, if you have;
3215 // We want the second to be less than the first, but if we just go field
3216 // by field, we will get to Val 0 < Val 50 and say the first is less than
3217 // the second. We only want it to be less than if the DFS orders are equal.
3219 // Each LLVM instruction only produces one value, and thus the lowest-level
3220 // differentiator that really matters for the stack (and what we use as a
3221 // replacement) is the local dfs number.
3222 // Everything else in the structure is instruction level, and only affects
3223 // the order in which we will replace operands of a given instruction.
3225 // For a given instruction (IE things with equal dfsin, dfsout, localnum),
3226 // the order of replacement of uses does not matter.
3230 // When you hit b, you will have two valuedfs with the same dfsin, out, and
3232 // The .val will be the same as well.
3233 // The .u's will be different.
3234 // You will replace both, and it does not matter what order you replace them
3235 // in (IE whether you replace operand 2, then operand 1, or operand 1, then
3237 // Similarly for the case of same dfsin, dfsout, localnum, but different
3242 // in c, we will a valuedfs for a, and one for b,with everything the same
3244 // It does not matter what order we replace these operands in.
3245 // You will always end up with the same IR, and this is guaranteed.
// Lexicographic comparison over all fields via std::tie.
3246 return std::tie(DFSIn, DFSOut, LocalNum, Def, U) <
3247 std::tie(Other.DFSIn, Other.DFSOut, Other.LocalNum, Other.Def,
3252 // This function converts the set of members for a congruence class from values,
3253 // to sets of defs and uses with associated DFS info. The total number of
3254 // reachable uses for each value is stored in UseCount, and instructions that
3256 // dead (have no non-dead uses) are stored in ProbablyDead.
3257 void NewGVN::convertClassToDFSOrdered(
3258 const CongruenceClass &Dense, SmallVectorImpl<ValueDFS> &DFSOrderedSet,
3259 DenseMap<const Value *, unsigned int> &UseCounts,
3260 SmallPtrSetImpl<Instruction *> &ProbablyDead) const {
3261 for (auto D : Dense) {
3262 // First add the value.
3263 BasicBlock *BB = getBlockForValue(D);
3264 // Constants are handled prior to ever calling this function, so
3265 // we should only be left with instructions as members.
3266 assert(BB && "Should have figured out a basic block for value");
// DFS-in/out of the defining block establish the dominance interval.
3268 DomTreeNode *DomNode = DT->getNode(BB);
3269 VDDef.DFSIn = DomNode->getDFSNumIn();
3270 VDDef.DFSOut = DomNode->getDFSNumOut();
3271 // If it's a store, use the leader of the value operand, if it's always
3272 // available, or the value operand. TODO: We could do dominance checks to
3273 // find a dominating leader, but not worth it ATM.
3274 if (auto *SI = dyn_cast<StoreInst>(D)) {
3275 auto Leader = lookupOperandLeader(SI->getValueOperand());
3276 if (alwaysAvailable(Leader)) {
3277 VDDef.Def.setPointer(Leader);
3279 VDDef.Def.setPointer(SI->getValueOperand());
// The int bit marks "this Def is a store's value operand".
3280 VDDef.Def.setInt(true);
3283 VDDef.Def.setPointer(D);
3285 assert(isa<Instruction>(D) &&
3286 "The dense set member should always be an instruction");
3287 Instruction *Def = cast<Instruction>(D);
3288 VDDef.LocalNum = InstrToDFSNum(D);
3289 DFSOrderedSet.push_back(VDDef);
3290 // If there is a phi node equivalent, add it
3291 if (auto *PN = RealToTemp.lookup(Def)) {
3293 dyn_cast_or_null<PHIExpression>(ValueToExpression.lookup(Def));
3295 VDDef.Def.setInt(false);
3296 VDDef.Def.setPointer(PN);
3298 DFSOrderedSet.push_back(VDDef);
3302 unsigned int UseCount = 0;
3303 // Now add the uses.
3304 for (auto &U : Def->uses()) {
3305 if (auto *I = dyn_cast<Instruction>(U.getUser())) {
3306 // Don't try to replace into dead uses
3307 if (InstructionsToErase.count(I))
3310 // Put the phi node uses in the incoming block.
3312 if (auto *P = dyn_cast<PHINode>(I)) {
3313 IBlock = P->getIncomingBlock(U);
3314 // Make phi node users appear last in the incoming block
3316 VDUse.LocalNum = InstrDFS.size() + 1;
3318 IBlock = getBlockForValue(I);
3319 VDUse.LocalNum = InstrToDFSNum(I);
3322 // Skip uses in unreachable blocks, as we're going
3324 if (ReachableBlocks.count(IBlock) == 0)
3327 DomTreeNode *DomNode = DT->getNode(IBlock);
3328 VDUse.DFSIn = DomNode->getDFSNumIn();
3329 VDUse.DFSOut = DomNode->getDFSNumOut();
3332 DFSOrderedSet.emplace_back(VDUse);
3336 // If there are no uses, it's probably dead (but it may have side-effects,
3337 // so not definitely dead. Otherwise, store the number of uses so we can
3338 // track if it becomes dead later).
3340 ProbablyDead.insert(Def);
3342 UseCounts[Def] = UseCount;
// This function converts the set of members for a congruence class from values,
// to the set of defs for loads and stores, with associated DFS info.
// Non-memory members are filtered out; the result feeds dead-store
// elimination.
void NewGVN::convertClassToLoadsAndStores(
    const CongruenceClass &Dense,
    SmallVectorImpl<ValueDFS> &LoadsAndStores) const {
  for (auto D : Dense) {
    // Only loads and stores are collected.
    // NOTE(review): the guarded statement for this filter (likely 'continue;')
    // and the ValueDFS declaration appear elided in this excerpt.
    if (!isa<LoadInst>(D) && !isa<StoreInst>(D))
    BasicBlock *BB = getBlockForValue(D);
    // Record the dominator-tree DFS interval used for dominance checks.
    DomTreeNode *DomNode = DT->getNode(BB);
    VD.DFSIn = DomNode->getDFSNumIn();
    VD.DFSOut = DomNode->getDFSNumOut();
    VD.Def.setPointer(D);
    // If it's an instruction, use the real local dfs number.
    if (auto *I = dyn_cast<Instruction>(D))
      VD.LocalNum = InstrToDFSNum(I);
      llvm_unreachable("Should have been an instruction");
    LoadsAndStores.emplace_back(VD);
// Patch the replacement instruction so it can safely stand in for I:
// intersect IR flags (so the replacement is no stronger than I) and
// conservatively merge the known metadata kinds.
static void patchReplacementInstruction(Instruction *I, Value *Repl) {
  auto *ReplInst = dyn_cast<Instruction>(Repl);
  // NOTE(review): ReplInst is dereferenced below; an early
  // 'if (!ReplInst) return;' guard appears to be elided in this excerpt —
  // confirm against upstream before assuming Repl is always an Instruction.
  // Patch the replacement so that it is not more restrictive than the value
  // being replaced.
  // Note that if 'I' is a load being replaced by some operation,
  // for example, by an arithmetic operation, then andIRFlags()
  // would just erase all math flags from the original arithmetic
  // operation, which is clearly not wanted and not needed.
  if (!isa<LoadInst>(I))
    ReplInst->andIRFlags(I);
  // FIXME: If both the original and replacement value are part of the
  // same control-flow region (meaning that the execution of one
  // guarantees the execution of the other), then we can combine the
  // noalias scopes here and do better than the general conservative
  // answer used in combineMetadata().
  //
  // In general, GVN unifies expressions over different control-flow
  // regions, and so we need a conservative combination of the noalias
  // scopes.
  static const unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias, LLVMContext::MD_range,
      LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load,
      LLVMContext::MD_invariant_group};
  combineMetadata(ReplInst, I, KnownIDs);
// Patch flags/metadata on the replacement, then replace every use of I with
// Repl. I itself is not erased here; callers handle deletion separately.
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
// Erase every instruction in a dead block (replacing remaining uses with
// undef), then plant a store-to-null that simplifycfg will later turn into
// an unreachable terminator.
void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) {
  DEBUG(dbgs() << " BasicBlock Dead:" << *BB);
  ++NumGVNBlocksDeleted;
  // Delete the instructions backwards, as it has a reduced likelihood of having
  // to update as many def-use and use-def chains. Start after the terminator.
  auto StartPoint = BB->rbegin();
  // Note that we explicitly recalculate BB->rend() on each iteration,
  // as it may change when we remove the first instruction.
  for (BasicBlock::reverse_iterator I(StartPoint); I != BB->rend();) {
    Instruction &Inst = *I++;
    // Dead code may still be referenced elsewhere; sever those references
    // with undef before erasing.
    if (!Inst.use_empty())
      Inst.replaceAllUsesWith(UndefValue::get(Inst.getType()));
    // Landing pads are presumably left in place (they must pair with their
    // invoke predecessors); the guarded statement (likely 'continue;')
    // appears to be elided in this excerpt — confirm upstream.
    if (isa<LandingPadInst>(Inst))
    Inst.eraseFromParent();
    ++NumGVNInstrDeleted;
  // Now insert something that simplifycfg will turn into an unreachable.
  Type *Int8Ty = Type::getInt8Ty(BB->getContext());
  new StoreInst(UndefValue::get(Int8Ty),
                Constant::getNullValue(Int8Ty->getPointerTo()),
                BB->getTerminator());
// Queue I for deferred erasure; actual deletion is performed later so
// in-flight state (e.g. memory dependencies) is not invalidated mid-walk.
void NewGVN::markInstructionForDeletion(Instruction *I) {
  DEBUG(dbgs() << "Marking " << *I << " for deletion\n");
  InstructionsToErase.insert(I);
// Replace all uses of I with V (patching flags/metadata), then queue I for
// deletion rather than erasing it immediately.
void NewGVN::replaceInstruction(Instruction *I, Value *V) {
  DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n");
  patchAndReplaceAllUsesWith(I, V);
  // We save the actual erasing to avoid invalidating memory
  // dependencies until we are done with everything.
  markInstructionForDeletion(I);
// This is a stack that contains both the value and dfs info of where
// that value is valid. Entries pair a leader value with the (DFSIn, DFSOut)
// dominator-tree interval in which it is available; the two parallel stacks
// below are kept in lock-step.
// NOTE(review): access specifiers, the 'while (' header in popUntilDFSScope,
// and closing braces appear to be elided in this excerpt.
class ValueDFSStack {
  // Current leader (top of stack).
  Value *back() const { return ValueStack.back(); }
  // DFS interval of the current leader.
  std::pair<int, int> dfs_back() const { return DFSStack.back(); }
  // Push V together with the DFS interval in which it is the leader.
  void push_back(Value *V, int DFSIn, int DFSOut) {
    ValueStack.emplace_back(V);
    DFSStack.emplace_back(DFSIn, DFSOut);
  bool empty() const { return DFSStack.empty(); }
  // True when (DFSIn, DFSOut) is contained in the top interval, i.e. the
  // top-of-stack leader dominates the queried position.
  bool isInScope(int DFSIn, int DFSOut) const {
    return DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second;
  // Pop every entry whose interval does not contain (DFSIn, DFSOut) — those
  // leaders' scopes have ended relative to the current walk position.
  void popUntilDFSScope(int DFSIn, int DFSOut) {
    // These two should always be in sync at this point.
    assert(ValueStack.size() == DFSStack.size() &&
           "Mismatch between ValueStack and DFSStack");
        !DFSStack.empty() &&
        !(DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second)) {
      DFSStack.pop_back();
      ValueStack.pop_back();
  SmallVector<Value *, 8> ValueStack;
  SmallVector<std::pair<int, int>, 8> DFSStack;
// Given a value and a basic block we are trying to see if it is available in,
// see if the value has a leader available in that block.
// NOTE(review): the trailing returns (member found / nothing found) appear to
// be elided in this excerpt.
Value *NewGVN::findPhiOfOpsLeader(const Expression *E,
                                  const BasicBlock *BB) const {
  // It would already be constant if we could make it constant
  if (auto *CE = dyn_cast<ConstantExpression>(E))
    return CE->getConstantValue();
  if (auto *VE = dyn_cast<VariableExpression>(E))
    return VE->getVariableValue();
  auto *CC = ExpressionToClass.lookup(E);
  // An always-available leader (e.g. constant/argument) works in any block.
  if (alwaysAvailable(CC->getLeader()))
    return CC->getLeader();
  // Otherwise scan the class for a member whose block is BB or dominates BB.
  for (auto Member : *CC) {
    auto *MemberInst = dyn_cast<Instruction>(Member);
    // Anything that isn't an instruction is always available.
    // If we are looking for something in the same block as the member, it must
    // be a leader because this function is looking for operands for a phi node.
    if (MemberInst->getParent() == BB ||
        DT->dominates(MemberInst->getParent(), BB)) {
// Perform elimination over all congruence classes: replace dominated members
// and uses with their class leaders, mark newly-dead instructions, and run a
// simple dominance-based dead-store elimination per class.
// NOTE(review): many interior lines (braces, 'continue;' statements, variable
// declarations such as 'Value *Leader =' and 'bool OutOfScope =') appear to
// be elided in this excerpt; verify control flow against the upstream file.
bool NewGVN::eliminateInstructions(Function &F) {
  // This is a non-standard eliminator. The normal way to eliminate is
  // to walk the dominator tree in order, keeping track of available
  // values, and eliminating them. However, this is mildly
  // pointless. It requires doing lookups on every instruction,
  // regardless of whether we will ever eliminate it. For
  // instructions part of most singleton congruence classes, we know we
  // will never eliminate them.
  //
  // Instead, this eliminator looks at the congruence classes directly, sorts
  // them into a DFS ordering of the dominator tree, and then we just
  // perform elimination straight on the sets by walking the congruence
  // class member uses in order, and eliminate the ones dominated by the
  // last member. This is worst case O(E log E) where E = number of
  // instructions in a single congruence class. In theory, this is all
  // instructions. In practice, it is much faster, as most instructions are
  // either in singleton congruence classes or can't possibly be eliminated
  // anyway (if there are no overlapping DFS ranges in class).
  // When we find something not dominated, it becomes the new leader
  // for elimination purposes.
  // TODO: If we wanted to be faster, We could remove any members with no
  // overlapping ranges while sorting, as we will never eliminate anything
  // with those members, as they don't dominate anything else in our set.
  bool AnythingReplaced = false;
  // Since we are going to walk the domtree anyway, and we can't guarantee the
  // DFS numbers are updated, we compute some ourselves.
  DT->updateDFSNumbers();
  // Go through all of our phi nodes, and kill the arguments associated with
  // unreachable edges.
  auto ReplaceUnreachablePHIArgs = [&](PHINode &PHI, BasicBlock *BB) {
    for (auto &Operand : PHI.incoming_values())
      if (!ReachableEdges.count({PHI.getIncomingBlock(Operand), BB})) {
        DEBUG(dbgs() << "Replacing incoming value of " << PHI << " for block "
                     << getBlockName(PHI.getIncomingBlock(Operand))
                     << " with undef due to it being unreachable\n");
        Operand.set(UndefValue::get(PHI.getType()));
  // Collect blocks that start with a phi node, or that own phi-of-ops phis.
  SmallPtrSet<BasicBlock *, 8> BlocksWithPhis;
    if ((!B.empty() && isa<PHINode>(*B.begin())) ||
        (PHIOfOpsPHIs.find(&B) != PHIOfOpsPHIs.end()))
      BlocksWithPhis.insert(&B);
  // Count reachable predecessors per block from the reachable-edge set.
  DenseMap<const BasicBlock *, unsigned> ReachablePredCount;
  for (auto KV : ReachableEdges)
    ReachablePredCount[KV.getEnd()]++;
  for (auto *BB : BlocksWithPhis)
    // TODO: It would be faster to use getNumIncomingBlocks() on a phi node in
    // the block and subtract the pred count, but it's more complicated.
    if (ReachablePredCount.lookup(BB) !=
        unsigned(std::distance(pred_begin(BB), pred_end(BB)))) {
      for (auto II = BB->begin(); isa<PHINode>(II); ++II) {
        auto &PHI = cast<PHINode>(*II);
        ReplaceUnreachablePHIArgs(PHI, BB);
      for_each_found(PHIOfOpsPHIs, BB, [&](PHINode *PHI) {
        ReplaceUnreachablePHIArgs(*PHI, BB);
  // Map to store the use counts
  DenseMap<const Value *, unsigned int> UseCounts;
  for (auto *CC : reverse(CongruenceClasses)) {
    DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID() << "\n");
    // Track the equivalent store info so we can decide whether to try
    // dead store elimination.
    SmallVector<ValueDFS, 8> PossibleDeadStores;
    SmallPtrSet<Instruction *, 8> ProbablyDead;
    if (CC->isDead() || CC->empty())
    // Everything still in the TOP class is unreachable or dead.
    if (CC == TOPClass) {
      for (auto M : *CC) {
        auto *VTE = ValueToExpression.lookup(M);
        if (VTE && isa<DeadExpression>(VTE))
          markInstructionForDeletion(cast<Instruction>(M));
        assert((!ReachableBlocks.count(cast<Instruction>(M)->getParent()) ||
                InstructionsToErase.count(cast<Instruction>(M))) &&
               "Everything in TOP should be unreachable or dead at this "
    assert(CC->getLeader() && "We should have had a leader");
    // If this is a leader that is always available, and it's a
    // constant or has no equivalences, just replace everything with
    // it. We then update the congruence class with whatever members
    // remain.
        CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
    if (alwaysAvailable(Leader)) {
      CongruenceClass::MemberSet MembersLeft;
      for (auto M : *CC) {
        // Void things have no uses we can replace.
        if (Member == Leader || !isa<Instruction>(Member) ||
            Member->getType()->isVoidTy()) {
          MembersLeft.insert(Member);
        DEBUG(dbgs() << "Found replacement " << *(Leader) << " for " << *Member
        auto *I = cast<Instruction>(Member);
        assert(Leader != I && "About to accidentally remove our leader");
        replaceInstruction(I, Leader);
        AnythingReplaced = true;
      CC->swap(MembersLeft);
    // If this is a singleton, we can skip it.
    if (CC->size() != 1 || RealToTemp.lookup(Leader)) {
      // This is a stack because equality replacement/etc may place
      // constants in the middle of the member list, and we want to use
      // those constant values in preference to the current leader, over
      // the scope of those constants.
      ValueDFSStack EliminationStack;
      // Convert the members to DFS ordered sets and then merge them.
      SmallVector<ValueDFS, 8> DFSOrderedSet;
      convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead);
      // Sort the whole thing.
      std::sort(DFSOrderedSet.begin(), DFSOrderedSet.end());
      for (auto &VD : DFSOrderedSet) {
        int MemberDFSIn = VD.DFSIn;
        int MemberDFSOut = VD.DFSOut;
        Value *Def = VD.Def.getPointer();
        bool FromStore = VD.Def.getInt();
        // We ignore void things because we can't get a value from them.
        if (Def && Def->getType()->isVoidTy())
        auto *DefInst = dyn_cast_or_null<Instruction>(Def);
        if (DefInst && AllTempInstructions.count(DefInst)) {
          auto *PN = cast<PHINode>(DefInst);
          // If this is a value phi and that's the expression we used, insert
          // it into the program
          // remove from temp instruction list.
          AllTempInstructions.erase(PN);
          auto *DefBlock = getBlockForValue(Def);
          DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def
                       << getBlockName(getBlockForValue(Def)) << "\n");
          PN->insertBefore(&DefBlock->front());
          NumGVNPHIOfOpsEliminations++;
        if (EliminationStack.empty()) {
          DEBUG(dbgs() << "Elimination Stack is empty\n");
          DEBUG(dbgs() << "Elimination Stack Top DFS numbers are ("
                       << EliminationStack.dfs_back().first << ","
                       << EliminationStack.dfs_back().second << ")\n");
        DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << ","
                     << MemberDFSOut << ")\n");
        // First, we see if we are out of scope or empty. If so,
        // and there are equivalences, we try to replace the top of
        // stack with equivalences (if it's on the stack, it must
        // not have been eliminated yet).
        // Then we synchronize to our current scope, by
        // popping until we are back within a DFS scope that
        // dominates the current member.
        // Then, what happens depends on a few factors
        // If the stack is now empty, we need to push
        // If we have a constant or a local equivalence we want to
        // start using, we also push.
        // Otherwise, we walk along, processing members who are
        // dominated by this scope, and eliminate them.
        bool ShouldPush = Def && EliminationStack.empty();
            !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut);
        if (OutOfScope || ShouldPush) {
          // Sync to our current scope.
          EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
          bool ShouldPush = Def && EliminationStack.empty();
            EliminationStack.push_back(Def, MemberDFSIn, MemberDFSOut);
        // Skip the Def's, we only want to eliminate on their uses. But mark
        // dominated defs as dead.
          // For anything in this case, what and how we value number
          // guarantees that any side-effects that would have occurred (ie
          // throwing, etc) can be proven to either still occur (because it's
          // dominated by something that has the same side-effects), or never
          // occur. Otherwise, we would not have been able to prove it value
          // equivalent to something else. For these things, we can just mark
          // it all dead. Note that this is different from the "ProbablyDead"
          // set, which may not be dominated by anything, and thus, are only
          // easy to prove dead if they are also side-effect free. Note that
          // because stores are put in terms of the stored value, we skip
          // stored values here. If the stored value is really dead, it will
          // still be marked for deletion when we process it in its own class.
          if (!EliminationStack.empty() && Def != EliminationStack.back() &&
              isa<Instruction>(Def) && !FromStore)
            markInstructionForDeletion(cast<Instruction>(Def));
        // At this point, we know it is a Use we are trying to possibly
        // replace.
        assert(isa<Instruction>(U->get()) &&
               "Current def should have been an instruction");
        assert(isa<Instruction>(U->getUser()) &&
               "Current user should have been an instruction");
        // If the thing we are replacing into is already marked to be dead,
        // this use is dead. Note that this is true regardless of whether
        // we have anything dominating the use or not. We do this here
        // because we are already walking all the uses anyway.
        Instruction *InstUse = cast<Instruction>(U->getUser());
        if (InstructionsToErase.count(InstUse)) {
          auto &UseCount = UseCounts[U->get()];
          if (--UseCount == 0) {
            ProbablyDead.insert(cast<Instruction>(U->get()));
        // If we get to this point, and the stack is empty we must have a use
        // with nothing we can use to eliminate this use, so just skip it.
        if (EliminationStack.empty())
        Value *DominatingLeader = EliminationStack.back();
        // See through ssa_copy intrinsics inserted by PredicateInfo: replace
        // with the original operand instead of the copy.
        auto *II = dyn_cast<IntrinsicInst>(DominatingLeader);
        if (II && II->getIntrinsicID() == Intrinsic::ssa_copy)
          DominatingLeader = II->getOperand(0);
        // Don't replace our existing users with ourselves.
        if (U->get() == DominatingLeader)
        DEBUG(dbgs() << "Found replacement " << *DominatingLeader << " for "
                     << *U->get() << " in " << *(U->getUser()) << "\n");
        // If we replaced something in an instruction, handle the patching of
        // metadata. Skip this if we are replacing predicateinfo with its
        // original operand, as we already know we can just drop it.
        auto *ReplacedInst = cast<Instruction>(U->get());
        auto *PI = PredInfo->getPredicateInfoFor(ReplacedInst);
        if (!PI || DominatingLeader != PI->OriginalOp)
          patchReplacementInstruction(ReplacedInst, DominatingLeader);
        U->set(DominatingLeader);
        // This is now a use of the dominating leader, which means if the
        // dominating leader was dead, it's now live!
        auto &LeaderUseCount = UseCounts[DominatingLeader];
        // It's about to be alive again.
        if (LeaderUseCount == 0 && isa<Instruction>(DominatingLeader))
          ProbablyDead.erase(cast<Instruction>(DominatingLeader));
        // If the leader was an ssa_copy with no other uses, the copy itself
        // may now be dead.
        if (LeaderUseCount == 0 && II)
          ProbablyDead.insert(II);
        AnythingReplaced = true;
    // At this point, anything still in the ProbablyDead set is actually dead
    // if it would be trivially dead.
    for (auto *I : ProbablyDead)
      if (wouldInstructionBeTriviallyDead(I))
        markInstructionForDeletion(I);
    // Cleanup the congruence class.
    CongruenceClass::MemberSet MembersLeft;
    for (auto *Member : *CC)
      if (!isa<Instruction>(Member) ||
          !InstructionsToErase.count(cast<Instruction>(Member)))
        MembersLeft.insert(Member);
    CC->swap(MembersLeft);
    // If we have possible dead stores to look at, try to eliminate them.
    if (CC->getStoreCount() > 0) {
      convertClassToLoadsAndStores(*CC, PossibleDeadStores);
      std::sort(PossibleDeadStores.begin(), PossibleDeadStores.end());
      ValueDFSStack EliminationStack;
      for (auto &VD : PossibleDeadStores) {
        int MemberDFSIn = VD.DFSIn;
        int MemberDFSOut = VD.DFSOut;
        Instruction *Member = cast<Instruction>(VD.Def.getPointer());
        if (EliminationStack.empty() ||
            !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut)) {
          // Sync to our current scope.
          EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
          if (EliminationStack.empty()) {
            EliminationStack.push_back(Member, MemberDFSIn, MemberDFSOut);
        // We already did load elimination, so nothing to do here.
        if (isa<LoadInst>(Member))
        assert(!EliminationStack.empty());
        Instruction *Leader = cast<Instruction>(EliminationStack.back());
        assert(DT->dominates(Leader->getParent(), Member->getParent()));
        // Member is dominated by Leader, and thus dead
        DEBUG(dbgs() << "Marking dead store " << *Member
                     << " that is dominated by " << *Leader << "\n");
        markInstructionForDeletion(Member);
  return AnythingReplaced;
// This function provides global ranking of operations so that we can place them
// in a canonical order. Note that rank alone is not necessarily enough for a
// complete ordering, as constants all have the same rank. However, generally,
// we will simplify an operation with all constants so that it doesn't matter
// what order they appear in.
unsigned int NewGVN::getRank(const Value *V) const {
  // Prefer constants to undef to anything else
  // Undef is a constant, have to check it first.
  // Prefer smaller constants to constantexprs
  // NOTE(review): the return statements for the three constant-like
  // categories below (presumably small fixed ranks) appear to be elided in
  // this excerpt — verify against the upstream file.
  if (isa<ConstantExpr>(V))
  if (isa<UndefValue>(V))
  if (isa<Constant>(V))
  else if (auto *A = dyn_cast<Argument>(V))
    return 3 + A->getArgNo();
  // Need to shift the instruction DFS by number of arguments + 3 to account for
  // the constant and argument ranking above.
  unsigned Result = InstrToDFSNum(V);
    return 4 + NumFuncArgs + Result;
  // Unreachable or something else, just return a really large number.
3865 // This is a function that says whether two commutative operations should
3866 // have their order swapped when canonicalizing.
3867 bool NewGVN::shouldSwapOperands(const Value *A, const Value *B) const {
3868 // Because we only care about a total ordering, and don't rewrite expressions
3869 // in this order, we order by rank, which will give a strict weak ordering to
3870 // everything but constants, and then we order by pointer address.
3871 return std::make_pair(getRank(A), A) > std::make_pair(getRank(B), B);
// Legacy pass-manager wrapper around the NewGVN implementation.
// NOTE(review): access specifiers and closing braces appear to be elided in
// this excerpt.
class NewGVNLegacyPass : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid.
  NewGVNLegacyPass() : FunctionPass(ID) {
    initializeNewGVNLegacyPassPass(*PassRegistry::getPassRegistry());
  bool runOnFunction(Function &F) override;
  // Declare the analyses NewGVN consumes, and those it preserves.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
// Legacy PM entry point: fetch the required analyses and run NewGVN on F.
bool NewGVNLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
  return NewGVN(F, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
                &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
                &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
                &getAnalysis<AAResultsWrapperPass>().getAAResults(),
                &getAnalysis<MemorySSAWrapperPass>().getMSSA(),
                F.getParent()->getDataLayout())
// Register the legacy pass and its analysis dependencies with the
// PassRegistry.
INITIALIZE_PASS_BEGIN(NewGVNLegacyPass, "newgvn", "Global Value Numbering",
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(NewGVNLegacyPass, "newgvn", "Global Value Numbering", false,
char NewGVNLegacyPass::ID = 0;
// createNewGVNPass - The public interface to this file.
FunctionPass *llvm::createNewGVNPass() { return new NewGVNLegacyPass(); }
3924 PreservedAnalyses NewGVNPass::run(Function &F, AnalysisManager<Function> &AM) {
3925 // Apparently the order in which we get these results matter for
3926 // the old GVN (see Chandler's comment in GVN.cpp). I'll keep
3927 // the same order here, just in case.
3928 auto &AC = AM.getResult<AssumptionAnalysis>(F);
3929 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
3930 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
3931 auto &AA = AM.getResult<AAManager>(F);
3932 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
3934 NewGVN(F, &DT, &AC, &TLI, &AA, &MSSA, F.getParent()->getDataLayout())
3937 return PreservedAnalyses::all();
3938 PreservedAnalyses PA;
3939 PA.preserve<DominatorTreeAnalysis>();
3940 PA.preserve<GlobalsAA>();