//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// Memory SSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be gotten by using the MemoryUse associated with it, and walking from use
/// to def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple reaching
/// definitions, which would require multiple phis, and multiple memoryaccesses
/// per instruction.
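///
/// As an illustration, a minimal sketch (assuming a built MemorySSA object
/// `MSSA` and a load instruction `LI` in its function; both names are
/// hypothetical) of walking from a load's MemoryUse toward the top of the
/// function:
///
/// \code
///   MemoryUseOrDef *MU = MSSA.getMemoryAccess(LI);
///   MemoryAccess *MA = MU->getDefiningAccess();
///   while (!MSSA.isLiveOnEntryDef(MA)) {
///     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
///       MA = MUD->getDefiningAccess(); // Hop from def to its reaching def.
///     else
///       break; // Hit a MemoryPhi; its operands fan out per predecessor.
///   }
/// \endcode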
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

class Function;
class Instruction;
class MemoryAccess;
class LLVMContext;
class raw_ostream;
class MemorySSAWalker;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

// Used to signify what the default invalid ID is for MemoryAccess's
// getID()
enum {
  INVALID_MEMORYACCESS_ID = 0
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// \brief The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  void *operator new(size_t) = delete;

  // Methods for support type inquiry through isa, cast, and
  // dyn_cast.
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// \brief The user iterators for a memory access
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// \brief This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// \brief Get the iterators for the all access list and the defs only list.
  /// We default to the all access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// \brief Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

private:
  BasicBlock *Block;
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// \brief Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// \brief Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInst; }

  /// \brief Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

  // Sadly, these have to be public because they are needed in some of the
  // iterators.
  inline bool isOptimized() const;
  inline MemoryAccess *getOptimized() const;
  inline void setOptimized(MemoryAccess *);

  /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
      : MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInst(MI) {
    setDefiningAccess(DMA);
  }

  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
  }

private:
  Instruction *MemoryInst;
};

template <>
struct OperandTraits<MemoryUseOrDef>
    : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)

/// \brief Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  unsigned int OptimizedID = 0;
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)

/// \brief Represents a read-write access to memory, whether it is a must-alias,
/// or may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
class MemoryDef final : public MemoryUseOrDef {
public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB), ID(Ver) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void setOptimized(MemoryAccess *MA) {
    Optimized = MA;
    OptimizedID = getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const { return Optimized; }

  bool isOptimized() const {
    return getOptimized() && getDefiningAccess() &&
           OptimizedID == getDefiningAccess()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  const unsigned ID;
  MemoryAccess *Optimized = nullptr;
  unsigned int OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

/// \brief Represents phi nodes for memory accesses.
///
/// These have the same semantic as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(1)
///    store %b
/// }
/// and *not*
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(liveOnEntry)
///    store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// problem.
class MemoryPhi final : public MemoryAccess {
  // allocate space for exactly zero operands
  void *operator new(size_t s) { return User::operator new(s); }

public:
  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  block_iterator block_begin() {
    auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<block_iterator>(Ref + 1);
  }

  const_block_iterator block_begin() const {
    const auto *Ref =
        reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<const_block_iterator>(Ref + 1);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// \brief Return the number of incoming edges
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// \brief Return incoming value number x
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// \brief Return incoming basic block number @p i.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// \brief Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// \brief Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// \brief Add an incoming value to the end of the PHI list
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// \brief Return the first index of the specified basic
  /// block in the value list for this PHI. Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }
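
  // A minimal sketch (the name `MP` is hypothetical) of inspecting a
  // MemoryPhi: each incoming value is the reaching MemoryDef/MemoryPhi
  // along the edge from the corresponding incoming block.
  //
  //   for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I) {
  //     BasicBlock *Pred = MP->getIncomingBlock(I);
  //     MemoryAccess *Reaching = MP->getIncomingValue(I);
  //   }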

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// \brief This is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

  // For debugging only
  const unsigned ID;
  unsigned ReservedSpace;

  /// \brief This grows the operand list in response to a push_back style of
  /// operation. This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};

inline unsigned MemoryAccess::getID() const {
  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
         "only memory defs and phis have ids");
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getID();
  return cast<MemoryPhi>(this)->getID();
}

inline bool MemoryUseOrDef::isOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->isOptimized();
  return cast<MemoryUse>(this)->isOptimized();
}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getOptimized();
  return cast<MemoryUse>(this)->getOptimized();
}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->setOptimized(MA);
  else
    cast<MemoryUse>(this)->setOptimized(MA);
}

inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

/// \brief Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
  ~MemorySSA();

  MemorySSAWalker *getWalker();

  /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block, this gets the
  /// MemoryPhi node that exists for that block, if there is one. Otherwise,
  /// this will get a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
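
  // A minimal sketch (assuming `MSSA` is a built MemorySSA object and `I` is
  // an instruction in its function; both names are hypothetical). The result
  // is null for instructions that neither read nor write memory:
  //
  //   if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(&I))
  //     MemoryAccess *DefiningAccess = MUD->getDefiningAccess();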

  void dump() const;
  void print(raw_ostream &) const;

  /// \brief Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, own and delete pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag. If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// \brief Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }
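
  // A minimal sketch (assuming `MSSA` and a basic block `BB` are in scope;
  // names hypothetical) of visiting every access in a block, in order:
  //
  //   if (const MemorySSA::AccessList *Accesses = MSSA.getBlockAccesses(&BB))
  //     for (const MemoryAccess &MA : *Accesses) {
  //       // MA is a MemoryUse, MemoryDef, or MemoryPhi of this block.
  //     }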

  /// \brief Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// \brief Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  /// \brief Verify that MemorySSA is self consistent (IE definitions dominate
  /// all uses, uses appear in the right places). This is used by unit tests.
  void verifyMemorySSA() const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End };

protected:
  // Used by Memory SSA annotator, dumpers, and wrapper pass
  friend class MemorySSAAnnotatedWriter;
  friend class MemorySSAPrinterLegacyPass;
  friend class MemorySSAUpdater;

  void verifyDefUses(Function &F) const;
  void verifyDomination(Function &F) const;
  void verifyOrdering(Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);

private:
  class CachingWalker;

  CachingWalker *getWalkerImpl();
  void buildMemorySSA();
  void optimizeUses();

  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void
  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  MemoryUseOrDef *createNewAccess(Instruction *);
  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
                     const DenseMap<const BasicBlock *, unsigned int> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;

  AliasAnalysis *AA;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<CachingWalker> Walker;
  unsigned NextID;
};

// Internal MemorySSA utils, for use by MemorySSA classes and walkers
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};

// This pass does eager building and then printing of MemorySSA. It is used by
// the tests to be able to build, dump, and verify Memory SSA.
class MemorySSAPrinterLegacyPass : public FunctionPass {
public:
  MemorySSAPrinterLegacyPass();

  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  static char ID;
};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction. Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA.get(); }

    std::unique_ptr<MemorySSA> MSSA;
  };

  Result run(Function &F, FunctionAnalysisManager &AM);
};
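
// A minimal sketch (assuming a new-pass-manager function pass; the pass name
// is hypothetical) of obtaining MemorySSA through this analysis:
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     ...
//     return PreservedAnalyses::all();
//   }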

/// \brief Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// \brief Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// \brief Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }
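
  // A minimal sketch (legacy pass manager; the enclosing pass is
  // hypothetical) of obtaining MemorySSA through this wrapper:
  //
  //   bool MyLegacyPass::runOnFunction(Function &F) {
  //     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  //     ...
  //   }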

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};

/// \brief This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }
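
  // A minimal sketch (assuming `Walker` came from MSSA.getWalker() and `LI`
  // is a load MemorySSA knows about; names hypothetical):
  //
  //   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LI);
  //   if (MSSA.isLiveOnEntryDef(Clobber)) {
  //     // No store in this function clobbers the loaded location.
  //   }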

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// \brief Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// \brief Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation. This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

  virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};

/// \brief A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// \brief Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
  // block from the operand in constant time (In a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPHI's, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename BaseT::iterator::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator &operator++() {
    assert(Access && "Hit end of iterator");
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};

/// \brief Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), nullptr,
                                        false))
        if (Translator.getAddr() != Location.Ptr) {
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
    }
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  bool WalkingPhi = false;
};

inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
  return upward_defs_iterator(Pair);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair) {
  return make_range(upward_defs_begin(Pair), upward_defs_end());
}
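
// A minimal sketch (assuming `MU` is the MemoryUse of a load `LI`; names
// hypothetical) of walking upwards while tracking the phi-translated
// location:
//
//   MemoryLocation Loc = MemoryLocation::get(LI);
//   for (const MemoryAccessPair &Pair :
//        upward_defs({MU->getDefiningAccess(), Loc})) {
//     MemoryAccess *MA = Pair.first;           // def (or phi argument)
//     const MemoryLocation &TranslatedLoc = Pair.second;
//   }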

/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
/// comparing against a null def_chain_iterator, this will compare equal only
/// after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
/// a phi node. The optimized chain walks the clobbering access of a store.
/// So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      MA = nullptr;
    }
    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}
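
// A minimal sketch (assuming `MD` is a MemoryDef; the name is hypothetical)
// of walking every def above it, stopping at liveOnEntry or a MemoryPhi:
//
//   for (MemoryAccess *MA : def_chain(cast<MemoryAccess>(MD))) {
//     // MA visits MD itself, then each MemoryDef above it in turn.
//   }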

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H