//===-- MemorySSA.cpp - Memory SSA Builder---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------===//
#include "llvm/Transforms/Utils/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Transforms/Scalar.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;
STATISTIC(NumClobberCacheLookups, "Number of Memory SSA version cache lookups");
STATISTIC(NumClobberCacheHits, "Number of Memory SSA version cache hits");
STATISTIC(NumClobberCacheInserts, "Number of MemorySSA version cache inserts");

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<bool>
    VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
                    cl::desc("Verify MemorySSA in legacy printer pass."));

namespace llvm {
/// \brief An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                        formatted_raw_ostream &OS) {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  virtual void emitInstructionAnnot(const Instruction *I,
                                    formatted_raw_ostream &OS) {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

/// \brief A MemorySSAWalker that does AA walks and caching of lookups to
/// disambiguate accesses.
///
/// FIXME: The current implementation of this can take quadratic space in rare
/// cases. This can be fixed, but it is something to note until it is fixed.
///
/// In order to trigger this behavior, you need to store to N distinct locations
/// (that AA can prove don't alias), perform M stores to other memory
/// locations that AA can prove don't alias any of the initial N locations, and
/// then load from all of the N locations. In this case, we insert M cache
/// entries for each of the N loads.
///
/// For example:
/// define i32 @foo() {
///   %a = alloca i32, align 4
///   %b = alloca i32, align 4
///   store i32 0, i32* %a, align 4
///   store i32 0, i32* %b, align 4
///
///   ; Insert M stores to other memory that doesn't alias %a or %b here
///
///   %c = load i32, i32* %a, align 4 ; Caches M entries in
///                                   ; CachedUpwardsClobberingAccess for the
///                                   ; MemoryLocation %a
///   %d = load i32, i32* %b, align 4 ; Caches M entries in
///                                   ; CachedUpwardsClobberingAccess for the
///                                   ; MemoryLocation %b
///
///   ; For completeness' sake, loading %a or %b again would not cache *another*
///   ; M entries.
///
///   %r = add i32 %c, %d
///   ret i32 %r
/// }
class MemorySSA::CachingWalker final : public MemorySSAWalker {
public:
  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
  ~CachingWalker() override;

  MemoryAccess *getClobberingMemoryAccess(const Instruction *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          MemoryLocation &) override;
  void invalidateInfo(MemoryAccess *) override;

protected:
  struct UpwardsMemoryQuery;
  MemoryAccess *doCacheLookup(const MemoryAccess *, const UpwardsMemoryQuery &,
                              const MemoryLocation &);

  void doCacheInsert(const MemoryAccess *, MemoryAccess *,
                     const UpwardsMemoryQuery &, const MemoryLocation &);

  void doCacheRemove(const MemoryAccess *, const UpwardsMemoryQuery &,
                     const MemoryLocation &);

private:
  MemoryAccessPair UpwardsDFSWalk(MemoryAccess *, const MemoryLocation &,
                                  UpwardsMemoryQuery &, bool);
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
  bool instructionClobbersQuery(const MemoryDef *, UpwardsMemoryQuery &,
                                const MemoryLocation &Loc) const;
  void verifyRemoved(MemoryAccess *);
  SmallDenseMap<ConstMemoryAccessPair, MemoryAccess *>
      CachedUpwardsClobberingAccess;
  DenseMap<const MemoryAccess *, MemoryAccess *> CachedUpwardsClobberingCall;
  AliasAnalysis *AA;
  DominatorTree *DT;
};
}

namespace {
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}
  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};
}

namespace llvm {
/// \brief Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB,
                                     MemoryAccess *IncomingVal) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      switch (L.getValueID()) {
      case Value::MemoryUseVal:
        cast<MemoryUse>(&L)->setDefiningAccess(IncomingVal);
        break;
      case Value::MemoryDefVal:
        // We can't legally optimize defs, because we only allow single
        // memory phis/uses on operations, and if we optimize these, we can
        // end up with multiple reaching defs. Uses do not have this
        // problem, since they do not produce a value.
        cast<MemoryDef>(&L)->setDefiningAccess(IncomingVal);
        IncomingVal = &L;
        break;
      case Value::MemoryPhiVal:
        IncomingVal = &L;
        break;
      }
    }
  }

  // Pass through values to our successors.
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(IncomingVal, BB);
  }

  return IncomingVal;
}

/// \brief This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSet<BasicBlock *, 16> &Visited) {
  SmallVector<RenamePassData, 32> WorkStack;
  IncomingVal = renameBlock(Root->getBlock(), IncomingVal);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});
  Visited.insert(Root->getBlock());
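
  // Each stack entry records which dominator-tree child to visit next and the
  // memory state that was live on entry to that subtree, so the walk can
  // resume where it left off without recursion.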
  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      Visited.insert(BB);
      IncomingVal = renameBlock(BB, IncomingVal);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// \brief Compute dominator levels, used by the phi insertion algorithm above.
void MemorySSA::computeDomLevels(DenseMap<DomTreeNode *, unsigned> &DomLevels) {
  for (auto DFI = df_begin(DT->getRootNode()), DFE = df_end(DT->getRootNode());
       DFI != DFE; ++DFI)
    DomLevels[*DFI] = DFI.getPathLength() - 1;
}

/// \brief This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      NextID(0) {
  buildMemorySSA();
}

MemorySSA::MemorySSA(MemorySSA &&MSSA)
    : AA(MSSA.AA), DT(MSSA.DT), F(MSSA.F),
      ValueToMemoryAccess(std::move(MSSA.ValueToMemoryAccess)),
      PerBlockAccesses(std::move(MSSA.PerBlockAccesses)),
      LiveOnEntryDef(std::move(MSSA.LiveOnEntryDef)),
      Walker(std::move(MSSA.Walker)), NextID(MSSA.NextID) {
  // Update the Walker MSSA pointer so it doesn't point to the moved-from MSSA
  // object any more.
  if (Walker)
    Walker->MSSA = this;
}

MemorySSA::~MemorySSA() {
  // Drop all our references.
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}
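
// Note: the per-block access lists are created lazily; a block gets a list the
// first time one of its accesses is inserted.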
MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
  if (Res.second)
    Res.first->second = make_unique<AccessList>();
  return Res.first->second.get();
}

void MemorySSA::buildMemorySSA() {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef = make_unique<MemoryDef>(F.getContext(), nullptr, nullptr,
                                          &StartingPoint, NextID++);

  // We maintain lists of memory accesses per-block, trading memory for time. We
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  SmallPtrSet<BasicBlock *, 32> DefUseBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I);
      if (!MUD)
        continue;
      InsertIntoDef |= isa<MemoryDef>(MUD);

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
    if (Accesses)
      DefUseBlocks.insert(&B);
  }

  // Live in is normally defined as "all the blocks on the path from each def to
  // each of its uses".
  // MemoryDef's are implicit uses of previous state, so they are also uses.
  // This means we don't really have def-only instructions. The only
  // MemoryDef's that are not really uses are those that are of the LiveOnEntry
  // variable (because LiveOnEntry can reach anywhere, and every def is a
  // must-kill of LiveOnEntry).
  // In theory, you could precisely compute live-in by using alias-analysis to
  // disambiguate defs and uses to see which really pair up with which.
  // In practice, this would be really expensive and difficult. So we simply
  // assume all defs are also uses that need to be kept live.
  // Because of this, the end result of this live-in computation will be "the
  // entire set of basic blocks that reach any use".

  SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(DefUseBlocks.begin(),
                                                    DefUseBlocks.end());
  // Now that we have a set of blocks where a value is live-in, recursively add
  // predecessors until we find the full region the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set. If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor or
    // live into all of its predecessors.
    LiveInBlockWorklist.append(pred_begin(BB), pred_end(BB));
  }

  // Determine where our MemoryPhi's should go.
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  IDFs.setLiveInBlocks(LiveInBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);
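
  // IDFBlocks now holds the iterated dominance frontier of the defining
  // blocks, pruned to blocks where the memory state is live-in; these are
  // exactly the join points that need a MemoryPhi.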
  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks) {
    // Insert phi node.
    AccessList *Accesses = getOrCreateAccessList(BB);
    MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
    ValueToMemoryAccess.insert(std::make_pair(BB, Phi));
    // Phis are always placed at the front of the block.
    Accesses->push_front(Phi);
  }

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  MemorySSAWalker *Walker = getWalker();

  // Now optimize the MemoryUse's defining access to point to the nearest
  // dominating clobbering def.
  // This ensures that MemoryUse's that are killed by the same store are
  // immediate users of that store, one of the invariants we guarantee.
  for (auto DomNode : depth_first(DT)) {
    BasicBlock *BB = DomNode->getBlock();
    auto AI = PerBlockAccesses.find(BB);
    if (AI == PerBlockAccesses.end())
      continue;
    AccessList *Accesses = AI->second.get();
    for (auto &MA : *Accesses) {
      if (auto *MU = dyn_cast<MemoryUse>(&MA)) {
        Instruction *Inst = MU->getMemoryInst();
        MU->setDefiningAccess(Walker->getClobberingMemoryAccess(Inst));
      }
    }
  }

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}

MemorySSAWalker *MemorySSA::getWalker() {
  if (Walker)
    return Walker.get();

  Walker = make_unique<CachingWalker>(this, AA, DT);
  return Walker.get();
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  AccessList *Accesses = getOrCreateAccessList(BB);
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  ValueToMemoryAccess.insert(std::make_pair(BB, Phi));
  // Phis are always placed at the front of the block.
  Accesses->push_front(Phi);
  return Phi;
}

MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I);
  assert(
      NewAccess != nullptr &&
      "Tried to create a memory access for a non-memory touching instruction");
  NewAccess->setDefiningAccess(Definition);
  return NewAccess;
}
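
// For example, a pass that has hoisted a store into BB could plug the new
// instruction into MemorySSA with something like (hypothetical caller code):
//   MSSA->createMemoryAccessInBB(NewStore, Clobber, BB, MemorySSA::Beginning);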
MemoryAccess *MemorySSA::createMemoryAccessInBB(Instruction *I,
                                                MemoryAccess *Definition,
                                                const BasicBlock *BB,
                                                InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = createDefinedAccess(I, Definition);
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // It goes after any phi nodes.
    auto AI = std::find_if(
        Accesses->begin(), Accesses->end(),
        [](const MemoryAccess &MA) { return !isa<MemoryPhi>(MA); });

    Accesses->insert(AI, NewAccess);
  } else {
    Accesses->push_back(NewAccess);
  }
  return NewAccess;
}

MemoryAccess *MemorySSA::createMemoryAccessBefore(Instruction *I,
                                                  MemoryAccess *Definition,
                                                  MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = createDefinedAccess(I, Definition);
  auto *Accesses = getOrCreateAccessList(InsertPt->getBlock());
  Accesses->insert(AccessList::iterator(InsertPt), NewAccess);
  return NewAccess;
}

MemoryAccess *MemorySSA::createMemoryAccessAfter(Instruction *I,
                                                 MemoryAccess *Definition,
                                                 MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = createDefinedAccess(I, Definition);
  auto *Accesses = getOrCreateAccessList(InsertPt->getBlock());
  Accesses->insertAfter(AccessList::iterator(InsertPt), NewAccess);
  return NewAccess;
}

/// \brief Helper function to create new memory accesses.
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Ignore that fake memory dependency here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::assume)
      return nullptr;

  // Find out what effect this instruction has on memory.
  ModRefInfo ModRef = AA->getModRefInfo(I);
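  // Split out the mod and ref components: any write makes this a MemoryDef,
  // while a pure read only makes it a MemoryUse.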
  bool Def = bool(ModRef & MRI_Mod);
  bool Use = bool(ModRef & MRI_Ref);

  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore such instructions.
  if (!Def && !Use)
    return nullptr;

  assert((Def || Use) &&
         "Trying to create a memory access with a non-memory instruction");

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess.insert(std::make_pair(I, MUD));
  return MUD;
}

MemoryAccess *MemorySSA::findDominatingDef(BasicBlock *UseBlock,
                                           enum InsertionPlace Where) {
  // Handle the initial case.
  if (Where == Beginning)
    // The only thing that could define us at the beginning is a phi node.
    if (MemoryPhi *Phi = getMemoryAccess(UseBlock))
      return Phi;

  DomTreeNode *CurrNode = DT->getNode(UseBlock);
  // Need to be defined by our dominator.
  if (Where == Beginning)
    CurrNode = CurrNode->getIDom();
  Where = End;
  while (CurrNode) {
    auto It = PerBlockAccesses.find(CurrNode->getBlock());
    if (It != PerBlockAccesses.end()) {
      auto &Accesses = It->second;
      for (MemoryAccess &RA : reverse(*Accesses)) {
        if (isa<MemoryDef>(RA) || isa<MemoryPhi>(RA))
          return &RA;
      }
    }
    CurrNode = CurrNode->getIDom();
  }
  return LiveOnEntryDef.get();
}

/// \brief Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since we may occur multiple times in the phi node, we have to check each
  // operand to ensure Replacer dominates each operand where Replacee occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}

/// \brief If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

/// \brief Properly remove \p MA from all of MemorySSA's lookup tables.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary.
  if (!isa<MemoryUse>(MA))
    Walker->invalidateInfo(MA);
  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  Value *MemoryInst;
  if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    MemoryInst = MUD->getMemoryInst();
  } else {
    MemoryInst = MA->getBlock();
  }
  ValueToMemoryAccess.erase(MemoryInst);

  auto AccessIt = PerBlockAccesses.find(MA->getBlock());
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  Accesses->erase(MA);
  if (Accesses->empty())
    PerBlockAccesses.erase(AccessIt);
}

void MemorySSA::removeMemoryAccess(MemoryAccess *MA) {
  assert(!isLiveOnEntryDef(MA) && "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the assert
    // below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  // Re-point the uses at our defining access.
  if (!MA->use_empty())
    MA->replaceAllUsesWith(NewDefTarget);

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  removeFromLookups(MA);
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

void MemorySSA::dump() const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(dbgs(), &Writer);
}

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
}

/// \brief Verify that the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    MemoryAccess *Phi = getMemoryAccess(&B);
    if (Phi)
      ActualAccesses.push_back(Phi);
    for (Instruction &I : B) {
      MemoryAccess *MA = getMemoryAccess(&I);
      assert((!MA || AL) && "We have memory affecting instructions "
                            "in this block but they are not in the "
                            "access list");
      if (MA)
        ActualAccesses.push_back(MA);
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list.
    if (!AL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
  }
}

/// \brief Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *MP = getMemoryAccess(&B)) {
      for (User *U : MP->users()) {
        BasicBlock *UseBlock;
        // Phi operands are used on edges, we simulate the right domination by
        // acting as if the use occurred at the end of the predecessor block.
        if (MemoryPhi *P = dyn_cast<MemoryPhi>(U)) {
          for (const auto &Arg : P->operands()) {
            if (Arg == MP) {
              UseBlock = P->getIncomingBlock(Arg);
              break;
            }
          }
        } else {
          UseBlock = cast<MemoryAccess>(U)->getBlock();
        }

        assert(DT->dominates(MP->getBlock(), UseBlock) &&
               "Memory PHI does not dominate its uses");
      }
    }

    for (Instruction &I : B) {
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (User *U : MD->users()) {
        BasicBlock *UseBlock;

        // Things are allowed to flow to phi nodes over their predecessor edge.
        if (auto *P = dyn_cast<MemoryPhi>(U)) {
          for (const auto &Arg : P->operands()) {
            if (Arg == MD) {
              UseBlock = P->getIncomingBlock(Arg);
              break;
            }
          }
        } else {
          UseBlock = cast<MemoryAccess>(U)->getBlock();
        }

        assert(DT->dominates(MD->getBlock(), UseBlock) &&
               "Memory Def does not dominate its uses");
      }
    }
  }
}

/// \brief Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
///
/// llvm_unreachable is used instead of asserts because this may be called in
/// a build without asserts. In that case, we don't want this to turn into a
/// nop.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
  // The live on entry use may cause us to get a NULL def here.
  if (!Def) {
    if (!isLiveOnEntryDef(Use))
      llvm_unreachable("Null def but use does not point to live on entry def");
  } else if (std::find(Def->user_begin(), Def->user_end(), Use) ==
             Def->user_end()) {
    llvm_unreachable("Did not find use in def's use list");
  }
}

/// \brief Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list.
void MemorySSA::verifyDefUses(Function &F) const {
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
    }

    for (Instruction &I : B) {
      if (MemoryAccess *MA = getMemoryAccess(&I)) {
        assert(isa<MemoryUseOrDef>(MA) &&
               "Found a phi node not attached to a bb");
        verifyUseInDefs(cast<MemoryUseOrDef>(MA)->getDefiningAccess(), MA);
      }
    }
  }
}

MemoryAccess *MemorySSA::getMemoryAccess(const Value *I) const {
  return ValueToMemoryAccess.lookup(I);
}

MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
  return cast_or_null<MemoryPhi>(getMemoryAccess((const Value *)BB));
}

/// \brief Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {

  assert((Dominator->getBlock() == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");

  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by
  // another memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other
  // memory access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  // Get the access list for the block.
  const AccessList *AccessList = getBlockAccesses(Dominator->getBlock());
  AccessList::const_reverse_iterator It(Dominator->getIterator());

  // If we hit the beginning of the access list before we hit dominatee, we
  // must dominate it.
  return std::none_of(It, AccessList->rend(),
                      [&](const MemoryAccess &MA) { return &MA == Dominatee; });
}

const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  OS << getID() << " = MemoryDef(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
}

void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

MemoryAccess::~MemoryAccess() {}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
}

void MemoryAccess::dump() const {
  print(dbgs());
  dbgs() << "\n";
}

char MemorySSAPrinterLegacyPass::ID = 0;

MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
  AU.addPreserved<MemorySSAWrapperPass>();
}

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}

char MemorySSAAnalysis::PassID;

MemorySSA MemorySSAAnalysis::run(Function &F, AnalysisManager<Function> &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSA(F, &AA, &DT);
}

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).print(OS);

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}

void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
                                        DominatorTree *D)
    : MemorySSAWalker(M), AA(A), DT(D) {}

MemorySSA::CachingWalker::~CachingWalker() {}

struct MemorySSA::CachingWalker::UpwardsMemoryQuery {
  // True if we saw a phi whose predecessor was a backedge.
  bool SawBackedgePhi;
  // True if our original query started off as a call.
  bool IsCall;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst;
  // Set of visited Instructions for this query.
  DenseSet<MemoryAccessPair> Visited;
  // Vector of visited call accesses for this query. This is separated out
  // because you can always cache and lookup the result of call queries (IE when
  // IsCall == true) for every call in the chain. The calls have no AA location
  // associated with them, and thus, no context dependence.
  SmallVector<const MemoryAccess *, 32> VisitedCalls;
  // The MemoryAccess we actually got called with, used to test local
  // domination.
  const MemoryAccess *OriginalAccess;

  UpwardsMemoryQuery()
      : SawBackedgePhi(false), IsCall(false), Inst(nullptr),
        OriginalAccess(nullptr) {}

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : SawBackedgePhi(false), IsCall(ImmutableCallSite(Inst)), Inst(Inst),
        OriginalAccess(Access) {}
};

void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {

  // TODO: We can do much better cache invalidation with differently stored
  // caches. For now, for MemoryUses, we simply remove them
  // from the cache, and kill the entire call/non-call cache for everything
  // else. The problem is for phis or defs, currently we'd need to follow use
  // chains down and invalidate anything below us in the chain that currently
  // terminates at this access.

  // See if this is a MemoryUse, if so, just remove the cached info. MemoryUse
  // is by definition never a barrier, so nothing in the cache could point to
  // this use. In that case, we only need invalidate the info for the use
  // itself.

  if (MemoryUse *MU = dyn_cast<MemoryUse>(MA)) {
    UpwardsMemoryQuery Q;
    Instruction *I = MU->getMemoryInst();
    Q.IsCall = bool(ImmutableCallSite(I));
    Q.Inst = I;
    if (!Q.IsCall)
      Q.StartingLoc = MemoryLocation::get(I);
    doCacheRemove(MA, Q, Q.StartingLoc);
  } else {
    // If it is not a use, the best we can do right now is destroy the cache.
    CachedUpwardsClobberingCall.clear();
    CachedUpwardsClobberingAccess.clear();
  }

#ifdef EXPENSIVE_CHECKS
  // Run this only when expensive checks are enabled.
  verifyRemoved(MA);
#endif
}

void MemorySSA::CachingWalker::doCacheRemove(const MemoryAccess *M,
                                             const UpwardsMemoryQuery &Q,
                                             const MemoryLocation &Loc) {
  if (Q.IsCall)
    CachedUpwardsClobberingCall.erase(M);
  else
    CachedUpwardsClobberingAccess.erase({M, Loc});
}

void MemorySSA::CachingWalker::doCacheInsert(const MemoryAccess *M,
                                             MemoryAccess *Result,
                                             const UpwardsMemoryQuery &Q,
                                             const MemoryLocation &Loc) {
  // This is fine for Phis, since there are times where we can't optimize them.
  // Making a def its own clobber is never correct, though.
  assert((Result != M || isa<MemoryPhi>(M)) &&
         "Something can't clobber itself!");
  ++NumClobberCacheInserts;
  if (Q.IsCall)
    CachedUpwardsClobberingCall[M] = Result;
  else
    CachedUpwardsClobberingAccess[{M, Loc}] = Result;
}

MemoryAccess *
MemorySSA::CachingWalker::doCacheLookup(const MemoryAccess *M,
                                        const UpwardsMemoryQuery &Q,
                                        const MemoryLocation &Loc) {
  ++NumClobberCacheLookups;
  MemoryAccess *Result;

  if (Q.IsCall)
    Result = CachedUpwardsClobberingCall.lookup(M);
  else
    Result = CachedUpwardsClobberingAccess.lookup({M, Loc});

  if (Result)
    ++NumClobberCacheHits;
  return Result;
}

bool MemorySSA::CachingWalker::instructionClobbersQuery(
    const MemoryDef *MD, UpwardsMemoryQuery &Q,
    const MemoryLocation &Loc) const {
  Instruction *DefMemoryInst = MD->getMemoryInst();
  assert(DefMemoryInst && "Defining instruction not actually an instruction");

  if (!Q.IsCall)
    return AA->getModRefInfo(DefMemoryInst, Loc) & MRI_Mod;

  // If this is a call, mark it for caching.
  if (ImmutableCallSite(DefMemoryInst))
    Q.VisitedCalls.push_back(MD);
  ModRefInfo I = AA->getModRefInfo(DefMemoryInst, ImmutableCallSite(Q.Inst));
  return I != MRI_NoModRef;
}
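
/// Walk upwards from \p StartingAccess, looking for something that clobbers
/// \p Loc. Returns the clobbering access, paired with the (possibly
/// phi-translated) location it was found for.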
MemoryAccessPair MemorySSA::CachingWalker::UpwardsDFSWalk(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    UpwardsMemoryQuery &Q, bool FollowingBackedge) {
  MemoryAccess *ModifyingAccess = nullptr;
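
  // Note: df_iterator keeps the current walk path on its internal stack
  // (getPathLength/getPath); the cache-population code at the bottom of this
  // function walks that path, so we must not destroy it mid-walk.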
  auto DFI = df_begin(StartingAccess);
  for (auto DFE = df_end(StartingAccess); DFI != DFE;) {
    MemoryAccess *CurrAccess = *DFI;
    if (MSSA->isLiveOnEntryDef(CurrAccess))
      return {CurrAccess, Loc};
    // If this is a MemoryDef, check whether it clobbers our current query. This
    // needs to be done before consulting the cache, because the cache reports
    // the clobber for CurrAccess. If CurrAccess is a clobber for this query,
    // and we ask the cache for information first, then we might skip this
    // clobber, which is bad.
    if (auto *MD = dyn_cast<MemoryDef>(CurrAccess)) {
      // If we hit the top, stop following this path.
      // While we can do lookups, we can't sanely do inserts here unless we were
      // to track everything we saw along the way, since we don't know where we
      // will stop.
      if (instructionClobbersQuery(MD, Q, Loc)) {
        ModifyingAccess = CurrAccess;
        break;
      }
    }
    if (auto CacheResult = doCacheLookup(CurrAccess, Q, Loc))
      return {CacheResult, Loc};

    // We need to know whether it is a phi so we can track backedges.
    // Otherwise, walk all upward defs.
    if (!isa<MemoryPhi>(CurrAccess)) {
      ++DFI;
      continue;
    }

#ifndef NDEBUG
    // The loop below visits the phi's children for us. Because phis are the
    // only things with multiple edges, skipping the children should always lead
    // us to the end of the loop.
    //
    // Use a copy of DFI because skipChildren would kill our search stack, which
    // would make caching anything on the way back impossible.
    auto DFICopy = DFI;
    assert(DFICopy.skipChildren() == DFE &&
           "Skipping phi's children doesn't end the DFS?");
#endif

    const MemoryAccessPair PHIPair(CurrAccess, Loc);

    // Don't try to optimize this phi again if we've already tried to do so.
    if (!Q.Visited.insert(PHIPair).second) {
      ModifyingAccess = CurrAccess;
      break;
    }

    std::size_t InitialVisitedCallSize = Q.VisitedCalls.size();

    // Recurse on PHI nodes, since we need to change locations.
    // TODO: Allow graphtraits on pairs, which would turn this whole function
    // into a normal single depth first walk.
    MemoryAccess *FirstDef = nullptr;
    for (auto MPI = upward_defs_begin(PHIPair), MPE = upward_defs_end();
         MPI != MPE; ++MPI) {
      bool Backedge =
          !FollowingBackedge &&
          DT->dominates(CurrAccess->getBlock(), MPI.getPhiArgBlock());

      MemoryAccessPair CurrentPair =
          UpwardsDFSWalk(MPI->first, MPI->second, Q, Backedge);
      // All the phi arguments should reach the same point if we can bypass
      // this phi. The alternative is that they hit this phi node, which
      // means we can skip this argument.
      if (FirstDef && CurrentPair.first != PHIPair.first &&
          CurrentPair.first != FirstDef) {
        ModifyingAccess = CurrAccess;
        break;
      }

      if (!FirstDef)
        FirstDef = CurrentPair.first;
    }

    // If we exited the loop early, go with the result it gave us.
    if (!ModifyingAccess) {
      assert(FirstDef && "Found a Phi with no upward defs?");
      ModifyingAccess = FirstDef;
    } else {
      // If we can't optimize this Phi, then we can't safely cache any of the
      // calls we visited when trying to optimize it. Wipe them out now.
      Q.VisitedCalls.resize(InitialVisitedCallSize);
    }
    break;
  }

  if (!ModifyingAccess)
    return {MSSA->getLiveOnEntryDef(), Q.StartingLoc};

  const BasicBlock *OriginalBlock = StartingAccess->getBlock();
  assert(DFI.getPathLength() > 0 && "We dropped our path?");
  unsigned N = DFI.getPathLength();
  // If we found a clobbering def, the last element in the path will be our
  // clobber, so we don't want to cache that to itself. OTOH, if we optimized a
  // phi, we can add the last thing in the path to the cache, since that won't
  // be the result.
  if (DFI.getPath(N - 1) == ModifyingAccess)
    --N;
  for (; N > 1; --N) {
    MemoryAccess *CacheAccess = DFI.getPath(N - 1);
    BasicBlock *CurrBlock = CacheAccess->getBlock();
    if (!FollowingBackedge)
      doCacheInsert(CacheAccess, ModifyingAccess, Q, Loc);
    if (DT->dominates(CurrBlock, OriginalBlock) &&
        (CurrBlock != OriginalBlock || !FollowingBackedge ||
         MSSA->locallyDominates(CacheAccess, StartingAccess)))
      break;
  }

  // Cache everything else on the way back. The caller should cache
  // StartingAccess for us.
  for (; N > 1; --N) {
    MemoryAccess *CacheAccess = DFI.getPath(N - 1);
    doCacheInsert(CacheAccess, ModifyingAccess, Q, Loc);
  }

  return {ModifyingAccess, Loc};
}

/// \brief Walk the use-def chains starting at \p MA and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access.
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
  return UpwardsDFSWalk(StartingAccess, Q.StartingLoc, Q, false).first;
}

MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, MemoryLocation &Loc) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if
  // we hit a fence.
  if (!ImmutableCallSite(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = StartingUseOrDef->getMemoryInst();
  Q.IsCall = false;

  if (auto CacheResult = doCacheLookup(StartingUseOrDef, Q, Q.StartingLoc))
    return CacheResult;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
  // Only cache this if it wouldn't make Clobber point to itself.
  if (Clobber != StartingAccess)
    doCacheInsert(Q.OriginalAccess, Clobber, Q, Q.StartingLoc);
  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *StartingUseOrDef << "\n");
  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}

MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(const Instruction *I) {
  // There should be no way to lookup an instruction and get a phi as the
  // access, since we only map BB's to PHI's. So, this must be a use or def.
  auto *StartingAccess = cast<MemoryUseOrDef>(MSSA->getMemoryAccess(I));

  bool IsCall = bool(ImmutableCallSite(I));

  // We can't sanely do anything with fences, they conservatively
  // clobber all memory, and have no locations to get pointers from to
  // try to disambiguate.
  if (!IsCall && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingAccess;
  Q.IsCall = IsCall;
  if (!Q.IsCall)
    Q.StartingLoc = MemoryLocation::get(I);
  Q.Inst = I;
  if (auto CacheResult = doCacheLookup(StartingAccess, Q, Q.StartingLoc))
    return CacheResult;

  // Start with the thing we already think clobbers this location.
  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

  // At this point, DefiningAccess may be the live on entry def.
  // If it is, we will not get a better result.
  if (MSSA->isLiveOnEntryDef(DefiningAccess))
    return DefiningAccess;

  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
  // DFS won't cache a result for DefiningAccess. So, if DefiningAccess isn't
  // our clobber, be sure that it gets a cache entry, too.
  if (Result != DefiningAccess)
    doCacheInsert(DefiningAccess, Result, Q, Q.StartingLoc);
  doCacheInsert(Q.OriginalAccess, Result, Q, Q.StartingLoc);
  // TODO: When this implementation is more mature, we may want to figure out
  // what this additional caching buys us. It's most likely A Good Thing.
  if (Q.IsCall)
    for (const MemoryAccess *MA : Q.VisitedCalls)
      if (MA != Result)
        doCacheInsert(MA, Result, Q, Q.StartingLoc);

  DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *DefiningAccess << "\n");
  DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  DEBUG(dbgs() << *Result << "\n");

  return Result;
}

// Verify that MA doesn't exist in any of the caches.
void MemorySSA::CachingWalker::verifyRemoved(MemoryAccess *MA) {
  for (auto &P : CachedUpwardsClobberingAccess)
    assert(P.first.first != MA && P.second != MA &&
           "Found removed MemoryAccess in cache.");
  for (auto &P : CachedUpwardsClobberingCall)
    assert(P.first != MA && P.second != MA &&
           "Found removed MemoryAccess in cache.");
}

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) {
  MemoryAccess *MA = MSSA->getMemoryAccess(I);
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}
}