1 //===--- RDFGraph.cpp -----------------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Target-independent, SSA-based data flow graph for register data flow (RDF).
13 #include "llvm/ADT/SetVector.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/CodeGen/MachineBasicBlock.h"
16 #include "llvm/CodeGen/MachineDominanceFrontier.h"
17 #include "llvm/CodeGen/MachineDominators.h"
18 #include "llvm/CodeGen/MachineFunction.h"
19 #include "llvm/CodeGen/MachineInstr.h"
20 #include "llvm/CodeGen/MachineOperand.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/IR/Function.h"
23 #include "llvm/MC/LaneBitmask.h"
24 #include "llvm/MC/MCInstrDesc.h"
25 #include "llvm/MC/MCRegisterInfo.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/Target/TargetInstrInfo.h"
29 #include "llvm/Target/TargetLowering.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
// NOTE(review): this listing is an elided excerpt — original line numbers are
// embedded in each line and ranges are missing (e.g. else-branches, returns,
// closing braces). Code is kept byte-identical; confirm against the upstream
// RDFGraph.cpp before modifying.
//
// Debug-printing helpers: each operator<< renders one graph entity in the
// compact textual form used by the DFG dump.
42 // Printing functions. Have them here first, so that the rest of the code
// Prints ":<lanemask>" (the colon prefix is visible below; the guard that
// presumably suppresses a full mask is elided from this view).
47 raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P) {
49 OS << ':' << PrintLaneMask(P.Mask);
// Prints a register reference: target register name when the id is in the
// physical-register range, otherwise "#<id>" (else-line elided), followed by
// the optional lane mask.
54 raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterRef> &P) {
55 auto &TRI = P.G.getTRI();
56 if (P.Obj.Reg > 0 && P.Obj.Reg < TRI.getNumRegs())
57 OS << TRI.getName(P.Obj.Reg);
59 OS << '#' << P.Obj.Reg;
60 OS << PrintLaneMaskOpt(P.Obj.Mask);
// Prints a node id prefixed with a one-letter type/kind/flag mnemonic:
// f=func, b=block, s=stmt, p=phi for code nodes; u=use, d=def for refs.
// Flag-derived suffix characters (Undef/Dead/Preserving/Clobbering/Shadow
// branches below) have their output lines elided in this excerpt.
65 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeId> &P) {
66 auto NA = P.G.addr<NodeBase*>(P.Obj);
67 uint16_t Attrs = NA.Addr->getAttrs();
68 uint16_t Kind = NodeAttrs::kind(Attrs);
69 uint16_t Flags = NodeAttrs::flags(Attrs);
70 switch (NodeAttrs::type(Attrs)) {
73 case NodeAttrs::Func: OS << 'f'; break;
74 case NodeAttrs::Block: OS << 'b'; break;
75 case NodeAttrs::Stmt: OS << 's'; break;
76 case NodeAttrs::Phi: OS << 'p'; break;
77 default: OS << "c?"; break;
81 if (Flags & NodeAttrs::Undef)
83 if (Flags & NodeAttrs::Dead)
85 if (Flags & NodeAttrs::Preserving)
87 if (Flags & NodeAttrs::Clobbering)
90 case NodeAttrs::Use: OS << 'u'; break;
91 case NodeAttrs::Def: OS << 'd'; break;
92 case NodeAttrs::Block: OS << 'b'; break;
93 default: OS << "r?"; break;
101 if (Flags & NodeAttrs::Shadow)
// Shared prefix for any reference node: "<id><<regref>>" plus a marker for
// fixed-register refs (output line for the Fixed case is elided).
106 static void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA,
107 const DataFlowGraph &G) {
108 OS << Print<NodeId>(RA.Id, G) << '<'
109 << Print<RegisterRef>(RA.Addr->getRegRef(G), G) << '>';
110 if (RA.Addr->getFlags() & NodeAttrs::Fixed)
// NOTE(review): elided excerpt — interior lines (separators between the
// printed link fields, closing braces) are missing. Kept byte-identical.
//
// Def node: header plus its data-flow links (reaching def, reached def,
// reached use, sibling) — each printed only when nonzero.
115 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<DefNode*>> &P) {
116 printRefHeader(OS, P.Obj, P.G);
118 if (NodeId N = P.Obj.Addr->getReachingDef())
119 OS << Print<NodeId>(N, P.G);
121 if (NodeId N = P.Obj.Addr->getReachedDef())
122 OS << Print<NodeId>(N, P.G);
124 if (NodeId N = P.Obj.Addr->getReachedUse())
125 OS << Print<NodeId>(N, P.G);
127 if (NodeId N = P.Obj.Addr->getSibling())
128 OS << Print<NodeId>(N, P.G);
// Use node: header plus reaching def and sibling links.
133 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<UseNode*>> &P) {
134 printRefHeader(OS, P.Obj, P.G);
136 if (NodeId N = P.Obj.Addr->getReachingDef())
137 OS << Print<NodeId>(N, P.G);
139 if (NodeId N = P.Obj.Addr->getSibling())
140 OS << Print<NodeId>(N, P.G);
// Phi-use node: like a use, but also prints the predecessor block link.
145 raw_ostream &operator<< (raw_ostream &OS,
146 const Print<NodeAddr<PhiUseNode*>> &P) {
147 printRefHeader(OS, P.Obj, P.G);
149 if (NodeId N = P.Obj.Addr->getReachingDef())
150 OS << Print<NodeId>(N, P.G);
152 if (NodeId N = P.Obj.Addr->getPredecessor())
153 OS << Print<NodeId>(N, P.G);
155 if (NodeId N = P.Obj.Addr->getSibling())
156 OS << Print<NodeId>(N, P.G);
// Generic ref node: dispatches on kind; a use with the PhiRef flag is
// printed as a phi-use.
161 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<RefNode*>> &P) {
162 switch (P.Obj.Addr->getKind()) {
164 OS << PrintNode<DefNode*>(P.Obj, P.G);
167 if (P.Obj.Addr->getFlags() & NodeAttrs::PhiRef)
168 OS << PrintNode<PhiUseNode*>(P.Obj, P.G);
170 OS << PrintNode<UseNode*>(P.Obj, P.G);
// Lists/sets of nodes are printed as their ids (the inter-element separator
// lines are elided from this view).
177 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeList> &P) {
178 unsigned N = P.Obj.size();
179 for (auto I : P.Obj) {
180 OS << Print<NodeId>(I.Id, P.G);
188 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeSet> &P) {
189 unsigned N = P.Obj.size();
190 for (auto I : P.Obj) {
191 OS << Print<NodeId>(I, P.G);
// PrintListV<T>: a small view object pairing a NodeList with the graph so
// the list can be streamed with per-element PrintNode<T> formatting.
200 template <typename T>
202 PrintListV(const NodeList &L, const DataFlowGraph &G) : List(L), G(G) {}
205 const NodeList &List;
206 const DataFlowGraph &G;
209 template <typename T>
210 raw_ostream &operator<< (raw_ostream &OS, const PrintListV<T> &P) {
211 unsigned N = P.List.size();
212 for (NodeAddr<T> A : P.List) {
213 OS << PrintNode<T>(A, P.G);
220 } // end anonymous namespace
// NOTE(review): elided excerpt — return statements and several closing
// braces are missing from this view. Kept byte-identical.
//
// Phi node: "<id>: phi [ members ]".
223 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<PhiNode*>> &P) {
224 OS << Print<NodeId>(P.Obj.Id, P.G) << ": phi ["
225 << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
// Statement node: id, opcode name, and — for calls/branches — the first
// MBB/global/symbol operand as a human-readable target, then the members.
230 raw_ostream &operator<< (raw_ostream &OS,
231 const Print<NodeAddr<StmtNode*>> &P) {
232 const MachineInstr &MI = *P.Obj.Addr->getCode();
233 unsigned Opc = MI.getOpcode();
234 OS << Print<NodeId>(P.Obj.Id, P.G) << ": " << P.G.getTII().getName(Opc);
235 // Print the target for calls and branches (for readability).
236 if (MI.isCall() || MI.isBranch()) {
237 MachineInstr::const_mop_iterator T =
238 llvm::find_if(MI.operands(),
239 [] (const MachineOperand &Op) -> bool {
240 return Op.isMBB() || Op.isGlobal() || Op.isSymbol();
242 if (T != MI.operands_end()) {
245 OS << "BB#" << T->getMBB()->getNumber();
246 else if (T->isGlobal())
247 OS << T->getGlobal()->getName();
248 else if (T->isSymbol())
249 OS << T->getSymbolName();
252 OS << " [" << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
// Instruction node: dispatches to the phi or statement printer by kind.
257 raw_ostream &operator<< (raw_ostream &OS,
258 const Print<NodeAddr<InstrNode*>> &P) {
259 switch (P.Obj.Addr->getKind()) {
261 OS << PrintNode<PhiNode*>(P.Obj, P.G);
263 case NodeAttrs::Stmt:
264 OS << PrintNode<StmtNode*>(P.Obj, P.G);
267 OS << "instr? " << Print<NodeId>(P.Obj.Id, P.G);
// Block node: header with predecessor/successor BB numbers, then each
// member instruction on its own line.
274 raw_ostream &operator<< (raw_ostream &OS,
275 const Print<NodeAddr<BlockNode*>> &P) {
276 MachineBasicBlock *BB = P.Obj.Addr->getCode();
277 unsigned NP = BB->pred_size();
279 auto PrintBBs = [&OS,&P] (std::vector<int> Ns) -> void {
280 unsigned N = Ns.size();
288 OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- BB#" << BB->getNumber()
289 << " --- preds(" << NP << "): ";
290 for (MachineBasicBlock *B : BB->predecessors())
291 Ns.push_back(B->getNumber());
294 unsigned NS = BB->succ_size();
295 OS << " succs(" << NS << "): ";
297 for (MachineBasicBlock *B : BB->successors())
298 Ns.push_back(B->getNumber());
302 for (auto I : P.Obj.Addr->members(P.G))
303 OS << PrintNode<InstrNode*>(I, P.G) << '\n';
// Function node: dump header with the function name, then every block.
308 raw_ostream &operator<< (raw_ostream &OS,
309 const Print<NodeAddr<FuncNode*>> &P) {
310 OS << "DFG dump:[\n" << Print<NodeId>(P.Obj.Id, P.G) << ": Function: "
311 << P.Obj.Addr->getCode()->getName() << '\n';
312 for (auto I : P.Obj.Addr->members(P.G))
313 OS << PrintNode<BlockNode*>(I, P.G) << '\n';
// Register set: space-separated register references.
319 raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterSet> &P) {
322 OS << ' ' << Print<RegisterRef>(I, P.G);
// Register aggregate: body elided here; delegates to RegisterAggr::print
// in the upstream source — TODO confirm.
328 raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterAggr> &P) {
// Definition stack: each entry as "<id><<regref>>", top to bottom.
334 raw_ostream &operator<< (raw_ostream &OS,
335 const Print<DataFlowGraph::DefStack> &P) {
336 for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) {
337 OS << Print<NodeId>(I->Id, P.G)
338 << '<' << Print<RegisterRef>(I->Addr->getRegRef(P.G), P.G) << '>';
346 } // end namespace rdf
347 } // end namespace llvm
// NOTE(review): elided excerpt — lines such as the Blocks.push_back in
// startNewBlock and parts of New()/clear() are missing. Kept byte-identical.
349 // Node allocation functions.
351 // Node allocator is like a slab memory allocator: it allocates blocks of
352 // memory in sizes that are multiples of the size of a node. Each block has
353 // the same size. Nodes are allocated from the currently active block, and
354 // when it becomes full, a new one is created.
355 // There is a mapping scheme between node id and its location in a block,
356 // and within that block is described in the header file.
// Allocate a fresh slab of NodesPerBlock nodes and make it the active block.
358 void NodeAllocator::startNewBlock() {
359 void *T = MemPool.Allocate(NodesPerBlock*NodeMemSize, NodeMemSize);
360 char *P = static_cast<char*>(T);
362 // Check if the block index is still within the allowed range, i.e. less
363 // than 2^N, where N is the number of bits in NodeId for the block index.
364 // BitsPerIndex is the number of bits per node index.
365 assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) &&
366 "Out of bits for block index");
// True when the active block has no room for another node (the guard for
// an empty Blocks vector is elided from this view).
370 bool NodeAllocator::needNewBlock() {
374 char *ActiveBegin = Blocks.back();
375 uint32_t Index = (ActiveEnd-ActiveBegin)/NodeMemSize;
376 return Index >= NodesPerBlock;
// Carve the next node out of the active block and hand back its address
// together with the id encoding (block#, index-within-block).
379 NodeAddr<NodeBase*> NodeAllocator::New() {
383 uint32_t ActiveB = Blocks.size()-1;
384 uint32_t Index = (ActiveEnd - Blocks[ActiveB])/NodeMemSize;
385 NodeAddr<NodeBase*> NA = { reinterpret_cast<NodeBase*>(ActiveEnd),
386 makeId(ActiveB, Index) };
387 ActiveEnd += NodeMemSize;
// Reverse mapping: linear scan over blocks to recover the id of a node
// from its raw address. O(#blocks); asserts if the pointer is foreign.
391 NodeId NodeAllocator::id(const NodeBase *P) const {
392 uintptr_t A = reinterpret_cast<uintptr_t>(P);
393 for (unsigned i = 0, n = Blocks.size(); i != n; ++i) {
394 uintptr_t B = reinterpret_cast<uintptr_t>(Blocks[i]);
395 if (A < B || A >= B + NodesPerBlock*NodeMemSize)
397 uint32_t Idx = (A-B)/NodeMemSize;
398 return makeId(i, Idx);
400 llvm_unreachable("Invalid node address");
// Release all slabs and reset allocator state (body elided in this view).
403 void NodeAllocator::clear() {
// NOTE(review): elided excerpt — the pointer-splicing statements of
// NodeBase::append and the setter bodies are missing. Kept byte-identical.
409 // Insert node NA after "this" in the circular chain.
410 void NodeBase::append(NodeAddr<NodeBase*> NA) {
412 // If NA is already "next", do nothing.
419 // Fundamental node manipulator functions.
421 // Obtain the register reference from a reference node.
// Phi refs store a packed register ref directly; statement refs read it
// from the underlying machine operand.
422 RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const {
423 assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
424 if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)
425 return G.unpack(Ref.PR);
426 assert(Ref.Op != nullptr);
427 return G.makeRegRef(Ref.Op->getReg(), Ref.Op->getSubReg());
430 // Set the register reference in the reference node directly (for references
432 void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) {
433 assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
434 assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef);
438 // Set the register reference in the reference node based on a machine
439 // operand (for references in statement nodes).
440 void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) {
441 assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
442 assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef));
447 // Get the owner of a given reference node.
// Walks the circular next-chain until the first Code-typed node, which by
// construction is the owning instruction.
448 NodeAddr<NodeBase*> RefNode::getOwner(const DataFlowGraph &G) {
449 NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());
451 while (NA.Addr != this) {
452 if (NA.Addr->getType() == NodeAttrs::Code)
454 NA = G.addr<NodeBase*>(NA.Addr->getNext());
456 llvm_unreachable("No owner in circular list");
459 // Connect the def node to the reaching def node.
// Self becomes the head of DA's reached-def chain; the old head becomes
// Self's sibling (the setReachingDef line is elided in this view).
460 void DefNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
462 Ref.Sib = DA.Addr->getReachedDef();
463 DA.Addr->setReachedDef(Self);
466 // Connect the use node to the reaching def node.
467 void UseNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
469 Ref.Sib = DA.Addr->getReachedUse();
470 DA.Addr->setReachedUse(Self);
// NOTE(review): elided excerpt — several statements (e.g. the FirstM update
// in addMember, NA.Addr->setNext in addMemberAfter) are missing from this
// view. Kept byte-identical.
473 // Get the first member of the code node.
474 NodeAddr<NodeBase*> CodeNode::getFirstMember(const DataFlowGraph &G) const {
475 if (Code.FirstM == 0)
476 return NodeAddr<NodeBase*>();
477 return G.addr<NodeBase*>(Code.FirstM);
480 // Get the last member of the code node.
481 NodeAddr<NodeBase*> CodeNode::getLastMember(const DataFlowGraph &G) const {
483 return NodeAddr<NodeBase*>();
484 return G.addr<NodeBase*>(Code.LastM);
487 // Add node NA at the end of the member list of the given code node.
// Members form a circular list closed through the owner: the new tail's
// "next" points back at this code node's id.
488 void CodeNode::addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
489 NodeAddr<NodeBase*> ML = getLastMember(G);
494 NodeId Self = G.id(this);
495 NA.Addr->setNext(Self);
500 // Add node NA after member node MA in the given code node.
501 void CodeNode::addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA,
502 const DataFlowGraph &G) {
504 if (Code.LastM == MA.Id)
508 // Remove member node NA from the given code node.
509 void CodeNode::removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
510 NodeAddr<NodeBase*> MA = getFirstMember(G);
513 // Special handling if the member to remove is the first member.
514 if (MA.Id == NA.Id) {
515 if (Code.LastM == MA.Id) {
516 // If it is the only member, set both first and last to 0.
517 Code.FirstM = Code.LastM = 0;
519 // Otherwise, advance the first member.
520 Code.FirstM = MA.Addr->getNext();
// General case: walk the circular list to find NA's predecessor and splice
// NA out; update LastM if the removed node was the tail.
525 while (MA.Addr != this) {
526 NodeId MX = MA.Addr->getNext();
528 MA.Addr->setNext(NA.Addr->getNext());
529 // If the member to remove happens to be the last one, update the
531 if (Code.LastM == NA.Id)
535 MA = G.addr<NodeBase*>(MX);
537 llvm_unreachable("No such member");
540 // Return the list of all members of the code node.
541 NodeList CodeNode::members(const DataFlowGraph &G) const {
542 static auto True = [] (NodeAddr<NodeBase*>) -> bool { return true; };
543 return members_if(True, G);
546 // Return the owner of the given instr node.
// Like RefNode::getOwner, but the owner of an instruction is specifically
// a Block-kinded code node.
547 NodeAddr<NodeBase*> InstrNode::getOwner(const DataFlowGraph &G) {
548 NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());
550 while (NA.Addr != this) {
551 assert(NA.Addr->getType() == NodeAttrs::Code);
552 if (NA.Addr->getKind() == NodeAttrs::Block)
554 NA = G.addr<NodeBase*>(NA.Addr->getNext());
556 llvm_unreachable("No owner in circular list");
// NOTE(review): elided excerpt — the empty-block fast path of addPhi and
// the non-empty-result return in findBlock are missing. Kept byte-identical.
559 // Add the phi node PA to the given block node.
// Phis are kept grouped at the front of the block's member list, before any
// statements, so insertion finds the right position among existing phis.
560 void BlockNode::addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G) {
561 NodeAddr<NodeBase*> M = getFirstMember(G);
567 assert(M.Addr->getType() == NodeAttrs::Code);
568 if (M.Addr->getKind() == NodeAttrs::Stmt) {
569 // If the first member of the block is a statement, insert the phi as
572 PA.Addr->setNext(M.Id);
574 // If the first member is a phi, find the last phi, and append PA to it.
575 assert(M.Addr->getKind() == NodeAttrs::Phi);
576 NodeAddr<NodeBase*> MN = M;
579 MN = G.addr<NodeBase*>(M.Addr->getNext());
580 assert(MN.Addr->getType() == NodeAttrs::Code);
581 } while (MN.Addr->getKind() == NodeAttrs::Phi);
583 // M is the last phi.
584 addMemberAfter(M, PA, G);
588 // Find the block node corresponding to the machine basic block BB in the
// Linear search over the function's members; returns a null address when
// no block node wraps BB.
590 NodeAddr<BlockNode*> FuncNode::findBlock(const MachineBasicBlock *BB,
591 const DataFlowGraph &G) const {
592 auto EqBB = [BB] (NodeAddr<NodeBase*> NA) -> bool {
593 return NodeAddr<BlockNode*>(NA).Addr->getCode() == BB;
595 NodeList Ms = members_if(EqBB, G);
598 return NodeAddr<BlockNode*>();
601 // Get the block node for the entry block in the given function.
602 NodeAddr<BlockNode*> FuncNode::getEntryBlock(const DataFlowGraph &G) {
603 MachineBasicBlock *EntryB = &getCode()->front();
604 return findBlock(EntryB, G);
// NOTE(review): elided excerpt — several return statements and the tail of
// isFixedReg (the scan over ImpR) are missing. Kept byte-identical.
607 // Target operand information.
610 // For a given instruction, check if there are any bits of RR that can remain
611 // unchanged across this def.
// Default policy: only predicated instructions may preserve bits of a def.
612 bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum)
614 return TII.isPredicated(In);
617 // Check if the definition of RR produces an unspecified value.
618 bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum)
// Implicit defs on calls clobber (the isCall guard line is elided here —
// TODO confirm against upstream).
621 if (In.getOperand(OpNum).isImplicit())
626 // Check if the given instruction specifically requires
// "Fixed" registers are those the instruction names via its descriptor's
// implicit use/def lists (ABI-mandated for calls/returns/inline asm).
627 bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum)
629 if (In.isCall() || In.isReturn() || In.isInlineAsm())
631 // Check for a tail call.
633 for (const MachineOperand &O : In.operands())
634 if (O.isGlobal() || O.isSymbol())
637 const MCInstrDesc &D = In.getDesc();
638 if (!D.getImplicitDefs() && !D.getImplicitUses())
640 const MachineOperand &Op = In.getOperand(OpNum);
641 // If there is a sub-register, treat the operand as non-fixed. Currently,
642 // fixed registers are those that are listed in the descriptor as implicit
643 // uses or defs, and those lists do not allow sub-registers.
644 if (Op.getSubReg() != 0)
646 RegisterId Reg = Op.getReg();
647 const MCPhysReg *ImpR = Op.isDef() ? D.getImplicitDefs()
648 : D.getImplicitUses();
// NOTE(review): elided excerpt — loop bodies (e.g. the superreg walk in
// normalize, the root check in insert) are partially missing. Kept
// byte-identical.
//
// Normalize a register ref to its maximal super-register, translating the
// lane mask into the super-register's lane numbering, so that aliasing
// refs compare under a common key.
657 RegisterRef RegisterAggr::normalize(RegisterRef RR) const {
658 RegisterId SuperReg = RR.Reg;
660 MCSuperRegIterator SR(SuperReg, &TRI, false);
666 const TargetRegisterClass &RC = *TRI.getMinimalPhysRegClass(RR.Reg);
667 LaneBitmask Common = RR.Mask & RC.LaneMask;
668 uint32_t Sub = TRI.getSubRegIndex(SuperReg, RR.Reg);
669 LaneBitmask SuperMask = TRI.composeSubRegIndexLaneMask(Sub, Common);
670 return RegisterRef(SuperReg, SuperMask);
// True if any lane of RR overlaps the aggregate, either via the normalized
// mask map or via a register unit recorded as an explicit alias.
673 bool RegisterAggr::hasAliasOf(RegisterRef RR) const {
674 RegisterRef NR = normalize(RR);
675 auto F = Masks.find(NR.Reg);
676 if (F != Masks.end()) {
677 if ((F->second & NR.Mask).any())
681 for (MCRegUnitIterator U(RR.Reg, &TRI); U.isValid(); ++U)
682 if (ExpAliasUnits.test(*U))
// True if every lane of RR is already contained in the aggregate.
688 bool RegisterAggr::hasCoverOf(RegisterRef RR) const {
689 // Always have a cover for empty lane mask.
690 RegisterRef NR = normalize(RR);
693 auto F = Masks.find(NR.Reg);
694 if (F == Masks.end())
696 return (NR.Mask & F->second) == NR.Mask;
// Merge RR into the aggregate; also record register units stemming from
// explicit aliases in ExpAliasUnits.
699 RegisterAggr &RegisterAggr::insert(RegisterRef RR) {
700 RegisterRef NR = normalize(RR);
701 auto F = Masks.find(NR.Reg);
702 if (F == Masks.end())
703 Masks.insert({NR.Reg, NR.Mask});
705 F->second |= NR.Mask;
707 // Visit all register units to see if there are any that were created
708 // by explicit aliases. Add those that were to the bit vector.
709 for (MCRegUnitIterator U(RR.Reg, &TRI); U.isValid(); ++U) {
710 MCRegUnitRootIterator R(*U, &TRI);
714 ExpAliasUnits.set(*U);
720 RegisterAggr &RegisterAggr::insert(const RegisterAggr &RG) {
721 for (std::pair<RegisterId,LaneBitmask> P : RG.Masks)
722 insert(RegisterRef(P.first, P.second));
// Remove the lanes of RR from the aggregate (entry erasure when the mask
// becomes empty is elided from this view).
726 RegisterAggr &RegisterAggr::clear(RegisterRef RR) {
727 RegisterRef NR = normalize(RR);
728 auto F = Masks.find(NR.Reg);
729 if (F == Masks.end())
731 LaneBitmask NewM = F->second & ~NR.Mask;
739 RegisterAggr &RegisterAggr::clear(const RegisterAggr &RG) {
740 for (std::pair<RegisterId,LaneBitmask> P : RG.Masks)
741 clear(RegisterRef(P.first, P.second));
// Return RR with the lanes covered by this aggregate removed; empty ref
// when nothing remains.
745 RegisterRef RegisterAggr::clearIn(RegisterRef RR) const {
747 T.insert(RR).clear(*this);
749 return RegisterRef();
750 return RegisterRef(T.begin()->first, T.begin()->second);
753 void RegisterAggr::print(raw_ostream &OS) const {
756 OS << ' ' << PrintReg(I.first, &TRI) << PrintLaneMaskOpt(I.second);
// NOTE(review): elided excerpt — e.g. the pop-loop body of pop(), the
// Stack.pop_back calls of clear_block, and parts of the iterator ctor are
// missing. Kept byte-identical.
761 // The data flow graph construction.
764 DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
765 const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
766 const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi)
767 : MF(mf), TII(tii), TRI(tri), MDT(mdt), MDF(mdf), TOI(toi) {
770 // The implementation of the definition stack.
771 // Each register reference has its own definition stack. In particular,
772 // for a register references "Reg" and "Reg:subreg" will each have their
773 // own definition stacks.
775 // Construct a stack iterator.
// Delimiter entries (null Addr, block id) separate per-block sections;
// iterators skip them.
776 DataFlowGraph::DefStack::Iterator::Iterator(const DataFlowGraph::DefStack &S,
779 // Initialize to bottom.
783 // Initialize to the top, i.e. top-most non-delimiter (or 0, if empty).
784 Pos = DS.Stack.size();
785 while (Pos > 0 && DS.isDelimiter(DS.Stack[Pos-1]))
789 // Return the size of the stack, including block delimiters.
790 unsigned DataFlowGraph::DefStack::size() const {
792 for (auto I = top(), E = bottom(); I != E; I.down())
797 // Remove the top entry from the stack. Remove all intervening delimiters
798 // so that after this, the stack is either empty, or the top of the stack
799 // is a non-delimiter.
800 void DataFlowGraph::DefStack::pop() {
802 unsigned P = nextDown(Stack.size());
806 // Push a delimiter for block node N on the stack.
807 void DataFlowGraph::DefStack::start_block(NodeId N) {
809 Stack.push_back(NodeAddr<DefNode*>(nullptr, N));
812 // Remove all nodes from the top of the stack, until the delimited for
813 // block node N is encountered. Remove the delimiter as well. In effect,
814 // this will remove from the stack all definitions from block N.
815 void DataFlowGraph::DefStack::clear_block(NodeId N) {
817 unsigned P = Stack.size();
819 bool Found = isDelimiter(Stack[P-1], N);
824 // This will also remove the delimiter, if found.
828 // Move the stack iterator up by one.
829 unsigned DataFlowGraph::DefStack::nextUp(unsigned P) const {
830 // Get the next valid position after P (skipping all delimiters).
831 // The input position P does not have to point to a non-delimiter.
832 unsigned SS = Stack.size();
837 IsDelim = isDelimiter(Stack[P-1]);
838 } while (P < SS && IsDelim);
843 // Move the stack iterator down by one.
844 unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const {
845 // Get the preceding valid position before P (skipping all delimiters).
846 // The input position P does not have to point to a non-delimiter.
847 assert(P > 0 && P <= Stack.size());
848 bool IsDelim = isDelimiter(Stack[P-1]);
852 IsDelim = isDelimiter(Stack[P-1]);
853 } while (P > 0 && IsDelim);
858 // Register information.
860 // Get the list of references aliased to RR. Lane masks are ignored.
861 RegisterSet DataFlowGraph::getAliasSet(RegisterId Reg) const {
862 // Do not include RR in the alias set.
864 assert(TargetRegisterInfo::isPhysicalRegister(Reg));
866 for (MCRegAliasIterator AI(Reg, &TRI, false); AI.isValid(); ++AI)
867 AS.insert(RegisterRef(*AI));
// Registers the EH runtime defines on entry to a landing pad: exception
// pointer and selector, when the personality/TLI provide them.
871 RegisterSet DataFlowGraph::getLandingPadLiveIns() const {
873 const Function &F = *MF.getFunction();
874 const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn()
876 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
877 if (RegisterId R = TLI.getExceptionPointerRegister(PF))
878 LR.insert(RegisterRef(R));
879 if (RegisterId R = TLI.getExceptionSelectorRegister(PF))
880 LR.insert(RegisterRef(R));
// NOTE(review): elided excerpt — several return statements and the
// addMember calls at the end of the new* routines are missing. Kept
// byte-identical.
884 // Node management functions.
886 // Get the pointer to the node with the id N.
887 NodeBase *DataFlowGraph::ptr(NodeId N) const {
890 return Memory.ptr(N);
893 // Get the id of the node at the address P.
894 NodeId DataFlowGraph::id(const NodeBase *P) const {
900 // Allocate a new node and set the attributes to Attrs.
901 NodeAddr<NodeBase*> DataFlowGraph::newNode(uint16_t Attrs) {
902 NodeAddr<NodeBase*> P = Memory.New();
904 P.Addr->setAttrs(Attrs);
908 // Make a copy of the given node B, except for the data-flow links, which
// memcpy of the raw NodeBase, then zero the reaching/sibling (and for defs
// the reached) links so the clone starts unlinked.
910 NodeAddr<NodeBase*> DataFlowGraph::cloneNode(const NodeAddr<NodeBase*> B) {
911 NodeAddr<NodeBase*> NA = newNode(0);
912 memcpy(NA.Addr, B.Addr, sizeof(NodeBase));
913 // Ref nodes need to have the data-flow links reset.
914 if (NA.Addr->getType() == NodeAttrs::Ref) {
915 NodeAddr<RefNode*> RA = NA;
916 RA.Addr->setReachingDef(0);
917 RA.Addr->setSibling(0);
918 if (NA.Addr->getKind() == NodeAttrs::Def) {
919 NodeAddr<DefNode*> DA = NA;
920 DA.Addr->setReachedDef(0);
921 DA.Addr->setReachedUse(0);
927 // Allocation routines for specific node types/kinds.
// Each new* helper allocates a node with the right type/kind attributes,
// fills its payload, and (where visible) attaches it to its owner.
929 NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner,
930 MachineOperand &Op, uint16_t Flags) {
931 NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
932 UA.Addr->setRegRef(&Op, *this);
936 NodeAddr<PhiUseNode*> DataFlowGraph::newPhiUse(NodeAddr<PhiNode*> Owner,
937 RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) {
938 NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
939 assert(Flags & NodeAttrs::PhiRef);
940 PUA.Addr->setRegRef(RR, *this);
941 PUA.Addr->setPredecessor(PredB.Id);
945 NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
946 MachineOperand &Op, uint16_t Flags) {
947 NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
948 DA.Addr->setRegRef(&Op, *this);
952 NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
953 RegisterRef RR, uint16_t Flags) {
954 NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
955 assert(Flags & NodeAttrs::PhiRef);
956 DA.Addr->setRegRef(RR, *this);
960 NodeAddr<PhiNode*> DataFlowGraph::newPhi(NodeAddr<BlockNode*> Owner) {
961 NodeAddr<PhiNode*> PA = newNode(NodeAttrs::Code | NodeAttrs::Phi);
962 Owner.Addr->addPhi(PA, *this);
966 NodeAddr<StmtNode*> DataFlowGraph::newStmt(NodeAddr<BlockNode*> Owner,
968 NodeAddr<StmtNode*> SA = newNode(NodeAttrs::Code | NodeAttrs::Stmt);
969 SA.Addr->setCode(MI);
970 Owner.Addr->addMember(SA, *this);
974 NodeAddr<BlockNode*> DataFlowGraph::newBlock(NodeAddr<FuncNode*> Owner,
975 MachineBasicBlock *BB) {
976 NodeAddr<BlockNode*> BA = newNode(NodeAttrs::Code | NodeAttrs::Block);
977 BA.Addr->setCode(BB);
978 Owner.Addr->addMember(BA, *this);
982 NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) {
983 NodeAddr<FuncNode*> FA = newNode(NodeAttrs::Code | NodeAttrs::Func);
984 FA.Addr->setCode(MF);
// NOTE(review): elided excerpt — e.g. the newFunc/newStmt calls at the top
// and the removeUnusedPhis call at the bottom are missing from this view.
// Kept byte-identical.
988 // Build the data flow graph.
// Overall sequence (as visible here): create block/stmt nodes for the MF,
// add function-entry phis for live-ins, add phis for landing-pad live-ins,
// place phis from dominance frontiers, then link all refs.
989 void DataFlowGraph::build(unsigned Options) {
996 for (MachineBasicBlock &B : MF) {
997 NodeAddr<BlockNode*> BA = newBlock(Func, &B);
998 BlockNodes.insert(std::make_pair(&B, BA));
999 for (MachineInstr &I : B) {
1000 if (I.isDebugValue())
1006 NodeAddr<BlockNode*> EA = Func.Addr->getEntryBlock(*this);
1007 NodeList Blocks = Func.Addr->members(*this);
1009 // Collect information about block references.
1011 buildBlockRefs(EA, RefM);
1013 // Add function-entry phi nodes.
1014 MachineRegisterInfo &MRI = MF.getRegInfo();
1015 for (auto I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) {
1016 NodeAddr<PhiNode*> PA = newPhi(EA);
1017 RegisterRef RR = RegisterRef(I->first);
1018 uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
1019 NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
1020 PA.Addr->addMember(DA, *this);
1023 // Add phis for landing pads.
1024 // Landing pads, unlike usual backs blocks, are not entered through
1025 // branches in the program, or fall-throughs from other blocks. They
1026 // are entered from the exception handling runtime and target's ABI
1027 // may define certain registers as defined on entry to such a block.
1028 RegisterSet EHRegs = getLandingPadLiveIns();
1029 if (!EHRegs.empty()) {
1030 for (NodeAddr<BlockNode*> BA : Blocks) {
1031 const MachineBasicBlock &B = *BA.Addr->getCode();
1035 // Prepare a list of NodeIds of the block's predecessors.
1037 for (MachineBasicBlock *PB : B.predecessors())
1038 Preds.push_back(findBlock(PB));
1040 // Build phi nodes for each live-in.
1041 for (RegisterRef RR : EHRegs) {
1042 NodeAddr<PhiNode*> PA = newPhi(BA);
1043 uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
1045 NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
1046 PA.Addr->addMember(DA, *this);
1047 // Add uses (no reaching defs for phi uses):
1048 for (NodeAddr<BlockNode*> PBA : Preds) {
1049 NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
1050 PA.Addr->addMember(PUA, *this);
1056 // Build a map "PhiM" which will contain, for each block, the set
1057 // of references that will require phi definitions in that block.
1059 for (NodeAddr<BlockNode*> BA : Blocks)
1060 recordDefsForDF(PhiM, RefM, BA);
1061 for (NodeAddr<BlockNode*> BA : Blocks)
1062 buildPhis(PhiM, RefM, BA);
1064 // Link all the refs. This will recursively traverse the dominator tree.
1066 linkBlockRefs(DM, EA);
1068 // Finally, remove all unused phi nodes.
1069 if (!(Options & BuildOptions::KeepDeadPhis))
// NOTE(review): elided excerpt — e.g. the Sub!=0 guard of makeRegRef, the
// super-reg walk of normalizeRef, and the erase in releaseBlock are missing.
// Kept byte-identical.
//
// Build a RegisterRef from a register/subregister pair; the subregister is
// resolved to its concrete physical register.
1073 RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const {
1074 assert(TargetRegisterInfo::isPhysicalRegister(Reg));
1076 Reg = TRI.getSubReg(Reg, Sub);
1077 return RegisterRef(Reg);
1080 RegisterRef DataFlowGraph::normalizeRef(RegisterRef RR) const {
1081 // FIXME copied from RegisterAggr
1082 RegisterId SuperReg = RR.Reg;
1084 MCSuperRegIterator SR(SuperReg, &TRI, false);
1090 uint32_t Sub = TRI.getSubRegIndex(SuperReg, RR.Reg);
1091 const TargetRegisterClass &RC = *TRI.getMinimalPhysRegClass(RR.Reg);
1092 LaneBitmask SuperMask = RR.Mask &
1093 TRI.composeSubRegIndexLaneMask(Sub, RC.LaneMask);
1094 return RegisterRef(SuperReg, SuperMask);
// Intersection of two refs: same register → lane-mask AND; different
// registers → normalize to common super-registers first (the equal-superreg
// branch is elided from this view).
1097 RegisterRef DataFlowGraph::restrictRef(RegisterRef AR, RegisterRef BR) const {
1098 if (AR.Reg == BR.Reg) {
1099 LaneBitmask M = AR.Mask & BR.Mask;
1100 return M.any() ? RegisterRef(AR.Reg, M) : RegisterRef();
1103 RegisterRef NAR = normalizeRef(AR);
1104 RegisterRef NBR = normalizeRef(BR);
1105 assert(NAR.Reg != NBR.Reg);
1107 // This isn't strictly correct, because the overlap may happen in the
1109 if (TRI.regsOverlap(AR.Reg, BR.Reg))
1111 return RegisterRef();
1114 // For each stack in the map DefM, push the delimiter for block B on it.
1115 void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) {
1116 // Push block delimiters.
1117 for (auto I = DefM.begin(), E = DefM.end(); I != E; ++I)
1118 I->second.start_block(B);
1121 // Remove all definitions coming from block B from each stack in DefM.
1122 void DataFlowGraph::releaseBlock(NodeId B, DefStackMap &DefM) {
1123 // Pop all defs from this block from the definition stack. Defs that were
1124 // added to the map during the traversal of instructions will not have a
1125 // delimiter, but for those, the whole stack will be emptied.
1126 for (auto I = DefM.begin(), E = DefM.end(); I != E; ++I)
1127 I->second.clear_block(B);
1129 // Finally, remove empty stacks from the map.
1130 for (auto I = DefM.begin(), E = DefM.end(), NextI = I; I != E; I = NextI) {
1131 NextI = std::next(I);
1132 // This preserves the validity of iterators other than I.
1133 if (I->second.empty())
// NOTE(review): elided excerpt — e.g. the Visited-set declaration in
// pushDefs and the list-building lines of getRelatedRefs are missing.
// Kept byte-identical.
1138 // Push all definitions from the instruction node IA to an appropriate
// For each unvisited def in IA: take the front of its related-ref group as
// the representative, diagnose duplicate unrelated defs of the same ref,
// then push the def on the stack for its register and all aliases.
1140 void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
1141 NodeList Defs = IA.Addr->members_if(IsDef, *this);
1144 RegisterSet Defined;
1147 // The important objectives of this function are:
1148 // - to be able to handle instructions both while the graph is being
1149 // constructed, and after the graph has been constructed, and
1150 // - maintain proper ordering of definitions on the stack for each
1151 // register reference:
1152 // - if there are two or more related defs in IA (i.e. coming from
1153 // the same machine operand), then only push one def on the stack,
1154 // - if there are multiple unrelated defs of non-overlapping
1155 // subregisters of S, then the stack for S will have both (in an
1156 // unspecified order), but the order does not matter from the data-
1157 // -flow perspective.
1159 for (NodeAddr<DefNode*> DA : Defs) {
1160 if (Visited.count(DA.Id))
1163 NodeList Rel = getRelatedRefs(IA, DA);
1164 NodeAddr<DefNode*> PDA = Rel.front();
1165 RegisterRef RR = PDA.Addr->getRegRef(*this);
1167 // Assert if the register is defined in two or more unrelated defs.
1168 // This could happen if there are two or more def operands defining it.
1169 if (!Defined.insert(RR).second) {
1170 MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode();
1171 dbgs() << "Multiple definitions of register: "
1172 << Print<RegisterRef>(RR, *this) << " in\n " << *MI
1173 << "in BB#" << MI->getParent()->getNumber() << '\n';
1174 llvm_unreachable(nullptr);
1177 // Push the definition on the stack for the register and all aliases.
1178 // The def stack traversal in linkNodeUp will check the exact aliasing.
1179 DefM[RR.Reg].push(DA);
1180 for (RegisterRef A : getAliasSet(RR.Reg /*FIXME? use RegisterRef*/)) {
1181 // Check that we don't push the same def twice.
1183 DefM[A.Reg].push(DA);
1185 // Mark all the related defs as visited.
1186 for (NodeAddr<NodeBase*> T : Rel)
1187 Visited.insert(T.Id);
1191 // Return the list of all reference nodes related to RA, including RA itself.
1192 // See "getNextRelated" for the meaning of a "related reference".
// Follows getNextRelated until the walk returns to the starting id (or 0).
1193 NodeList DataFlowGraph::getRelatedRefs(NodeAddr<InstrNode*> IA,
1194 NodeAddr<RefNode*> RA) const {
1195 assert(IA.Id != 0 && RA.Id != 0);
1198 NodeId Start = RA.Id;
1201 RA = getNextRelated(IA, RA);
1202 } while (RA.Id != 0 && RA.Id != Start);
1206 // Return true if RA and RB overlap, false otherwise.
// Overlap is decided at register-unit granularity: both refs are expanded into
// their (unit, lane-mask) pairs, and a common unit whose lane masks intersect
// (after composing through the sub-register indices) means aliasing.
1207 bool DataFlowGraph::alias(RegisterRef RA, RegisterRef RB) const {
// Only physical registers are expected here; virtual registers have no
// register units.
1208   assert(TargetRegisterInfo::isPhysicalRegister(RA.Reg));
1209   assert(TargetRegisterInfo::isPhysicalRegister(RB.Reg));
1211   MCRegUnitMaskIterator UMA(RA.Reg, &TRI);
1212   MCRegUnitMaskIterator UMB(RB.Reg, &TRI);
1213   // Reg units are returned in the numerical order.
// Merge-style walk over the two sorted unit lists, looking for a shared unit.
1214   while (UMA.isValid() && UMB.isValid()) {
1215     std::pair<uint32_t,LaneBitmask> PA = *UMA;
1216     std::pair<uint32_t,LaneBitmask> PB = *UMB;
1217     if (PA.first == PB.first) {
1218       // Lane mask of 0 (given by the iterator) should be treated as "full".
1219       // This can happen when the register has only one unit, or when the
1220       // unit corresponds to explicit aliasing. In such cases, the lane mask
1221       // from RegisterRef should be ignored.
1222       if (PA.second.none() || PB.second.none())
// NOTE(review): lines 1223-1224 are elided in this listing; presumably
// 'return true;' for the full-lane-mask case -- confirm in full source.
1225       // At this point the common unit corresponds to a subregister. The lane
1226       // masks correspond to the lane mask of that unit within the original
1227       // register, for example assuming register quadruple q0 = r3:0, and
1228       // a register pair d1 = r3:2, the lane mask of r2 in q0 may be 0b0100,
1229       // while the lane mask of r2 in d1 may be 0b0001.
1230       LaneBitmask LA = PA.second & RA.Mask;
1231       LaneBitmask LB = PB.second & RB.Mask;
1232       if (LA.any() && LB.any()) {
// The root register of the shared unit gives a common frame of reference for
// comparing the two lane masks.
1233         unsigned Root = *MCRegUnitRootIterator(PA.first, &TRI);
1234         // If register units were guaranteed to only have 1 bit in any lane
1235         // mask, the code below would not be necessary. This is because LA
1236         // and LB would have at most 1 bit set each, and that bit would be
1237         // guaranteed to correspond to the given register unit.
1238         uint32_t SubA = TRI.getSubRegIndex(RA.Reg, Root);
1239         uint32_t SubB = TRI.getSubRegIndex(RB.Reg, Root);
1240         const TargetRegisterClass &RC = *TRI.getMinimalPhysRegClass(Root);
// Map both lane masks into the root register's lane numbering before
// intersecting them, restricted to lanes the root's class actually has.
1241         LaneBitmask MaskA = TRI.reverseComposeSubRegIndexLaneMask(SubA, LA);
1242         LaneBitmask MaskB = TRI.reverseComposeSubRegIndexLaneMask(SubB, LB);
1243         if ((MaskA & MaskB & RC.LaneMask).any())
// NOTE(review): lines 1244-1250 are elided in this listing; presumably
// 'return true;', the closing braces, and the advance of both iterators
// for the equal-unit case -- confirm in full source.
// Advance whichever iterator is behind (units are visited in sorted order).
1251     if (PA.first < PB.first)
// NOTE(review): line 1252 ('++UMA;') appears to be elided in this listing.
1253     else if (PB.first < PA.first)
// NOTE(review): the remainder of the loop, the final 'return false;', and the
// closing brace (lines 1254-1258) are elided in this listing.
1259 // Clear all information in the graph.
1260 void DataFlowGraph::reset() {
// NOTE(review): lines 1261-1262 are elided in this listing; presumably they
// clear the node allocator / block map -- confirm in full source.
// Reset the function node to a null address so the graph reads as empty.
1263   Func = NodeAddr<FuncNode*>();
// NOTE(review): the closing brace (lines 1264-1265) is elided in this listing.
1266 // Return the next reference node in the instruction node IA that is related
1267 // to RA. Conceptually, two reference nodes are related if they refer to the
1268 // same instance of a register access, but differ in flags or other minor
1269 // characteristics. Specific examples of related nodes are shadow reference
// NOTE(review): line 1270 (continuation of this comment, presumably "nodes.")
// is elided in this listing.
1271 // Return the equivalent of nullptr if there are no more related references.
1272 NodeAddr<RefNode*> DataFlowGraph::getNextRelated(NodeAddr<InstrNode*> IA,
1273       NodeAddr<RefNode*> RA) const {
1274   assert(IA.Id != 0 && RA.Id != 0);
// Base predicate: related refs must have the same kind (def/use) and refer to
// the same register.
1276   auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool {
1277     if (TA.Addr->getKind() != RA.Addr->getKind())
// NOTE(review): line 1278 ('return false;') is elided in this listing.
1279     if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this))
// NOTE(review): lines 1280-1282 ('return false;', 'return true;', closing of
// the lambda) are elided in this listing.
// For statements, related refs must additionally come from the same machine
// operand (compared by address).
1283   auto RelatedStmt = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
1284     return Related(TA) &&
1285            &RA.Addr->getOp() == &TA.Addr->getOp();
// NOTE(review): line 1286 (closing of the lambda) is elided in this listing.
1287   auto RelatedPhi = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
// NOTE(review): lines 1288-1289 are elided in this listing; presumably the
// Related(TA) check / early return -- confirm in full source.
// Phi defs carry no predecessor information, so any def passing Related
// qualifies; only uses need the predecessor comparison below.
1290     if (TA.Addr->getKind() != NodeAttrs::Use)
// NOTE(review): line 1291 ('return true;') is elided in this listing.
1292     // For phi uses, compare predecessor blocks.
1293     const NodeAddr<const PhiUseNode*> TUA = TA;
1294     const NodeAddr<const PhiUseNode*> RUA = RA;
1295     return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor();
// NOTE(review): lines 1296-1297 (closing of the lambda) are elided.
// Dispatch on the owner's kind: statements compare operands, phis compare
// predecessors. The 'true' argument requests a wrap-around (circular) search.
1298   RegisterRef RR = RA.Addr->getRegRef(*this);
1299   if (IA.Addr->getKind() == NodeAttrs::Stmt)
1300     return RA.Addr->getNextRef(RR, RelatedStmt, true, *this);
1301   return RA.Addr->getNextRef(RR, RelatedPhi, true, *this);
// NOTE(review): the closing brace (lines 1302-1303) is elided in this listing.
1304 // Find the next node related to RA in IA that satisfies condition P.
1305 // If such a node was found, return a pair where the second element is the
1306 // located node. If such a node does not exist, return a pair where the
1307 // first element is the element after which such a node should be inserted,
1308 // and the second element is a null-address.
1309 template <typename Predicate>
1310 std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>>
1311 DataFlowGraph::locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA,
1312       Predicate P) const {
1313   assert(IA.Id != 0 && RA.Id != 0);
// NA is the candidate node; Start guards against looping forever around the
// circular chain of related refs.
1315   NodeAddr<RefNode*> NA;
1316   NodeId Start = RA.Id;
// NOTE(review): line 1317 (the loop opener, presumably 'while (true) {') is
// elided in this listing.
1318     NA = getNextRelated(IA, RA);
1319     if (NA.Id == 0 || NA.Id == Start)
// NOTE(review): lines 1320-1325 are elided in this listing; presumably the
// loop break, the predicate test 'if (P(NA)) break;', and the advance
// 'RA = NA;' -- confirm in full source.
// If the search stopped on a node other than null/Start, the predicate
// matched: return (predecessor, match). Otherwise return (insertion point,
// null-address) per the contract above.
1326   if (NA.Id != 0 && NA.Id != Start)
1327     return std::make_pair(RA, NA);
1328   return std::make_pair(RA, NodeAddr<RefNode*>());
// NOTE(review): the closing brace (lines 1329-1330) is elided in this listing.
1331 // Get the next shadow node in IA corresponding to RA, and optionally create
1332 // such a node if it does not exist.
// A shadow is a ref with the same flags as RA plus NodeAttrs::Shadow; it
// stands for an additional reaching def of the same register access.
1333 NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
1334       NodeAddr<RefNode*> RA, bool Create) {
1335   assert(IA.Id != 0 && RA.Id != 0);
1337   uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
// Shadows are identified by an exact flag match (RA's flags + Shadow).
1338   auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
1339     return TA.Addr->getFlags() == Flags;
// NOTE(review): line 1340 (closing of the lambda) is elided in this listing.
1341   auto Loc = locateNextRef(IA, RA, IsShadow);
// Found an existing shadow, or caller did not ask for creation: return what
// we have (Loc.second is a null-address in the latter case).
1342   if (Loc.second.Id != 0 || !Create)
// NOTE(review): lines 1343-1344 ('return Loc.second;') are elided in this
// listing.
1345   // Create a copy of RA and mark it as shadow.
1346   NodeAddr<RefNode*> NA = cloneNode(RA);
1347   NA.Addr->setFlags(Flags | NodeAttrs::Shadow);
// Insert the new shadow right after the insertion point reported by
// locateNextRef, keeping related refs adjacent in the member list.
1348   IA.Addr->addMemberAfter(Loc.first, NA, *this);
// NOTE(review): the 'return NA;' and closing brace (lines 1349-1351) are
// elided in this listing.
1352 // Get the next shadow node in IA corresponding to RA. Return null-address
1353 // if such a node does not exist.
// Const overload of getNextShadow above: lookup only, never creates.
1354 NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
1355       NodeAddr<RefNode*> RA) const {
1356   assert(IA.Id != 0 && RA.Id != 0);
1357   uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
// Same exact-flag-match predicate as in the mutating overload.
1358   auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
1359     return TA.Addr->getFlags() == Flags;
// NOTE(review): line 1360 (closing of the lambda) is elided in this listing.
1361   return locateNextRef(IA, RA, IsShadow).second;
// NOTE(review): the closing brace (lines 1362-1363) is elided in this listing.
1364 // Create a new statement node in the block node BA that corresponds to
1365 // the machine instruction MI.
// Builds the statement node SA and populates it with def/use ref nodes:
// explicit defs first, then implicit defs not already covered, then uses.
1366 void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
1367   NodeAddr<StmtNode*> SA = newStmt(BA, &In);
// Heuristic call detection: besides In.isCall() (presumably checked in the
// elided lines), scan operands for global/symbol references.
1369   auto isCall = [] (const MachineInstr &In) -> bool {
// NOTE(review): lines 1370-1373 are elided in this listing; presumably the
// direct In.isCall() check and tail-call handling -- confirm in full source.
1374     for (const MachineOperand &Op : In.operands())
1375       if (Op.isGlobal() || Op.isSymbol())
// NOTE(review): lines 1376-1379 ('return true;', 'return false;', lambda
// close) are elided in this listing.
1380   auto isDefUndef = [this] (const MachineInstr &In, RegisterRef DR) -> bool {
1381     // This instruction defines DR. Check if there is a use operand that
1382     // would make DR live on entry to the instruction.
1383     for (const MachineOperand &UseOp : In.operands()) {
1384       if (!UseOp.isReg() || !UseOp.isUse() || UseOp.isUndef())
// NOTE(review): line 1385 ('continue;') is elided in this listing.
1386       RegisterRef UR = makeRegRef(UseOp.getReg(), UseOp.getSubReg());
// NOTE(review): lines 1387-1392 are elided in this listing; presumably the
// alias check against DR, the returns, and the lambda close -- confirm in
// full source.
1393   // Collect a set of registers that this instruction implicitly uses
1394   // or defines. Implicit operands from an instruction will be ignored
1395   // unless they are listed here.
1396   RegisterSet ImpUses, ImpDefs;
// The MCInstrDesc implicit-def/use lists are zero-terminated arrays of
// register numbers.
1397   if (const uint16_t *ImpD = In.getDesc().getImplicitDefs())
1398     while (uint16_t R = *ImpD++)
1399       ImpDefs.insert(RegisterRef(R));
1400   if (const uint16_t *ImpU = In.getDesc().getImplicitUses())
1401     while (uint16_t R = *ImpU++)
1402       ImpUses.insert(RegisterRef(R));
1404   bool IsCall = isCall(In);
// Calls, inline asm, and returns take all implicit operands, not just the
// ones listed in the instruction descriptor.
1405   bool NeedsImplicit = IsCall || In.isInlineAsm() || In.isReturn();
1406   bool IsPredicated = TII.isPredicated(In);
1407   unsigned NumOps = In.getNumOperands();
1409   // Avoid duplicate implicit defs. This will not detect cases of implicit
1410   // defs that define registers that overlap, but it is not clear how to
1411   // interpret that in the absence of explicit defs. Overlapping explicit
1412   // defs are likely illegal already.
1413   RegisterSet DoneDefs;
1414   // Process explicit defs first.
1415   for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
1416     MachineOperand &Op = In.getOperand(OpN);
1417     if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
// NOTE(review): line 1418 ('continue;') is elided in this listing.
1419     RegisterRef RR = makeRegRef(Op.getReg(), Op.getSubReg());
// Translate target-operand properties into RDF node flags.
1420     uint16_t Flags = NodeAttrs::None;
1421     if (TOI.isPreserving(In, OpN)) {
1422       Flags |= NodeAttrs::Preserving;
1423       // If the def is preserving, check if it is also undefined.
1424       if (isDefUndef(In, RR))
1425         Flags |= NodeAttrs::Undef;
// NOTE(review): line 1426 (closing brace of the if) is elided in this listing.
1427     if (TOI.isClobbering(In, OpN))
1428       Flags |= NodeAttrs::Clobbering;
1429     if (TOI.isFixedReg(In, OpN))
1430       Flags |= NodeAttrs::Fixed;
1431     if (IsCall && Op.isDead())
1432       Flags |= NodeAttrs::Dead;
1433     NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
1434     SA.Addr->addMember(DA, *this);
// Remember this register so the implicit-def pass below can skip it.
1435     DoneDefs.insert(RR);
// NOTE(review): line 1436-1437 (loop close) are elided in this listing.
1438   // Process implicit defs, skipping those that have already been added
// NOTE(review): line 1439 (continuation "// as explicit.") is elided.
1440   for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
1441     MachineOperand &Op = In.getOperand(OpN);
1442     if (!Op.isReg() || !Op.isDef() || !Op.isImplicit())
// NOTE(review): line 1443 ('continue;') is elided in this listing.
1444     RegisterRef RR = makeRegRef(Op.getReg(), Op.getSubReg());
// Ignore implicit defs not in the descriptor's list, unless the instruction
// is one of the kinds (call/asm/return) that takes all implicit operands.
1445     if (!NeedsImplicit && !ImpDefs.count(RR))
// NOTE(review): line 1446 ('continue;') is elided in this listing.
1447     if (DoneDefs.count(RR))
// NOTE(review): line 1448 ('continue;') is elided in this listing.
// Same flag translation as for explicit defs above.
1449     uint16_t Flags = NodeAttrs::None;
1450     if (TOI.isPreserving(In, OpN)) {
1451       Flags |= NodeAttrs::Preserving;
1452       // If the def is preserving, check if it is also undefined.
1453       if (isDefUndef(In, RR))
1454         Flags |= NodeAttrs::Undef;
// NOTE(review): line 1455 (closing brace of the if) is elided in this listing.
1456     if (TOI.isClobbering(In, OpN))
1457       Flags |= NodeAttrs::Clobbering;
1458     if (TOI.isFixedReg(In, OpN))
1459       Flags |= NodeAttrs::Fixed;
1460     if (IsCall && Op.isDead())
1461       Flags |= NodeAttrs::Dead;
1462     NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
1463     SA.Addr->addMember(DA, *this);
1464     DoneDefs.insert(RR);
// NOTE(review): lines 1465-1466 (loop close) are elided in this listing.
// Finally, process the use operands (both explicit and, where applicable,
// implicit).
1467   for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
1468     MachineOperand &Op = In.getOperand(OpN);
1469     if (!Op.isReg() || !Op.isUse())
// NOTE(review): line 1470 ('continue;') is elided in this listing.
1471     RegisterRef RR = makeRegRef(Op.getReg(), Op.getSubReg());
1472     // Add implicit uses on return and call instructions, and on predicated
1473     // instructions regardless of whether or not they appear in the instruction
1474     // descriptor's list.
1475     bool Implicit = Op.isImplicit();
1476     bool TakeImplicit = NeedsImplicit || IsPredicated;
1477     if (Implicit && !TakeImplicit && !ImpUses.count(RR))
// NOTE(review): line 1478 ('continue;') is elided in this listing.
1479     uint16_t Flags = NodeAttrs::None;
// NOTE(review): line 1480 is elided; presumably 'if (Op.isUndef())' guarding
// the Undef flag below -- confirm in full source.
1481       Flags |= NodeAttrs::Undef;
1482     if (TOI.isFixedReg(In, OpN))
1483       Flags |= NodeAttrs::Fixed;
1484     NodeAddr<UseNode*> UA = newUse(SA, Op, Flags);
1485     SA.Addr->addMember(UA, *this);
// NOTE(review): the loop close and function's closing brace (lines 1486-1488)
// are elided in this listing.
1489 // Build a map that for each block will have the set of all references from
1490 // that block, and from all blocks dominated by it.
// Post-order recursion over the dominator tree: each child is processed
// first, then its ref set is merged into this block's entry in RefM.
1491 void DataFlowGraph::buildBlockRefs(NodeAddr<BlockNode*> BA,
1492       BlockRefsMap &RefM) {
1493   RegisterSet &Refs = RefM[BA.Id];
1494   MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
// NOTE(review): lines 1495-1496 are elided in this listing; presumably the
// assert on N and the loop header over N's dominator-tree children --
// confirm in full source.
1497     MachineBasicBlock *SB = I->getBlock();
1498     NodeAddr<BlockNode*> SBA = findBlock(SB);
1499     buildBlockRefs(SBA, RefM);
// Accumulate the dominated block's refs into this block's set.
1500     const RegisterSet &RefsS = RefM[SBA.Id];
1501     Refs.insert(RefsS.begin(), RefsS.end());
// NOTE(review): lines 1502-1503 (loop close) are elided in this listing.
// Add the refs made directly by this block's own instructions.
1504   for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
1505     for (NodeAddr<RefNode*> RA : IA.Addr->members(*this))
1506       Refs.insert(RA.Addr->getRegRef(*this));
// NOTE(review): the closing brace (lines 1507-1508) is elided in this listing.
1509 // Scan all defs in the block node BA and record in PhiM the locations of
1510 // phi nodes corresponding to these defs.
1511 void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM, BlockRefsMap &RefM,
1512       NodeAddr<BlockNode*> BA) {
1513   // Check all defs from block BA and record them in each block in BA's
1514   // iterated dominance frontier. This information will later be used to
1515   // create phi nodes.
1516   MachineBasicBlock *BB = BA.Addr->getCode();
// Nothing to do for blocks with an empty dominance frontier.
1518   auto DFLoc = MDF.find(BB);
1519   if (DFLoc == MDF.end() || DFLoc->second.empty())
// NOTE(review): lines 1520-1521 ('return;') are elided in this listing.
1522   // Traverse all instructions in the block and collect the set of all
1523   // defined references. For each reference there will be a phi created
1524   // in the block's iterated dominance frontier.
1525   // This is done to make sure that each defined reference gets only one
1526   // phi node, even if it is defined multiple times.
// NOTE(review): line 1527 (presumably 'RegisterSet Defs;') is elided in this
// listing -- Defs is used below.
1528   for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
1529     for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this))
1530       Defs.insert(RA.Addr->getRegRef(*this));
1532   // Calculate the iterated dominance frontier of BB.
// Fixed-point worklist: SetVector preserves insertion order and rejects
// duplicates; the index-based loop keeps working as IDF grows.
1533   const MachineDominanceFrontier::DomSetType &DF = DFLoc->second;
1534   SetVector<MachineBasicBlock*> IDF(DF.begin(), DF.end());
1535   for (unsigned i = 0; i < IDF.size(); ++i) {
1536     auto F = MDF.find(IDF[i]);
// NOTE(review): line 1537 is elided; presumably the 'F != MDF.end()' guard
// for the insertion below -- confirm in full source.
1538       IDF.insert(F->second.begin(), F->second.end());
// NOTE(review): lines 1539-1540 (loop close) are elided in this listing.
1541   // Get the register references that are reachable from this block.
1542   RegisterSet &Refs = RefM[BA.Id];
1543   for (auto DB : IDF) {
1544     NodeAddr<BlockNode*> DBA = findBlock(DB);
1545     const RegisterSet &RefsD = RefM[DBA.Id];
1546     Refs.insert(RefsD.begin(), RefsD.end());
// NOTE(review): lines 1547-1548 (loop close) are elided in this listing.
1549   // Finally, add the set of defs to each block in the iterated dominance
// NOTE(review): line 1550 (continuation "// frontier.") is elided.
1551   for (auto DB : IDF) {
1552     NodeAddr<BlockNode*> DBA = findBlock(DB);
// PhiM is consumed later by buildPhis to materialize the phi nodes.
1553     PhiM[DBA.Id].insert(Defs.begin(), Defs.end());
// NOTE(review): the loop close and function's closing brace (lines 1554-1556)
// are elided in this listing.
1557 // Given the locations of phi nodes in the map PhiM, create the phi nodes
1558 // that are located in the block node BA.
1559 void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, BlockRefsMap &RefM,
1560       NodeAddr<BlockNode*> BA) {
1561   // Check if this blocks has any DF defs, i.e. if there are any defs
1562   // that this block is in the iterated dominance frontier of.
1563   auto HasDF = PhiM.find(BA.Id);
1564   if (HasDF == PhiM.end() || HasDF->second.empty())
// NOTE(review): lines 1565-1566 ('return;') are elided in this listing.
1567   // First, remove all R in Refs in such that there exists T in Refs
1568   // such that T covers R. In other words, only leave those refs that
1569   // are not covered by another ref (i.e. maximal with respect to covering).
// Returns the maximal covering ref of RR within RRs (RR itself if nothing
// covers it).
1571   auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef {
1572     for (RegisterRef I : RRs)
1573       if (I != RR && RegisterAggr::isCoverOf(I, RR, TRI))
// NOTE(review): lines 1574-1578 are elided in this listing; presumably the
// update of RR to the covering ref, 'return RR;', the lambda close, and the
// declaration of MaxDF used below -- confirm in full source.
// Reduce the DF def set to maximal refs only.
1579   for (RegisterRef I : HasDF->second)
1580     MaxDF.insert(MaxCoverIn(I, HasDF->second));
// Widen each maximal DF ref to its maximal cover among all refs reachable
// from this block.
1582   std::vector<RegisterRef> MaxRefs;
1583   RegisterSet &RefB = RefM[BA.Id];
1584   for (RegisterRef I : MaxDF)
1585     MaxRefs.push_back(MaxCoverIn(I, RefB));
1587   // Now, for each R in MaxRefs, get the alias closure of R. If the closure
1588   // only has R in it, create a phi a def for R. Otherwise, create a phi,
1589   // and add a def for each S in the closure.
1591   // Sort the refs so that the phis will be created in a deterministic order.
1592   std::sort(MaxRefs.begin(), MaxRefs.end());
1593   // Remove duplicates.
1594   auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
1595   MaxRefs.erase(NewEnd, MaxRefs.end());
// True if RR aliases any ref already in the closure (indices into MaxRefs).
1597   auto Aliased = [this,&MaxRefs](RegisterRef RR,
1598                    std::vector<unsigned> &Closure) -> bool {
1599     for (unsigned I : Closure)
1600       if (alias(RR, MaxRefs[I]))
// NOTE(review): lines 1601-1604 ('return true;', 'return false;', lambda
// close) are elided in this listing.
1605   // Prepare a list of NodeIds of the block's predecessors.
// NOTE(review): line 1606 (presumably the declaration of Preds) is elided in
// this listing.
1607   const MachineBasicBlock *MBB = BA.Addr->getCode();
1608   for (MachineBasicBlock *PB : MBB->predecessors())
1609     Preds.push_back(findBlock(PB));
// Peel off one alias closure at a time; each closure becomes one phi node.
1611   while (!MaxRefs.empty()) {
1612     // Put the first element in the closure, and then add all subsequent
1613     // elements from MaxRefs to it, if they alias at least one element
1614     // already in the closure.
1615     // ClosureIdx: vector of indices in MaxRefs of members of the closure.
1616     std::vector<unsigned> ClosureIdx = { 0 };
1617     for (unsigned i = 1; i != MaxRefs.size(); ++i)
1618       if (Aliased(MaxRefs[i], ClosureIdx))
1619         ClosureIdx.push_back(i);
1621     // Build a phi for the closure.
1622     unsigned CS = ClosureIdx.size();
1623     NodeAddr<PhiNode*> PA = newPhi(BA);
// Add a def node for every ref in the closure; phi defs preserve incoming
// values (PhiRef | Preserving).
1626     for (unsigned X = 0; X != CS; ++X) {
1627       RegisterRef RR = MaxRefs[ClosureIdx[X]];
1628       uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
1629       NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
1630       PA.Addr->addMember(DA, *this);
// NOTE(review): lines 1631-1632 (loop close / "add phi uses" comment) are
// elided in this listing.
// For every predecessor block, add a phi use for every ref in the closure.
1633     for (NodeAddr<BlockNode*> PBA : Preds) {
1634       for (unsigned X = 0; X != CS; ++X) {
1635         RegisterRef RR = MaxRefs[ClosureIdx[X]];
1636         NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
1637         PA.Addr->addMember(PUA, *this);
// NOTE(review): lines 1638-1640 (loop closes) are elided in this listing.
1641     // Erase from MaxRefs all elements in the closure.
// Erase back-to-front so earlier indices stay valid.
1642     auto Begin = MaxRefs.begin();
1643     for (unsigned i = ClosureIdx.size(); i != 0; --i)
1644       MaxRefs.erase(Begin + ClosureIdx[i-1]);
// NOTE(review): the while-loop close and function's closing brace
// (lines 1645-1647) are elided in this listing.
1648 // Remove any unneeded phi nodes that were created during the build process.
1649 void DataFlowGraph::removeUnusedPhis() {
1650   // This will remove unused phis, i.e. phis where each def does not reach
1651   // any uses or other defs. This will not detect or remove circular phi
1652   // chains that are otherwise dead. Unused/dead phis are created during
1653   // the build process and this function is intended to remove these cases
1654   // that are easily determinable to be unnecessary.
// Seed the worklist with every phi in the function.
1656   SetVector<NodeId> PhiQ;
1657   for (NodeAddr<BlockNode*> BA : Func.Addr->members(*this)) {
1658     for (auto P : BA.Addr->members_if(IsPhi, *this))
// NOTE(review): lines 1659-1661 (the insertion into PhiQ and loop closes) are
// elided in this listing.
// A phi is live if any of its defs reaches a def or a use.
1662   static auto HasUsedDef = [](NodeList &Ms) -> bool {
1663     for (NodeAddr<NodeBase*> M : Ms) {
1664       if (M.Addr->getKind() != NodeAttrs::Def)
// NOTE(review): line 1665 ('continue;') is elided in this listing.
1666       NodeAddr<DefNode*> DA = M;
1667       if (DA.Addr->getReachedDef() != 0 || DA.Addr->getReachedUse() != 0)
// NOTE(review): lines 1668-1672 ('return true;', loop close, 'return false;',
// lambda close) are elided in this listing.
1673   // Any phi, if it is removed, may affect other phis (make them dead).
1674   // For each removed phi, collect the potentially affected phis and add
1675   // them back to the queue.
1676   while (!PhiQ.empty()) {
1677     auto PA = addr<PhiNode*>(PhiQ[0]);
// NOTE(review): line 1678 (presumably 'PhiQ.remove(PA.Id);') is elided in
// this listing.
1679     NodeList Refs = PA.Addr->members(*this);
1680     if (HasUsedDef(Refs))
// NOTE(review): line 1681 ('continue;') is elided in this listing.
// Unlink every ref of the dead phi; if a ref had a reaching def owned by
// another phi, requeue that phi for re-examination.
1682     for (NodeAddr<RefNode*> RA : Refs) {
1683       if (NodeId RD = RA.Addr->getReachingDef()) {
1684         auto RDA = addr<DefNode*>(RD);
1685         NodeAddr<InstrNode*> OA = RDA.Addr->getOwner(*this);
// NOTE(review): lines 1686-1688 are elided; presumably the IsPhi(OA) check
// and the re-insertion of OA.Id into PhiQ -- confirm in full source.
1689       if (RA.Addr->isDef())
1690         unlinkDef(RA, true);
// NOTE(review): line 1691 ('else') is elided in this listing.
1692         unlinkUse(RA, true);
// NOTE(review): line 1693 (loop close) is elided in this listing.
// Structurally remove the phi from its owning block.
1694     NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this);
1695     BA.Addr->removeMember(PA, *this);
// NOTE(review): the while-loop close and function's closing brace
// (lines 1696-1698) are elided in this listing.
1699 // For a given reference node TA in an instruction node IA, connect the
1700 // reaching def of TA to the appropriate def node. Create any shadow nodes
// NOTE(review): line 1701 (continuation "// as appropriate.") is elided.
// T is the ref node type (UseNode*/DefNode*); DS is the def stack for TA's
// register.
1702 template <typename T>
1703 void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
// NOTE(review): lines 1704-1706 are elided in this listing; presumably the
// DefStack &DS parameter, the brace, and an early-out for an empty stack.
1707   RegisterRef RR = TA.Addr->getRegRef(*this);
// NOTE(review): lines 1708-1709 are elided; presumably TAP initialization
// (the "current" ref being linked, starting at TA) -- confirm in full source.
1710   // References from the def stack that have been examined so far.
1711   RegisterAggr Defs(TRI);
// Walk the def stack top-down, linking TA (and shadows) to each relevant def
// until the accumulated defs cover RR.
1713   for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
1714     RegisterRef QR = I->Addr->getRegRef(*this);
1716     // Skip all defs that are aliased to any of the defs that we have already
1717     // seen. If this completes a cover of RR, stop the stack traversal.
1718     bool Alias = Defs.hasAliasOf(QR);
1719     bool Cover = Defs.insert(QR).hasCoverOf(RR);
// NOTE(review): lines 1720-1725 are elided in this listing; presumably the
// 'if (Alias)' skip (breaking when Cover) -- confirm in full source.
1726     // The reaching def.
1727     NodeAddr<DefNode*> RDA = *I;
1729     // Pick the reached node.
// NOTE(review): lines 1730-1732 are elided; presumably the first-iteration
// case that links TAP directly -- confirm in full source.
1733       // Mark the existing ref as "shadow" and create a new shadow.
1734       TAP.Addr->setFlags(TAP.Addr->getFlags() | NodeAttrs::Shadow);
1735       TAP = getNextShadow(IA, TAP, true);
// NOTE(review): lines 1736-1738 are elided (closing brace / comment).
// Attach the current (possibly shadow) ref to its reaching def.
1739     TAP.Addr->linkToDef(TAP.Id, RDA);
// NOTE(review): the cover-break, loop close, and function's closing brace
// (lines 1740-1745) are elided in this listing.
1746 // Create data-flow links for all reference nodes in the statement node SA.
1747 void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA) {
// NOTE(review): lines 1748-1751 are elided in this listing; presumably a
// debug-only RegisterSet Defs used by the assert below -- confirm in full
// source.
1752   // Link all nodes (upwards in the data-flow) with their reaching defs.
1753   for (NodeAddr<RefNode*> RA : SA.Addr->members(*this)) {
1754     uint16_t Kind = RA.Addr->getKind();
1755     assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use);
1756     RegisterRef RR = RA.Addr->getRegRef(*this);
1758     // Do not expect multiple defs of the same reference.
1759     assert(Kind != NodeAttrs::Def || !Defs.count(RR));
// NOTE(review): lines 1760-1762 are elided; presumably the insertion of RR
// into Defs for def nodes -- confirm in full source.
// No entry in the def map means there is no reaching def for this register;
// leave the ref unlinked.
1763     auto F = DefM.find(RR.Reg);
1764     if (F == DefM.end())
// NOTE(review): line 1765 ('continue;') is elided in this listing.
1766     DefStack &DS = F->second;
// Dispatch to the correctly-typed linkRefUp instantiation.
1767     if (Kind == NodeAttrs::Use)
1768       linkRefUp<UseNode*>(SA, RA, DS);
1769     else if (Kind == NodeAttrs::Def)
1770       linkRefUp<DefNode*>(SA, RA, DS);
// NOTE(review): line 1771 ('else') is elided in this listing.
1772       llvm_unreachable("Unexpected node in instruction");
// NOTE(review): the loop close and function's closing brace (lines 1773-1775)
// are elided in this listing.
1776 // Create data-flow links for all instructions in the block node BA. This
1777 // will include updating any phi nodes in BA.
// Recursive walk over the dominator tree: link this block's statements,
// recurse into dominated blocks, then link phi uses in successor blocks that
// correspond to this block as a predecessor.
1778 void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
1779   // Push block delimiters.
1780   markBlock(BA.Id, DefM);
1782   assert(BA.Addr && "block node address is needed to create a data-flow link");
1783   // For each non-phi instruction in the block, link all the defs and uses
1784   // to their reaching defs. For any member of the block (including phis),
1785   // push the defs on the corresponding stacks.
1786   for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) {
1787     // Ignore phi nodes here. They will be linked part by part from the
// NOTE(review): line 1788 (continuation "// predecessors.") is elided.
1789     if (IA.Addr->getKind() == NodeAttrs::Stmt)
1790       linkStmtRefs(DefM, IA);
1792     // Push the definitions on the stack.
// NOTE(review): lines 1791, 1793-1795 are elided in this listing; presumably
// the pushDefs call and loop close -- confirm in full source.
1796   // Recursively process all children in the dominator tree.
1797   MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
// NOTE(review): line 1798 (the loop header over N's children) is elided.
1799     MachineBasicBlock *SB = I->getBlock();
1800     NodeAddr<BlockNode*> SBA = findBlock(SB);
1801     linkBlockRefs(DefM, SBA);
// NOTE(review): lines 1802-1803 (loop close) are elided in this listing.
1804   // Link the phi uses from the successor blocks.
// Selects phi uses whose recorded predecessor is this block.
1805   auto IsUseForBA = [BA](NodeAddr<NodeBase*> NA) -> bool {
1806     if (NA.Addr->getKind() != NodeAttrs::Use)
// NOTE(review): line 1807 ('return false;') is elided in this listing.
1808     assert(NA.Addr->getFlags() & NodeAttrs::PhiRef);
1809     NodeAddr<PhiUseNode*> PUA = NA;
1810     return PUA.Addr->getPredecessor() == BA.Id;
// NOTE(review): lines 1811-1812 (closing of the lambda) are elided.
1813   RegisterSet EHLiveIns = getLandingPadLiveIns();
1814   MachineBasicBlock *MBB = BA.Addr->getCode();
1816   for (MachineBasicBlock *SB : MBB->successors()) {
1817     bool IsEHPad = SB->isEHPad();
1818     NodeAddr<BlockNode*> SBA = findBlock(SB);
1819     for (NodeAddr<InstrNode*> IA : SBA.Addr->members_if(IsPhi, *this)) {
1820       // Do not link phi uses for landing pad live-ins.
// NOTE(review): line 1821 is elided; presumably the 'if (IsEHPad)' guard for
// the live-in check below -- confirm in full source.
1822         // Find what register this phi is for.
1823         NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this);
// NOTE(review): line 1824 (presumably an assert on RA.Id) is elided.
1825         if (EHLiveIns.count(RA.Addr->getRegRef(*this)))
// NOTE(review): lines 1826-1827 ('continue;' and brace close) are elided.
1828       // Go over each phi use associated with MBB, and link it.
1829       for (auto U : IA.Addr->members_if(IsUseForBA, *this)) {
1830         NodeAddr<PhiUseNode*> PUA = U;
1831         RegisterRef RR = PUA.Addr->getRegRef(*this);
// Phi uses are linked against this (predecessor) block's def stacks.
1832         linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]);
// NOTE(review): lines 1833-1836 (loop closes) are elided in this listing.
1837   // Pop all defs from this block from the definition stacks.
1838   releaseBlock(BA.Id, DefM);
// NOTE(review): the closing brace (lines 1839-1840) is elided in this listing.
1841 // Remove the use node UA from any data-flow and structural links.
// Splices UA out of the singly-linked sibling chain hanging off its reaching
// def's reached-use list.
1842 void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) {
1843   NodeId RD = UA.Addr->getReachingDef();
1844   NodeId Sib = UA.Addr->getSibling();
// NOTE(review): lines 1845-1850 are elided in this listing; presumably the
// early return when RD == 0 and the clearing of UA's links -- confirm in
// full source.
1851   auto RDA = addr<DefNode*>(RD);
1852   auto TA = addr<UseNode*>(RDA.Addr->getReachedUse());
// Fast path: UA is the head of the reached-use chain; point the chain head
// at UA's sibling.
1853   if (TA.Id == UA.Id) {
1854     RDA.Addr->setReachedUse(Sib);
// NOTE(review): lines 1855-1857 ('return;', brace close) are elided in this
// listing.
// Otherwise scan the chain for the node whose sibling is UA and bypass UA.
1858   while (TA.Id != 0) {
1859     NodeId S = TA.Addr->getSibling();
// NOTE(review): line 1860 is elided; presumably 'if (S == UA.Id) {' guarding
// the splice below -- confirm in full source.
1861       TA.Addr->setSibling(UA.Addr->getSibling());
// NOTE(review): lines 1862-1863 ('return;', brace close) are elided.
1864     TA = addr<UseNode*>(S);
// NOTE(review): the loop close and function's closing brace (lines 1865-1867)
// are elided in this listing.
1868 // Remove the def node DA from any data-flow and structural links.
1869 void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) {
1877 // ... -- | DA | -- ... -- 0 : sibling chain of DA
1882 // | ... : Siblings (defs)
1886 // ... : sibling chain of reached uses
1888 NodeId RD = DA.Addr->getReachingDef();
1890 // Visit all siblings of the reached def and reset their reaching defs.
1891 // Also, defs reached by DA are now "promoted" to being reached by RD,
1892 // so all of them will need to be spliced into the sibling chain where
1894 auto getAllNodes = [this] (NodeId N) -> NodeList {
1897 auto RA = addr<RefNode*>(N);
1898 // Keep the nodes in the exact sibling order.
1900 N = RA.Addr->getSibling();
1904 NodeList ReachedDefs = getAllNodes(DA.Addr->getReachedDef());
1905 NodeList ReachedUses = getAllNodes(DA.Addr->getReachedUse());
1908 for (NodeAddr<RefNode*> I : ReachedDefs)
1909 I.Addr->setSibling(0);
1910 for (NodeAddr<RefNode*> I : ReachedUses)
1911 I.Addr->setSibling(0);
1913 for (NodeAddr<DefNode*> I : ReachedDefs)
1914 I.Addr->setReachingDef(RD);
1915 for (NodeAddr<UseNode*> I : ReachedUses)
1916 I.Addr->setReachingDef(RD);
1918 NodeId Sib = DA.Addr->getSibling();
1924 // Update the reaching def node and remove DA from the sibling list.
1925 auto RDA = addr<DefNode*>(RD);
1926 auto TA = addr<DefNode*>(RDA.Addr->getReachedDef());
1927 if (TA.Id == DA.Id) {
1928 // If DA is the first reached def, just update the RD's reached def
1929 // to the DA's sibling.
1930 RDA.Addr->setReachedDef(Sib);
1932 // Otherwise, traverse the sibling list of the reached defs and remove
1934 while (TA.Id != 0) {
1935 NodeId S = TA.Addr->getSibling();
1937 TA.Addr->setSibling(Sib);
1940 TA = addr<DefNode*>(S);
1944 // Splice the DA's reached defs into the RDA's reached def chain.
1945 if (!ReachedDefs.empty()) {
1946 auto Last = NodeAddr<DefNode*>(ReachedDefs.back());
1947 Last.Addr->setSibling(RDA.Addr->getReachedDef());
1948 RDA.Addr->setReachedDef(ReachedDefs.front().Id);
1950 // Splice the DA's reached uses into the RDA's reached use chain.
1951 if (!ReachedUses.empty()) {
1952 auto Last = NodeAddr<UseNode*>(ReachedUses.back());
1953 Last.Addr->setSibling(RDA.Addr->getReachedUse());
1954 RDA.Addr->setReachedUse(ReachedUses.front().Id);