//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   %1 <vsrc> = COPY %0 <sgpr>
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains its definition class to
/// <vgpr> if the user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr> then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <map>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(false));

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;
  MachinePostDominatorTree *MPDT;
  DenseMap<MachineBasicBlock *, SetVector<MachineBasicBlock*>> PDF;

  void computePDF(MachineFunction *MF);
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void printPDF();
#endif

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    AU.addPreserved<MachinePostDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}
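
// Returns true if MI has at least one virtual register operand whose
// register class contains VGPRs.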
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}
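
// Returns the register classes of the copy's source and destination operands.
// Virtual registers are looked up in MRI; physical registers are mapped to a
// class by TRI.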
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
    TargetRegisterInfo::isVirtualRegister(SrcReg) ?
    MRI.getRegClass(SrcReg) :
    TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
    TargetRegisterInfo::isVirtualRegister(DstReg) ?
    MRI.getRegClass(DstReg) :
    TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}
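
// Direction predicates for a copy, in terms of the register classes returned
// by getCopyRegClasses().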
static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(DstRC) && TRI.hasVGPRs(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(SrcRC) && TRI.hasVGPRs(DstRC);
}
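
// Try to turn an SGPR->VGPR copy into an SGPR->SGPR copy: if every non-debug
// user of the destination is a target instruction in the same block that
// accepts the SGPR source in the corresponding operand, retype the
// destination to the equivalent SGPR class. Returns true on success.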
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = Src.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      !TargetRegisterInfo::isVirtualRegister(DstReg))
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END ||
        !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
// SGPRx = ...
// SGPRy = REG_SEQUENCE SGPRx, sub0 ...
// VGPRz = COPY SGPRy
//
// ==>
//
// VGPRx = COPY SGPRx
// VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy
  //
  // =>
  //
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}
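
// Returns true if any incoming value of PHI currently lives in a register
// class that contains VGPRs.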
static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}
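
// Returns true if any incoming value of PHI is defined, possibly through a
// chain of other PHIs, by one of the SI_*_BREAK pseudo instructions.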
static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getVRegDef(Reg);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_BREAK:
    case AMDGPU::SI_IF_BREAK:
    case AMDGPU::SI_ELSE_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}

static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}
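
// Checks whether Copy simply copies the value materialized by the move
// immediate MoveImm, so the copy can be rewritten as a scalar move. On
// success, SMovOp receives the S_MOV opcode to use and Imm the immediate.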
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}
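
// Generic search over the predecessors of MBB: returns true if any
// transitive predecessor satisfies Predicate. The walk stops at CutOff, so
// blocks reachable only through it are never tested.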
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->pred_begin(),
                                               MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}

// Checks if there is a potential path from instruction From to instruction
// To. If CutOff is specified and sits on that path, the portion of the path
// above CutOff is ignored and To is reported as not reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // If either From block dominates To block or instructions are in the same
  // block and From is higher.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
           (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
}

// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
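// For example, two identical "m0 = S_MOV_B32 -1" initializations arriving
// from sibling blocks can be replaced by a single init hoisted into their
// nearest common dominator, provided no conflicting definition of the
// register can interfere in between.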
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   MachineDominatorTree &MDT) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr*, 8> Clobbers;
  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO: MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference
        auto interferes = [&](MachineBasicBlock::iterator From,
                              MachineBasicBlock::iterator To) -> bool {
          assert(MDT.dominates(&*To, &*From));

          auto clobberInterferes =
              [&MDT, From, To](MachineInstr* &Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber, this is not an interference only if both are
            // dominated by Clobber and belong to the same block or if Clobber
            // properly dominates To, given that To >> From, so it dominates
            // both and located in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, clobberInterferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           llvm::any_of(C.second, clobberInterferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!interferes(MI2, MI1)) {
            // MI2 is redundant: MI1 dominates it and defines the same value.
            DEBUG(dbgs() << "Erasing from "
                         << printMBBReference(*MI2->getParent()) << " "
                         << *MI2);
            MI2->eraseFromParent();
            Defs.erase(I2++);
            Changed = true;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!interferes(MI1, MI2)) {
            DEBUG(dbgs() << "Erasing from "
                         << printMBBReference(*MI1->getParent()) << " "
                         << *MI1);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        } else {
          // Neither dominates the other: try hoisting MI2 into the nearest
          // common dominator and erasing MI1.
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
          if (!interferes(MI1, I) && !interferes(MI2, I)) {
            DEBUG(dbgs() << "Erasing from "
                         << printMBBReference(*MI1->getParent()) << " " << *MI1
                         << "and moving from "
                         << printMBBReference(*MI2->getParent()) << " to "
                         << printMBBReference(*I->getParent()) << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Defs.erase(I2++);
            Changed = true;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}
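
// Compute the post-dominance frontier of every block: for each block B with
// more than one successor, walk up the post-dominator tree from each
// successor until reaching B's immediate post-dominator, adding B to the
// frontier set of every node visited. This is the usual dominance-frontier
// construction, run on the post-dominator tree.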
void SIFixSGPRCopies::computePDF(MachineFunction *MF) {
  MachineFunction::iterator B = MF->begin();
  MachineFunction::iterator E = MF->end();
  for (; B != E; ++B) {
    if (B->succ_size() > 1) {
      for (auto S : B->successors()) {
        MachineDomTreeNode *runner = MPDT->getNode(&*S);
        MachineDomTreeNode *sentinel = MPDT->getNode(&*B)->getIDom();
        while (runner && runner != sentinel) {
          PDF[runner->getBlock()].insert(&*B);
          runner = runner->getIDom();
        }
      }
    }
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SIFixSGPRCopies::printPDF() {
  dbgs() << "\n######## PostDominanceFrontiers set #########\n";
  for (auto &I : PDF) {
    dbgs() << "PDF[ " << I.first->getNumber() << "] : ";
    for (auto &J : I.second) {
      dbgs() << J->getNumber() << ' ';
    }
    dbgs() << '\n';
  }
  dbgs() << "\n##############################################\n";
}
#endif
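
// Main driver: walk every instruction once, legalizing copies, PHIs,
// REG_SEQUENCEs and INSERT_SUBREGs whose operands mix SGPR and VGPR banks,
// then optionally merge M0 initializations.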
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();
  MPDT = &getAnalysis<MachinePostDominatorTree>();
  PDF.clear();
  computePDF(&MF);
  DEBUG(printPDF());

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::WWM: {
        // If the destination register is a physical register there isn't really
        // much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          unsigned SrcReg = MI.getOperand(1).getReg();
          if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
            TII->moveToVALU(MI);
            break;
          }

          MachineInstr *DefMI = MRI.getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI);
        } else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
        }

        break;
      }
      case AMDGPU::PHI: {
        unsigned Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if all the source blocks
        // have no divergent control dependencies.
        bool HasVGPROperand = phiHasVGPROperands(MI, MRI, TRI, TII);
        if (!HasVGPROperand) {
          bool Uniform = true;
          MachineBasicBlock *Join = MI.getParent();
          for (auto &O : MI.explicit_operands()) {
            if (O.isMBB()) {
              MachineBasicBlock *Source = O.getMBB();
              SetVector<MachineBasicBlock*> &SourcePDF = PDF[Source];
              SetVector<MachineBasicBlock*> &JoinPDF = PDF[Join];
              SetVector<MachineBasicBlock*> CDList;
              for (auto &I : SourcePDF) {
                if (!JoinPDF.count(I) || /* back edge */MDT->dominates(Join, I)) {
                  if (hasTerminatorThatModifiesExec(*I, *TRI))
                    Uniform = false;
                }
              }
            }
          }
          if (Uniform) {
            DEBUG(dbgs() << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For Example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2
        //
        // Will Become:
        //
        // sgpr2 = def;
        // ...
        // sgpr2 = def;
        // ...
        // use sgpr2
        //
        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case, we know the program will
        // never enter the second block (the loop) without entering
        // the first block (where the condition is computed), so there
        // is no chance for values to be over-written.

        SmallSet<unsigned, 8> Visited;
        if (HasVGPROperand || !phiHasBreakDef(MI, MRI, Visited)) {
          DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      case AMDGPU::REG_SEQUENCE:
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI);
        break;
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, MRI, *MDT);

  return true;
}