//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %vreg3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains their definition class to
/// <vgpr> if the user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr> then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"
namespace {

class SIFixSGPRCopies : public MachineFunctionPass {

  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace
INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)
char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}
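// Return true if any virtual register operand of \p MI belongs to a register
// class that contains VGPRs.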
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}
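// Return the register classes of the source and destination of \p Copy,
// using the physical register class when an operand is not virtual.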
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
    TargetRegisterInfo::isVirtualRegister(SrcReg) ?
    MRI.getRegClass(SrcReg) :
    TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
    TargetRegisterInfo::isVirtualRegister(DstReg) ?
    MRI.getRegClass(DstReg) :
    TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}
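// Return true if a copy between these register classes moves a value from
// VGPRs to SGPRs.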
static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(DstRC) && TRI.hasVGPRs(SrcRC);
}
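// Return true if a copy between these register classes moves a value from
// SGPRs to VGPRs.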
static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(SrcRC) && TRI.hasVGPRs(DstRC);
}
// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
// SGPRx = ...
// SGPRy = REG_SEQUENCE SGPRx, sub0 ...
// VGPRz = COPY SGPRy
//
// ==>
//
// VGPRx = COPY SGPRx
// VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy
  //
  // ==>
  //
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY), TmpReg)
      .addOperand(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}
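// Return true if any incoming value of \p PHI lives in a register class that
// contains VGPRs.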
static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}
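// Return true if any incoming value of \p PHI is defined, possibly through a
// chain of other PHIs, by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK pseudo;
// \p Visited guards against cycles in the PHI graph.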
static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getUniqueVRegDef(Reg);
    assert(DefInstr);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_BREAK:
    case AMDGPU::SI_IF_BREAK:
    case AMDGPU::SI_ELSE_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}
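// Return true if one of \p MBB's terminators modifies EXEC, i.e. the block
// ends in divergent control flow.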
static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}
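// Check whether the copy \p Copy of the immediate materialized by \p MoveImm
// can be rewritten as a scalar move. On success, return the scalar move
// opcode in \p SMovOp and the immediate value in \p Imm.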
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}
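// Walk every instruction in the function, rewriting illegal VGPR-to-SGPR
// copies and moving SGPR-defining PHIs, REG_SEQUENCEs, and INSERT_SUBREGs
// with VGPR operands over to the VALU.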
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY: {
        // If the destination register is a physical register there isn't
        // really much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(1).getReg());

          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI);
        }

        break;
      }
      case AMDGPU::PHI: {
        unsigned Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if the common dominator of the
        // two incoming blocks terminates with a uniform branch.
        if (MI.getNumExplicitOperands() == 5) {
          MachineBasicBlock *MBB0 = MI.getOperand(2).getMBB();
          MachineBasicBlock *MBB1 = MI.getOperand(4).getMBB();

          MachineBasicBlock *NCD = MDT->findNearestCommonDominator(MBB0, MBB1);
          if (NCD && !hasTerminatorThatModifiesExec(*NCD, *TRI)) {
            DEBUG(dbgs() << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }
        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For Example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will Become:
        //
        // sgpr2 = def;
        // ...
        // sgpr3 = def;
        // ...
        // use sgpr2
        //
        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case, we know the program will never enter
        // the second block (the loop) without entering the first block
        // (where the condition is computed), so there is no chance for
        // values to be over-written.

        SmallSet<unsigned, 8> Visited;
        if (phiHasVGPROperands(MI, MRI, TRI, TII) ||
            !phiHasBreakDef(MI, MRI, Visited)) {
          DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI);
        break;
      }
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);