//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

static bool hasPressureSet(const int *PSets, unsigned PSetID) {
  for (unsigned i = 0; PSets[i] != -1; ++i) {
    if (PSets[i] == (int)PSetID)
      return true;
  }
  return false;
}

void SIRegisterInfo::classifyPressureSet(unsigned PSetID, unsigned Reg,
                                         BitVector &PressureSets) const {
  for (MCRegUnitIterator U(Reg, this); U.isValid(); ++U) {
    const int *PSets = getRegUnitPressureSets(*U);
    if (hasPressureSet(PSets, PSetID)) {
      PressureSets.set(PSetID);
      break;
    }
  }
}

static cl::opt<bool> EnableSpillSGPRToSMEM(
  "amdgpu-spill-sgpr-to-smem",
  cl::desc("Use scalar stores to spill SGPRs if supported by subtarget"),
  cl::init(false));

static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling SGPRs to VGPRs"),
  cl::ReallyHidden,
  cl::init(true));

SIRegisterInfo::SIRegisterInfo(const SISubtarget &ST) :
  AMDGPURegisterInfo(),
  SGPRPressureSets(getNumRegPressureSets()),
  VGPRPressureSets(getNumRegPressureSets()),
  SpillSGPRToVGPR(false),
  SpillSGPRToSMEM(false) {
  if (EnableSpillSGPRToSMEM && ST.hasScalarStores())
    SpillSGPRToSMEM = true;
  else if (EnableSpillSGPRToVGPR)
    SpillSGPRToVGPR = true;

  unsigned NumRegPressureSets = getNumRegPressureSets();

  SGPRSetID = NumRegPressureSets;
  VGPRSetID = NumRegPressureSets;

  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);
    classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);
  }

  // Determine the number of reg units for each pressure set.
  std::vector<unsigned> PressureSetRegUnits(NumRegPressureSets, 0);
  for (unsigned i = 0, e = getNumRegUnits(); i != e; ++i) {
    const int *PSets = getRegUnitPressureSets(i);
    for (unsigned j = 0; PSets[j] != -1; ++j) {
      ++PressureSetRegUnits[PSets[j]];
    }
  }

  unsigned VGPRMax = 0, SGPRMax = 0;
  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    if (isVGPRPressureSet(i) && PressureSetRegUnits[i] > VGPRMax) {
      VGPRSetID = i;
      VGPRMax = PressureSetRegUnits[i];
      continue;
    }
    if (isSGPRPressureSet(i) && PressureSetRegUnits[i] > SGPRMax) {
      SGPRSetID = i;
      SGPRMax = PressureSetRegUnits[i];
    }
  }

  assert(SGPRSetID < NumRegPressureSets &&
         VGPRSetID < NumRegPressureSets);
}

void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  for (; R.isValid(); ++R)
    Reserved.set(*R);
}

unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
}

static unsigned findPrivateSegmentWaveByteOffsetRegIndex(unsigned RegCount) {
  unsigned Reg;

  // Try to place it in a hole after PrivateSegmentBufferReg.
  if (RegCount & 3) {
    // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
    // alignment constraints, so we have a hole where we can put the wave
    // offset.
    Reg = RegCount - 1;
  } else {
    // We can put the segment buffer in (Idx - 4) ... (Idx - 1) and put the
    // wave offset before it.
    Reg = RegCount - 5;
  }

  return Reg;
}

unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  unsigned Reg = findPrivateSegmentWaveByteOffsetRegIndex(ST.getMaxNumSGPRs(MF));
  return AMDGPU::SGPR_32RegClass.getRegister(Reg);
}

unsigned SIRegisterInfo::reservedStackPtrOffsetReg(
  const MachineFunction &MF) const {
  return AMDGPU::SGPR32;
}

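// Mark every register this function may never allocate: special hardware
// registers, SGPRs/VGPRs above the subtarget's limits, and the scratch setup
// registers (wave offset, resource descriptor, SP, FP) if they are in use.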
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // M0 has to be reserved so that llvm accepts it as a live-in into a block.
  reserveRegisterTuples(Reserved, AMDGPU::M0);

  // Reserve the memory aperture registers.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);

  // Reserve Trap Handler registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::TBA);
  reserveRegisterTuples(Reserved, AMDGPU::TMA);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
  unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
    unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
    unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  // We have to assume the SP is needed in case there are calls in the function,
  // which is detected after the function is lowered. If we aren't really going
  // to need SP, don't bother reserving it.
  unsigned StackPtrReg = MFI->getStackPtrOffsetReg();

  if (StackPtrReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, StackPtrReg);
    assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
  }

  unsigned FrameReg = MFI->getFrameOffsetReg();
  if (FrameReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, FrameReg);
    assert(!isSubRegister(ScratchRSrcReg, FrameReg));
  }

  return Reserved;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
  if (Info->isEntryFunction()) {
    const MachineFrameInfo &MFI = Fn.getFrameInfo();
    return MFI.hasStackObjects() || MFI.hasCalls();
  }

  // May need scavenger for dealing with callee saved registers.
  return true;
}

bool SIRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects();
}

bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
  const MachineFunction &MF) const {
  // m0 is needed for the scalar store offset. m0 is unallocatable, so we can't
  // create a virtual register for it during frame index elimination, so the
  // scavenger is directly needed.
  return MF.getFrameInfo().hasStackObjects() &&
         MF.getSubtarget<SISubtarget>().hasScalarStores() &&
         MF.getInfo<SIMachineFunctionInfo>()->hasSpilledSGPRs();
}

bool SIRegisterInfo::requiresVirtualBaseRegisters(
  const MachineFunction &) const {
  // There are no special dedicated stack or frame pointers.
  return true;
}

bool SIRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // This helps catch bugs as verifier errors.
  return true;
}

int64_t SIRegisterInfo::getMUBUFInstrOffset(const MachineInstr *MI) const {
  assert(SIInstrInfo::isMUBUF(*MI));

  int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::offset);
  return MI->getOperand(OffIdx).getImm();
}

int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                 int Idx) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return 0;

  assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::vaddr) &&
         "Should never see frame index on non-address operand");

  return getMUBUFInstrOffset(MI);
}

bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  if (!MI->mayLoadOrStore())
    return false;

  int64_t FullOffset = Offset + getMUBUFInstrOffset(MI);

  return !isUInt<12>(FullOffset);
}

void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  unsigned BaseReg,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

  if (Offset == 0) {
    BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
      .addFrameIndex(FrameIdx);
    return;
  }

  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  unsigned FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(Offset);
  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
    .addFrameIndex(FrameIdx);

  TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
    .addReg(OffsetReg, RegState::Kill)
    .addReg(FIReg);
}

void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  bool SeenFI = false;
  for (const MachineOperand &MO: MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");
  assert(TII->isMUBUF(MI));
  assert(TII->getNamedOperand(MI, AMDGPU::OpName::soffset)->getReg() ==
         MF->getInfo<SIMachineFunctionInfo>()->getFrameOffsetReg() &&
         "should only be seeing frame offset relative FrameIndex");

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  assert(isUInt<12>(NewOffset) && "offset should be legal");

  FIOp->ChangeToRegister(BaseReg, false);
  OffsetOp->setImm(NewOffset);
}

bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                        unsigned BaseReg,
                                        int64_t Offset) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return false;

  int64_t NewOffset = Offset + getMUBUFInstrOffset(MI);

  return isUInt<12>(NewOffset);
}

const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
  const MachineFunction &MF, unsigned Kind) const {
  // This is inaccurate. It depends on the instruction and address space. The
  // only place where we should hit this is for dealing with frame indexes /
  // private accesses, so this is correct in that case.
  return &AMDGPU::VGPR_32RegClass;
}

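// Map a spill pseudo opcode to the number of 32-bit subregisters it covers.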
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

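// Map an OFFEN (register offset) MUBUF opcode to its OFFSET (immediate-only)
// form, or return -1 if there is no equivalent.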
static int getOffsetMUBUFStore(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  default:
    return -1;
  }
}

static int getOffsetMUBUFLoad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
  default:
    return -1;
  }
}

// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
// need to handle the case where an SGPR may need to be spilled while spilling.
static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
                                      MachineFrameInfo &MFI,
                                      MachineBasicBlock::iterator MI,
                                      int Index,
                                      int64_t Offset) {
  MachineBasicBlock *MBB = MI->getParent();
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = MI->mayStore();

  unsigned Opc = MI->getOpcode();
  int LoadStoreOp = IsStore ?
    getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
  if (LoadStoreOp == -1)
    return false;

  unsigned Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata)->getReg();

  BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
    .addReg(Reg, getDefRegState(!IsStore))
    .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
    .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
    .addImm(Offset)
    .addImm(0) // glc
    .addImm(0) // slc
    .addImm(0) // tfe
    .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  return true;
}

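// Emit the MUBUF instruction sequence for a VGPR spill or reload: the value
// is split into 32-bit subregisters, each stored to or loaded from its own
// 4-byte slot within the stack object.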
void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
                                         unsigned LoadStoreOp,
                                         int Index,
                                         unsigned ValueReg,
                                         bool IsKill,
                                         unsigned ScratchRsrcReg,
                                         unsigned ScratchOffsetReg,
                                         int64_t InstOffset,
                                         MachineMemOperand *MMO,
                                         RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  const MCInstrDesc &Desc = TII->get(LoadStoreOp);
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = Desc.mayStore();

  bool RanOutOfSGPRs = false;
  bool Scavenged = false;
  unsigned SOffset = ScratchOffsetReg;

  const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
  unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / 32;
  unsigned Size = NumSubRegs * 4;
  int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
  const int64_t OriginalImmOffset = Offset;

  unsigned Align = MFI.getObjectAlignment(Index);
  const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();

  if (!isUInt<12>(Offset + Size)) {
    SOffset = AMDGPU::NoRegister;

    // We don't have access to the register scavenger if this function is
    // called during PEI::scavengeFrameVirtualRegs().
    if (RS)
      SOffset = RS->FindUnusedReg(&AMDGPU::SGPR_32RegClass);

    if (SOffset == AMDGPU::NoRegister) {
      // There are no free SGPRs, and we are in the process of spilling VGPRs
      // too. Since we need a VGPR in order to spill SGPRs (this is true
      // on SI/CI and on VI it is true until we implement spilling using scalar
      // stores), we have no way to free up an SGPR. Our solution here is to
      // add the offset directly to the ScratchOffset register, and then
      // subtract the offset after the spill to return ScratchOffset to its
      // original value.
      RanOutOfSGPRs = true;
      SOffset = ScratchOffsetReg;
    } else {
      Scavenged = true;
    }

    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
      .addReg(ScratchOffsetReg)
      .addImm(Offset);

    Offset = 0;
  }

  const unsigned EltSize = 4;

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
    unsigned SubReg = NumSubRegs == 1 ?
      ValueReg : getSubReg(ValueReg, getSubRegFromChannel(i));

    unsigned SOffsetRegState = 0;
    unsigned SrcDstRegState = getDefRegState(!IsStore);
    if (i + 1 == e) {
      SOffsetRegState |= getKillRegState(Scavenged);
      // The last implicit use carries the "Kill" flag.
      SrcDstRegState |= getKillRegState(IsKill);
    }

    MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
    MachineMemOperand *NewMMO
      = MF->getMachineMemOperand(PInfo, MMO->getFlags(),
                                 EltSize, MinAlign(Align, EltSize * i));

    auto MIB = BuildMI(*MBB, MI, DL, Desc)
      .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset, SOffsetRegState)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addMemOperand(NewMMO);

    if (NumSubRegs > 1)
      MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
  }

  if (RanOutOfSGPRs) {
    // Subtract the offset we added to the ScratchOffset register.
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
      .addReg(ScratchOffsetReg)
      .addImm(OriginalImmOffset);
  }
}

static std::pair<unsigned, unsigned> getSpillEltSize(unsigned SuperRegSize,
                                                     bool Store) {
  if (SuperRegSize % 16 == 0) {
    return { 16, Store ? AMDGPU::S_BUFFER_STORE_DWORDX4_SGPR :
                         AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR };
  }

  if (SuperRegSize % 8 == 0) {
    return { 8, Store ? AMDGPU::S_BUFFER_STORE_DWORDX2_SGPR :
                        AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR };
  }

  return { 4, Store ? AMDGPU::S_BUFFER_STORE_DWORD_SGPR :
                      AMDGPU::S_BUFFER_LOAD_DWORD_SGPR };
}

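// Spill an SGPR: preferably to VGPR lanes via V_WRITELANE, optionally to
// scratch memory via scalar stores (SMEM) when enabled, and otherwise through
// a temporary VGPR and a real stack slot.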
bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
                               int Index,
                               RegScavenger *RS,
                               bool OnlyToVGPR) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  unsigned SuperReg = MI->getOperand(0).getReg();
  bool IsKill = MI->getOperand(0).isKill();
  const DebugLoc &DL = MI->getDebugLoc();

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();

  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned ScalarStoreOp;
  unsigned EltSize = 4;
  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarStoreOp) =
      getSpillEltSize(getRegSizeInBits(*RC) / 8, true);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      int64_t FrOffset = FrameInfo.getObjectOffset(Index);

      // The allocated memory size is really the wavefront size * the frame
      // index size. The widest register class is 64 bytes, so a 4-byte scratch
      // allocation is enough to spill this in a single stack object.
      //
      // FIXME: Frame size/offsets are computed earlier than this, so the extra
      // space is still unnecessarily allocated.

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));

      // SMEM instructions only support a single offset, so increment the wave
      // offset.

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg());
      }

      BuildMI(*MBB, MI, DL, TII->get(ScalarStoreOp))
        .addReg(SubReg, getKillRegState(IsKill)) // sdata
        .addReg(MFI->getScratchRSrcReg())        // sbase
        .addReg(OffsetReg, RegState::Kill)       // soff
        .addImm(0)                               // glc
        .addMemOperand(MMO);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
        .addReg(SubReg, getKillRegState(IsKill))
        .addImm(Spill.Lane);

      // FIXME: Since this spills to another register instead of an actual
      // frame index, we should delete the frame index when all references to
      // it are fixed.
    } else {
      // XXX - Can the spill to VGPR fail for some subregisters but not others?
      if (OnlyToVGPR)
        return false;

      // Spill SGPR to a frame index.
      // TODO: Should VI try to spill to VGPR and then spill to SMEM?
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      MachineInstrBuilder Mov
        = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addReg(SubReg, SubKillState);

      // There could be undef components of a spilled super register.
      // TODO: Can we detect this and skip the spill?
      if (NumSubRegs > 1) {
        // The last implicit use of the SuperReg carries the "Kill" flag.
        unsigned SuperKillState = 0;
        if (i + 1 == e)
          SuperKillState |= getKillRegState(IsKill);
        Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
      }

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
        .addReg(TmpReg, RegState::Kill)    // src
        .addFrameIndex(Index)              // vaddr
        .addReg(MFI->getScratchRSrcReg())  // srsrc
        .addReg(MFI->getFrameOffsetReg())  // soffset
        .addImm(i * 4)                     // offset
        .addMemOperand(MMO);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  MFI->addToSpilledSGPRs(NumSubRegs);
  return true;
}

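// Reload an SGPR spilled by spillSGPR, using the matching strategy: VGPR
// lanes via V_READLANE, scalar loads from scratch, or a temporary VGPR plus
// V_READFIRSTLANE for the stack-slot case.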
bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
                                 int Index,
                                 RegScavenger *RS,
                                 bool OnlyToVGPR) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = MI->getDebugLoc();

  unsigned SuperReg = MI->getOperand(0).getReg();
  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned EltSize = 4;
  unsigned ScalarLoadOp;

  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarLoadOp) =
      getSpillEltSize(getRegSizeInBits(*RC) / 8, false);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  int64_t FrOffset = FrameInfo.getObjectOffset(Index);

  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      // FIXME: Size may be > 4 but extra bytes wasted.
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                   EltSize, MinAlign(Align, EltSize * i));

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg());
      }

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(ScalarLoadOp), SubReg)
        .addReg(MFI->getScratchRSrcReg())  // sbase
        .addReg(OffsetReg, RegState::Kill) // soff
        .addImm(0)                         // glc
        .addMemOperand(MMO);

      if (NumSubRegs > 1)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
      auto MIB =
        BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
        .addReg(Spill.VGPR)
        .addImm(Spill.Lane);

      if (NumSubRegs > 1)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);
    } else {
      if (OnlyToVGPR)
        return false;

      // Restore SGPR from a stack slot.
      // FIXME: We should use S_LOAD_DWORD here for VI.
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      unsigned Align = FrameInfo.getObjectAlignment(Index);

      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);

      MachineMemOperand *MMO = MF->getMachineMemOperand(PtrInfo,
        MachineMemOperand::MOLoad, EltSize,
        MinAlign(Align, EltSize * i));

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg)
        .addFrameIndex(Index)              // vaddr
        .addReg(MFI->getScratchRSrcReg())  // srsrc
        .addReg(MFI->getFrameOffsetReg())  // soffset
        .addImm(i * 4)                     // offset
        .addMemOperand(MMO);

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
        .addReg(TmpReg, RegState::Kill);

      if (NumSubRegs > 1)
        MIB.addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  return true;
}

/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
/// a VGPR and the stack slot can be safely eliminated when all other users are
/// handled.
bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
  MachineBasicBlock::iterator MI,
  int FI,
  RegScavenger *RS) const {
  switch (MI->getOpcode()) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE:
    return spillSGPR(MI, FI, RS, true);
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
    return restoreSGPR(MI, FI, RS, true);
  default:
    llvm_unreachable("not an SGPR spill instruction");
  }
}

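// Rewrite a frame index operand into real registers and/or an immediate
// offset. Spill pseudos are expanded here; other users get the object offset
// folded into the MUBUF offset or materialized into a register.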
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                        int SPAdj, unsigned FIOperandNum,
                                        RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      spillSGPR(MI, Index, RS);
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      restoreSGPR(MI, Index, RS);
      break;
    }

    // VGPR register spill
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE: {
      const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                         AMDGPU::OpName::vdata);
      buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            Index,
            VData->getReg(), VData->isKill(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
            *MI->memoperands_begin(),
            RS);
      MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
      MI->eraseFromParent();
      break;
    }
    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE: {
      const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                         AMDGPU::OpName::vdata);

      buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            Index,
            VData->getReg(), VData->isKill(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
            *MI->memoperands_begin(),
            RS);
      MI->eraseFromParent();
      break;
    }

    default: {
      const DebugLoc &DL = MI->getDebugLoc();
      bool IsMUBUF = TII->isMUBUF(*MI);

      if (!IsMUBUF &&
          MFI->getFrameOffsetReg() != MFI->getScratchWaveOffsetReg()) {
        // Convert to an absolute stack address by finding the offset from the
        // scratch wave base and scaling by the wave size.
        //
        // In an entry function/kernel the stack address is already the
        // absolute address relative to the scratch wave offset.

        unsigned DiffReg
          = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

        bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
        unsigned ResultReg = IsCopy ?
          MI->getOperand(0).getReg() :
          MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), DiffReg)
          .addReg(MFI->getFrameOffsetReg())
          .addReg(MFI->getScratchWaveOffsetReg());

        int64_t Offset = FrameInfo.getObjectOffset(Index);
        if (Offset == 0) {
          // XXX - This never happens because of emergency scavenging slot at 0?
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
            .addImm(Log2_32(ST.getWavefrontSize()))
            .addReg(DiffReg);
        } else {
          unsigned CarryOut
            = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
          unsigned ScaledReg
            = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ScaledReg)
            .addImm(Log2_32(ST.getWavefrontSize()))
            .addReg(DiffReg, RegState::Kill);

          // TODO: Fold if use instruction is another add of a constant.
          if (AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ADD_I32_e64), ResultReg)
              .addReg(CarryOut, RegState::Define | RegState::Dead)
              .addImm(Offset)
              .addReg(ScaledReg, RegState::Kill);
          } else {
            unsigned ConstOffsetReg
              = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
              .addImm(Offset);
            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ADD_I32_e64), ResultReg)
              .addReg(CarryOut, RegState::Define | RegState::Dead)
              .addReg(ConstOffsetReg, RegState::Kill)
              .addReg(ScaledReg, RegState::Kill);
          }

          MRI.setRegAllocationHint(CarryOut, 0, AMDGPU::VCC);
        }

        // Don't introduce an extra copy if we're just materializing in a mov.
        if (IsCopy)
          MI->eraseFromParent();
        else
          FIOp.ChangeToRegister(ResultReg, false, false, true);
        return;
      }

      if (IsMUBUF) {
        // Disable offen so we don't need a 0 vgpr base.
        assert(static_cast<int>(FIOperandNum) ==
               AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::vaddr));

        assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg()
               == MFI->getFrameOffsetReg());

        int64_t Offset = FrameInfo.getObjectOffset(Index);
        int64_t OldImm
          = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
        int64_t NewOffset = OldImm + Offset;

        if (isUInt<12>(NewOffset) &&
            buildMUBUFOffsetLoadStore(TII, FrameInfo, MI, Index, NewOffset)) {
          MI->eraseFromParent();
          return;
        }
      }

      // If the offset is simply too big, don't convert to a scratch wave
      // offset relative index.

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
          .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}

StringRef SIRegisterInfo::getRegAsmName(unsigned Reg) const {
  #define AMDGPU_REG_ASM_NAMES
  #include "AMDGPURegAsmNames.inc.cpp"

  #define REG_RANGE(BeginReg, EndReg, RegTable)            \
    if (Reg >= BeginReg && Reg <= EndReg) {                \
      unsigned Index = Reg - BeginReg;                     \
      assert(Index < array_lengthof(RegTable));            \
      return RegTable[Index];                              \
    }

  REG_RANGE(AMDGPU::VGPR0, AMDGPU::VGPR255, VGPR32RegNames);
  REG_RANGE(AMDGPU::SGPR0, AMDGPU::SGPR103, SGPR32RegNames);
  REG_RANGE(AMDGPU::VGPR0_VGPR1, AMDGPU::VGPR254_VGPR255, VGPR64RegNames);
  REG_RANGE(AMDGPU::SGPR0_SGPR1, AMDGPU::SGPR102_SGPR103, SGPR64RegNames);
  REG_RANGE(AMDGPU::VGPR0_VGPR1_VGPR2, AMDGPU::VGPR253_VGPR254_VGPR255,
            VGPR96RegNames);

  REG_RANGE(AMDGPU::VGPR0_VGPR1_VGPR2_VGPR3,
            AMDGPU::VGPR252_VGPR253_VGPR254_VGPR255,
            VGPR128RegNames);
  REG_RANGE(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3,
            AMDGPU::SGPR100_SGPR101_SGPR102_SGPR103,
            SGPR128RegNames);

  REG_RANGE(AMDGPU::VGPR0_VGPR1_VGPR2_VGPR3_VGPR4_VGPR5_VGPR6_VGPR7,
            AMDGPU::VGPR248_VGPR249_VGPR250_VGPR251_VGPR252_VGPR253_VGPR254_VGPR255,
            VGPR256RegNames);

  REG_RANGE(
    AMDGPU::VGPR0_VGPR1_VGPR2_VGPR3_VGPR4_VGPR5_VGPR6_VGPR7_VGPR8_VGPR9_VGPR10_VGPR11_VGPR12_VGPR13_VGPR14_VGPR15,
    AMDGPU::VGPR240_VGPR241_VGPR242_VGPR243_VGPR244_VGPR245_VGPR246_VGPR247_VGPR248_VGPR249_VGPR250_VGPR251_VGPR252_VGPR253_VGPR254_VGPR255,
    VGPR512RegNames);

  REG_RANGE(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7,
            AMDGPU::SGPR96_SGPR97_SGPR98_SGPR99_SGPR100_SGPR101_SGPR102_SGPR103,
            SGPR256RegNames);

  REG_RANGE(
    AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7_SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15,
    AMDGPU::SGPR88_SGPR89_SGPR90_SGPR91_SGPR92_SGPR93_SGPR94_SGPR95_SGPR96_SGPR97_SGPR98_SGPR99_SGPR100_SGPR101_SGPR102_SGPR103,
    SGPR512RegNames);

  #undef REG_RANGE

  // FIXME: Rename flat_scr so we don't need to special case this.
  switch (Reg) {
  case AMDGPU::FLAT_SCR:
    return "flat_scratch";
  case AMDGPU::FLAT_SCR_LO:
    return "flat_scratch_lo";
  case AMDGPU::FLAT_SCR_HI:
    return "flat_scratch_hi";
  default:
    // For the special named registers the default is fine.
    return TargetRegisterInfo::getRegAsmName(Reg);
  }
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::SCC_CLASSRegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size < 32)
    return false;
  switch (Size) {
  case 32:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 96:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 128:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 256:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 512:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (getRegSizeInBits(*SRC)) {
  case 32:
    return &AMDGPU::VGPR_32RegClass;
  case 64:
    return &AMDGPU::VReg_64RegClass;
  case 96:
    return &AMDGPU::VReg_96RegClass;
  case 128:
    return &AMDGPU::VReg_128RegClass;
  case 256:
    return &AMDGPU::VReg_256RegClass;
  case 512:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
                                         const TargetRegisterClass *VRC) const {
  switch (getRegSizeInBits(*VRC)) {
  case 32:
    return &AMDGPU::SGPR_32RegClass;
  case 64:
    return &AMDGPU::SReg_64RegClass;
  case 128:
    return &AMDGPU::SReg_128RegClass;
  case 256:
    return &AMDGPU::SReg_256RegClass;
  case 512:
    return &AMDGPU::SReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  LaneBitmask::Type Mask = getSubRegIndexLaneMask(SubIdx).getAsInteger();
  unsigned Count = countPopulation(Mask);
  if (isSGPRClass(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::SGPR_32RegClass;
    case 2:
      return &AMDGPU::SReg_64RegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else {
    switch (Count) {
    case 1:
      return &AMDGPU::VGPR_32RegClass;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    case 8:
      return &AMDGPU::VReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  }
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want to
  // stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so we
  // only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  //  => vreg3 = COPY vreg0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

// FIXME: Most of these are flexible with HSA and we don't need to reserve them
// as input registers if unused. Whether the dispatch ptr is necessary should be
// easy to detect from used intrinsics. Scratch setup is harder to know.
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  (void)ST;
  switch (Value) {
  case SIRegisterInfo::WORKGROUP_ID_X:
    assert(MFI->hasWorkGroupIDX());
    return MFI->WorkGroupIDXSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Y:
    assert(MFI->hasWorkGroupIDY());
    return MFI->WorkGroupIDYSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Z:
    assert(MFI->hasWorkGroupIDZ());
    return MFI->WorkGroupIDZSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return MFI->PrivateSegmentWaveByteOffsetSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_BUFFER:
    assert(MFI->hasPrivateSegmentBuffer());
    return MFI->PrivateSegmentBufferUserSGPR;
  case SIRegisterInfo::IMPLICIT_BUFFER_PTR:
    assert(MFI->hasImplicitBufferPtr());
    return MFI->ImplicitBufferPtrUserSGPR;
  case SIRegisterInfo::KERNARG_SEGMENT_PTR:
    assert(MFI->hasKernargSegmentPtr());
    return MFI->KernargSegmentPtrUserSGPR;
  case SIRegisterInfo::DISPATCH_ID:
    assert(MFI->hasDispatchID());
    return MFI->DispatchIDUserSGPR;
  case SIRegisterInfo::FLAT_SCRATCH_INIT:
    assert(MFI->hasFlatScratchInit());
    return MFI->FlatScratchInitUserSGPR;
  case SIRegisterInfo::DISPATCH_PTR:
    assert(MFI->hasDispatchPtr());
    return MFI->DispatchPtrUserSGPR;
  case SIRegisterInfo::QUEUE_PTR:
    assert(MFI->hasQueuePtr());
    return MFI->QueuePtrUserSGPR;
  case SIRegisterInfo::WORKITEM_ID_X:
    assert(MFI->hasWorkItemIDX());
    return AMDGPU::VGPR0;
  case SIRegisterInfo::WORKITEM_ID_Y:
    assert(MFI->hasWorkItemIDY());
    return AMDGPU::VGPR1;
  case SIRegisterInfo::WORKITEM_ID_Z:
    assert(MFI->hasWorkItemIDZ());
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned
SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                   const TargetRegisterClass *RC,
                                   const MachineFunction &MF) const {

  for (unsigned Reg : *RC)
    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}

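// Return the subregister indices that split a register of class RC into
// EltSize-byte (4, 8, or 16) pieces; an empty list means the register is
// already a single element of that size.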
ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
                                                   unsigned EltSize) const {
  if (EltSize == 4) {
    static const int16_t Sub0_15[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
    };

    static const int16_t Sub0_7[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    };

    static const int16_t Sub0_3[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    };

    static const int16_t Sub0_2[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2,
    };

    static const int16_t Sub0_1[] = {
      AMDGPU::sub0, AMDGPU::sub1,
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 32:
      return {};
    case 64:
      return makeArrayRef(Sub0_1);
    case 96:
      return makeArrayRef(Sub0_2);
    case 128:
      return makeArrayRef(Sub0_3);
    case 256:
      return makeArrayRef(Sub0_7);
    case 512:
      return makeArrayRef(Sub0_15);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  if (EltSize == 8) {
    static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15
    };

    static const int16_t Sub0_7_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7
    };

    static const int16_t Sub0_3_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 64:
      return {};
    case 128:
      return makeArrayRef(Sub0_3_64);
    case 256:
      return makeArrayRef(Sub0_7_64);
    case 512:
      return makeArrayRef(Sub0_15_64);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  assert(EltSize == 16 && "unhandled register spill split size");

  static const int16_t Sub0_15_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11,
    AMDGPU::sub12_sub13_sub14_sub15
  };

  static const int16_t Sub0_7_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7
  };

  switch (AMDGPU::getRegBitWidth(*RC->MC)) {
  case 128:
    return {};
  case 256:
    return makeArrayRef(Sub0_7_128);
  case 512:
    return makeArrayRef(Sub0_15_128);
  default:
    llvm_unreachable("unhandled register size");
  }
}

const TargetRegisterClass*
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
                                  unsigned Reg) const {
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return MRI.getRegClass(Reg);

  return getPhysRegClass(Reg);
}

bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  return hasVGPRs(getRegClassForReg(MRI, Reg));
}

bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SubReg,
                                    const TargetRegisterClass *DstRC,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *NewRC) const {
  unsigned SrcSize = getRegSizeInBits(*SrcRC);
  unsigned DstSize = getRegSizeInBits(*DstRC);
  unsigned NewSize = getRegSizeInBits(*NewRC);

  // Do not increase size of registers beyond dword, we would need to allocate
  // adjacent registers and constrain regalloc more than needed.

  // Always allow dword coalescing.
  if (SrcSize <= 32 || DstSize <= 32)
    return true;

  return NewSize <= DstSize || NewSize <= SrcSize;
}

unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                       *MF.getFunction());
  switch (RC->getID()) {
  default:
    return AMDGPURegisterInfo::getRegPressureLimit(RC, MF);
  case AMDGPU::VGPR_32RegClassID:
    return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
  case AMDGPU::SGPR_32RegClassID:
    return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
  }
}

unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  if (Idx == getVGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  if (Idx == getSGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  return AMDGPURegisterInfo::getRegPressureSetLimit(MF, Idx);
}

const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
  static const int Empty[] = { -1 };

  if (hasRegUnit(AMDGPU::M0, RegUnit))
    return Empty;
  return AMDGPURegisterInfo::getRegUnitPressureSets(RegUnit);
}