//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {
  unsigned NumRegPressureSets = getNumRegPressureSets();

  SGPR32SetID = NumRegPressureSets;
  VGPR32SetID = NumRegPressureSets;
  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    if (strncmp("SGPR_32", getRegPressureSetName(i), 7) == 0)
      SGPR32SetID = i;
    else if (strncmp("VGPR_32", getRegPressureSetName(i), 7) == 0)
      VGPR32SetID = i;
  }
  assert(SGPR32SetID < NumRegPressureSets &&
         VGPR32SetID < NumRegPressureSets);
}

void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  for (; R.isValid(); ++R)
    Reserved.set(*R);
}

unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  if (ST.hasSGPRInitBug()) {
    // Leave space for flat_scr, xnack_mask, vcc, and alignment
    unsigned BaseIdx = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 8 - 4;
    unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
    return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
  }

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // 96/97 need to be reserved for flat_scr, 98/99 for xnack_mask, and
    // 100/101 for vcc. This is the next sgpr128 down.
    return AMDGPU::SGPR92_SGPR93_SGPR94_SGPR95;
  }

  return AMDGPU::SGPR96_SGPR97_SGPR98_SGPR99;
}

unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  if (ST.hasSGPRInitBug()) {
    unsigned Idx = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 6 - 1;
    return AMDGPU::SGPR_32RegClass.getRegister(Idx);
  }

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // Next register before reservations for flat_scr, xnack_mask, vcc,
    // and scratch resource.
    return AMDGPU::SGPR91;
  }

  return AMDGPU::SGPR95;
}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // Reserve the last 2 registers so we will always have at least 2 more that
  // will physically contain VCC.
  reserveRegisterTuples(Reserved, AMDGPU::SGPR102_SGPR103);

  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // SI/CI have 104 SGPRs. VI has 102. We need to shift down the reservation
    // for VCC/XNACK_MASK/FLAT_SCR.
    //
    // TODO: The SGPRs that alias to XNACK_MASK could be used as general purpose
    // SGPRs when the XNACK feature is not used. This is currently not done
    // because the code that counts SGPRs cannot account for such holes.
    reserveRegisterTuples(Reserved, AMDGPU::SGPR96_SGPR97);
    reserveRegisterTuples(Reserved, AMDGPU::SGPR98_SGPR99);
    reserveRegisterTuples(Reserved, AMDGPU::SGPR100_SGPR101);
  }

  // Tonga and Iceland can only allocate a fixed number of SGPRs due
  // to a hw bug.
  if (ST.hasSGPRInitBug()) {
    unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
    // Reserve some SGPRs for FLAT_SCRATCH, XNACK_MASK, and VCC (6 SGPRs).
    unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 6;

    for (unsigned i = Limit; i < NumSGPRs; ++i) {
      unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
      reserveRegisterTuples(Reserved, Reg);
    }
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need
    // to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  return Reserved;
}

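// The pressure limit for a register set is the number of SGPRs or VGPRs
// available at the target occupancy, scaled down by the width of the register
// class in 32-bit units: a 128-bit class consumes four registers per virtual
// register, so its effective limit is a quarter of the raw count.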
unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  const AMDGPUSubtarget &STI = MF.getSubtarget<AMDGPUSubtarget>();
  // FIXME: We should adjust the max number of waves based on LDS size.
  unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(),
                                          STI.getMaxWavesPerCU());
  unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU());

  unsigned VSLimit = SGPRLimit + VGPRLimit;

  for (regclass_iterator I = regclass_begin(), E = regclass_end();
       I != E; ++I) {
    const TargetRegisterClass *RC = *I;

    unsigned NumSubRegs = std::max((int)RC->getSize() / 4, 1);
    unsigned Limit;

    if (isPseudoRegClass(RC)) {
      // FIXME: This is a hack. We should never be considering the pressure of
      // these since no virtual register should ever have this class.
      Limit = VSLimit;
    } else if (isSGPRClass(RC)) {
      Limit = SGPRLimit / NumSubRegs;
    } else {
      Limit = VGPRLimit / NumSubRegs;
    }

    const int *Sets = getRegClassPressureSets(RC);
    assert(Sets);
    for (unsigned i = 0; Sets[i] != -1; ++i) {
      if (Sets[i] == (int)Idx)
        return Limit;
    }
  }

  return 256;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}

static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

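// Expand a spill pseudo into scratch buffer accesses, one BUFFER_*_DWORD per
// 32-bit sub-register. The MUBUF immediate offset field is only 12 bits, so
// if the highest sub-register offset does not fit, an SGPR is scavenged
// (falling back to SGPR0 with an error) to hold the folded base offset.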
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {

  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  bool Scavenged = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    } else {
      Scavenged = true;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;

    unsigned SOffsetRegState = 0;
    if (i + 1 == e && Scavenged)
      SOffsetRegState |= RegState::Kill;

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
      .addReg(SubReg, getDefRegState(IsLoad))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset, SOffsetRegState)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
      .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  }
}

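// Lower the spill pseudos and fold frame indices: SGPR spill pseudos become
// V_WRITELANE_B32/V_READLANE_B32 into lanes of a reserved VGPR, VGPR spill
// pseudos become scratch buffer accesses via buildScratchLoadStore, and any
// remaining frame index operand is folded to an immediate (or materialized
// into a scavenged VGPR if the immediate is not legal for the instruction).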
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        BuildMI(*MBB, MI, DL,
                TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
                Spill.VGPR)
                .addReg(SubReg)
                .addImm(Spill.Lane);
      }

      // FIXME: Since this spills to another register instead of an actual
      // frame index, we should delete the frame index when all references to
      // it are fixed.
      MI->eraseFromParent();
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        BuildMI(*MBB, MI, DL,
                TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
                .addReg(Spill.VGPR)
                .addImm(Spill.Lane)
                .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
      }

      // TODO: only do this when it is needed
      switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
      case AMDGPUSubtarget::SOUTHERN_ISLANDS:
        // "VALU writes SGPR" -> "SMRD reads that SGPR" needs 4 wait states
        // ("S_NOP 3") on SI
        TII->insertWaitStates(MI, 4);
        break;
      case AMDGPUSubtarget::SEA_ISLANDS:
        break;
      default: // VOLCANIC_ISLANDS and later
        // "VALU writes SGPR -> VMEM reads that SGPR" needs 5 wait states
        // ("S_NOP 4") on VI and later. This also applies to VALUs which write
        // VCC, but we're unlikely to see VMEM use VCC.
        TII->insertWaitStates(MI, 5);
      }

      MI->eraseFromParent();
      break;
    }

    // VGPR register spill
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE:
      buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index), RS);
      MI->eraseFromParent();
      break;
    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE: {
      buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index), RS);
      MI->eraseFromParent();
      break;
    }

    default: {
      int64_t Offset = FrameInfo->getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
        BuildMI(*MBB, MI, MI->getDebugLoc(),
                TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
                .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}

unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  switch (RC->getSize()) {
  case 4:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 8:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 12:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 16:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 32:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (SRC->getSize()) {
  case 4:
    return &AMDGPU::VGPR_32RegClass;
  case 8:
    return &AMDGPU::VReg_64RegClass;
  case 12:
    return &AMDGPU::VReg_96RegClass;
  case 16:
    return &AMDGPU::VReg_128RegClass;
  case 32:
    return &AMDGPU::VReg_256RegClass;
  case 64:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

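// The sub-register class is determined from the number of 32-bit lanes the
// sub-register index covers: the popcount of the lane mask gives the width in
// dwords, which selects the matching SGPR or VGPR class.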
const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                           const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  unsigned Count = countPopulation(getSubRegIndexLaneMask(SubIdx));
  if (isSGPRClass(RC)) {
    switch (Count) {
    case 1: return &AMDGPU::SGPR_32RegClass;
    case 2: return &AMDGPU::SReg_64RegClass;
    case 4: return &AMDGPU::SReg_128RegClass;
    case 8: return &AMDGPU::SReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else {
    switch (Count) {
    case 1: return &AMDGPU::VGPR_32RegClass;
    case 2: return &AMDGPU::VReg_64RegClass;
    case 3: return &AMDGPU::VReg_96RegClass;
    case 4: return &AMDGPU::VReg_128RegClass;
    case 8: return &AMDGPU::VReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  }
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want to
  // stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so we
  // only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  // => vreg3 = COPY vreg0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

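// VCC, FLAT_SCR and EXEC are special: their 32-bit halves have dedicated
// register names rather than hardware register indices, so they are handled
// explicitly before the generic index + channel calculation below.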
unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {

  switch (Reg) {
  case AMDGPU::VCC:
    switch (Channel) {
    case 0: return AMDGPU::VCC_LO;
    case 1: return AMDGPU::VCC_HI;
    default: llvm_unreachable("Invalid SubIdx for VCC");
    }

  case AMDGPU::FLAT_SCR:
    switch (Channel) {
    case 0: return AMDGPU::FLAT_SCR_LO;
    case 1: return AMDGPU::FLAT_SCR_HI;
    default: llvm_unreachable("Invalid SubIdx for FLAT_SCR");
    }

  case AMDGPU::EXEC:
    switch (Channel) {
    case 0: return AMDGPU::EXEC_LO;
    case 1: return AMDGPU::EXEC_HI;
    default: llvm_unreachable("Invalid SubIdx for EXEC");
    }
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return the
  // Reg. We need to have this check here, because the calculation below
  // using getHWRegIndex() will fail with special 32-bit registers like
  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}

bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}

// FIXME: Most of these are flexible with HSA and we don't need to reserve them
// as input registers if unused. Whether the dispatch ptr is necessary should be
// easy to detect from used intrinsics. Scratch setup is harder to know.
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  (void)ST;
  switch (Value) {
  case SIRegisterInfo::WORKGROUP_ID_X:
    assert(MFI->hasWorkGroupIDX());
    return MFI->WorkGroupIDXSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Y:
    assert(MFI->hasWorkGroupIDY());
    return MFI->WorkGroupIDYSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Z:
    assert(MFI->hasWorkGroupIDZ());
    return MFI->WorkGroupIDZSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return MFI->PrivateSegmentWaveByteOffsetSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_BUFFER:
    assert(ST.isAmdHsaOS() && "Non-HSA ABI currently uses relocations");
    assert(MFI->hasPrivateSegmentBuffer());
    return MFI->PrivateSegmentBufferUserSGPR;
  case SIRegisterInfo::KERNARG_SEGMENT_PTR:
    assert(MFI->hasKernargSegmentPtr());
    return MFI->KernargSegmentPtrUserSGPR;
  case SIRegisterInfo::DISPATCH_PTR:
    assert(MFI->hasDispatchPtr());
    return MFI->DispatchPtrUserSGPR;
  case SIRegisterInfo::QUEUE_PTR:
    llvm_unreachable("not implemented");
  case SIRegisterInfo::WORKITEM_ID_X:
    assert(MFI->hasWorkItemIDX());
    return AMDGPU::VGPR0;
  case SIRegisterInfo::WORKITEM_ID_Y:
    assert(MFI->hasWorkItemIDY());
    return AMDGPU::VGPR1;
  case SIRegisterInfo::WORKITEM_ID_Z:
    assert(MFI->hasWorkItemIDZ());
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                            const TargetRegisterClass *RC) const {
  for (unsigned Reg : *RC)
    if (!MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}

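// Maximum number of VGPRs a wave may use at the given occupancy: the 256
// VGPRs per SIMD are divided among the concurrent waves and rounded down to
// the hardware allocation granularity of 4 registers.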
unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
  switch (WaveCount) {
    case 10: return 24;
    case 9:  return 28;
    case 8:  return 32;
    case 7:  return 36;
    case 6:  return 40;
    case 5:  return 48;
    case 4:  return 64;
    case 3:  return 84;
    case 2:  return 128;
    default: return 256;
  }
}

unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
                                            unsigned WaveCount) const {
  if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {