//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

static bool hasPressureSet(const int *PSets, unsigned PSetID) {
  for (unsigned i = 0; PSets[i] != -1; ++i) {
    if (PSets[i] == (int)PSetID)
      return true;
  }
  return false;
}

void SIRegisterInfo::classifyPressureSet(unsigned PSetID, unsigned Reg,
                                         BitVector &PressureSets) const {
  for (MCRegUnitIterator U(Reg, this); U.isValid(); ++U) {
    const int *PSets = getRegUnitPressureSets(*U);
    if (hasPressureSet(PSets, PSetID)) {
      PressureSets.set(PSetID);
      break;
    }
  }
}

static cl::opt<bool> EnableSpillSGPRToSMEM(
  "amdgpu-spill-sgpr-to-smem",
  cl::desc("Use scalar stores to spill SGPRs if supported by subtarget"),
  cl::init(false));

static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling SGPRs to VGPRs"),
  cl::ReallyHidden,
  cl::init(true));

SIRegisterInfo::SIRegisterInfo(const SISubtarget &ST) :
  AMDGPURegisterInfo(),
  SGPRPressureSets(getNumRegPressureSets()),
  VGPRPressureSets(getNumRegPressureSets()),
  SpillSGPRToVGPR(false),
  SpillSGPRToSMEM(false) {
  if (EnableSpillSGPRToSMEM && ST.hasScalarStores())
    SpillSGPRToSMEM = true;
  else if (EnableSpillSGPRToVGPR)
    SpillSGPRToVGPR = true;

  unsigned NumRegPressureSets = getNumRegPressureSets();

  SGPRSetID = NumRegPressureSets;
  VGPRSetID = NumRegPressureSets;

  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);
    classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);
  }

  // Determine the number of reg units for each pressure set.
  std::vector<unsigned> PressureSetRegUnits(NumRegPressureSets, 0);
  for (unsigned i = 0, e = getNumRegUnits(); i != e; ++i) {
    const int *PSets = getRegUnitPressureSets(i);
    for (unsigned j = 0; PSets[j] != -1; ++j) {
      ++PressureSetRegUnits[PSets[j]];
    }
  }

  unsigned VGPRMax = 0, SGPRMax = 0;
  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    if (isVGPRPressureSet(i) && PressureSetRegUnits[i] > VGPRMax) {
      VGPRSetID = i;
      VGPRMax = PressureSetRegUnits[i];
      continue;
    }
    if (isSGPRPressureSet(i) && PressureSetRegUnits[i] > SGPRMax) {
      SGPRSetID = i;
      SGPRMax = PressureSetRegUnits[i];
    }
  }

  assert(SGPRSetID < NumRegPressureSets &&
         VGPRSetID < NumRegPressureSets);
}

void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
                                           unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  for (; R.isValid(); ++R)
    Reserved.set(*R);
}

unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
}

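// For example, with a 102-SGPR budget, alignDown(102, 4) - 4 == 96, so the
// scratch resource descriptor occupies the 4-aligned s[96:99] quad and the
// unaligned registers above it stay available for other uses.
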
unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  unsigned RegCount = ST.getMaxNumSGPRs(MF);
  unsigned Reg;

  // Try to place it in a hole after PrivateSegmentBufferReg.
  if (RegCount & 3) {
    // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
    // alignment constraints, so we have a hole where we can put the wave
    // offset.
    Reg = RegCount - 1;
  } else {
    // We can put the segment buffer in (Idx - 4) ... (Idx - 1) and put the
    // wave offset before it.
    Reg = RegCount - 5;
  }
  return AMDGPU::SGPR_32RegClass.getRegister(Reg);
}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // Reserve the memory aperture registers.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);

  // Reserve Trap Handler registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::TBA);
  reserveRegisterTuples(Reserved, AMDGPU::TMA);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
  unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
    unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
    unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  return Reserved;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo().hasStackObjects();
}

bool
SIRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects();
}

bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
  const MachineFunction &MF) const {
  // m0 is needed for the scalar store offset. m0 is unallocatable, so we can't
  // create a virtual register for it during frame index elimination, so the
  // scavenger is directly needed.
  return MF.getFrameInfo().hasStackObjects() &&
         MF.getSubtarget<SISubtarget>().hasScalarStores() &&
         MF.getInfo<SIMachineFunctionInfo>()->hasSpilledSGPRs();
}

bool SIRegisterInfo::requiresVirtualBaseRegisters(
  const MachineFunction &) const {
  // There are no special dedicated stack or frame pointers.
  return true;
}

bool SIRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // This helps catch bugs as verifier errors.
  return true;
}

int64_t SIRegisterInfo::getMUBUFInstrOffset(const MachineInstr *MI) const {
  assert(SIInstrInfo::isMUBUF(*MI));

  int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::offset);
  return MI->getOperand(OffIdx).getImm();
}

int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                 int Idx) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return 0;

  assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::vaddr) &&
         "Should never see frame index on non-address operand");

  return getMUBUFInstrOffset(MI);
}

bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  if (!MI->mayLoadOrStore())
    return false;

  int64_t FullOffset = Offset + getMUBUFInstrOffset(MI);

  return !isUInt<12>(FullOffset);
}

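// Note: MUBUF instructions encode a 12-bit unsigned immediate byte offset, so
// any combined offset outside 0..4095 (the isUInt<12> check above) requires a
// materialized base register instead.
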
void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  unsigned BaseReg,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

  if (Offset == 0) {
    BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
      .addFrameIndex(FrameIdx);
    return;
  }

  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  unsigned FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(Offset);
  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
    .addFrameIndex(FrameIdx);

  TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
    .addReg(OffsetReg, RegState::Kill)
    .addReg(FIReg);
}

void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  bool SeenFI = false;
  for (const MachineOperand &MO: MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");

  assert(TII->isMUBUF(MI));

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  assert(isUInt<12>(NewOffset) && "offset should be legal");

  FIOp->ChangeToRegister(BaseReg, false);
  OffsetOp->setImm(NewOffset);
}

bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                        unsigned BaseReg,
                                        int64_t Offset) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return false;

  int64_t NewOffset = Offset + getMUBUFInstrOffset(MI);

  return isUInt<12>(NewOffset);
}

const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
  const MachineFunction &MF, unsigned Kind) const {
  // This is inaccurate. It depends on the instruction and address space. The
  // only place where we should hit this is for dealing with frame indexes /
  // private accesses, so this is correct in that case.
  return &AMDGPU::VGPR_32RegClass;
}

static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

static int getOffsetMUBUFStore(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  default:
    return -1;
  }
}

static int getOffsetMUBUFLoad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
  default:
    return -1;
  }
}

// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
// need to handle the case where an SGPR may need to be spilled while spilling.
static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
                                      MachineFrameInfo &MFI,
                                      MachineBasicBlock::iterator MI,
                                      int Index,
                                      int64_t Offset) {
  MachineBasicBlock *MBB = MI->getParent();
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = MI->mayStore();

  unsigned Opc = MI->getOpcode();
  int LoadStoreOp = IsStore ?
    getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
  if (LoadStoreOp == -1)
    return false;

  unsigned Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata)->getReg();

  BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
    .addReg(Reg, getDefRegState(!IsStore))
    .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
    .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
    .addImm(Offset)
    .addImm(0) // glc
    .addImm(0) // slc
    .addImm(0) // tfe
    .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  return true;
}

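// For example, a BUFFER_STORE_DWORD_OFFEN whose address came from a frame
// index can be rewritten by the function above to BUFFER_STORE_DWORD_OFFSET
// once the offset folds to a legal immediate, dropping the VGPR address.
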
void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
                                         unsigned LoadStoreOp,
                                         int Index,
                                         unsigned ValueReg,
                                         bool IsKill,
                                         unsigned ScratchRsrcReg,
                                         unsigned ScratchOffsetReg,
                                         int64_t InstOffset,
                                         MachineMemOperand *MMO,
                                         RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  const MCInstrDesc &Desc = TII->get(LoadStoreOp);
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = Desc.mayStore();

  bool RanOutOfSGPRs = false;
  bool Scavenged = false;
  unsigned SOffset = ScratchOffsetReg;

  const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
  unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / 32;
  unsigned Size = NumSubRegs * 4;
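  // Each sub-register covers one 32-bit dword: e.g. a VReg_128 value yields
  // NumSubRegs == 4 and Size == 16 bytes of scratch.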
  int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
  const int64_t OriginalImmOffset = Offset;

  unsigned Align = MFI.getObjectAlignment(Index);
  const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();

  if (!isUInt<12>(Offset + Size)) {
    SOffset = AMDGPU::NoRegister;

    // We don't have access to the register scavenger if this function is
    // called during PEI::scavengeFrameVirtualRegs().
    if (RS)
      SOffset = RS->FindUnusedReg(&AMDGPU::SGPR_32RegClass);

    if (SOffset == AMDGPU::NoRegister) {
      // There are no free SGPRs, and we are in the process of spilling VGPRs
      // too. Since we need a VGPR in order to spill SGPRs (this is true on
      // SI/CI and on VI it is true until we implement spilling using scalar
      // stores), we have no way to free up an SGPR. Our solution here is to
      // add the offset directly to the ScratchOffset register, and then
      // subtract the offset after the spill to return ScratchOffset to its
      // original value.
      RanOutOfSGPRs = true;
      SOffset = ScratchOffsetReg;
    } else {
      Scavenged = true;
    }

    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
      .addReg(ScratchOffsetReg)
      .addImm(Offset);

    Offset = 0;
  }

  const unsigned EltSize = 4;

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
    unsigned SubReg = NumSubRegs == 1 ?
      ValueReg : getSubReg(ValueReg, getSubRegFromChannel(i));

    unsigned SOffsetRegState = 0;
    unsigned SrcDstRegState = getDefRegState(!IsStore);
    if (i + 1 == e) {
      SOffsetRegState |= getKillRegState(Scavenged);
      // The last implicit use carries the "Kill" flag.
      SrcDstRegState |= getKillRegState(IsKill);
    }

    MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
    MachineMemOperand *NewMMO
      = MF->getMachineMemOperand(PInfo, MMO->getFlags(),
                                 EltSize, MinAlign(Align, EltSize * i));

    auto MIB = BuildMI(*MBB, MI, DL, Desc)
      .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset, SOffsetRegState)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addMemOperand(NewMMO);

    if (NumSubRegs > 1)
      MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
  }

  if (RanOutOfSGPRs) {
    // Subtract the offset we added to the ScratchOffset register.
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
      .addReg(ScratchOffsetReg)
      .addImm(OriginalImmOffset);
  }
}

static std::pair<unsigned, unsigned> getSpillEltSize(unsigned SuperRegSize,
                                                     bool Store) {
  if (SuperRegSize % 16 == 0) {
    return { 16, Store ? AMDGPU::S_BUFFER_STORE_DWORDX4_SGPR :
                         AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR };
  }

  if (SuperRegSize % 8 == 0) {
    return { 8, Store ? AMDGPU::S_BUFFER_STORE_DWORDX2_SGPR :
                        AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR };
  }

  return { 4, Store ? AMDGPU::S_BUFFER_STORE_DWORD_SGPR :
                      AMDGPU::S_BUFFER_LOAD_DWORD_SGPR };
}

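// For example, spilling a 32-byte SReg_256 picks EltSize == 16 above, so the
// spill is covered by two DWORDX4 scalar buffer operations.
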
bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
                               int Index,
                               RegScavenger *RS,
                               bool OnlyToVGPR) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  unsigned SuperReg = MI->getOperand(0).getReg();
  bool IsKill = MI->getOperand(0).isKill();
  const DebugLoc &DL = MI->getDebugLoc();

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();

  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned ScalarStoreOp;
  unsigned EltSize = 4;
  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarStoreOp) = getSpillEltSize(RC->getSize(), true);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      int64_t FrOffset = FrameInfo.getObjectOffset(Index);

      // The allocated memory size is really the wavefront size * the frame
      // index size. The widest register class is 64 bytes, so a 4-byte scratch
      // allocation is enough to spill this in a single stack object.
      //
      // FIXME: Frame size/offsets are computed earlier than this, so the extra
      // space is still unnecessarily allocated.

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));

      // SMEM instructions only support a single offset, so increment the wave
      // offset.

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
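      // The scratch layout gives each lane its own 4-byte slot, so the
      // per-lane frame offset is scaled by the wavefront size; e.g. with 64
      // lanes, a frame offset of 4 lands 256 bytes into the wave's scratch.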
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getScratchWaveOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getScratchWaveOffsetReg());
      }

      BuildMI(*MBB, MI, DL, TII->get(ScalarStoreOp))
        .addReg(SubReg, getKillRegState(IsKill)) // sdata
        .addReg(MFI->getScratchRSrcReg())        // sbase
        .addReg(OffsetReg, RegState::Kill)       // soff
        .addImm(0)                               // glc
        .addMemOperand(MMO);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
        .addReg(SubReg, getKillRegState(IsKill))
        .addImm(Spill.Lane);

      // FIXME: Since this spills to another register instead of an actual
      // frame index, we should delete the frame index when all references to
      // it are fixed.
    } else {
      // XXX - Can a to-VGPR spill fail for some subregisters but not others?
      if (OnlyToVGPR)
        return false;

      // Spill SGPR to a frame index.
      // TODO: Should VI try to spill to VGPR and then spill to SMEM?
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      MachineInstrBuilder Mov
        = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addReg(SubReg, SubKillState);

      // There could be undef components of a spilled super register.
      // TODO: Can we detect this and skip the spill?
      if (NumSubRegs > 1) {
        // The last implicit use of the SuperReg carries the "Kill" flag.
        unsigned SuperKillState = 0;
        if (i + 1 == e)
          SuperKillState |= getKillRegState(IsKill);
        Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
      }

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
        .addReg(TmpReg, RegState::Kill)         // src
        .addFrameIndex(Index)                   // vaddr
        .addReg(MFI->getScratchRSrcReg())       // srsrc
        .addReg(MFI->getScratchWaveOffsetReg()) // soffset
        .addImm(i * 4)                          // offset
        .addMemOperand(MMO);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  MFI->addToSpilledSGPRs(NumSubRegs);
  return true;
}

bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
                                 int Index,
                                 RegScavenger *RS,
                                 bool OnlyToVGPR) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = MI->getDebugLoc();

  unsigned SuperReg = MI->getOperand(0).getReg();
  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned EltSize = 4;
  unsigned ScalarLoadOp;

  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarLoadOp) = getSpillEltSize(RC->getSize(), false);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  int64_t FrOffset = FrameInfo.getObjectOffset(Index);

  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      // FIXME: Size may be > 4 but extra bytes wasted.
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                   EltSize, MinAlign(Align, EltSize * i));

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getScratchWaveOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getScratchWaveOffsetReg());
      }

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(ScalarLoadOp), SubReg)
        .addReg(MFI->getScratchRSrcReg())  // sbase
        .addReg(OffsetReg, RegState::Kill) // soff
        .addImm(0)                         // glc
        .addMemOperand(MMO);

      if (NumSubRegs > 1)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
      auto MIB =
        BuildMI(*MBB, MI, DL,
                TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32), SubReg)
        .addReg(Spill.VGPR)
        .addImm(Spill.Lane);

      if (NumSubRegs > 1)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);
    } else {
      if (OnlyToVGPR)
        return false;

      // Restore SGPR from a stack slot.
      // FIXME: We should use S_LOAD_DWORD here for VI.
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      unsigned Align = FrameInfo.getObjectAlignment(Index);

      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);

      MachineMemOperand *MMO = MF->getMachineMemOperand(PtrInfo,
        MachineMemOperand::MOLoad, EltSize,
        MinAlign(Align, EltSize * i));

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg)
        .addFrameIndex(Index)                   // vaddr
        .addReg(MFI->getScratchRSrcReg())       // srsrc
        .addReg(MFI->getScratchWaveOffsetReg()) // soffset
        .addImm(i * 4)                          // offset
        .addMemOperand(MMO);

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
        .addReg(TmpReg, RegState::Kill);

      if (NumSubRegs > 1)
        MIB.addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  return true;
}

/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
/// a VGPR and the stack slot can be safely eliminated when all other users are
/// handled.
bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
  MachineBasicBlock::iterator MI,
  int FI,
  RegScavenger *RS) const {
  switch (MI->getOpcode()) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE:
    return spillSGPR(MI, FI, RS, true);
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
    return restoreSGPR(MI, FI, RS, true);
  default:
    llvm_unreachable("not an SGPR spill instruction");
  }
}

void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
    spillSGPR(MI, Index, RS);
    break;
  }
926 // SGPR register restore
927 case AMDGPU::SI_SPILL_S512_RESTORE:
928 case AMDGPU::SI_SPILL_S256_RESTORE:
929 case AMDGPU::SI_SPILL_S128_RESTORE:
930 case AMDGPU::SI_SPILL_S64_RESTORE:
931 case AMDGPU::SI_SPILL_S32_RESTORE: {
932 restoreSGPR(MI, Index, RS);

  // VGPR register spill
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE: {
    const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata);
    buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
          Index,
          VData->getReg(), VData->isKill(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
          *MI->memoperands_begin(),
          RS);
    MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
    MI->eraseFromParent();
    break;
  }

  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE: {
    const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata);

    buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
          Index,
          VData->getReg(), VData->isKill(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
          *MI->memoperands_begin(),
          RS);
    MI->eraseFromParent();
    break;
  }

  default: {
    if (TII->isMUBUF(*MI)) {
      // Disable offen so we don't need a 0 vgpr base.
      assert(static_cast<int>(FIOperandNum) ==
             AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                        AMDGPU::OpName::vaddr));

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      int64_t OldImm
        = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
      int64_t NewOffset = OldImm + Offset;

      if (isUInt<12>(NewOffset) &&
          buildMUBUFOffsetLoadStore(TII, FrameInfo, MI, Index, NewOffset)) {
        MI->eraseFromParent();
        break;
      }
    }

    int64_t Offset = FrameInfo.getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(),
              TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false, false, true);
    }
  }
  }
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::SCC_CLASSRegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  switch (RC->getSize()) {
  case 0: return false;
  case 1: return false;
  case 4:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 8:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 12:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 16:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 32:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
  const TargetRegisterClass *SRC) const {
  switch (SRC->getSize()) {
  case 4:
    return &AMDGPU::VGPR_32RegClass;
  case 8:
    return &AMDGPU::VReg_64RegClass;
  case 12:
    return &AMDGPU::VReg_96RegClass;
  case 16:
    return &AMDGPU::VReg_128RegClass;
  case 32:
    return &AMDGPU::VReg_256RegClass;
  case 64:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
  const TargetRegisterClass *VRC) const {
  switch (VRC->getSize()) {
  case 4:
    return &AMDGPU::SGPR_32RegClass;
  case 8:
    return &AMDGPU::SReg_64RegClass;
  case 16:
    return &AMDGPU::SReg_128RegClass;
  case 32:
    return &AMDGPU::SReg_256RegClass;
  case 64:
    return &AMDGPU::SReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
  const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  LaneBitmask::Type Mask = getSubRegIndexLaneMask(SubIdx).getAsInteger();
  unsigned Count = countPopulation(Mask);
  if (isSGPRClass(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::SGPR_32RegClass;
    case 2:
      return &AMDGPU::SReg_64RegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else {
    switch (Count) {
    case 1:
      return &AMDGPU::VGPR_32RegClass;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    case 8:
      return &AMDGPU::VReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  }
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want to
  // stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so we
  // only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  //  => vreg3 = COPY vreg0
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

// FIXME: Most of these are flexible with HSA and we don't need to reserve them
// as input registers if unused. Whether the dispatch ptr is necessary should be
// easy to detect from used intrinsics. Scratch setup is harder to know.
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  switch (Value) {
  case SIRegisterInfo::WORKGROUP_ID_X:
    assert(MFI->hasWorkGroupIDX());
    return MFI->WorkGroupIDXSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Y:
    assert(MFI->hasWorkGroupIDY());
    return MFI->WorkGroupIDYSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Z:
    assert(MFI->hasWorkGroupIDZ());
    return MFI->WorkGroupIDZSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return MFI->PrivateSegmentWaveByteOffsetSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_BUFFER:
    if (ST.isAmdCodeObjectV2(MF)) {
      assert(MFI->hasPrivateSegmentBuffer());
      return MFI->PrivateSegmentBufferUserSGPR;
    }
    assert(MFI->hasPrivateMemoryInputPtr());
    return MFI->PrivateMemoryPtrUserSGPR;
  case SIRegisterInfo::KERNARG_SEGMENT_PTR:
    assert(MFI->hasKernargSegmentPtr());
    return MFI->KernargSegmentPtrUserSGPR;
  case SIRegisterInfo::DISPATCH_ID:
    assert(MFI->hasDispatchID());
    return MFI->DispatchIDUserSGPR;
  case SIRegisterInfo::FLAT_SCRATCH_INIT:
    assert(MFI->hasFlatScratchInit());
    return MFI->FlatScratchInitUserSGPR;
  case SIRegisterInfo::DISPATCH_PTR:
    assert(MFI->hasDispatchPtr());
    return MFI->DispatchPtrUserSGPR;
  case SIRegisterInfo::QUEUE_PTR:
    assert(MFI->hasQueuePtr());
    return MFI->QueuePtrUserSGPR;
  case SIRegisterInfo::WORKITEM_ID_X:
    assert(MFI->hasWorkItemIDX());
    return AMDGPU::VGPR0;
  case SIRegisterInfo::WORKITEM_ID_Y:
    assert(MFI->hasWorkItemIDY());
    return AMDGPU::VGPR1;
  case SIRegisterInfo::WORKITEM_ID_Z:
    assert(MFI->hasWorkItemIDZ());
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned
SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                   const TargetRegisterClass *RC,
                                   const MachineFunction &MF) const {

  for (unsigned Reg : *RC)
    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}

ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
                                                   unsigned EltSize) const {
  if (EltSize == 4) {
    static const int16_t Sub0_15[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
    };

    static const int16_t Sub0_7[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    };

    static const int16_t Sub0_3[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    };

    static const int16_t Sub0_2[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2,
    };

    static const int16_t Sub0_1[] = {
      AMDGPU::sub0, AMDGPU::sub1,
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 32:
      return {};
    case 64:
      return makeArrayRef(Sub0_1);
    case 96:
      return makeArrayRef(Sub0_2);
    case 128:
      return makeArrayRef(Sub0_3);
    case 256:
      return makeArrayRef(Sub0_7);
    case 512:
      return makeArrayRef(Sub0_15);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  if (EltSize == 8) {
    static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15
    };

    static const int16_t Sub0_7_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7
    };

    static const int16_t Sub0_3_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 64:
      return {};
    case 128:
      return makeArrayRef(Sub0_3_64);
    case 256:
      return makeArrayRef(Sub0_7_64);
    case 512:
      return makeArrayRef(Sub0_15_64);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  assert(EltSize == 16 && "unhandled register spill split size");

  static const int16_t Sub0_15_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11,
    AMDGPU::sub12_sub13_sub14_sub15
  };

  static const int16_t Sub0_7_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7
  };

  switch (AMDGPU::getRegBitWidth(*RC->MC)) {
  case 128:
    return {};
  case 256:
    return makeArrayRef(Sub0_7_128);
  case 512:
    return makeArrayRef(Sub0_15_128);
  default:
    llvm_unreachable("unhandled register size");
  }
}

const TargetRegisterClass*
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
                                  unsigned Reg) const {
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return MRI.getRegClass(Reg);

  return getPhysRegClass(Reg);
}

bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  return hasVGPRs(getRegClassForReg(MRI, Reg));
}

bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SubReg,
                                    const TargetRegisterClass *DstRC,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *NewRC) const {
  unsigned SrcSize = SrcRC->getSize();
  unsigned DstSize = DstRC->getSize();
  unsigned NewSize = NewRC->getSize();

  // Do not increase size of registers beyond dword, we would need to allocate
  // adjacent registers and constrain regalloc more than needed.

  // Always allow dword coalescing.
  if (SrcSize <= 4 || DstSize <= 4)
    return true;

  return NewSize <= DstSize || NewSize <= SrcSize;
}

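// For example, a coalesce that would widen both sides (NewSize larger than
// both SrcSize and DstSize) is rejected above, since it would force the
// allocator to find a wider adjacent register tuple than either value needed.
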
unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                       *MF.getFunction());
  switch (RC->getID()) {
  default:
    return AMDGPURegisterInfo::getRegPressureLimit(RC, MF);
  case AMDGPU::VGPR_32RegClassID:
    return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
  case AMDGPU::SGPR_32RegClassID:
    return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
  }
}

unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  if (Idx == getVGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  if (Idx == getSGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  return AMDGPURegisterInfo::getRegPressureSetLimit(MF, Idx);
}

const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
  static const int Empty[] = { -1 };

  if (hasRegUnit(AMDGPU::M0, RegUnit))
    return Empty;
  return AMDGPURegisterInfo::getRegUnitPressureSets(RegUnit);
}