//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

15 #include "SIRegisterInfo.h"
16 #include "SIInstrInfo.h"
17 #include "SIMachineFunctionInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/RegisterScavenging.h"
22 #include "llvm/IR/Function.h"
23 #include "llvm/IR/LLVMContext.h"
static bool hasPressureSet(const int *PSets, unsigned PSetID) {
  for (unsigned i = 0; PSets[i] != -1; ++i) {
    if (PSets[i] == (int)PSetID)
      return true;
  }
  return false;
}

void SIRegisterInfo::classifyPressureSet(unsigned PSetID, unsigned Reg,
                                         BitVector &PressureSets) const {
  for (MCRegUnitIterator U(Reg, this); U.isValid(); ++U) {
    const int *PSets = getRegUnitPressureSets(*U);
    if (hasPressureSet(PSets, PSetID)) {
      PressureSets.set(PSetID);
      break;
    }
  }
}

static cl::opt<bool> EnableSpillSGPRToSMEM(
  "amdgpu-spill-sgpr-to-smem",
  cl::desc("Use scalar stores to spill SGPRs if supported by subtarget"),
  cl::init(false));

static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling SGPRs to VGPRs"),
  cl::ReallyHidden,
  cl::init(true));

SIRegisterInfo::SIRegisterInfo(const SISubtarget &ST) :
  AMDGPURegisterInfo(),
  SGPRPressureSets(getNumRegPressureSets()),
  VGPRPressureSets(getNumRegPressureSets()),
  SpillSGPRToVGPR(false),
  SpillSGPRToSMEM(false) {
  if (EnableSpillSGPRToSMEM && ST.hasScalarStores())
    SpillSGPRToSMEM = true;
  else if (EnableSpillSGPRToVGPR)
    SpillSGPRToVGPR = true;

  unsigned NumRegPressureSets = getNumRegPressureSets();

  SGPRSetID = NumRegPressureSets;
  VGPRSetID = NumRegPressureSets;

  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);
    classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);
  }

  // Determine the number of reg units for each pressure set.
  std::vector<unsigned> PressureSetRegUnits(NumRegPressureSets, 0);
  for (unsigned i = 0, e = getNumRegUnits(); i != e; ++i) {
    const int *PSets = getRegUnitPressureSets(i);
    for (unsigned j = 0; PSets[j] != -1; ++j) {
      ++PressureSetRegUnits[PSets[j]];
    }
  }

  unsigned VGPRMax = 0, SGPRMax = 0;
  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    if (isVGPRPressureSet(i) && PressureSetRegUnits[i] > VGPRMax) {
      VGPRSetID = i;
      VGPRMax = PressureSetRegUnits[i];
      continue;
    }
    if (isSGPRPressureSet(i) && PressureSetRegUnits[i] > SGPRMax) {
      SGPRSetID = i;
      SGPRMax = PressureSetRegUnits[i];
    }
  }

  assert(SGPRSetID < NumRegPressureSets &&
         VGPRSetID < NumRegPressureSets);
}

void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  for (; R.isValid(); ++R)
    Reserved.set(*R);
}

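// The scratch resource descriptor needs a naturally aligned 4-SGPR tuple, so
// carve it out of the highest available 4-aligned SGPRs.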
unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
}

unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  unsigned RegCount = ST.getMaxNumSGPRs(MF);
  unsigned Reg;

  // Try to place it in a hole after PrivateSegmentBufferReg.
  if (RegCount & 3) {
    // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
    // alignment constraints, so we have a hole where we can put the wave
    // offset.
    Reg = RegCount - 1;
  } else {
    // We can put the segment buffer in (Idx - 4) ... (Idx - 1) and put the
    // wave offset before it.
    Reg = RegCount - 5;
  }
  return AMDGPU::SGPR_32RegClass.getRegister(Reg);
}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // M0 has to be reserved so that llvm accepts it as a live-in into a block.
  reserveRegisterTuples(Reserved, AMDGPU::M0);

  // Reserve the memory aperture registers.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);

  // Reserve Trap Handler registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::TBA);
  reserveRegisterTuples(Reserved, AMDGPU::TMA);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
  unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
    unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
    unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  return Reserved;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo().hasStackObjects();
}

bool
SIRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects();
}

bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
  const MachineFunction &MF) const {
  // m0 is needed for the scalar store offset. m0 is unallocatable, so we can't
  // create a virtual register for it during frame index elimination, so the
  // scavenger is directly needed.
  return MF.getFrameInfo().hasStackObjects() &&
         MF.getSubtarget<SISubtarget>().hasScalarStores() &&
         MF.getInfo<SIMachineFunctionInfo>()->hasSpilledSGPRs();
}

bool SIRegisterInfo::requiresVirtualBaseRegisters(
  const MachineFunction &) const {
  // There are no special dedicated stack or frame pointers.
  return true;
}

bool SIRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // This helps catch bugs as verifier errors.
  return true;
}

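// Returns the immediate offset operand of a MUBUF memory instruction.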
int64_t SIRegisterInfo::getMUBUFInstrOffset(const MachineInstr *MI) const {
  assert(SIInstrInfo::isMUBUF(*MI));

  int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::offset);
  return MI->getOperand(OffIdx).getImm();
}

int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                 int Idx) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return 0;

  assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::vaddr) &&
         "Should never see frame index on non-address operand");

  return getMUBUFInstrOffset(MI);
}

bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  if (!MI->mayLoadOrStore())
    return false;

  int64_t FullOffset = Offset + getMUBUFInstrOffset(MI);

  return !isUInt<12>(FullOffset);
}

void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  unsigned BaseReg,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

  if (Offset == 0) {
    BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
      .addFrameIndex(FrameIdx);
    return;
  }

  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  unsigned FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(Offset);
  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
    .addFrameIndex(FrameIdx);

  TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
    .addReg(OffsetReg, RegState::Kill)
    .addReg(FIReg);
}

void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  bool SeenFI = false;
  for (const MachineOperand &MO: MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");

  assert(TII->isMUBUF(MI));

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  assert(isUInt<12>(NewOffset) && "offset should be legal");

  FIOp->ChangeToRegister(BaseReg, false);
  OffsetOp->setImm(NewOffset);
}

bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                        unsigned BaseReg,
                                        int64_t Offset) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return false;

  int64_t NewOffset = Offset + getMUBUFInstrOffset(MI);

  return isUInt<12>(NewOffset);
}

const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
  const MachineFunction &MF, unsigned Kind) const {
  // This is inaccurate. It depends on the instruction and address space. The
  // only place where we should hit this is for dealing with frame indexes /
  // private accesses, so this is correct in that case.
  return &AMDGPU::VGPR_32RegClass;
}

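// Map a spill pseudo opcode to the number of 32-bit subregisters it covers.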
static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

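// Map an OFFEN MUBUF store opcode to its immediate-offset (OFFSET) form, or
// return -1 if there is none.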
static int getOffsetMUBUFStore(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  default:
    return -1;
  }
}

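// Map an OFFEN MUBUF load opcode to its immediate-offset (OFFSET) form, or
// return -1 if there is none.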
static int getOffsetMUBUFLoad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
  default:
    return -1;
  }
}

// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
// need to handle the case where an SGPR may need to be spilled while spilling.
static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
                                      MachineFrameInfo &MFI,
                                      MachineBasicBlock::iterator MI,
                                      int Index,
                                      int64_t Offset) {
  MachineBasicBlock *MBB = MI->getParent();
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = MI->mayStore();

  unsigned Opc = MI->getOpcode();
  int LoadStoreOp = IsStore ?
    getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
  if (LoadStoreOp == -1)
    return false;

  unsigned Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata)->getReg();

  BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
    .addReg(Reg, getDefRegState(!IsStore))
    .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
    .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
    .addImm(Offset)
    .addImm(0) // glc
    .addImm(0) // slc
    .addImm(0) // tfe
    .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  return true;
}

void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
                                         unsigned LoadStoreOp,
                                         int Index,
                                         unsigned ValueReg,
                                         bool IsKill,
                                         unsigned ScratchRsrcReg,
                                         unsigned ScratchOffsetReg,
                                         int64_t InstOffset,
                                         MachineMemOperand *MMO,
                                         RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  const MCInstrDesc &Desc = TII->get(LoadStoreOp);
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = Desc.mayStore();

  bool RanOutOfSGPRs = false;
  bool Scavenged = false;
  unsigned SOffset = ScratchOffsetReg;

  const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
  unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / 32;
  unsigned Size = NumSubRegs * 4;
  int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
  const int64_t OriginalImmOffset = Offset;

  unsigned Align = MFI.getObjectAlignment(Index);
  const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();

  if (!isUInt<12>(Offset + Size)) {
    SOffset = AMDGPU::NoRegister;

    // We don't have access to the register scavenger if this function is
    // called during PEI::scavengeFrameVirtualRegs().
    if (RS)
      SOffset = RS->FindUnusedReg(&AMDGPU::SGPR_32RegClass);

    if (SOffset == AMDGPU::NoRegister) {
      // There are no free SGPRs, and we are in the process of spilling VGPRs
      // too. Since we need a VGPR in order to spill SGPRs (this is true on
      // SI/CI, and on VI it is true until we implement spilling using scalar
      // stores), we have no way to free up an SGPR. Our solution here is to
      // add the offset directly to the ScratchOffset register, and then
      // subtract the offset after the spill to return ScratchOffset to its
      // original value.
      RanOutOfSGPRs = true;
      SOffset = ScratchOffsetReg;
    } else {
      Scavenged = true;
    }

    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
      .addReg(ScratchOffsetReg)
      .addImm(Offset);

    Offset = 0;
  }

  const unsigned EltSize = 4;

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
    unsigned SubReg = NumSubRegs == 1 ?
      ValueReg : getSubReg(ValueReg, getSubRegFromChannel(i));

    unsigned SOffsetRegState = 0;
    unsigned SrcDstRegState = getDefRegState(!IsStore);
    if (i + 1 == e) {
      SOffsetRegState |= getKillRegState(Scavenged);
      // The last implicit use carries the "Kill" flag.
      SrcDstRegState |= getKillRegState(IsKill);
    }

    MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
    MachineMemOperand *NewMMO
      = MF->getMachineMemOperand(PInfo, MMO->getFlags(),
                                 EltSize, MinAlign(Align, EltSize * i));

    auto MIB = BuildMI(*MBB, MI, DL, Desc)
      .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset, SOffsetRegState)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addMemOperand(NewMMO);

    if (NumSubRegs > 1)
      MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
  }

  if (RanOutOfSGPRs) {
    // Subtract the offset we added to the ScratchOffset register.
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
      .addReg(ScratchOffsetReg)
      .addImm(OriginalImmOffset);
  }
}

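// Pick the widest scalar buffer load/store that evenly divides the size of
// the register being spilled: 16, 8, or 4 bytes per element.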
static std::pair<unsigned, unsigned> getSpillEltSize(unsigned SuperRegSize,
                                                     bool Store) {
  if (SuperRegSize % 16 == 0) {
    return { 16, Store ? AMDGPU::S_BUFFER_STORE_DWORDX4_SGPR :
                         AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR };
  }

  if (SuperRegSize % 8 == 0) {
    return { 8, Store ? AMDGPU::S_BUFFER_STORE_DWORDX2_SGPR :
                        AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR };
  }

  return { 4, Store ? AMDGPU::S_BUFFER_STORE_DWORD_SGPR :
                      AMDGPU::S_BUFFER_LOAD_DWORD_SGPR };
}

bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
                               int Index,
                               RegScavenger *RS,
                               bool OnlyToVGPR) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  unsigned SuperReg = MI->getOperand(0).getReg();
  bool IsKill = MI->getOperand(0).isKill();
  const DebugLoc &DL = MI->getDebugLoc();

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();

  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned ScalarStoreOp;
  unsigned EltSize = 4;
  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarStoreOp) =
      getSpillEltSize(getRegSizeInBits(*RC) / 8, true);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      int64_t FrOffset = FrameInfo.getObjectOffset(Index);

      // The allocated memory size is really the wavefront size * the frame
      // index size. The widest register class is 64 bytes, so a 4-byte scratch
      // allocation is enough to spill this in a single stack object.
      //
      // FIXME: Frame size/offsets are computed earlier than this, so the extra
      // space is still unnecessarily allocated.

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));

      // SMEM instructions only support a single offset, so increment the wave
      // offset.

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg());
      }

      BuildMI(*MBB, MI, DL, TII->get(ScalarStoreOp))
        .addReg(SubReg, getKillRegState(IsKill)) // sdata
        .addReg(MFI->getScratchRSrcReg()) // sbase
        .addReg(OffsetReg, RegState::Kill) // soff
        .addImm(0) // glc
        .addMemOperand(MMO);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
        .addReg(SubReg, getKillRegState(IsKill))
        .addImm(Spill.Lane);

      // FIXME: Since this spills to another register instead of an actual
      // frame index, we should delete the frame index when all references to
      // it are fixed.
    } else {
      // XXX - Can a spill to VGPR fail for some subregisters but not others?
      if (OnlyToVGPR)
        return false;

      // Spill SGPR to a frame index.
      // TODO: Should VI try to spill to VGPR and then spill to SMEM?
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      MachineInstrBuilder Mov
        = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addReg(SubReg, SubKillState);

      // There could be undef components of a spilled super register.
      // TODO: Can we detect this and skip the spill?
      if (NumSubRegs > 1) {
        // The last implicit use of the SuperReg carries the "Kill" flag.
        unsigned SuperKillState = 0;
        if (i + 1 == e)
          SuperKillState |= getKillRegState(IsKill);
        Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
      }

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
        .addReg(TmpReg, RegState::Kill) // src
        .addFrameIndex(Index) // vaddr
        .addReg(MFI->getScratchRSrcReg()) // srsrc
        .addReg(MFI->getFrameOffsetReg()) // soffset
        .addImm(i * 4) // offset
        .addMemOperand(MMO);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  MFI->addToSpilledSGPRs(NumSubRegs);
  return true;
}

bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
                                 int Index,
                                 RegScavenger *RS,
                                 bool OnlyToVGPR) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = MI->getDebugLoc();

  unsigned SuperReg = MI->getOperand(0).getReg();
  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned EltSize = 4;
  unsigned ScalarLoadOp;

  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarLoadOp) =
      getSpillEltSize(getRegSizeInBits(*RC) / 8, false);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  int64_t FrOffset = FrameInfo.getObjectOffset(Index);

  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      // FIXME: Size may be > 4 but the extra bytes are wasted.
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                   EltSize, MinAlign(Align, EltSize * i));

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg());
      }

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(ScalarLoadOp), SubReg)
        .addReg(MFI->getScratchRSrcReg()) // sbase
        .addReg(OffsetReg, RegState::Kill) // soff
        .addImm(0) // glc
        .addMemOperand(MMO);

      if (NumSubRegs > 1)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
      auto MIB =
        BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
        .addReg(Spill.VGPR)
        .addImm(Spill.Lane);

      if (NumSubRegs > 1)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);
    } else {
      // Restore SGPR from a stack slot.
      // FIXME: We should use S_LOAD_DWORD here for VI.
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      unsigned Align = FrameInfo.getObjectAlignment(Index);

      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);

      MachineMemOperand *MMO = MF->getMachineMemOperand(PtrInfo,
        MachineMemOperand::MOLoad, EltSize,
        MinAlign(Align, EltSize * i));

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg)
        .addFrameIndex(Index) // vaddr
        .addReg(MFI->getScratchRSrcReg()) // srsrc
        .addReg(MFI->getFrameOffsetReg()) // soffset
        .addImm(i * 4) // offset
        .addMemOperand(MMO);

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
        .addReg(TmpReg, RegState::Kill);

      if (NumSubRegs > 1)
        MIB.addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  return true;
}

/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
/// a VGPR and the stack slot can be safely eliminated when all other users are
/// removed.
bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
  MachineBasicBlock::iterator MI,
  int FI,
  RegScavenger *RS) const {
  switch (MI->getOpcode()) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE:
    return spillSGPR(MI, FI, RS, true);
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
    return restoreSGPR(MI, FI, RS, true);
  default:
    llvm_unreachable("not an SGPR spill instruction");
  }
}

void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
    spillSGPR(MI, Index, RS);
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    restoreSGPR(MI, Index, RS);
    break;
  }

  // VGPR register spill
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE: {
    const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata);
    buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
          Index,
          VData->getReg(), VData->isKill(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
          *MI->memoperands_begin(),
          RS);
    MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
    MI->eraseFromParent();
    break;
  }

  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE: {
    const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata);

    buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
          Index,
          VData->getReg(), VData->isKill(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
          *MI->memoperands_begin(),
          RS);
    MI->eraseFromParent();
    break;
  }

  default: {
    if (TII->isMUBUF(*MI)) {
      // Disable offen so we don't need a 0 vgpr base.
      assert(static_cast<int>(FIOperandNum) ==
             AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                        AMDGPU::OpName::vaddr));

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      int64_t OldImm
        = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
      int64_t NewOffset = OldImm + Offset;

      if (isUInt<12>(NewOffset) &&
          buildMUBUFOffsetLoadStore(TII, FrameInfo, MI, Index, NewOffset)) {
        MI->eraseFromParent();
        break;
      }
    }

    int64_t Offset = FrameInfo.getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(),
              TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false, false, true);
    }
  }
  }
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::SCC_CLASSRegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size < 32)
    return false;
  switch (Size) {
  case 32:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 96:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 128:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 256:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 512:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
  const TargetRegisterClass *SRC) const {
  switch (getRegSizeInBits(*SRC)) {
  case 32:
    return &AMDGPU::VGPR_32RegClass;
  case 64:
    return &AMDGPU::VReg_64RegClass;
  case 96:
    return &AMDGPU::VReg_96RegClass;
  case 128:
    return &AMDGPU::VReg_128RegClass;
  case 256:
    return &AMDGPU::VReg_256RegClass;
  case 512:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
  const TargetRegisterClass *VRC) const {
  switch (getRegSizeInBits(*VRC)) {
  case 32:
    return &AMDGPU::SGPR_32RegClass;
  case 64:
    return &AMDGPU::SReg_64RegClass;
  case 128:
    return &AMDGPU::SReg_128RegClass;
  case 256:
    return &AMDGPU::SReg_256RegClass;
  case 512:
    return &AMDGPU::SReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
  const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  LaneBitmask::Type Mask = getSubRegIndexLaneMask(SubIdx).getAsInteger();
  unsigned Count = countPopulation(Mask);
  if (isSGPRClass(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::SGPR_32RegClass;
    case 2:
      return &AMDGPU::SReg_64RegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else {
    switch (Count) {
    case 1:
      return &AMDGPU::VGPR_32RegClass;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    case 8:
      return &AMDGPU::VReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  }
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want
  // to stop and rewrite on anything that looks like a subregister extract.
  // Operations mostly don't care about the super register class, so we only
  // want to stop on the most basic of copies between the same register class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  // => vreg3 = COPY vreg0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

// FIXME: Most of these are flexible with HSA and we don't need to reserve them
// as input registers if unused. Whether the dispatch ptr is necessary should be
// easy to detect from used intrinsics. Scratch setup is harder to know.
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  switch (Value) {
  case SIRegisterInfo::WORKGROUP_ID_X:
    assert(MFI->hasWorkGroupIDX());
    return MFI->WorkGroupIDXSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Y:
    assert(MFI->hasWorkGroupIDY());
    return MFI->WorkGroupIDYSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Z:
    assert(MFI->hasWorkGroupIDZ());
    return MFI->WorkGroupIDZSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return MFI->PrivateSegmentWaveByteOffsetSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_BUFFER:
    if (ST.isAmdCodeObjectV2(MF)) {
      assert(MFI->hasPrivateSegmentBuffer());
      return MFI->PrivateSegmentBufferUserSGPR;
    }
    assert(MFI->hasPrivateMemoryInputPtr());
    return MFI->PrivateMemoryPtrUserSGPR;
  case SIRegisterInfo::KERNARG_SEGMENT_PTR:
    assert(MFI->hasKernargSegmentPtr());
    return MFI->KernargSegmentPtrUserSGPR;
  case SIRegisterInfo::DISPATCH_ID:
    assert(MFI->hasDispatchID());
    return MFI->DispatchIDUserSGPR;
  case SIRegisterInfo::FLAT_SCRATCH_INIT:
    assert(MFI->hasFlatScratchInit());
    return MFI->FlatScratchInitUserSGPR;
  case SIRegisterInfo::DISPATCH_PTR:
    assert(MFI->hasDispatchPtr());
    return MFI->DispatchPtrUserSGPR;
  case SIRegisterInfo::QUEUE_PTR:
    assert(MFI->hasQueuePtr());
    return MFI->QueuePtrUserSGPR;
  case SIRegisterInfo::WORKITEM_ID_X:
    assert(MFI->hasWorkItemIDX());
    return AMDGPU::VGPR0;
  case SIRegisterInfo::WORKITEM_ID_Y:
    assert(MFI->hasWorkItemIDY());
    return AMDGPU::VGPR1;
  case SIRegisterInfo::WORKITEM_ID_Z:
    assert(MFI->hasWorkItemIDZ());
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned
SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                   const TargetRegisterClass *RC,
                                   const MachineFunction &MF) const {

  for (unsigned Reg : *RC)
    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}

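// Returns the subregister indices used to split a register of class RC into
// EltSize-byte pieces for spilling.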
ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
                                                   unsigned EltSize) const {
  if (EltSize == 4) {
    static const int16_t Sub0_15[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
    };

    static const int16_t Sub0_7[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    };

    static const int16_t Sub0_3[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    };

    static const int16_t Sub0_2[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2,
    };

    static const int16_t Sub0_1[] = {
      AMDGPU::sub0, AMDGPU::sub1,
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 32:
      return {};
    case 64:
      return makeArrayRef(Sub0_1);
    case 96:
      return makeArrayRef(Sub0_2);
    case 128:
      return makeArrayRef(Sub0_3);
    case 256:
      return makeArrayRef(Sub0_7);
    case 512:
      return makeArrayRef(Sub0_15);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  if (EltSize == 8) {
    static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15
    };

    static const int16_t Sub0_7_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7
    };

    static const int16_t Sub0_3_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 64:
      return {};
    case 128:
      return makeArrayRef(Sub0_3_64);
    case 256:
      return makeArrayRef(Sub0_7_64);
    case 512:
      return makeArrayRef(Sub0_15_64);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  assert(EltSize == 16 && "unhandled register spill split size");

  static const int16_t Sub0_15_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11,
    AMDGPU::sub12_sub13_sub14_sub15
  };

  static const int16_t Sub0_7_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7
  };

  switch (AMDGPU::getRegBitWidth(*RC->MC)) {
  case 128:
    return {};
  case 256:
    return makeArrayRef(Sub0_7_128);
  case 512:
    return makeArrayRef(Sub0_15_128);
  default:
    llvm_unreachable("unhandled register size");
  }
}

const TargetRegisterClass*
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
                                  unsigned Reg) const {
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return MRI.getRegClass(Reg);

  return getPhysRegClass(Reg);
}

bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  return hasVGPRs(getRegClassForReg(MRI, Reg));
}

bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SubReg,
                                    const TargetRegisterClass *DstRC,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *NewRC) const {
  unsigned SrcSize = getRegSizeInBits(*SrcRC);
  unsigned DstSize = getRegSizeInBits(*DstRC);
  unsigned NewSize = getRegSizeInBits(*NewRC);

  // Do not increase size of registers beyond dword, we would need to allocate
  // adjacent registers and constrain regalloc more than needed.

  // Always allow dword coalescing.
  if (SrcSize <= 32 || DstSize <= 32)
    return true;

  return NewSize <= DstSize || NewSize <= SrcSize;
}

unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                       *MF.getFunction());
  switch (RC->getID()) {
  default:
    return AMDGPURegisterInfo::getRegPressureLimit(RC, MF);
  case AMDGPU::VGPR_32RegClassID:
    return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
  case AMDGPU::SGPR_32RegClassID:
    return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
  }
}

unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  if (Idx == getVGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  if (Idx == getSGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  return AMDGPURegisterInfo::getRegPressureSetLimit(MF, Idx);
}

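// M0 is reserved and never allocatable, so report no pressure sets for its
// register unit.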
const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
  static const int Empty[] = { -1 };

  if (hasRegUnit(AMDGPU::M0, RegUnit))
    return Empty;
  return AMDGPURegisterInfo::getRegUnitPressureSets(RegUnit);
}