//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "SIRegisterInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;
static bool hasPressureSet(const int *PSets, unsigned PSetID) {
  for (unsigned i = 0; PSets[i] != -1; ++i) {
    if (PSets[i] == (int)PSetID)
      return true;
  }
  return false;
}

void SIRegisterInfo::classifyPressureSet(unsigned PSetID, unsigned Reg,
                                         BitVector &PressureSets) const {
  for (MCRegUnitIterator U(Reg, this); U.isValid(); ++U) {
    const int *PSets = getRegUnitPressureSets(*U);
    if (hasPressureSet(PSets, PSetID)) {
      PressureSets.set(PSetID);
      break;
    }
  }
}
static cl::opt<bool> EnableSpillSGPRToSMEM(
  "amdgpu-spill-sgpr-to-smem",
  cl::desc("Use scalar stores to spill SGPRs if supported by subtarget"),
  cl::ReallyHidden,
  cl::init(false));

static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling SGPRs to VGPRs"),
  cl::ReallyHidden,
  cl::init(true));
SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST) :
  SGPRPressureSets(getNumRegPressureSets()),
  VGPRPressureSets(getNumRegPressureSets()),
  SpillSGPRToVGPR(false),
  SpillSGPRToSMEM(false) {
  if (EnableSpillSGPRToSMEM && ST.hasScalarStores())
    SpillSGPRToSMEM = true;
  else if (EnableSpillSGPRToVGPR)
    SpillSGPRToVGPR = true;
  unsigned NumRegPressureSets = getNumRegPressureSets();

  SGPRSetID = NumRegPressureSets;
  VGPRSetID = NumRegPressureSets;

  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);
    classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);
  }

  // Determine the number of reg units for each pressure set.
  std::vector<unsigned> PressureSetRegUnits(NumRegPressureSets, 0);
  for (unsigned i = 0, e = getNumRegUnits(); i != e; ++i) {
    const int *PSets = getRegUnitPressureSets(i);
    for (unsigned j = 0; PSets[j] != -1; ++j) {
      ++PressureSetRegUnits[PSets[j]];
    }
  }

  unsigned VGPRMax = 0, SGPRMax = 0;
  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    if (isVGPRPressureSet(i) && PressureSetRegUnits[i] > VGPRMax) {
      VGPRSetID = i;
      VGPRMax = PressureSetRegUnits[i];
      continue;
    }
    if (isSGPRPressureSet(i) && PressureSetRegUnits[i] > SGPRMax) {
      SGPRSetID = i;
      SGPRMax = PressureSetRegUnits[i];
    }
  }

  assert(SGPRSetID < NumRegPressureSets &&
         VGPRSetID < NumRegPressureSets);
}
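// Note: after the loops above, SGPRSetID/VGPRSetID identify the pressure set
// with the largest number of register units among the sets that contain the
// register units of SGPR0/VGPR0 respectively; the SGPRPressureSets and
// VGPRPressureSets bit vectors record every set those units participate in.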
unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
}
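// Illustrative example (numbers made up, not from a real subtarget query): if
// the function may use up to 104 SGPRs, alignDown(104, 4) - 4 == 100, so the
// reserved private segment buffer is the aligned 128-bit tuple s[100:103].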
static unsigned findPrivateSegmentWaveByteOffsetRegIndex(unsigned RegCount) {
  // Try to place it in a hole after PrivateSegmentBufferReg.
  if (RegCount & 3) {
    // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
    // alignment constraints, so we have a hole where we can put the wave offset.
    return RegCount - 1;
  }

  // We can put the segment buffer in (Idx - 4) ... (Idx - 1) and put the
  // wave offset before it.
  return RegCount - 5;
}
unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned Reg = findPrivateSegmentWaveByteOffsetRegIndex(ST.getMaxNumSGPRs(MF));
  return AMDGPU::SGPR_32RegClass.getRegister(Reg);
}

unsigned SIRegisterInfo::reservedStackPtrOffsetReg(
  const MachineFunction &MF) const {
  return AMDGPU::SGPR32;
}
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // M0 has to be reserved so that llvm accepts it as a live-in into a block.
  reserveRegisterTuples(Reserved, AMDGPU::M0);

  // Reserve the memory aperture registers.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);

  // Reserve xnack_mask registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);

  // Reserve Trap Handler registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::TBA);
  reserveRegisterTuples(Reserved, AMDGPU::TMA);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
  unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
    unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
    unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  // We have to assume the SP is needed in case there are calls in the function,
  // which is detected after the function is lowered. If we aren't really going
  // to need SP, don't bother reserving it.
  unsigned StackPtrReg = MFI->getStackPtrOffsetReg();

  if (StackPtrReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, StackPtrReg);
    assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
  }

  unsigned FrameReg = MFI->getFrameOffsetReg();
  if (FrameReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, FrameReg);
    assert(!isSubRegister(ScratchRSrcReg, FrameReg));
  }

  return Reserved;
}
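// Note: everything from MaxNumSGPRs/MaxNumVGPRs upward is reserved above, so
// the allocator never touches registers beyond the per-function budget. For
// example (illustrative), with a VGPR budget of 24 the loop marks v24..v255
// reserved.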
bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
  if (Info->isEntryFunction()) {
    const MachineFrameInfo &MFI = Fn.getFrameInfo();
    return MFI.hasStackObjects() || MFI.hasCalls();
  }

  // May need scavenger for dealing with callee saved registers.
  return true;
}

bool SIRegisterInfo::requiresFrameIndexScavenging(
  const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MFI.hasStackObjects())
    return true;

  // May need to deal with callee saved registers.
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  return !Info->isEntryFunction();
}

bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
  const MachineFunction &MF) const {
  // m0 is needed for the scalar store offset. m0 is unallocatable, so we can't
  // create a virtual register for it during frame index elimination, so the
  // scavenger is directly needed.
  return MF.getFrameInfo().hasStackObjects() &&
         MF.getSubtarget<GCNSubtarget>().hasScalarStores() &&
         MF.getInfo<SIMachineFunctionInfo>()->hasSpilledSGPRs();
}

bool SIRegisterInfo::requiresVirtualBaseRegisters(
  const MachineFunction &) const {
  // There are no special dedicated stack or frame pointers.
  return true;
}

bool SIRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // This helps catch bugs as verifier errors.
  return true;
}
int64_t SIRegisterInfo::getMUBUFInstrOffset(const MachineInstr *MI) const {
  assert(SIInstrInfo::isMUBUF(*MI));

  int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::offset);
  return MI->getOperand(OffIdx).getImm();
}

int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                 int Idx) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return 0;

  assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::vaddr) &&
         "Should never see frame index on non-address operand");

  return getMUBUFInstrOffset(MI);
}

bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  if (!MI->mayLoadOrStore())
    return false;

  int64_t FullOffset = Offset + getMUBUFInstrOffset(MI);

  return !isUInt<12>(FullOffset);
}
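// Note: the MUBUF immediate offset field is an unsigned 12-bit value, so only
// byte offsets 0..4095 can be folded directly into the instruction; anything
// larger needs a materialized frame base register (see below).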
void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  unsigned BaseReg,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineFunction *MF = MBB->getParent();
  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

  if (Offset == 0) {
    BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
      .addFrameIndex(FrameIdx);
    return;
  }

  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  unsigned FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(Offset);
  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
    .addFrameIndex(FrameIdx);

  TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
    .addReg(OffsetReg, RegState::Kill)
    .addReg(FIReg);
}
void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  bool SeenFI = false;
  for (const MachineOperand &MO: MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");
  assert(TII->isMUBUF(MI));
  assert(TII->getNamedOperand(MI, AMDGPU::OpName::soffset)->getReg() ==
         MF->getInfo<SIMachineFunctionInfo>()->getFrameOffsetReg() &&
         "should only be seeing frame offset relative FrameIndex");

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  assert(isUInt<12>(NewOffset) && "offset should be legal");

  FIOp->ChangeToRegister(BaseReg, false);
  OffsetOp->setImm(NewOffset);
}
bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                        unsigned BaseReg,
                                        int64_t Offset) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return false;

  int64_t NewOffset = Offset + getMUBUFInstrOffset(MI);

  return isUInt<12>(NewOffset);
}

const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
  const MachineFunction &MF, unsigned Kind) const {
  // This is inaccurate. It depends on the instruction and address space. The
  // only place where we should hit this is for dealing with frame indexes /
  // private accesses, so this is correct in that case.
  return &AMDGPU::VGPR_32RegClass;
}
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}
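// Note: the returned count is simply the spilled register width divided by 32,
// e.g. SI_SPILL_V128_SAVE covers four 32-bit subregisters.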
static int getOffsetMUBUFStore(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
  default:
    return -1;
  }
}
static int getOffsetMUBUFLoad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
  case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
  default:
    return -1;
  }
}
// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
// need to handle the case where an SGPR may need to be spilled while spilling.
static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
                                      MachineFrameInfo &MFI,
                                      MachineBasicBlock::iterator MI,
                                      int Index,
                                      int64_t Offset) {
  MachineBasicBlock *MBB = MI->getParent();
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = MI->mayStore();

  unsigned Opc = MI->getOpcode();
  int LoadStoreOp = IsStore ?
    getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
  if (LoadStoreOp == -1)
    return false;

  const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
  MachineInstrBuilder NewMI =
      BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
          .add(*Reg)
          .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
          .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
          .addImm(Offset)
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // tfe
          .cloneMemRefs(*MI);

  const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata_in);
  if (VDataIn)
    NewMI.add(*VDataIn);
  return true;
}
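// Note: the helper above rewrites an OFFEN (VGPR-addressed) buffer access into
// its OFFSET form once the frame offset is a known immediate, e.g.
// (illustrative) BUFFER_LOAD_DWORD_OFFEN becomes BUFFER_LOAD_DWORD_OFFSET with
// the offset folded into the immediate field; it returns false for opcodes
// that have no OFFSET variant.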
void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
                                         unsigned LoadStoreOp,
                                         int Index,
                                         unsigned ValueReg,
                                         bool IsKill,
                                         unsigned ScratchRsrcReg,
                                         unsigned ScratchOffsetReg,
                                         int64_t InstOffset,
                                         MachineMemOperand *MMO,
                                         RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  const MCInstrDesc &Desc = TII->get(LoadStoreOp);
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = Desc.mayStore();

  bool Scavenged = false;
  unsigned SOffset = ScratchOffsetReg;

  const unsigned EltSize = 4;
  const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
  unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT);
  unsigned Size = NumSubRegs * EltSize;
  int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
  int64_t ScratchOffsetRegDelta = 0;

  unsigned Align = MFI.getObjectAlignment(Index);
  const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();

  assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset");

  if (!isUInt<12>(Offset + Size - EltSize)) {
    SOffset = AMDGPU::NoRegister;

    // We currently only support spilling VGPRs to EltSize boundaries, meaning
    // we can simplify the adjustment of Offset here to just scale with
    // WavefrontSize.
    Offset *= ST.getWavefrontSize();

    // We don't have access to the register scavenger if this function is called
    // during PEI::scavengeFrameVirtualRegs().
    if (RS)
      SOffset = RS->FindUnusedReg(&AMDGPU::SGPR_32RegClass);

    if (SOffset == AMDGPU::NoRegister) {
      // There are no free SGPRs, and we are in the process of spilling VGPRs
      // too. Since we need a VGPR in order to spill SGPRs (this is true on
      // SI/CI and on VI it is true until we implement spilling using scalar
      // stores), we have no way to free up an SGPR. Our solution here is to
      // add the offset directly to the ScratchOffset register, and then
      // subtract the offset after the spill to return ScratchOffset to its
      // original value.
      SOffset = ScratchOffsetReg;
      ScratchOffsetRegDelta = Offset;
    } else {
      Scavenged = true;
    }

    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
      .addReg(ScratchOffsetReg)
      .addImm(Offset);

    Offset = 0;
  }

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
    unsigned SubReg = NumSubRegs == 1 ?
      ValueReg : getSubReg(ValueReg, getSubRegFromChannel(i));

    unsigned SOffsetRegState = 0;
    unsigned SrcDstRegState = getDefRegState(!IsStore);
    if (i + 1 == e) {
      SOffsetRegState |= getKillRegState(Scavenged);
      // The last implicit use carries the "Kill" flag.
      SrcDstRegState |= getKillRegState(IsKill);
    }

    MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
    MachineMemOperand *NewMMO
      = MF->getMachineMemOperand(PInfo, MMO->getFlags(),
                                 EltSize, MinAlign(Align, EltSize * i));

    auto MIB = BuildMI(*MBB, MI, DL, Desc)
      .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset, SOffsetRegState)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addMemOperand(NewMMO);

    if (NumSubRegs > 1)
      MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
  }

  if (ScratchOffsetRegDelta != 0) {
    // Subtract the offset we added to the ScratchOffset register.
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
      .addReg(ScratchOffsetReg)
      .addImm(ScratchOffsetRegDelta);
  }
}
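// Note: when the combined offset does not fit in the 12-bit MUBUF immediate,
// the code above scales it by the wavefront size and folds it into an SGPR
// with S_ADD_U32, reusing ScratchOffsetReg and undoing the add afterwards if
// no SGPR could be scavenged. Illustrative numbers: a 4096-byte per-lane
// offset on a 64-lane wave adds 262144 to the scratch wave offset register.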
static std::pair<unsigned, unsigned> getSpillEltSize(unsigned SuperRegSize,
                                                     bool Store) {
  if (SuperRegSize % 16 == 0) {
    return { 16, Store ? AMDGPU::S_BUFFER_STORE_DWORDX4_SGPR :
                         AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR };
  }

  if (SuperRegSize % 8 == 0) {
    return { 8, Store ? AMDGPU::S_BUFFER_STORE_DWORDX2_SGPR :
                        AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR };
  }

  return { 4, Store ? AMDGPU::S_BUFFER_STORE_DWORD_SGPR :
                      AMDGPU::S_BUFFER_LOAD_DWORD_SGPR};
}
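// Note: SuperRegSize is in bytes. Illustrative mapping: a 512-bit (64-byte)
// tuple uses EltSize 16 with the DWORDX4 scalar buffer ops, a 64-bit (8-byte)
// pair uses EltSize 8 with DWORDX2, and a single 32-bit register falls back to
// EltSize 4 with the DWORD forms.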
bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
                               int Index,
                               RegScavenger *RS,
                               bool OnlyToVGPR) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  DenseSet<unsigned> SGPRSpillVGPRDefinedSet;

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  unsigned SuperReg = MI->getOperand(0).getReg();
  bool IsKill = MI->getOperand(0).isKill();
  const DebugLoc &DL = MI->getDebugLoc();

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();

  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
                         SuperReg != MFI->getFrameOffsetReg() &&
                         SuperReg != MFI->getScratchWaveOffsetReg()));

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned ScalarStoreOp;
  unsigned EltSize = 4;
  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarStoreOp) =
          getSpillEltSize(getRegSizeInBits(*RC) / 8, true);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      int64_t FrOffset = FrameInfo.getObjectOffset(Index);

      // The allocated memory size is really the wavefront size * the frame
      // index size. The widest register class is 64 bytes, so a 4-byte scratch
      // allocation is enough to spill this in a single stack object.
      //
      // FIXME: Frame size/offsets are computed earlier than this, so the extra
      // space is still unnecessarily allocated.

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));

      // SMEM instructions only support a single offset, so increment the wave
      // offset.

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg());
      }

      BuildMI(*MBB, MI, DL, TII->get(ScalarStoreOp))
        .addReg(SubReg, getKillRegState(IsKill)) // sdata
        .addReg(MFI->getScratchRSrcReg())        // sbase
        .addReg(OffsetReg, RegState::Kill)       // soff
        .addImm(0)                               // glc
        .addMemOperand(MMO);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];

      // During SGPR spilling to VGPR, determine if the VGPR is defined. The
      // only circumstance in which we say it is undefined is when it is the
      // first spill to this VGPR in the first basic block.
      bool VGPRDefined = true;
      if (MBB == &MF->front())
        VGPRDefined = !SGPRSpillVGPRDefinedSet.insert(Spill.VGPR).second;

      // Mark the "old value of vgpr" input undef only if this is the first sgpr
      // spill to this specific vgpr in the first basic block.
      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
        .addReg(SubReg, getKillRegState(IsKill))
        .addImm(Spill.Lane)
        .addReg(Spill.VGPR, VGPRDefined ? 0 : RegState::Undef);

      // FIXME: Since this spills to another register instead of an actual
      // frame index, we should delete the frame index when all references to
      // it are fixed.
    } else {
      // XXX - Can the spill to VGPR fail for some subregisters but not others?

      // Spill SGPR to a frame index.
      // TODO: Should VI try to spill to VGPR and then spill to SMEM?
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      MachineInstrBuilder Mov
        = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addReg(SubReg, SubKillState);

      // There could be undef components of a spilled super register.
      // TODO: Can we detect this and skip the spill?
      if (NumSubRegs > 1) {
        // The last implicit use of the SuperReg carries the "Kill" flag.
        unsigned SuperKillState = 0;
        if (i + 1 == e)
          SuperKillState |= getKillRegState(IsKill);
        Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
      }

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
        .addReg(TmpReg, RegState::Kill)    // src
        .addFrameIndex(Index)              // vaddr
        .addReg(MFI->getScratchRSrcReg())  // srsrc
        .addReg(MFI->getFrameOffsetReg())  // soffset
        .addImm(i * 4)                     // offset
        .addMemOperand(MMO);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  MFI->addToSpilledSGPRs(NumSubRegs);
  return true;
}
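// Note: an SGPR-to-VGPR spill stores each 32-bit subregister into one lane of
// the reserved spill VGPR. Illustrative MIR (register and lane numbers made
// up, not taken from a real compilation):
//   $vgpr63 = V_WRITELANE_B32 $sgpr5, 3, $vgpr63   ; spill s5 into lane 3
// restoreSGPR() below emits the matching V_READLANE_B32.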
bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
                                 int Index,
                                 RegScavenger *RS,
                                 bool OnlyToVGPR) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = MI->getDebugLoc();

  unsigned SuperReg = MI->getOperand(0).getReg();
  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned EltSize = 4;
  unsigned ScalarLoadOp;

  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarLoadOp) =
          getSpillEltSize(getRegSizeInBits(*RC) / 8, false);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  int64_t FrOffset = FrameInfo.getObjectOffset(Index);

  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    unsigned SubReg = NumSubRegs == 1 ?
      SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      // FIXME: Size may be > 4 but extra bytes wasted.
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                   EltSize, MinAlign(Align, EltSize * i));

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg())
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(MFI->getFrameOffsetReg());
      }

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(ScalarLoadOp), SubReg)
        .addReg(MFI->getScratchRSrcReg())  // sbase
        .addReg(OffsetReg, RegState::Kill) // soff
        .addImm(0)                         // glc
        .addMemOperand(MMO);

      if (NumSubRegs > 1 && i == 0)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
        .addReg(Spill.VGPR)
        .addImm(Spill.Lane);

      if (NumSubRegs > 1 && i == 0)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);
    } else {
      // Restore SGPR from a stack slot.
      // FIXME: We should use S_LOAD_DWORD here for VI.
      unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      unsigned Align = FrameInfo.getObjectAlignment(Index);

      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);

      MachineMemOperand *MMO = MF->getMachineMemOperand(PtrInfo,
        MachineMemOperand::MOLoad, EltSize,
        MinAlign(Align, EltSize * i));

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg)
        .addFrameIndex(Index)              // vaddr
        .addReg(MFI->getScratchRSrcReg())  // srsrc
        .addReg(MFI->getFrameOffsetReg())  // soffset
        .addImm(i * 4)                     // offset
        .addMemOperand(MMO);

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
        .addReg(TmpReg, RegState::Kill);

      if (NumSubRegs > 1)
        MIB.addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  return true;
}
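// Note: when no spill VGPR lane is available, the restore path above goes
// through a scratch load into a temporary VGPR followed by
// V_READFIRSTLANE_B32, since there is no direct memory-to-SGPR path on the
// MUBUF route. Illustrative sequence (made-up registers):
//   $vgpr0 = SI_SPILL_V32_RESTORE %stack.0, ...
//   $sgpr4 = V_READFIRSTLANE_B32 killed $vgpr0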
/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
/// a VGPR and the stack slot can be safely eliminated when all other users are
/// handled.
bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
  MachineBasicBlock::iterator MI,
  int FI,
  RegScavenger *RS) const {
  switch (MI->getOpcode()) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE:
    return spillSGPR(MI, FI, RS, true);
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
    return restoreSGPR(MI, FI, RS, true);
  default:
    llvm_unreachable("not an SGPR spill instruction");
  }
}
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                        int SPAdj, unsigned FIOperandNum,
                                        RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      spillSGPR(MI, Index, RS);
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      restoreSGPR(MI, Index, RS);
      break;
    }

    // VGPR register spill
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE: {
      const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                         AMDGPU::OpName::vdata);
      buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            Index,
            VData->getReg(), VData->isKill(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
            *MI->memoperands_begin(),
            RS);
      MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
      MI->eraseFromParent();
      break;
    }
    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE: {
      const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                         AMDGPU::OpName::vdata);

      buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            Index,
            VData->getReg(), VData->isKill(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
            *MI->memoperands_begin(),
            RS);
      MI->eraseFromParent();
      break;
    }

    default: {
      const DebugLoc &DL = MI->getDebugLoc();
      bool IsMUBUF = TII->isMUBUF(*MI);

      if (!IsMUBUF &&
          MFI->getFrameOffsetReg() != MFI->getScratchWaveOffsetReg()) {
        // Convert to an absolute stack address by finding the offset from the
        // scratch wave base and scaling by the wave size.
        //
        // In an entry function/kernel the stack address is already the
        // absolute address relative to the scratch wave offset.

        unsigned DiffReg
          = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

        bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
        unsigned ResultReg = IsCopy ?
          MI->getOperand(0).getReg() :
          MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), DiffReg)
          .addReg(MFI->getFrameOffsetReg())
          .addReg(MFI->getScratchWaveOffsetReg());

        int64_t Offset = FrameInfo.getObjectOffset(Index);
        if (Offset == 0) {
          // XXX - This never happens because of emergency scavenging slot at 0?
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
            .addImm(Log2_32(ST.getWavefrontSize()))
            .addReg(DiffReg);
        } else {
          unsigned ScaledReg
            = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ScaledReg)
            .addImm(Log2_32(ST.getWavefrontSize()))
            .addReg(DiffReg, RegState::Kill);

          // TODO: Fold if use instruction is another add of a constant.
          if (AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
            TII->getAddNoCarry(*MBB, MI, DL, ResultReg)
              .addImm(Offset)
              .addReg(ScaledReg, RegState::Kill);
          } else {
            unsigned ConstOffsetReg
              = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
              .addImm(Offset);
            TII->getAddNoCarry(*MBB, MI, DL, ResultReg)
              .addReg(ConstOffsetReg, RegState::Kill)
              .addReg(ScaledReg, RegState::Kill);
          }
        }

        // Don't introduce an extra copy if we're just materializing in a mov.
        if (IsCopy)
          MI->eraseFromParent();
        else
          FIOp.ChangeToRegister(ResultReg, false, false, true);
        return;
      }

      if (IsMUBUF) {
        // Disable offen so we don't need a 0 vgpr base.
        assert(static_cast<int>(FIOperandNum) ==
               AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::vaddr));

        assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg()
               == MFI->getFrameOffsetReg());

        int64_t Offset = FrameInfo.getObjectOffset(Index);
        int64_t OldImm
          = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
        int64_t NewOffset = OldImm + Offset;

        if (isUInt<12>(NewOffset) &&
            buildMUBUFOffsetLoadStore(TII, FrameInfo, MI, Index, NewOffset)) {
          MI->eraseFromParent();
          return;
        }
      }

      // If the offset is simply too big, don't convert to a scratch wave offset
      // relative index.

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
          .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}
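// Note: for non-entry functions the frame index is rewritten above as
//   lane address = (FrameOffsetReg - ScratchWaveOffsetReg) >> log2(wave size)
//                  + ObjectOffset
// via S_SUB_U32, V_LSHRREV_B32 and an add; MUBUF users and entry functions can
// instead fold the object offset straight into the immediate offset field.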
StringRef SIRegisterInfo::getRegAsmName(unsigned Reg) const {
  #define AMDGPU_REG_ASM_NAMES
  #include "AMDGPURegAsmNames.inc.cpp"

  #define REG_RANGE(BeginReg, EndReg, RegTable)            \
    if (Reg >= BeginReg && Reg <= EndReg) {                \
      unsigned Index = Reg - BeginReg;                     \
      assert(Index < array_lengthof(RegTable));            \
      return RegTable[Index];                              \
    }

  REG_RANGE(AMDGPU::VGPR0, AMDGPU::VGPR255, VGPR32RegNames);
  REG_RANGE(AMDGPU::SGPR0, AMDGPU::SGPR103, SGPR32RegNames);
  REG_RANGE(AMDGPU::VGPR0_VGPR1, AMDGPU::VGPR254_VGPR255, VGPR64RegNames);
  REG_RANGE(AMDGPU::SGPR0_SGPR1, AMDGPU::SGPR102_SGPR103, SGPR64RegNames);
  REG_RANGE(AMDGPU::VGPR0_VGPR1_VGPR2, AMDGPU::VGPR253_VGPR254_VGPR255,
            VGPR96RegNames);

  REG_RANGE(AMDGPU::VGPR0_VGPR1_VGPR2_VGPR3,
            AMDGPU::VGPR252_VGPR253_VGPR254_VGPR255,
            VGPR128RegNames);
  REG_RANGE(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3,
            AMDGPU::SGPR100_SGPR101_SGPR102_SGPR103,
            SGPR128RegNames);

  REG_RANGE(AMDGPU::VGPR0_VGPR1_VGPR2_VGPR3_VGPR4_VGPR5_VGPR6_VGPR7,
            AMDGPU::VGPR248_VGPR249_VGPR250_VGPR251_VGPR252_VGPR253_VGPR254_VGPR255,
            VGPR256RegNames);

  REG_RANGE(
    AMDGPU::VGPR0_VGPR1_VGPR2_VGPR3_VGPR4_VGPR5_VGPR6_VGPR7_VGPR8_VGPR9_VGPR10_VGPR11_VGPR12_VGPR13_VGPR14_VGPR15,
    AMDGPU::VGPR240_VGPR241_VGPR242_VGPR243_VGPR244_VGPR245_VGPR246_VGPR247_VGPR248_VGPR249_VGPR250_VGPR251_VGPR252_VGPR253_VGPR254_VGPR255,
    VGPR512RegNames);

  REG_RANGE(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7,
            AMDGPU::SGPR96_SGPR97_SGPR98_SGPR99_SGPR100_SGPR101_SGPR102_SGPR103,
            SGPR256RegNames);

  REG_RANGE(
    AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3_SGPR4_SGPR5_SGPR6_SGPR7_SGPR8_SGPR9_SGPR10_SGPR11_SGPR12_SGPR13_SGPR14_SGPR15,
    AMDGPU::SGPR88_SGPR89_SGPR90_SGPR91_SGPR92_SGPR93_SGPR94_SGPR95_SGPR96_SGPR97_SGPR98_SGPR99_SGPR100_SGPR101_SGPR102_SGPR103,
    SGPR512RegNames);

#undef REG_RANGE

  // FIXME: Rename flat_scr so we don't need to special case this.
  switch (Reg) {
  case AMDGPU::FLAT_SCR:
    return "flat_scratch";
  case AMDGPU::FLAT_SCR_LO:
    return "flat_scratch_lo";
  case AMDGPU::FLAT_SCR_HI:
    return "flat_scratch_hi";
  default:
    // For the special named registers the default is fine.
    return TargetRegisterInfo::getRegAsmName(Reg);
  }
}
// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::SCC_CLASSRegClass,
    &AMDGPU::Pseudo_SReg_32RegClass,
    &AMDGPU::Pseudo_SReg_128RegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}
// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size < 32)
    return false;
  switch (Size) {
  case 32:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 96:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 128:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 256:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 512:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (getRegSizeInBits(*SRC)) {
  case 32:
    return &AMDGPU::VGPR_32RegClass;
  case 64:
    return &AMDGPU::VReg_64RegClass;
  case 96:
    return &AMDGPU::VReg_96RegClass;
  case 128:
    return &AMDGPU::VReg_128RegClass;
  case 256:
    return &AMDGPU::VReg_256RegClass;
  case 512:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
                                         const TargetRegisterClass *VRC) const {
  switch (getRegSizeInBits(*VRC)) {
  case 32:
    return &AMDGPU::SGPR_32RegClass;
  case 64:
    return &AMDGPU::SReg_64RegClass;
  case 128:
    return &AMDGPU::SReg_128RegClass;
  case 256:
    return &AMDGPU::SReg_256RegClass;
  case 512:
    return &AMDGPU::SReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  unsigned Count = getSubRegIndexLaneMask(SubIdx).getNumLanes();
  if (isSGPRClass(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::SGPR_32RegClass;
    case 2:
      return &AMDGPU::SReg_64RegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else {
    switch (Count) {
    case 1:
      return &AMDGPU::VGPR_32RegClass;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    case 8:
      return &AMDGPU::VReg_256RegClass;
    case 16: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  }
}
bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want to
  // stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so we
  // only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // %0 = ...
  // %1 = ...
  // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
  // %3 = COPY %2, sub0
  //
  // We want to look through the COPY to find:
  // %3 = COPY %0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}
/// Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister.
unsigned
SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                   const TargetRegisterClass *RC,
                                   const MachineFunction &MF) const {

  for (unsigned Reg : *RC)
    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}
ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
                                                   unsigned EltSize) const {
  if (EltSize == 4) {
    static const int16_t Sub0_15[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
    };

    static const int16_t Sub0_7[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    };

    static const int16_t Sub0_3[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    };

    static const int16_t Sub0_2[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2,
    };

    static const int16_t Sub0_1[] = {
      AMDGPU::sub0, AMDGPU::sub1,
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 32:
      return {};
    case 64:
      return makeArrayRef(Sub0_1);
    case 96:
      return makeArrayRef(Sub0_2);
    case 128:
      return makeArrayRef(Sub0_3);
    case 256:
      return makeArrayRef(Sub0_7);
    case 512:
      return makeArrayRef(Sub0_15);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  if (EltSize == 8) {
    static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15
    };

    static const int16_t Sub0_7_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7
    };

    static const int16_t Sub0_3_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 64:
      return {};
    case 128:
      return makeArrayRef(Sub0_3_64);
    case 256:
      return makeArrayRef(Sub0_7_64);
    case 512:
      return makeArrayRef(Sub0_15_64);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  assert(EltSize == 16 && "unhandled register spill split size");

  static const int16_t Sub0_15_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11,
    AMDGPU::sub12_sub13_sub14_sub15
  };

  static const int16_t Sub0_7_128[] = {
    AMDGPU::sub0_sub1_sub2_sub3,
    AMDGPU::sub4_sub5_sub6_sub7
  };

  switch (AMDGPU::getRegBitWidth(*RC->MC)) {
  case 128:
    return {};
  case 256:
    return makeArrayRef(Sub0_7_128);
  case 512:
    return makeArrayRef(Sub0_15_128);
  default:
    llvm_unreachable("unhandled register size");
  }
}
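// Note: the split tables above map a spill element size (in bytes) to the
// subregister indices used per element, e.g. EltSize 4 on a 128-bit class
// yields {sub0, sub1, sub2, sub3}, while EltSize 8 on a 512-bit class yields
// the eight 64-bit pairs sub0_sub1 .. sub14_sub15.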
const TargetRegisterClass*
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
                                  unsigned Reg) const {
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return MRI.getRegClass(Reg);

  return getPhysRegClass(Reg);
}

bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  assert(RC && "Register class for the reg not found");
  return hasVGPRs(RC);
}
bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SubReg,
                                    const TargetRegisterClass *DstRC,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *NewRC,
                                    LiveIntervals &LIS) const {
  unsigned SrcSize = getRegSizeInBits(*SrcRC);
  unsigned DstSize = getRegSizeInBits(*DstRC);
  unsigned NewSize = getRegSizeInBits(*NewRC);

  // Do not increase size of registers beyond dword, we would need to allocate
  // adjacent registers and constrain regalloc more than needed.

  // Always allow dword coalescing.
  if (SrcSize <= 32 || DstSize <= 32)
    return true;

  return NewSize <= DstSize || NewSize <= SrcSize;
}
unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                       MF.getFunction());
  switch (RC->getID()) {
  default:
    return AMDGPURegisterInfo::getRegPressureLimit(RC, MF);
  case AMDGPU::VGPR_32RegClassID:
    return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
  case AMDGPU::SGPR_32RegClassID:
    return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
  }
}
unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  if (Idx == getVGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  if (Idx == getSGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  return AMDGPURegisterInfo::getRegPressureSetLimit(MF, Idx);
}

const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
  static const int Empty[] = { -1 };

  if (hasRegUnit(AMDGPU::M0, RegUnit))
    return Empty;
  return AMDGPURegisterInfo::getRegUnitPressureSets(RegUnit);
}
unsigned SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
  // Not a callee saved register.
  return AMDGPU::SGPR30_SGPR31;
}

const TargetRegisterClass *
SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
                                         const MachineRegisterInfo &MRI) const {
  unsigned Size = getRegSizeInBits(MO.getReg(), MRI);
  const RegisterBank *RB = MRI.getRegBankOrNull(MO.getReg());
  if (!RB)
    return nullptr;

  switch (Size) {
  case 32:
    return RB->getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VGPR_32RegClass :
                                                  &AMDGPU::SReg_32_XM0RegClass;
  case 64:
    return RB->getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_64RegClass :
                                                  &AMDGPU::SReg_64_XEXECRegClass;
  case 96:
    return RB->getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_96RegClass :
                                                  nullptr;
  case 128:
    return RB->getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_128RegClass :
                                                  &AMDGPU::SReg_128RegClass;
  default:
    llvm_unreachable("not implemented");
  }
}
// Find reaching register definition
MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
                                              MachineInstr &Use,
                                              MachineRegisterInfo &MRI,
                                              LiveIntervals *LIS) const {
  auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
  SlotIndex UseIdx = LIS->getInstructionIndex(Use);
  SlotIndex DefIdx;

  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    if (!LIS->hasInterval(Reg))
      return nullptr;
    LiveInterval &LI = LIS->getInterval(Reg);
    LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
                                  : MRI.getMaxLaneMaskForVReg(Reg);
    VNInfo *V = nullptr;
    if (LI.hasSubRanges()) {
      for (auto &S : LI.subranges()) {
        if ((S.LaneMask & SubLanes) == SubLanes) {
          V = S.getVNInfoAt(UseIdx);
          break;
        }
      }
    } else {
      V = LI.getVNInfoAt(UseIdx);
    }
    if (!V)
      return nullptr;
    DefIdx = V->def;
  } else {
    // Find last def.
    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units) {
      LiveRange &LR = LIS->getRegUnit(*Units);
      if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
        if (!DefIdx.isValid() ||
            MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
                          LIS->getInstructionFromIndex(V->def)))
          DefIdx = V->def;
      } else {
        return nullptr;
      }
    }
  }

  MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);

  if (!Def || !MDT.dominates(Def, &Use))
    return nullptr;

  assert(Def->modifiesRegister(Reg, this));

  return Def;
}