1 //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// SI implementation of the TargetRegisterInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "SIRegisterInfo.h"
16 #include "AMDGPURegisterBankInfo.h"
17 #include "GCNSubtarget.h"
18 #include "MCTargetDesc/AMDGPUInstPrinter.h"
19 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "llvm/CodeGen/LiveIntervals.h"
22 #include "llvm/CodeGen/MachineDominators.h"
23 #include "llvm/CodeGen/RegisterScavenging.h"
27 #define GET_REGINFO_TARGET_DESC
28 #include "AMDGPUGenRegisterInfo.inc"
30 static cl::opt<bool> EnableSpillSGPRToVGPR(
31 "amdgpu-spill-sgpr-to-vgpr",
cl::desc("Enable spilling SGPRs to VGPRs"),
cl::ReallyHidden,
cl::init(true));
36 std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
37 std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable;
39 // Map numbers of DWORDs to indexes in SubRegFromChannelTable.
40 // Valid indexes are shifted 1, such that a 0 mapping means unsupported.
41 // e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8,
42 // meaning index 7 in SubRegFromChannelTable.
43 static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = {
44 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9};
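// Worked example of the lookup in getSubRegFromChannel(): for Channel = 2 and
// NumRegs = 2, SubRegFromChannelTableWidthMap[2] == 2, so row 1 of
// SubRegFromChannelTable is consulted at column 2, which holds the 64-bit
// sub-register index starting at dword 2 (sub2_sub3).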
46 SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
47 : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
48 SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
50 assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
51 getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
52 (getSubRegIndexLaneMask(AMDGPU::lo16) |
53 getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
54 getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
55 "getNumCoveredRegs() will not work with generated subreg masks!");
57 RegPressureIgnoredUnits.resize(getNumRegUnits());
58 RegPressureIgnoredUnits.set(
59 *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this));
60 for (auto Reg : AMDGPU::VGPR_HI16RegClass)
61 RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
63 // HACK: Until this is fully tablegen'd.
64 static llvm::once_flag InitializeRegSplitPartsFlag;
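// RegSplitParts[N - 1][P] ends up holding the sub-register index that covers
// N dwords starting at part P of a register tuple (for example,
// RegSplitParts[1][1] is the 64-bit sub-register covering dwords 2-3). The
// lambda below derives this from the generated sub-register sizes and offsets.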
66 static auto InitializeRegSplitPartsOnce = [this]() {
67 for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
68 unsigned Size = getSubRegIdxSize(Idx);
71 std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
72 unsigned Pos = getSubRegIdxOffset(Idx);
77 unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
78 Vec.resize(MaxNumParts);
84 static llvm::once_flag InitializeSubRegFromChannelTableFlag;
86 static auto InitializeSubRegFromChannelTableOnce = [this]() {
87 for (auto &Row : SubRegFromChannelTable)
88 Row.fill(AMDGPU::NoSubRegister);
89 for (uint16_t Idx = 1; Idx < getNumSubRegIndices(); ++Idx) {
90 unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32;
91 unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32;
92 assert(Width < SubRegFromChannelTableWidthMap.size());
93 Width = SubRegFromChannelTableWidthMap[Width];
96 unsigned TableIdx = Width - 1;
97 assert(TableIdx < SubRegFromChannelTable.size());
98 assert(Offset < SubRegFromChannelTable[TableIdx].size());
99 SubRegFromChannelTable[TableIdx][Offset] = Idx;
103 llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
104 llvm::call_once(InitializeSubRegFromChannelTableFlag,
105 InitializeSubRegFromChannelTableOnce);
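// Reserve Reg together with every register that aliases it, in particular
// every wider tuple containing it, so that no part of a reserved register can
// be allocated.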
108 void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
109 MCRegister Reg) const {
110 MCRegAliasIterator R(Reg, this, true);
112 for (; R.isValid(); ++R)
116 // Forced to be here by one .inc
117 const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
118 const MachineFunction *MF) const {
119 CallingConv::ID CC = MF->getFunction().getCallingConv();
122 case CallingConv::Fast:
123 case CallingConv::Cold:
124 case CallingConv::AMDGPU_Gfx:
125 return CSR_AMDGPU_HighRegs_SaveList;
127 // Dummy to not crash RegisterClassInfo.
128 static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
129 return &NoCalleeSavedReg;
135 SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
139 const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
140 CallingConv::ID CC) const {
143 case CallingConv::Fast:
144 case CallingConv::Cold:
145 case CallingConv::AMDGPU_Gfx:
146 return CSR_AMDGPU_HighRegs_RegMask;
152 const uint32_t *SIRegisterInfo::getNoPreservedMask() const {
153 return CSR_AMDGPU_NoRegs_RegMask;
156 Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
157 const SIFrameLowering *TFI =
158 MF.getSubtarget<GCNSubtarget>().getFrameLowering();
159 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
160 // During ISel lowering we always reserve the stack pointer in entry
161 // functions, but never actually want to reference it when accessing our own
162 // frame. If we need a frame pointer we use it, but otherwise we can just use
163 // an immediate "0" which we represent by returning NoRegister.
164 if (FuncInfo->isEntryFunction()) {
165 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register();
167 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg()
168 : FuncInfo->getStackPtrOffsetReg();
171 bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
172 // When we need stack realignment, we can't reference off of the
173 // stack pointer, so we reserve a base pointer.
174 const MachineFrameInfo &MFI = MF.getFrameInfo();
175 return MFI.getNumFixedObjects() && needsStackRealignment(MF);
178 Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
180 const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const {
181 return CSR_AMDGPU_AllVGPRs_RegMask;
184 const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const {
185 return CSR_AMDGPU_AllAllocatableSRegs_RegMask;
188 unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
190 assert(NumRegs < SubRegFromChannelTableWidthMap.size());
191 unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs];
192 assert(NumRegIndex && "Not implemented");
193 assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size());
194 return SubRegFromChannelTable[NumRegIndex - 1][Channel];
197 MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
198 const MachineFunction &MF) const {
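// Pick the highest 4-aligned group of four SGPRs below this function's SGPR
// limit; the 128-bit tuple starting there is reserved to hold the scratch
// buffer resource descriptor.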
199 unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
200 MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
201 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
204 BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
205 BitVector Reserved(getNumRegs());
206 Reserved.set(AMDGPU::MODE);
// EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
// this seems likely to result in bugs, so mark them as reserved.
210 reserveRegisterTuples(Reserved, AMDGPU::EXEC);
211 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
// M0 has to be reserved so that llvm accepts it as a live-in to a block.
214 reserveRegisterTuples(Reserved, AMDGPU::M0);
216 // Reserve src_vccz, src_execz, src_scc.
217 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
218 reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
219 reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
221 // Reserve the memory aperture registers.
222 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
223 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
224 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
225 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
227 // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
228 reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
230 // Reserve xnack_mask registers - support is not implemented in Codegen.
231 reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
233 // Reserve lds_direct register - support is not implemented in Codegen.
234 reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
236 // Reserve Trap Handler registers - support is not implemented in Codegen.
237 reserveRegisterTuples(Reserved, AMDGPU::TBA);
238 reserveRegisterTuples(Reserved, AMDGPU::TMA);
239 reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
240 reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
241 reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
242 reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
243 reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
244 reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
245 reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
246 reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
248 // Reserve null register - it shall never be allocated
249 reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);
// Disallow vcc_hi allocation in wave32. It may be allocated, but doing so is
// likely to result in bugs.
if (isWave32) {
  Reserved.set(AMDGPU::VCC);
  Reserved.set(AMDGPU::VCC_HI);
}
258 unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
259 unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
260 for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
261 unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
262 reserveRegisterTuples(Reserved, Reg);
265 unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
266 unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
267 for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
268 unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
269 reserveRegisterTuples(Reserved, Reg);
270 Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
271 reserveRegisterTuples(Reserved, Reg);
274 for (auto Reg : AMDGPU::SReg_32RegClass) {
275 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
276 Register Low = getSubReg(Reg, AMDGPU::lo16);
277 // This is to prevent BB vcc liveness errors.
278 if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
282 for (auto Reg : AMDGPU::AGPR_32RegClass) {
283 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
// Reserve all remaining AGPRs if there are no instructions that can use them.
287 if (!ST.hasMAIInsts()) {
288 for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
289 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
290 reserveRegisterTuples(Reserved, Reg);
294 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
296 Register ScratchRSrcReg = MFI->getScratchRSrcReg();
297 if (ScratchRSrcReg != AMDGPU::NoRegister) {
// Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
// need to spill.
300 // TODO: May need to reserve a VGPR if doing LDS spilling.
301 reserveRegisterTuples(Reserved, ScratchRSrcReg);
304 // We have to assume the SP is needed in case there are calls in the function,
305 // which is detected after the function is lowered. If we aren't really going
306 // to need SP, don't bother reserving it.
307 MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
310 reserveRegisterTuples(Reserved, StackPtrReg);
311 assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
314 MCRegister FrameReg = MFI->getFrameOffsetReg();
316 reserveRegisterTuples(Reserved, FrameReg);
317 assert(!isSubRegister(ScratchRSrcReg, FrameReg));
320 if (hasBasePointer(MF)) {
321 MCRegister BasePtrReg = getBaseRegister();
322 reserveRegisterTuples(Reserved, BasePtrReg);
323 assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
326 for (MCRegister Reg : MFI->WWMReservedRegs) {
327 reserveRegisterTuples(Reserved, Reg);
330 // FIXME: Stop using reserved registers for this.
331 for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
332 reserveRegisterTuples(Reserved, Reg);
334 for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
335 reserveRegisterTuples(Reserved, Reg);
337 for (auto SSpill : MFI->getSGPRSpillVGPRs())
338 reserveRegisterTuples(Reserved, SSpill.VGPR);
343 bool SIRegisterInfo::canRealignStack(const MachineFunction &MF) const {
344 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
// On entry, the base address is 0, so it can't possibly need any more
// alignment.

// FIXME: Should be able to specify the entry frame alignment per calling
// convention instead.
if (Info->isEntryFunction())
  return false;

return TargetRegisterInfo::canRealignStack(MF);
356 bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
357 const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
358 if (Info->isEntryFunction()) {
359 const MachineFrameInfo &MFI = Fn.getFrameInfo();
360 return MFI.hasStackObjects() || MFI.hasCalls();
// May need a scavenger for dealing with callee saved registers.
return true;
367 bool SIRegisterInfo::requiresFrameIndexScavenging(
368 const MachineFunction &MF) const {
// Do not use frame virtual registers. They used to be used for SGPRs, but
// once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
// scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
// second scavenging pass.
376 bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
377 const MachineFunction &MF) const {
378 const MachineFrameInfo &MFI = MF.getFrameInfo();
379 return MFI.hasStackObjects();
382 bool SIRegisterInfo::requiresVirtualBaseRegisters(
383 const MachineFunction &) const {
384 // There are no special dedicated stack or frame pointers.
388 int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const {
389 assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI));
391 int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
392 AMDGPU::OpName::offset);
393 return MI->getOperand(OffIdx).getImm();
396 int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
398 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
401 assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
402 AMDGPU::OpName::vaddr) ||
403 (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
404 AMDGPU::OpName::saddr))) &&
405 "Should never see frame index on non-address operand");
407 return getScratchInstrOffset(MI);
410 bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
411 if (!MI->mayLoadOrStore())
414 int64_t FullOffset = Offset + getScratchInstrOffset(MI);
416 if (SIInstrInfo::isMUBUF(*MI))
417 return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset);
419 const SIInstrInfo *TII = ST.getInstrInfo();
420 return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS, true);
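// Materialize a base register holding the address of FrameIdx (plus Offset)
// for later frame-index users. With flat scratch enabled the base is built in
// an SGPR using scalar moves/adds; otherwise it is built in a VGPR with
// V_MOV_B32 and an add-no-carry sequence.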
423 Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
425 int64_t Offset) const {
426 MachineBasicBlock::iterator Ins = MBB->begin();
427 DebugLoc DL; // Defaults to "unknown"
429 if (Ins != MBB->end())
430 DL = Ins->getDebugLoc();
432 MachineFunction *MF = MBB->getParent();
433 const SIInstrInfo *TII = ST.getInstrInfo();
434 MachineRegisterInfo &MRI = MF->getRegInfo();
435 unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32
436 : AMDGPU::V_MOV_B32_e32;
438 Register BaseReg = MRI.createVirtualRegister(
439 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass
440 : &AMDGPU::VGPR_32RegClass);
443 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg)
444 .addFrameIndex(FrameIdx);
448 Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
450 Register FIReg = MRI.createVirtualRegister(
451 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass
452 : &AMDGPU::VGPR_32RegClass);
454 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
456 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg)
457 .addFrameIndex(FrameIdx);
if (ST.enableFlatScratch()) {
460 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_U32), BaseReg)
461 .addReg(OffsetReg, RegState::Kill)
466 TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
467 .addReg(OffsetReg, RegState::Kill)
469 .addImm(0); // clamp bit
474 void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
475 int64_t Offset) const {
476 const SIInstrInfo *TII = ST.getInstrInfo();
477 bool IsFlat = TII->isFLATScratch(MI);
480 // FIXME: Is it possible to be storing a frame index to itself?
482 for (const MachineOperand &MO: MI.operands()) {
485 llvm_unreachable("should not see multiple frame indices");
492 MachineOperand *FIOp =
493 TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr
494 : AMDGPU::OpName::vaddr);
496 MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
497 int64_t NewOffset = OffsetOp->getImm() + Offset;
499 assert(FIOp && FIOp->isFI() && "frame index must be address operand");
500 assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI));
503 assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, true) &&
504 "offset should be legal");
505 FIOp->ChangeToRegister(BaseReg, false);
506 OffsetOp->setImm(NewOffset);
511 MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
512 assert(SOffset->isImm() && SOffset->getImm() == 0);
515 assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
516 "offset should be legal");
518 FIOp->ChangeToRegister(BaseReg, false);
519 OffsetOp->setImm(NewOffset);
522 bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
524 int64_t Offset) const {
525 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
528 int64_t NewOffset = Offset + getScratchInstrOffset(MI);
530 if (SIInstrInfo::isMUBUF(*MI))
531 return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset);
533 const SIInstrInfo *TII = ST.getInstrInfo();
534 return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, true);
537 const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
538 const MachineFunction &MF, unsigned Kind) const {
539 // This is inaccurate. It depends on the instruction and address space. The
540 // only place where we should hit this is for dealing with frame indexes /
541 // private accesses, so this is correct in that case.
542 return &AMDGPU::VGPR_32RegClass;
545 static unsigned getNumSubRegsForSpillOp(unsigned Op) {
548 case AMDGPU::SI_SPILL_S1024_SAVE:
549 case AMDGPU::SI_SPILL_S1024_RESTORE:
550 case AMDGPU::SI_SPILL_V1024_SAVE:
551 case AMDGPU::SI_SPILL_V1024_RESTORE:
552 case AMDGPU::SI_SPILL_A1024_SAVE:
553 case AMDGPU::SI_SPILL_A1024_RESTORE:
555 case AMDGPU::SI_SPILL_S512_SAVE:
556 case AMDGPU::SI_SPILL_S512_RESTORE:
557 case AMDGPU::SI_SPILL_V512_SAVE:
558 case AMDGPU::SI_SPILL_V512_RESTORE:
559 case AMDGPU::SI_SPILL_A512_SAVE:
560 case AMDGPU::SI_SPILL_A512_RESTORE:
562 case AMDGPU::SI_SPILL_S256_SAVE:
563 case AMDGPU::SI_SPILL_S256_RESTORE:
564 case AMDGPU::SI_SPILL_V256_SAVE:
565 case AMDGPU::SI_SPILL_V256_RESTORE:
566 case AMDGPU::SI_SPILL_A256_SAVE:
567 case AMDGPU::SI_SPILL_A256_RESTORE:
569 case AMDGPU::SI_SPILL_S192_SAVE:
570 case AMDGPU::SI_SPILL_S192_RESTORE:
571 case AMDGPU::SI_SPILL_V192_SAVE:
572 case AMDGPU::SI_SPILL_V192_RESTORE:
573 case AMDGPU::SI_SPILL_A192_SAVE:
574 case AMDGPU::SI_SPILL_A192_RESTORE:
576 case AMDGPU::SI_SPILL_S160_SAVE:
577 case AMDGPU::SI_SPILL_S160_RESTORE:
578 case AMDGPU::SI_SPILL_V160_SAVE:
579 case AMDGPU::SI_SPILL_V160_RESTORE:
580 case AMDGPU::SI_SPILL_A160_SAVE:
581 case AMDGPU::SI_SPILL_A160_RESTORE:
583 case AMDGPU::SI_SPILL_S128_SAVE:
584 case AMDGPU::SI_SPILL_S128_RESTORE:
585 case AMDGPU::SI_SPILL_V128_SAVE:
586 case AMDGPU::SI_SPILL_V128_RESTORE:
587 case AMDGPU::SI_SPILL_A128_SAVE:
588 case AMDGPU::SI_SPILL_A128_RESTORE:
590 case AMDGPU::SI_SPILL_S96_SAVE:
591 case AMDGPU::SI_SPILL_S96_RESTORE:
592 case AMDGPU::SI_SPILL_V96_SAVE:
593 case AMDGPU::SI_SPILL_V96_RESTORE:
594 case AMDGPU::SI_SPILL_A96_SAVE:
595 case AMDGPU::SI_SPILL_A96_RESTORE:
597 case AMDGPU::SI_SPILL_S64_SAVE:
598 case AMDGPU::SI_SPILL_S64_RESTORE:
599 case AMDGPU::SI_SPILL_V64_SAVE:
600 case AMDGPU::SI_SPILL_V64_RESTORE:
601 case AMDGPU::SI_SPILL_A64_SAVE:
602 case AMDGPU::SI_SPILL_A64_RESTORE:
604 case AMDGPU::SI_SPILL_S32_SAVE:
605 case AMDGPU::SI_SPILL_S32_RESTORE:
606 case AMDGPU::SI_SPILL_V32_SAVE:
607 case AMDGPU::SI_SPILL_V32_RESTORE:
608 case AMDGPU::SI_SPILL_A32_SAVE:
609 case AMDGPU::SI_SPILL_A32_RESTORE:
611 default: llvm_unreachable("Invalid spill opcode");
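// Map an OFFEN MUBUF store opcode to its OFFSET form (no VGPR address), or
// return -1 if no such form exists; getOffsetMUBUFLoad below does the same
// for loads.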
615 static int getOffsetMUBUFStore(unsigned Opc) {
617 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
618 return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
619 case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
620 return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
621 case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
622 return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
623 case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
624 return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
625 case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
626 return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
627 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
628 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
629 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
630 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
636 static int getOffsetMUBUFLoad(unsigned Opc) {
638 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
639 return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
640 case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
641 return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
642 case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
643 return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
644 case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
645 return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
646 case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
647 return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
648 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
649 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
650 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
651 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
652 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
653 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
654 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
655 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
656 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
657 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
658 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
659 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
660 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
661 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
662 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
663 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
669 static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
670 MachineBasicBlock::iterator MI,
675 MachineBasicBlock *MBB = MI->getParent();
676 MachineFunction *MF = MI->getParent()->getParent();
677 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
678 const SIInstrInfo *TII = ST.getInstrInfo();
680 MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
682 if (Reg == AMDGPU::NoRegister)
683 return MachineInstrBuilder();
685 bool IsStore = MI->mayStore();
686 MachineRegisterInfo &MRI = MF->getRegInfo();
687 auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
689 unsigned Dst = IsStore ? Reg : ValueReg;
690 unsigned Src = IsStore ? ValueReg : Reg;
691 unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64
692 : AMDGPU::V_ACCVGPR_READ_B32_e64;
694 auto MIB = BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst)
695 .addReg(Src, getKillRegState(IsKill));
696 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
700 // This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
701 // need to handle the case where an SGPR may need to be spilled while spilling.
702 static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
703 MachineFrameInfo &MFI,
704 MachineBasicBlock::iterator MI,
707 const SIInstrInfo *TII = ST.getInstrInfo();
708 MachineBasicBlock *MBB = MI->getParent();
709 const DebugLoc &DL = MI->getDebugLoc();
710 bool IsStore = MI->mayStore();
712 unsigned Opc = MI->getOpcode();
713 int LoadStoreOp = IsStore ?
714 getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
715 if (LoadStoreOp == -1)
718 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
719 if (spillVGPRtoAGPR(ST, MI, Index, 0, Reg->getReg(), false).getInstr())
722 MachineInstrBuilder NewMI =
723 BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
725 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
726 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
735 const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
736 AMDGPU::OpName::vdata_in);
742 static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII,
743 unsigned LoadStoreOp,
745 bool IsStore = TII->get(LoadStoreOp).mayStore();
747 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0 &&
748 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::saddr) < 0;
752 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
753 : AMDGPU::SCRATCH_LOAD_DWORD_SADDR;
756 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR
757 : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR;
760 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR
761 : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR;
764 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR
765 : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR;
768 llvm_unreachable("Unexpected spill load/store size!");
772 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
777 void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
778 unsigned LoadStoreOp,
782 MCRegister ScratchOffsetReg,
784 MachineMemOperand *MMO,
785 RegScavenger *RS) const {
786 MachineBasicBlock *MBB = MI->getParent();
787 MachineFunction *MF = MI->getParent()->getParent();
788 const SIInstrInfo *TII = ST.getInstrInfo();
789 const MachineFrameInfo &MFI = MF->getFrameInfo();
790 const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
792 const MCInstrDesc *Desc = &TII->get(LoadStoreOp);
793 const DebugLoc &DL = MI->getDebugLoc();
794 bool IsStore = Desc->mayStore();
795 bool IsFlat = TII->isFLATScratch(LoadStoreOp);
797 bool Scavenged = false;
798 MCRegister SOffset = ScratchOffsetReg;
800 const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
801 const bool IsAGPR = hasAGPRs(RC);
802 const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8;
// Always use 4 byte operations for AGPRs because we need to scavenge
// a temporary VGPR.
806 unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u;
807 unsigned NumSubRegs = RegWidth / EltSize;
808 unsigned Size = NumSubRegs * EltSize;
809 unsigned RemSize = RegWidth - Size;
810 unsigned NumRemSubRegs = RemSize ? 1 : 0;
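// Example of this split: spilling a 20-byte (160-bit) non-AGPR value with
// flat scratch gives EltSize = 16, NumSubRegs = 1 and RemSize = 4, so one
// 16-byte access is emitted followed by a single 4-byte access for the
// remainder.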
811 int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
812 int64_t MaxOffset = Offset + Size + RemSize - EltSize;
813 int64_t ScratchOffsetRegDelta = 0;
815 if (IsFlat && EltSize > 4) {
816 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
817 Desc = &TII->get(LoadStoreOp);
820 Align Alignment = MFI.getObjectAlign(Index);
821 const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
823 assert((IsFlat || ((Offset % EltSize) == 0)) &&
824 "unexpected VGPR spill offset");
826 bool IsOffsetLegal = IsFlat
827 ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS, true)
828 : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset);
829 if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) {
830 SOffset = MCRegister();
// We currently only support spilling VGPRs to EltSize boundaries, meaning
// we can simplify the adjustment of Offset here to just scale with the
// wavefront size.
836 Offset *= ST.getWavefrontSize();
838 // We don't have access to the register scavenger if this function is called
839 // during PEI::scavengeFrameVirtualRegs().
841 SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
// There are no free SGPRs, and we are in the process of spilling VGPRs too.
// Since we need a VGPR in order to spill SGPRs (this is true on SI/CI, and on
// VI it remains true until we implement spilling using scalar stores), we
// have no way to free up an SGPR. Our solution here is to add the offset
// directly to the ScratchOffset or StackPtrOffset register, and then subtract
// the offset after the spill to return the register to its original value.
851 if (!ScratchOffsetReg)
852 ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
853 SOffset = ScratchOffsetReg;
854 ScratchOffsetRegDelta = Offset;
860 report_fatal_error("could not scavenge SGPR to spill in entry function");
862 if (ScratchOffsetReg == AMDGPU::NoRegister) {
863 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset)
866 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
867 .addReg(ScratchOffsetReg)
874 if (IsFlat && SOffset == AMDGPU::NoRegister) {
875 assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0
876 && "Unexpected vaddr for flat scratch with a FI operand");
878 assert(ST.hasFlatScratchSTMode());
879 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
880 Desc = &TII->get(LoadStoreOp);
885 for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e;
886 ++i, RegOffset += EltSize) {
887 if (i == NumSubRegs) {
889 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
891 Desc = &TII->get(LoadStoreOp);
893 unsigned NumRegs = EltSize / 4;
Register SubReg = e == 1
    ? ValueReg
    : Register(getSubReg(ValueReg,
                         getSubRegFromChannel(RegOffset / 4, NumRegs)));
899 unsigned SOffsetRegState = 0;
900 unsigned SrcDstRegState = getDefRegState(!IsStore);
902 SOffsetRegState |= getKillRegState(Scavenged);
903 // The last implicit use carries the "Kill" flag.
904 SrcDstRegState |= getKillRegState(IsKill);
907 // Make sure the whole register is defined if there are undef components by
908 // adding an implicit def of the super-reg on the first instruction.
909 bool NeedSuperRegDef = e > 1 && IsStore && i == 0;
910 bool NeedSuperRegImpOperand = e > 1;
912 unsigned Lane = RegOffset / 4;
913 unsigned LaneE = (RegOffset + EltSize) / 4;
914 for ( ; Lane != LaneE; ++Lane) {
915 bool IsSubReg = e > 1 || EltSize > 4;
916 Register Sub = IsSubReg
917 ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane)))
919 auto MIB = spillVGPRtoAGPR(ST, MI, Index, Lane, Sub, IsKill);
922 if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == 0)) {
923 MIB.addReg(ValueReg, RegState::ImplicitDefine);
924 NeedSuperRegDef = false;
926 if (IsSubReg || NeedSuperRegImpOperand) {
927 NeedSuperRegImpOperand = true;
928 unsigned State = SrcDstRegState;
929 if (Lane + 1 != LaneE)
930 State &= ~RegState::Kill;
931 MIB.addReg(ValueReg, RegState::Implicit | State);
935 if (Lane == LaneE) // Fully spilled into AGPRs.
// Offset in bytes from the beginning of ValueReg to the portion we still
// need to spill. It may differ from RegOffset if part of the current SubReg
// has already been spilled into AGPRs by the loop above.
941 unsigned RemRegOffset = Lane * 4;
942 unsigned RemEltSize = EltSize - (RemRegOffset - RegOffset);
943 if (RemEltSize != EltSize) { // Partially spilled to AGPRs
944 assert(IsFlat && EltSize > 4);
946 unsigned NumRegs = RemEltSize / 4;
947 SubReg = Register(getSubReg(ValueReg,
948 getSubRegFromChannel(RemRegOffset / 4, NumRegs)));
949 unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize);
950 Desc = &TII->get(Opc);
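// For an AGPR the value is not used directly as the data operand of the
// memory instruction built here; instead it is staged through a scavenged
// VGPR (TmpReg): V_ACCVGPR_READ copies it out before a store, and
// V_ACCVGPR_WRITE copies it back in after a load.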
953 unsigned FinalReg = SubReg;
956 assert(EltSize == 4);
959 assert(RS && "Needs to have RegScavenger to spill an AGPR!");
960 // FIXME: change to scavengeRegisterBackwards()
961 TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
962 RS->setRegUsed(TmpReg);
965 auto AccRead = BuildMI(*MBB, MI, DL,
966 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TmpReg)
967 .addReg(SubReg, getKillRegState(IsKill));
969 AccRead.addReg(ValueReg, RegState::ImplicitDefine);
970 AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse);
975 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RemRegOffset);
976 MachineMemOperand *NewMMO =
977 MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize,
978 commonAlignment(Alignment, RemRegOffset));
980 auto MIB = BuildMI(*MBB, MI, DL, *Desc)
982 getDefRegState(!IsStore) | getKillRegState(IsKill));
984 MIB.addReg(FuncInfo->getScratchRSrcReg());
986 if (SOffset == AMDGPU::NoRegister) {
990 MIB.addReg(SOffset, SOffsetRegState);
992 MIB.addImm(Offset + RemRegOffset)
995 .addImm(0); // tfe for MUBUF or dlc for FLAT
999 MIB.addMemOperand(NewMMO);
1001 if (!IsAGPR && NeedSuperRegDef)
1002 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1004 if (!IsStore && TmpReg != AMDGPU::NoRegister) {
1005 MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64),
1007 .addReg(TmpReg, RegState::Kill);
1008 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1011 if (NeedSuperRegImpOperand)
1012 MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
1015 if (ScratchOffsetRegDelta != 0) {
1016 // Subtract the offset we added to the ScratchOffset register.
1017 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
1019 .addImm(ScratchOffsetRegDelta);
1023 // Generate a VMEM access which loads or stores the VGPR containing an SGPR
1024 // spill such that all the lanes set in VGPRLanes are loaded or stored.
1025 // This generates exec mask manipulation and will use SGPRs available in MI
1026 // or VGPR lanes in the VGPR to save and restore the exec mask.
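// For example, saving a 64-bit SGPR pair uses two lanes of the VGPR:
// VGPRLanes is 0x3, EXEC is temporarily set to that mask, and lanes 0 and 1
// of the VGPR are stored to (or reloaded from) the stack slot.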
1027 void SIRegisterInfo::buildSGPRSpillLoadStore(MachineBasicBlock::iterator MI,
1028 int Index, int Offset,
1029 unsigned EltSize, Register VGPR,
1032 bool IsLoad) const {
1033 MachineBasicBlock *MBB = MI->getParent();
1034 MachineFunction *MF = MBB->getParent();
1035 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1036 const SIInstrInfo *TII = ST.getInstrInfo();
1038 Register SuperReg = MI->getOperand(0).getReg();
1039 const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
1040 ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
1041 unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
1042 unsigned FirstPart = Offset * 32;
1043 unsigned ExecLane = 0;
1045 bool IsKill = MI->getOperand(0).isKill();
1046 const DebugLoc &DL = MI->getDebugLoc();
1048 // Cannot handle load/store to EXEC
1049 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1050 SuperReg != AMDGPU::EXEC && "exec should never spill");
// On Wave32 only handle EXEC_LO.
// On Wave64 only update EXEC_HI if there is sufficient space for a copy.
1054 bool OnlyExecLo = isWave32 || NumSubRegs == 1 || SuperReg == AMDGPU::EXEC_HI;
1056 unsigned ExecMovOpc = OnlyExecLo ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
1057 Register ExecReg = OnlyExecLo ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1058 Register SavedExecReg;
1065 : Register(getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]));
1067 // If src/dst is an odd size it is possible subreg0 is not aligned.
1068 for (; ExecLane < (NumSubRegs - 1); ++ExecLane) {
1069 SavedExecReg = getMatchingSuperReg(
1070 getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]), AMDGPU::sub0,
1071 &AMDGPU::SReg_64_XEXECRegClass);
1076 assert(SavedExecReg);
1077 BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), SavedExecReg).addReg(ExecReg);
1080 BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), ExecReg).addImm(VGPRLanes);
1083 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1084 assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
1086 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
1088 : getFrameRegister(*MF);
1090 Align Alignment = FrameInfo.getObjectAlign(Index);
1091 MachinePointerInfo PtrInfo =
1092 MachinePointerInfo::getFixedStack(*MF, Index);
1093 MachineMemOperand *MMO = MF->getMachineMemOperand(
1094 PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore,
1095 EltSize, Alignment);
1098 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1099 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1100 buildSpillLoadStore(MI, Opc,
1104 Offset * EltSize, MMO,
1107 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1108 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1109 buildSpillLoadStore(MI, Opc, Index, VGPR,
1111 Offset * EltSize, MMO, RS);
1112 // This only ever adds one VGPR spill
1113 MFI->addToSpilledVGPRs(1);
1117 BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), ExecReg)
1118 .addReg(SavedExecReg, getKillRegState(IsLoad || IsKill));
1120 // Restore clobbered SGPRs
1122 // Nothing to do; register will be overwritten
1123 } else if (!IsKill) {
1124 // Restore SGPRs from appropriate VGPR lanes
1126 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32),
1127 getSubReg(SuperReg, SplitParts[FirstPart + ExecLane + 1]))
1129 .addImm(ExecLane + 1);
1131 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32),
1132 NumSubRegs == 1 ? SavedExecReg
1133 : Register(getSubReg(
1134 SuperReg, SplitParts[FirstPart + ExecLane])))
1135 .addReg(VGPR, RegState::Kill)
1140 bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
1143 bool OnlyToVGPR) const {
1144 MachineBasicBlock *MBB = MI->getParent();
1145 MachineFunction *MF = MBB->getParent();
1146 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1148 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
1149 = MFI->getSGPRToVGPRSpills(Index);
1150 bool SpillToVGPR = !VGPRSpills.empty();
1151 if (OnlyToVGPR && !SpillToVGPR)
1154 const SIInstrInfo *TII = ST.getInstrInfo();
1156 Register SuperReg = MI->getOperand(0).getReg();
1157 bool IsKill = MI->getOperand(0).isKill();
1158 const DebugLoc &DL = MI->getDebugLoc();
1160 assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
1161 SuperReg != MFI->getFrameOffsetReg()));
1163 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1164 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1165 SuperReg != AMDGPU::EXEC && "exec should never spill");
1167 unsigned EltSize = 4;
1168 const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
1170 ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
1171 unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
1174 for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
Register SubReg = NumSubRegs == 1
    ? SuperReg
    : Register(getSubReg(SuperReg, SplitParts[i]));
1178 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1180 bool UseKill = IsKill && i == NumSubRegs - 1;
1182 // Mark the "old value of vgpr" input undef only if this is the first sgpr
1183 // spill to this specific vgpr in the first basic block.
1185 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
1186 .addReg(SubReg, getKillRegState(UseKill))
1188 .addReg(Spill.VGPR);
1190 if (i == 0 && NumSubRegs > 1) {
1191 // We may be spilling a super-register which is only partially defined,
1192 // and need to ensure later spills think the value is defined.
1193 MIB.addReg(SuperReg, RegState::ImplicitDefine);
1197 MIB.addReg(SuperReg, getKillRegState(UseKill) | RegState::Implicit);
// FIXME: Since this spills to another register instead of an actual
// frame index, we should delete the frame index when all references to
// it are fixed.
1204 // Scavenged temporary VGPR to use. It must be scavenged once for any number
1205 // of spilled subregs.
1206 Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1207 RS->setRegUsed(TmpVGPR);
1209 // SubReg carries the "Kill" flag when SubReg == SuperReg.
1210 unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
1212 unsigned PerVGPR = 32;
1213 unsigned NumVGPRs = (NumSubRegs + (PerVGPR - 1)) / PerVGPR;
1214 int64_t VGPRLanes = (1LL << std::min(PerVGPR, NumSubRegs)) - 1LL;
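// A single temporary VGPR carries up to 32 SGPR lanes, so even a 1024-bit
// (32 dword) SGPR tuple fits in one VGPR; anything wider would simply take
// additional iterations of the loop below.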
1216 for (unsigned Offset = 0; Offset < NumVGPRs; ++Offset) {
1217 unsigned TmpVGPRFlags = RegState::Undef;
1219 // Write sub registers into the VGPR
1220 for (unsigned i = Offset * PerVGPR,
1221 e = std::min((Offset + 1) * PerVGPR, NumSubRegs);
Register SubReg = NumSubRegs == 1
    ? SuperReg
    : Register(getSubReg(SuperReg, SplitParts[i]));
1227 MachineInstrBuilder WriteLane =
1228 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), TmpVGPR)
1229 .addReg(SubReg, SubKillState)
1230 .addImm(i % PerVGPR)
1231 .addReg(TmpVGPR, TmpVGPRFlags);
1234 // There could be undef components of a spilled super register.
1235 // TODO: Can we detect this and skip the spill?
1236 if (NumSubRegs > 1) {
1237 // The last implicit use of the SuperReg carries the "Kill" flag.
1238 unsigned SuperKillState = 0;
1239 if (i + 1 == NumSubRegs)
1240 SuperKillState |= getKillRegState(IsKill);
1241 WriteLane.addReg(SuperReg, RegState::Implicit | SuperKillState);
1246 buildSGPRSpillLoadStore(MI, Index, Offset, EltSize, TmpVGPR, VGPRLanes,
1251 MI->eraseFromParent();
1252 MFI->addToSpilledSGPRs(NumSubRegs);
1256 bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
1259 bool OnlyToVGPR) const {
1260 MachineFunction *MF = MI->getParent()->getParent();
1261 MachineBasicBlock *MBB = MI->getParent();
1262 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1264 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
1265 = MFI->getSGPRToVGPRSpills(Index);
1266 bool SpillToVGPR = !VGPRSpills.empty();
1267 if (OnlyToVGPR && !SpillToVGPR)
1270 const SIInstrInfo *TII = ST.getInstrInfo();
1271 const DebugLoc &DL = MI->getDebugLoc();
1273 Register SuperReg = MI->getOperand(0).getReg();
1275 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1276 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1277 SuperReg != AMDGPU::EXEC && "exec should never spill");
1279 unsigned EltSize = 4;
1281 const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
1283 ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
1284 unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
1287 for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
Register SubReg = NumSubRegs == 1
    ? SuperReg
    : Register(getSubReg(SuperReg, SplitParts[i]));
1292 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1293 auto MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg)
1295 .addImm(Spill.Lane);
1296 if (NumSubRegs > 1 && i == 0)
1297 MIB.addReg(SuperReg, RegState::ImplicitDefine);
1300 Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1301 RS->setRegUsed(TmpVGPR);
1303 unsigned PerVGPR = 32;
1304 unsigned NumVGPRs = (NumSubRegs + (PerVGPR - 1)) / PerVGPR;
1305 int64_t VGPRLanes = (1LL << std::min(PerVGPR, NumSubRegs)) - 1LL;
1307 for (unsigned Offset = 0; Offset < NumVGPRs; ++Offset) {
1308 // Load in VGPR data
1309 buildSGPRSpillLoadStore(MI, Index, Offset, EltSize, TmpVGPR, VGPRLanes,
1313 for (unsigned i = Offset * PerVGPR,
1314 e = std::min((Offset + 1) * PerVGPR, NumSubRegs);
Register SubReg = NumSubRegs == 1
    ? SuperReg
    : Register(getSubReg(SuperReg, SplitParts[i]));
1320 bool LastSubReg = (i + 1 == e);
1322 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg)
1323 .addReg(TmpVGPR, getKillRegState(LastSubReg))
1325 if (NumSubRegs > 1 && i == 0)
1326 MIB.addReg(SuperReg, RegState::ImplicitDefine);
1331 MI->eraseFromParent();
/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled
/// to a VGPR and the stack slot can be safely eliminated when all other users
/// are handled.
1338 bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
1339 MachineBasicBlock::iterator MI,
1341 RegScavenger *RS) const {
1342 switch (MI->getOpcode()) {
1343 case AMDGPU::SI_SPILL_S1024_SAVE:
1344 case AMDGPU::SI_SPILL_S512_SAVE:
1345 case AMDGPU::SI_SPILL_S256_SAVE:
1346 case AMDGPU::SI_SPILL_S192_SAVE:
1347 case AMDGPU::SI_SPILL_S160_SAVE:
1348 case AMDGPU::SI_SPILL_S128_SAVE:
1349 case AMDGPU::SI_SPILL_S96_SAVE:
1350 case AMDGPU::SI_SPILL_S64_SAVE:
1351 case AMDGPU::SI_SPILL_S32_SAVE:
1352 return spillSGPR(MI, FI, RS, true);
1353 case AMDGPU::SI_SPILL_S1024_RESTORE:
1354 case AMDGPU::SI_SPILL_S512_RESTORE:
1355 case AMDGPU::SI_SPILL_S256_RESTORE:
1356 case AMDGPU::SI_SPILL_S192_RESTORE:
1357 case AMDGPU::SI_SPILL_S160_RESTORE:
1358 case AMDGPU::SI_SPILL_S128_RESTORE:
1359 case AMDGPU::SI_SPILL_S96_RESTORE:
1360 case AMDGPU::SI_SPILL_S64_RESTORE:
1361 case AMDGPU::SI_SPILL_S32_RESTORE:
1362 return restoreSGPR(MI, FI, RS, true);
1364 llvm_unreachable("not an SGPR spill instruction");
1368 void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
1369 int SPAdj, unsigned FIOperandNum,
1370 RegScavenger *RS) const {
1371 MachineFunction *MF = MI->getParent()->getParent();
1372 MachineBasicBlock *MBB = MI->getParent();
1373 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1374 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1375 const SIInstrInfo *TII = ST.getInstrInfo();
1376 DebugLoc DL = MI->getDebugLoc();
1378 assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1380 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
1381 int Index = MI->getOperand(FIOperandNum).getIndex();
1383 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
1385 : getFrameRegister(*MF);
1387 switch (MI->getOpcode()) {
1388 // SGPR register spill
1389 case AMDGPU::SI_SPILL_S1024_SAVE:
1390 case AMDGPU::SI_SPILL_S512_SAVE:
1391 case AMDGPU::SI_SPILL_S256_SAVE:
1392 case AMDGPU::SI_SPILL_S192_SAVE:
1393 case AMDGPU::SI_SPILL_S160_SAVE:
1394 case AMDGPU::SI_SPILL_S128_SAVE:
1395 case AMDGPU::SI_SPILL_S96_SAVE:
1396 case AMDGPU::SI_SPILL_S64_SAVE:
1397 case AMDGPU::SI_SPILL_S32_SAVE: {
1398 spillSGPR(MI, Index, RS);
1402 // SGPR register restore
1403 case AMDGPU::SI_SPILL_S1024_RESTORE:
1404 case AMDGPU::SI_SPILL_S512_RESTORE:
1405 case AMDGPU::SI_SPILL_S256_RESTORE:
1406 case AMDGPU::SI_SPILL_S192_RESTORE:
1407 case AMDGPU::SI_SPILL_S160_RESTORE:
1408 case AMDGPU::SI_SPILL_S128_RESTORE:
1409 case AMDGPU::SI_SPILL_S96_RESTORE:
1410 case AMDGPU::SI_SPILL_S64_RESTORE:
1411 case AMDGPU::SI_SPILL_S32_RESTORE: {
1412 restoreSGPR(MI, Index, RS);
1416 // VGPR register spill
1417 case AMDGPU::SI_SPILL_V1024_SAVE:
1418 case AMDGPU::SI_SPILL_V512_SAVE:
1419 case AMDGPU::SI_SPILL_V256_SAVE:
1420 case AMDGPU::SI_SPILL_V192_SAVE:
1421 case AMDGPU::SI_SPILL_V160_SAVE:
1422 case AMDGPU::SI_SPILL_V128_SAVE:
1423 case AMDGPU::SI_SPILL_V96_SAVE:
1424 case AMDGPU::SI_SPILL_V64_SAVE:
1425 case AMDGPU::SI_SPILL_V32_SAVE:
1426 case AMDGPU::SI_SPILL_A1024_SAVE:
1427 case AMDGPU::SI_SPILL_A512_SAVE:
1428 case AMDGPU::SI_SPILL_A256_SAVE:
1429 case AMDGPU::SI_SPILL_A192_SAVE:
1430 case AMDGPU::SI_SPILL_A160_SAVE:
1431 case AMDGPU::SI_SPILL_A128_SAVE:
1432 case AMDGPU::SI_SPILL_A96_SAVE:
1433 case AMDGPU::SI_SPILL_A64_SAVE:
1434 case AMDGPU::SI_SPILL_A32_SAVE: {
1435 const MachineOperand *VData = TII->getNamedOperand(*MI,
1436 AMDGPU::OpName::vdata);
1437 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1438 MFI->getStackPtrOffsetReg());
1440 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1441 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1442 buildSpillLoadStore(MI, Opc,
1444 VData->getReg(), VData->isKill(),
1446 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1447 *MI->memoperands_begin(),
1449 MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
1450 MI->eraseFromParent();
1453 case AMDGPU::SI_SPILL_V32_RESTORE:
1454 case AMDGPU::SI_SPILL_V64_RESTORE:
1455 case AMDGPU::SI_SPILL_V96_RESTORE:
1456 case AMDGPU::SI_SPILL_V128_RESTORE:
1457 case AMDGPU::SI_SPILL_V160_RESTORE:
1458 case AMDGPU::SI_SPILL_V192_RESTORE:
1459 case AMDGPU::SI_SPILL_V256_RESTORE:
1460 case AMDGPU::SI_SPILL_V512_RESTORE:
1461 case AMDGPU::SI_SPILL_V1024_RESTORE:
1462 case AMDGPU::SI_SPILL_A32_RESTORE:
1463 case AMDGPU::SI_SPILL_A64_RESTORE:
1464 case AMDGPU::SI_SPILL_A96_RESTORE:
1465 case AMDGPU::SI_SPILL_A128_RESTORE:
1466 case AMDGPU::SI_SPILL_A160_RESTORE:
1467 case AMDGPU::SI_SPILL_A192_RESTORE:
1468 case AMDGPU::SI_SPILL_A256_RESTORE:
1469 case AMDGPU::SI_SPILL_A512_RESTORE:
1470 case AMDGPU::SI_SPILL_A1024_RESTORE: {
1471 const MachineOperand *VData = TII->getNamedOperand(*MI,
1472 AMDGPU::OpName::vdata);
1473 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1474 MFI->getStackPtrOffsetReg());
1476 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1477 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1478 buildSpillLoadStore(MI, Opc,
1480 VData->getReg(), VData->isKill(),
1482 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1483 *MI->memoperands_begin(),
1485 MI->eraseFromParent();
1490 const DebugLoc &DL = MI->getDebugLoc();
1492 int64_t Offset = FrameInfo.getObjectOffset(Index);
1493 if (ST.enableFlatScratch()) {
1494 if (TII->isFLATScratch(*MI)) {
1495 assert((int16_t)FIOperandNum ==
1496 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1497 AMDGPU::OpName::saddr));
1499 // The offset is always swizzled, just replace it
1501 FIOp.ChangeToRegister(FrameReg, false);
1506 MachineOperand *OffsetOp =
1507 TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
1508 int64_t NewOffset = Offset + OffsetOp->getImm();
1509 if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
1511 OffsetOp->setImm(NewOffset);
1517 assert(!TII->getNamedOperand(*MI, AMDGPU::OpName::vaddr) &&
1518 "Unexpected vaddr for flat scratch with a FI operand");
1520 // On GFX10 we have ST mode to use no registers for an address.
1521 // Otherwise we need to materialize 0 into an SGPR.
1522 if (!Offset && ST.hasFlatScratchSTMode()) {
1523 unsigned Opc = MI->getOpcode();
1524 unsigned NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc);
1526 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr));
1527 MI->setDesc(TII->get(NewOpc));
1533 FIOp.ChangeToImmediate(Offset);
1534 if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp))
// We need to use a register here. Check if we can use an SGPR or need
// a VGPR.
1540 FIOp.ChangeToRegister(AMDGPU::M0, false);
1541 bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp);
1543 if (!Offset && FrameReg && UseSGPR) {
1544 FIOp.setReg(FrameReg);
1548 const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass
1549 : &AMDGPU::VGPR_32RegClass;
1551 Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR);
1552 FIOp.setReg(TmpReg);
1553 FIOp.setIsKill(true);
1555 if ((!FrameReg || !Offset) && TmpReg) {
1556 unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1557 auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg);
1559 MIB.addReg(FrameReg);
1568 : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0,
1571 // TODO: for flat scratch another attempt can be made with a VGPR index
1572 // if no SGPRs can be scavenged.
1573 if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR))
1574 report_fatal_error("Cannot scavenge register in FI elimination!");
1577 // Use frame register and restore it after.
1579 FIOp.setReg(FrameReg);
1580 FIOp.setIsKill(false);
1583 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), TmpSReg)
1588 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
1589 .addReg(TmpSReg, RegState::Kill);
1591 if (TmpSReg == FrameReg) {
1592 // Undo frame register modification.
1593 BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_SUB_U32),
1602 bool IsMUBUF = TII->isMUBUF(*MI);
1604 if (!IsMUBUF && !MFI->isEntryFunction()) {
1605 // Convert to a swizzled stack address by scaling by the wave size.
1607 // In an entry function/kernel the offset is already swizzled.
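// For example, on a wave64 target the V_LSHRREV below divides the frame
// register by 64 (1 << getWavefrontSizeLog2()), turning the wave-relative
// byte offset into the per-lane (swizzled) address before the frame-object
// offset is added.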
1609 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
1610 Register ResultReg =
1611 IsCopy ? MI->getOperand(0).getReg()
1612 : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1614 int64_t Offset = FrameInfo.getObjectOffset(Index);
1616 // XXX - This never happens because of emergency scavenging slot at 0?
1617 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
1618 .addImm(ST.getWavefrontSizeLog2())
1621 if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
1622 // Reuse ResultReg in intermediate step.
1623 Register ScaledReg = ResultReg;
1625 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
1627 .addImm(ST.getWavefrontSizeLog2())
1630 const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
1632 // TODO: Fold if use instruction is another add of a constant.
1633 if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
1634 // FIXME: This can fail
1636 MIB.addReg(ScaledReg, RegState::Kill);
1638 MIB.addImm(0); // clamp bit
1640 assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 &&
1641 "Need to reuse carry out register");
1643 // Use scavenged unused carry out as offset register.
1644 Register ConstOffsetReg;
1646 ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
1648 ConstOffsetReg = MIB.getReg(1);
1650 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
1652 MIB.addReg(ConstOffsetReg, RegState::Kill);
1653 MIB.addReg(ScaledReg, RegState::Kill);
1654 MIB.addImm(0); // clamp bit
1657 // We have to produce a carry out, and there isn't a free SGPR pair
1658 // for it. We can keep the whole computation on the SALU to avoid
1659 // clobbering an additional register at the cost of an extra mov.
1661 // We may have 1 free scratch SGPR even though a carry out is
1662 // unavailable. Only one additional mov is needed.
1663 Register TmpScaledReg =
1664 RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
1665 Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg;
1667 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
1669 .addImm(ST.getWavefrontSizeLog2());
1670 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), ScaledReg)
1671 .addReg(ScaledReg, RegState::Kill)
1673 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
1674 .addReg(ScaledReg, RegState::Kill);
1676 // If there were truly no free SGPRs, we need to undo everything.
1677 if (!TmpScaledReg.isValid()) {
1678 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScaledReg)
1679 .addReg(ScaledReg, RegState::Kill)
1681 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
1683 .addImm(ST.getWavefrontSizeLog2());
1688 // Don't introduce an extra copy if we're just materializing in a mov.
1690 MI->eraseFromParent();
1692 FIOp.ChangeToRegister(ResultReg, false, false, true);
1697 // Disable offen so we don't need a 0 vgpr base.
1698 assert(static_cast<int>(FIOperandNum) ==
1699 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1700 AMDGPU::OpName::vaddr));
1702 auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
1703 assert((SOffset.isImm() && SOffset.getImm() == 0));
1705 if (FrameReg != AMDGPU::NoRegister)
1706 SOffset.ChangeToRegister(FrameReg, false);
1708 int64_t Offset = FrameInfo.getObjectOffset(Index);
1710 = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
1711 int64_t NewOffset = OldImm + Offset;
1713 if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
1714 buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
1715 MI->eraseFromParent();
// If the offset is simply too big, don't convert it to a scratch wave
// offset relative index.
1723 FIOp.ChangeToImmediate(Offset);
1724 if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
1725 Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1726 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
1728 FIOp.ChangeToRegister(TmpReg, false, false, true);
1734 StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
1735 return AMDGPUInstPrinter::getRegisterName(Reg);
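// The *ClassForBitWidth helpers below round the requested width up to the
// narrowest register class that can hold it; e.g. a 48-bit value gets a
// 64-bit class (VReg_64, AReg_64 or SReg_64 respectively).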
1738 const TargetRegisterClass *
1739 SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) {
1741 return &AMDGPU::VReg_1RegClass;
1743 return &AMDGPU::VGPR_LO16RegClass;
1745 return &AMDGPU::VGPR_32RegClass;
1747 return &AMDGPU::VReg_64RegClass;
1749 return &AMDGPU::VReg_96RegClass;
1750 if (BitWidth <= 128)
1751 return &AMDGPU::VReg_128RegClass;
1752 if (BitWidth <= 160)
1753 return &AMDGPU::VReg_160RegClass;
1754 if (BitWidth <= 192)
1755 return &AMDGPU::VReg_192RegClass;
1756 if (BitWidth <= 256)
1757 return &AMDGPU::VReg_256RegClass;
1758 if (BitWidth <= 512)
1759 return &AMDGPU::VReg_512RegClass;
1760 if (BitWidth <= 1024)
1761 return &AMDGPU::VReg_1024RegClass;
1766 const TargetRegisterClass *
1767 SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) {
1769 return &AMDGPU::AGPR_LO16RegClass;
1771 return &AMDGPU::AGPR_32RegClass;
1773 return &AMDGPU::AReg_64RegClass;
1775 return &AMDGPU::AReg_96RegClass;
1776 if (BitWidth <= 128)
1777 return &AMDGPU::AReg_128RegClass;
1778 if (BitWidth <= 160)
1779 return &AMDGPU::AReg_160RegClass;
1780 if (BitWidth <= 192)
1781 return &AMDGPU::AReg_192RegClass;
1782 if (BitWidth <= 256)
1783 return &AMDGPU::AReg_256RegClass;
1784 if (BitWidth <= 512)
1785 return &AMDGPU::AReg_512RegClass;
1786 if (BitWidth <= 1024)
1787 return &AMDGPU::AReg_1024RegClass;

const TargetRegisterClass *
SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
  if (BitWidth <= 16)
    return &AMDGPU::SGPR_LO16RegClass;
  if (BitWidth <= 32)
    return &AMDGPU::SReg_32RegClass;
  if (BitWidth <= 64)
    return &AMDGPU::SReg_64RegClass;
  if (BitWidth <= 96)
    return &AMDGPU::SGPR_96RegClass;
  if (BitWidth <= 128)
    return &AMDGPU::SGPR_128RegClass;
  if (BitWidth <= 160)
    return &AMDGPU::SGPR_160RegClass;
  if (BitWidth <= 192)
    return &AMDGPU::SGPR_192RegClass;
  if (BitWidth <= 256)
    return &AMDGPU::SGPR_256RegClass;
  if (BitWidth <= 512)
    return &AMDGPU::SGPR_512RegClass;
  if (BitWidth <= 1024)
    return &AMDGPU::SGPR_1024RegClass;

  return nullptr;
}
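
// For example, getPhysRegClass(AMDGPU::VGPR0) below resolves to
// &AMDGPU::VGPR_32RegClass and getPhysRegClass(AMDGPU::SGPR0) to
// &AMDGPU::SReg_32RegClass: the first base class in the table that contains
// the register wins.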

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *
SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_LO16RegClass,
    &AMDGPU::VGPR_HI16RegClass,
    &AMDGPU::SReg_LO16RegClass,
    &AMDGPU::AGPR_LO16RegClass,
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::AGPR_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::AReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::SReg_96RegClass,
    &AMDGPU::AReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::AReg_128RegClass,
    &AMDGPU::VReg_160RegClass,
    &AMDGPU::SReg_160RegClass,
    &AMDGPU::AReg_160RegClass,
    &AMDGPU::VReg_192RegClass,
    &AMDGPU::SReg_192RegClass,
    &AMDGPU::AReg_192RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::AReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::AReg_512RegClass,
    &AMDGPU::SReg_1024RegClass,
    &AMDGPU::VReg_1024RegClass,
    &AMDGPU::AReg_1024RegClass,
    &AMDGPU::SCC_CLASSRegClass,
    &AMDGPU::Pseudo_SReg_32RegClass,
    &AMDGPU::Pseudo_SReg_128RegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI,
                               Register Reg) const {
  const TargetRegisterClass *RC;
  if (Reg.isVirtual())
    RC = MRI.getRegClass(Reg);
  else
    RC = getPhysRegClass(Reg);
  return isSGPRClass(RC);
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size == 16) {
    return getCommonSubClass(&AMDGPU::VGPR_LO16RegClass, RC) != nullptr ||
           getCommonSubClass(&AMDGPU::VGPR_HI16RegClass, RC) != nullptr;
  }
  const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
  if (!VRC) {
    assert(Size < 32 && "Invalid register class size");
    return false;
  }
  return getCommonSubClass(VRC, RC) != nullptr;
}

bool SIRegisterInfo::hasAGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size < 16)
    return false;
  const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
  if (!ARC) {
    assert(getVGPRClassForBitWidth(Size) && "Invalid register class size");
    return false;
  }
  return getCommonSubClass(ARC, RC) != nullptr;
}

const TargetRegisterClass *
SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const {
  unsigned Size = getRegSizeInBits(*SRC);
  const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
  assert(VRC && "Invalid register class size");
  return VRC;
}

const TargetRegisterClass *
SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const {
  unsigned Size = getRegSizeInBits(*SRC);
  const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
  assert(ARC && "Invalid register class size");
  return ARC;
}

const TargetRegisterClass *
SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
  unsigned Size = getRegSizeInBits(*VRC);
  if (Size == 32)
    return &AMDGPU::SGPR_32RegClass;
  const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
  assert(SRC && "Invalid register class size");
  return SRC;
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
    const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  unsigned Size = getNumChannelsFromSubReg(SubIdx) * 32;
  if (isSGPRClass(RC)) {
    if (Size == 32)
      RC = &AMDGPU::SGPR_32RegClass;
    else
      RC = getSGPRClassForBitWidth(Size);
  } else if (hasAGPRs(RC)) {
    RC = getAGPRClassForBitWidth(Size);
  } else {
    RC = getVGPRClassForBitWidth(Size);
  }
  assert(RC && "Invalid sub-register class size");
  return RC;
}
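
// For example, asking for sub0_sub1 (two 32-bit channels) of a VReg_128
// operand yields &AMDGPU::VReg_64RegClass, while the same index on an SGPR
// tuple yields &AMDGPU::SReg_64RegClass.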

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
      OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return !ST.hasMFMAInlineLiteralBug();

  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}
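
// Roughly speaking, inline constants are the operand values the hardware can
// encode for free (small integers in the range -16..64 and a small set of
// floating-point constants); anything else must be emitted as a 32-bit
// literal, which is what opCanUseLiteralConstant below accepts for the
// *_IMM operand types only.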

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want
  // to stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so
  // we only want to stop on the most basic of copies between the same
  // register class.
  //
  // e.g. if we have something like
  // %0 = ...
  // %1 = ...
  // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
  // %3 = COPY %2, sub0
  //
  // We want to look through the COPY to find:
  //  => %3 = COPY %0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  // TODO: 64-bit operands have extending behavior from 32-bit literal.
  return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_IMM_LAST;
}

/// Returns the lowest register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister. If \p ReserveHighestVGPR is true, it returns the
/// highest unused register instead.
MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                              const TargetRegisterClass *RC,
                                              const MachineFunction &MF,
                                              bool ReserveHighestVGPR) const {
  if (ReserveHighestVGPR) {
    for (MCRegister Reg : reverse(*RC))
      if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
        return Reg;
  } else {
    for (MCRegister Reg : *RC)
      if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
        return Reg;
  }
  return MCRegister();
}

ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
                                                   unsigned EltSize) const {
  const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
  assert(RegBitWidth >= 32 && RegBitWidth <= 1024);

  const unsigned RegDWORDs = RegBitWidth / 32;
  const unsigned EltDWORDs = EltSize / 4;
  assert(RegSplitParts.size() + 1 >= EltDWORDs);

  const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
  const unsigned NumParts = RegDWORDs / EltDWORDs;

  return makeArrayRef(Parts.data(), NumParts);
}
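
// For example, splitting a 128-bit register class into 64-bit pieces
// (EltSize == 8) returns two entries from the RegSplitParts table built in
// the constructor, roughly {sub0_sub1, sub2_sub3}.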

const TargetRegisterClass*
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
                                  Register Reg) const {
  return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg);
}

bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            Register Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  // Registers without classes are unaddressable, SGPR-like registers.
  return RC && hasVGPRs(RC);
}

bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
                            Register Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  // Registers without classes are unaddressable, SGPR-like registers.
  return RC && hasAGPRs(RC);
}

bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SubReg,
                                    const TargetRegisterClass *DstRC,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *NewRC,
                                    LiveIntervals &LIS) const {
  unsigned SrcSize = getRegSizeInBits(*SrcRC);
  unsigned DstSize = getRegSizeInBits(*DstRC);
  unsigned NewSize = getRegSizeInBits(*NewRC);

  // Do not increase the size of registers beyond a dword; we would need to
  // allocate adjacent registers and constrain regalloc more than needed.

  // Always allow dword coalescing.
  if (SrcSize <= 32 || DstSize <= 32)
    return true;

  return NewSize <= DstSize || NewSize <= SrcSize;
}
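
// For example, merging two 64-bit virtual registers into one 128-bit register
// is rejected here (NewSize exceeds both SrcSize and DstSize), while any copy
// that involves a 32-bit register is always allowed to coalesce.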

unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                       MF.getFunction());
  switch (RC->getID()) {
  default:
    return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF);
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VGPR_LO16RegClassID:
  case AMDGPU::VGPR_HI16RegClassID:
    return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
    return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
  }
}
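
// Note that the limits above are occupancy-aware: if LDS usage caps the
// function at N waves per execution unit, the reported VGPR/SGPR budget is
// clamped to the largest count that still sustains N waves, rather than to
// the raw size of the register file.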

unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
      Idx == AMDGPU::RegisterPressureSets::AGPR_32)
    return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
    return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  llvm_unreachable("Unexpected register pressure set!");
}

const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
  static const int Empty[] = { -1 };

  if (RegPressureIgnoredUnits[RegUnit])
    return Empty;

  return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
}

MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
  // Not a callee saved register.
  return AMDGPU::SGPR30_SGPR31;
}

const TargetRegisterClass *
SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
                                         const RegisterBank &RB,
                                         const MachineRegisterInfo &MRI) const {
  switch (RB.getID()) {
  case AMDGPU::VGPRRegBankID:
    return getVGPRClassForBitWidth(std::max(32u, Size));
  case AMDGPU::VCCRegBankID:
    assert(Size == 1);
    return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
                    : &AMDGPU::SReg_64_XEXECRegClass;
  case AMDGPU::SGPRRegBankID:
    return getSGPRClassForBitWidth(std::max(32u, Size));
  case AMDGPU::AGPRRegBankID:
    return getAGPRClassForBitWidth(std::max(32u, Size));
  default:
    llvm_unreachable("unknown register bank");
  }
}
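
// For example, a 64-bit value on the VGPR bank selects VReg_64, while a 1-bit
// value on the VCC bank selects the wave-sized condition class
// (SReg_32_XM0_XEXEC in wave32 mode, SReg_64_XEXEC in wave64 mode).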

const TargetRegisterClass *
SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
                                                 const MachineRegisterInfo &MRI) const {
  const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
  if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>())
    return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);

  const TargetRegisterClass *RC = RCOrRB.get<const TargetRegisterClass*>();
  return getAllocatableClass(RC);
}

MCRegister SIRegisterInfo::getVCC() const {
  return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
}

const TargetRegisterClass *
SIRegisterInfo::getRegClass(unsigned RCID) const {
  switch ((int)RCID) {
  case AMDGPU::SReg_1RegClassID:
    return getBoolRC();
  case AMDGPU::SReg_1_XEXECRegClassID:
    return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
                    : &AMDGPU::SReg_64_XEXECRegClass;
  case -1:
    return nullptr;
  default:
    return AMDGPUGenRegisterInfo::getRegClass(RCID);
  }
}
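
// findReachingDef below handles both virtual and physical registers: it
// returns the single instruction whose definition of (Reg, SubReg) reaches
// and dominates Use, or nullptr when no unique dominating definition exists
// (for instance when the value is merged from several definitions).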

// Find reaching register definition
MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
                                              MachineInstr &Use,
                                              MachineRegisterInfo &MRI,
                                              LiveIntervals *LIS) const {
  auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
  SlotIndex UseIdx = LIS->getInstructionIndex(Use);
  SlotIndex DefIdx;

  if (Reg.isVirtual()) {
    if (!LIS->hasInterval(Reg))
      return nullptr;
    LiveInterval &LI = LIS->getInterval(Reg);
    LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
                                  : MRI.getMaxLaneMaskForVReg(Reg);
    VNInfo *V = nullptr;
    if (LI.hasSubRanges()) {
      for (auto &S : LI.subranges()) {
        if ((S.LaneMask & SubLanes) == SubLanes) {
          V = S.getVNInfoAt(UseIdx);
          break;
        }
      }
    } else {
      V = LI.getVNInfoAt(UseIdx);
    }
    if (!V)
      return nullptr;
    DefIdx = V->def;
  } else {
    // Find last def.
    for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid();
         ++Units) {
      LiveRange &LR = LIS->getRegUnit(*Units);
      if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
        if (!DefIdx.isValid() ||
            MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
                          LIS->getInstructionFromIndex(V->def)))
          DefIdx = V->def;
      } else {
        return nullptr;
      }
    }
  }

  MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);

  if (!Def || !MDT.dominates(Def, &Use))
    return nullptr;

  assert(Def->modifiesRegister(Reg, this));

  return Def;
}

MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const {
  assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);

  for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
                                         AMDGPU::SReg_32RegClass,
                                         AMDGPU::AGPR_32RegClass } ) {
    if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
      return Super;
  }
  if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
                                            &AMDGPU::VGPR_32RegClass)) {
    return Super;
  }

  return AMDGPU::NoRegister;
}
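
// For example, get32BitRegister(AMDGPU::VGPR0_LO16) and
// get32BitRegister(AMDGPU::VGPR0_HI16) both map back to AMDGPU::VGPR0.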

bool SIRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
  switch (PhysReg) {
  case AMDGPU::SGPR_NULL:
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_LIMIT:
    return true;
  default:
    return false;
  }
}

ArrayRef<MCPhysReg>
SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

ArrayRef<MCPhysReg>
SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const {
  return makeArrayRef(AMDGPU::SGPR_64RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 2);
}

ArrayRef<MCPhysReg>
SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
}
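
// For example, with a 104-SGPR budget for the function, getAllSGPR128 above
// returns the first 104 / 4 = 26 quad-SGPR tuples (s[0:3], s[4:7], ...);
// these lists are used, among other places, when searching for free scalar
// registers during frame lowering.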