1 //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// SI implementation of the TargetRegisterInfo class.
12 //===----------------------------------------------------------------------===//
15 #include "AMDGPURegisterBankInfo.h"
16 #include "GCNSubtarget.h"
17 #include "MCTargetDesc/AMDGPUInstPrinter.h"
18 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "SIRegisterInfo.h"
21 #include "llvm/CodeGen/LiveIntervals.h"
22 #include "llvm/CodeGen/LivePhysRegs.h"
23 #include "llvm/CodeGen/MachineDominators.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
29 #define GET_REGINFO_TARGET_DESC
30 #include "AMDGPUGenRegisterInfo.inc"
32 static cl::opt<bool> EnableSpillSGPRToVGPR(
33 "amdgpu-spill-sgpr-to-vgpr",
34 cl::desc("Enable spilling SGPRs to VGPRs"),
38 std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
39 std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable;
41 // Map numbers of DWORDs to indexes in SubRegFromChannelTable.
42 // Valid indexes are shifted by 1, such that a 0 mapping means unsupported.
43 // e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8,
44 // meaning index 7 in SubRegFromChannelTable.
45 static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = {
46 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9};
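// (Illustrative reading of the table above: widths of 1-8 DWORDs map to rows
// 0-7 of SubRegFromChannelTable, 16 DWORDs (512-bit) maps to row 8, and the
// unsupported widths 9-15 map to 0.)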
50 // A temporary struct to spill SGPRs.
51 // This is mostly to spill SGPRs to memory. Spilling SGPRs into VGPR lanes emits
52 // just v_writelane and v_readlane.
54 // When spilling to memory, the SGPRs are written into VGPR lanes and the VGPR
55 // is saved to scratch (or the other way around for loads).
56 // For this, a VGPR is required where the needed lanes can be clobbered. The
57 // RegScavenger can provide a VGPR where currently active lanes can be
58 // clobbered, but we still need to save inactive lanes.
59 // The high-level steps are:
60 // - Try to scavenge SGPR(s) to save exec
61 // - Try to scavenge VGPR
62 // - Save needed, all or inactive lanes of a TmpVGPR
63 // - Spill/Restore SGPRs using TmpVGPR
66 // To save all lanes of TmpVGPR, exec needs to be saved and modified. If we
67 // cannot scavenge temporary SGPRs to save exec, we use the following code:
68 // buffer_store_dword TmpVGPR ; only if active lanes need to be saved
70 // buffer_store_dword TmpVGPR ; save inactive lanes
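// Between the two stores exec is inverted with "s_not exec, exec", so both
// the active and the inactive lanes of TmpVGPR end up saved; exec is left
// inverted and only flipped back in restore().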
72 struct SGPRSpillBuilder {
81 MachineBasicBlock::iterator MI;
82 ArrayRef<int16_t> SplitParts;
87 /* When spilling to stack */
88 // The SGPRs are written into this VGPR, which is then written to scratch
89 // (or vice versa for loads).
90 Register TmpVGPR = AMDGPU::NoRegister;
91 // Temporary spill slot to save TmpVGPR to.
93 // If TmpVGPR is live before the spill or if it is scavenged.
94 bool TmpVGPRLive = false;
95 // Scavenged SGPR to save EXEC.
96 Register SavedExecReg = AMDGPU::NoRegister;
97 // Stack index to write the SGPRs to.
102 MachineBasicBlock *MBB;
104 SIMachineFunctionInfo &MFI;
105 const SIInstrInfo &TII;
106 const SIRegisterInfo &TRI;
112 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
113 bool IsWave32, MachineBasicBlock::iterator MI, int Index,
115 : SGPRSpillBuilder(TRI, TII, IsWave32, MI, MI->getOperand(0).getReg(),
116 MI->getOperand(0).isKill(), Index, RS) {}
118 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
119 bool IsWave32, MachineBasicBlock::iterator MI, Register Reg,
120 bool IsKill, int Index, RegScavenger *RS)
121 : SuperReg(Reg), MI(MI), IsKill(IsKill), DL(MI->getDebugLoc()),
122 Index(Index), RS(RS), MBB(MI->getParent()), MF(*MBB->getParent()),
123 MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
125 const TargetRegisterClass *RC = TRI.getPhysRegBaseClass(SuperReg);
126 SplitParts = TRI.getRegSplitParts(RC, EltSize);
127 NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
130 ExecReg = AMDGPU::EXEC_LO;
131 MovOpc = AMDGPU::S_MOV_B32;
132 NotOpc = AMDGPU::S_NOT_B32;
134 ExecReg = AMDGPU::EXEC;
135 MovOpc = AMDGPU::S_MOV_B64;
136 NotOpc = AMDGPU::S_NOT_B64;
139 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
140 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
141 SuperReg != AMDGPU::EXEC && "exec should never spill");
144 PerVGPRData getPerVGPRData() {
146 Data.PerVGPR = IsWave32 ? 32 : 64;
147 Data.NumVGPRs = (NumSubRegs + (Data.PerVGPR - 1)) / Data.PerVGPR;
148 Data.VGPRLanes = (1LL << std::min(Data.PerVGPR, NumSubRegs)) - 1LL;
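// For example, spilling a 4-dword SGPR tuple in wave64 gives PerVGPR = 64,
// NumVGPRs = 1 and VGPRLanes = 0xf, i.e. only lanes 0-3 of the temporary
// VGPR are needed.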
152 // Tries to scavenge SGPRs to save EXEC and a VGPR. Uses v0 if no VGPR is free.
154 // Writes these instructions if an SGPR can be scavenged:
155 // s_mov_b64 s[6:7], exec ; Save exec
156 // s_mov_b64 exec, 3 ; Wanted lanemask
157 // buffer_store_dword v1 ; Write scavenged VGPR to emergency slot
159 // Writes these instructions if no SGPR can be scavenged:
160 // buffer_store_dword v0 ; Only if no free VGPR was found
161 // s_not_b64 exec, exec
162 // buffer_store_dword v0 ; Save inactive lanes
163 // ; exec stays inverted, it is flipped back in restore().
166 // Scavenged temporary VGPR to use. It must be scavenged once for any number
167 // of spilled subregs.
168 // FIXME: The liveness analysis is limited and does not tell if a register
169 // is in use in lanes that are currently inactive. We can never be sure if
170 // a register is actually in use in another lane, so we need to save all
171 // used lanes of the chosen VGPR.
172 assert(RS && "Cannot spill SGPR to memory without RegScavenger");
173 TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0, false);
175 // Reserve temporary stack slot
176 TmpVGPRIndex = MFI.getScavengeFI(MF.getFrameInfo(), TRI);
178 // Found a register that is dead in the currently active lanes, so we only
179 // need to spill the inactive lanes.
182 // Pick v0 because it doesn't make a difference.
183 TmpVGPR = AMDGPU::VGPR0;
188 // We need to inform the scavenger that this index is already in use until
189 // we're done with the custom emergency spill.
190 RS->assignRegToScavengingIndex(TmpVGPRIndex, TmpVGPR);
193 // We may end up recursively calling the scavenger, and don't want to re-use
194 // the same register.
195 RS->setRegUsed(TmpVGPR);
197 // Try to scavenge SGPRs to save exec
198 assert(!SavedExecReg && "Exec is already saved, refuse to save again");
199 const TargetRegisterClass &RC =
200 IsWave32 ? AMDGPU::SGPR_32RegClass : AMDGPU::SGPR_64RegClass;
201 RS->setRegUsed(SuperReg);
202 SavedExecReg = RS->scavengeRegister(&RC, MI, 0, false);
204 int64_t VGPRLanes = getPerVGPRData().VGPRLanes;
207 RS->setRegUsed(SavedExecReg);
208 // Set exec to needed lanes
209 BuildMI(*MBB, MI, DL, TII.get(MovOpc), SavedExecReg).addReg(ExecReg);
211 BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg).addImm(VGPRLanes);
213 I.addReg(TmpVGPR, RegState::ImplicitDefine);
214 // Spill needed lanes
215 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
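// At this point the needed lanes of TmpVGPR have been saved to the emergency
// slot and exec covers exactly those lanes, so readWriteTmpVGPR() can use a
// single buffer store/load for the SGPR data.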
217 // The modify and restore of exec clobber SCC, which we would have to save
218 // and restore. FIXME: We probably would need to reserve a register for this.
220 if (RS->isRegUsed(AMDGPU::SCC))
221 MI->emitError("unhandled SGPR spill to memory");
223 // Spill active lanes
225 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false,
227 // Spill inactive lanes
228 auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
230 I.addReg(TmpVGPR, RegState::ImplicitDefine);
231 I->getOperand(2).setIsDead(); // Mark SCC as dead.
232 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
236 // Writes these instructions if an SGPR can be scavenged:
237 // buffer_load_dword v1 ; Restore scavenged VGPR from emergency slot
238 // s_waitcnt vmcnt(0) ; If a free VGPR was found
239 // s_mov_b64 exec, s[6:7] ; Restore exec
241 // Writes these instructions if no SGPR can be scavenged:
242 // buffer_load_dword v0 ; Restore inactive lanes
243 // s_waitcnt vmcnt(0) ; If a free VGPR was found
244 // s_not_b64 exec, exec
245 // buffer_load_dword v0 ; Only if no free VGPR was found
248 // Restore used lanes
249 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
252 auto I = BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg)
253 .addReg(SavedExecReg, RegState::Kill);
254 // Add an implicit use of the load so it is not dead.
255 // FIXME This inserts an unnecessary waitcnt
257 I.addReg(TmpVGPR, RegState::ImplicitKill);
260 // Restore inactive lanes
261 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
263 auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
265 I.addReg(TmpVGPR, RegState::ImplicitKill);
266 I->getOperand(2).setIsDead(); // Mark SCC as dead.
268 // Restore active lanes
270 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true);
273 // Inform the scavenger where we're releasing our custom scavenged register.
275 MachineBasicBlock::iterator RestorePt = std::prev(MI);
276 RS->assignRegToScavengingIndex(TmpVGPRIndex, TmpVGPR, &*RestorePt);
280 // Write TmpVGPR to memory or read TmpVGPR from memory.
281 // Either using a single buffer_load/store if exec is set to the needed mask, or a store / s_not exec / store (resp. load) sequence that handles the active and inactive lanes separately.
287 void readWriteTmpVGPR(unsigned Offset, bool IsLoad) {
289 // Spill needed lanes
290 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
292 // The modify and restore of exec clobber SCC, which we would have to save
293 // and restore. FIXME: We probably would need to reserve a register for this.
295 if (RS->isRegUsed(AMDGPU::SCC))
296 MI->emitError("unhandled SGPR spill to memory");
298 // Spill active lanes
299 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad,
301 // Spill inactive lanes
302 auto Not0 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
303 Not0->getOperand(2).setIsDead(); // Mark SCC as dead.
304 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
305 auto Not1 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
306 Not1->getOperand(2).setIsDead(); // Mark SCC as dead.
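// exec has now been inverted twice and is back to its original value.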
310 void setMI(MachineBasicBlock *NewMBB, MachineBasicBlock::iterator NewMI) {
311 assert(MBB->getParent() == &MF);
319 SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
320 : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
321 SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
323 assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
324 getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
325 (getSubRegIndexLaneMask(AMDGPU::lo16) |
326 getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
327 getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
328 "getNumCoveredRegs() will not work with generated subreg masks!");
330 RegPressureIgnoredUnits.resize(getNumRegUnits());
331 RegPressureIgnoredUnits.set(
332 *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this));
333 for (auto Reg : AMDGPU::VGPR_HI16RegClass)
334 RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
336 // HACK: Until this is fully tablegen'd.
337 static llvm::once_flag InitializeRegSplitPartsFlag;
339 static auto InitializeRegSplitPartsOnce = [this]() {
340 for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
341 unsigned Size = getSubRegIdxSize(Idx);
344 std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
345 unsigned Pos = getSubRegIdxOffset(Idx);
350 unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
351 Vec.resize(MaxNumParts);
357 static llvm::once_flag InitializeSubRegFromChannelTableFlag;
359 static auto InitializeSubRegFromChannelTableOnce = [this]() {
360 for (auto &Row : SubRegFromChannelTable)
361 Row.fill(AMDGPU::NoSubRegister);
362 for (unsigned Idx = 1; Idx < getNumSubRegIndices(); ++Idx) {
363 unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32;
364 unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32;
365 assert(Width < SubRegFromChannelTableWidthMap.size());
366 Width = SubRegFromChannelTableWidthMap[Width];
369 unsigned TableIdx = Width - 1;
370 assert(TableIdx < SubRegFromChannelTable.size());
371 assert(Offset < SubRegFromChannelTable[TableIdx].size());
372 SubRegFromChannelTable[TableIdx][Offset] = Idx;
376 llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
377 llvm::call_once(InitializeSubRegFromChannelTableFlag,
378 InitializeSubRegFromChannelTableOnce);
381 void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
382 MCRegister Reg) const {
383 MCRegAliasIterator R(Reg, this, true);
385 for (; R.isValid(); ++R)
389 // Forced to be here by one .inc
390 const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
391 const MachineFunction *MF) const {
392 CallingConv::ID CC = MF->getFunction().getCallingConv();
395 case CallingConv::Fast:
396 case CallingConv::Cold:
397 return ST.hasGFX90AInsts() ? CSR_AMDGPU_GFX90AInsts_SaveList
398 : CSR_AMDGPU_SaveList;
399 case CallingConv::AMDGPU_Gfx:
400 return ST.hasGFX90AInsts() ? CSR_AMDGPU_SI_Gfx_GFX90AInsts_SaveList
401 : CSR_AMDGPU_SI_Gfx_SaveList;
403 // Dummy to not crash RegisterClassInfo.
404 static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
405 return &NoCalleeSavedReg;
411 SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
415 const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
416 CallingConv::ID CC) const {
419 case CallingConv::Fast:
420 case CallingConv::Cold:
421 return ST.hasGFX90AInsts() ? CSR_AMDGPU_GFX90AInsts_RegMask
422 : CSR_AMDGPU_RegMask;
423 case CallingConv::AMDGPU_Gfx:
424 return ST.hasGFX90AInsts() ? CSR_AMDGPU_SI_Gfx_GFX90AInsts_RegMask
425 : CSR_AMDGPU_SI_Gfx_RegMask;
431 const uint32_t *SIRegisterInfo::getNoPreservedMask() const {
432 return CSR_AMDGPU_NoRegs_RegMask;
435 const TargetRegisterClass *
436 SIRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
437 const MachineFunction &MF) const {
438 // FIXME: Should have a helper function like getEquivalentVGPRClass to get the
439 // equivalent AV class. If one were used here, the verifier would crash after
440 // RegBankSelect in the GISel flow, since the aligned register classes are not
441 // fully available until instruction selection.
442 if (ST.hasMAIInsts() && (isVGPRClass(RC) || isAGPRClass(RC))) {
443 if (RC == &AMDGPU::VGPR_32RegClass || RC == &AMDGPU::AGPR_32RegClass)
444 return &AMDGPU::AV_32RegClass;
445 if (RC == &AMDGPU::VReg_64RegClass || RC == &AMDGPU::AReg_64RegClass)
446 return &AMDGPU::AV_64RegClass;
447 if (RC == &AMDGPU::VReg_64_Align2RegClass ||
448 RC == &AMDGPU::AReg_64_Align2RegClass)
449 return &AMDGPU::AV_64_Align2RegClass;
450 if (RC == &AMDGPU::VReg_96RegClass || RC == &AMDGPU::AReg_96RegClass)
451 return &AMDGPU::AV_96RegClass;
452 if (RC == &AMDGPU::VReg_96_Align2RegClass ||
453 RC == &AMDGPU::AReg_96_Align2RegClass)
454 return &AMDGPU::AV_96_Align2RegClass;
455 if (RC == &AMDGPU::VReg_128RegClass || RC == &AMDGPU::AReg_128RegClass)
456 return &AMDGPU::AV_128RegClass;
457 if (RC == &AMDGPU::VReg_128_Align2RegClass ||
458 RC == &AMDGPU::AReg_128_Align2RegClass)
459 return &AMDGPU::AV_128_Align2RegClass;
460 if (RC == &AMDGPU::VReg_160RegClass || RC == &AMDGPU::AReg_160RegClass)
461 return &AMDGPU::AV_160RegClass;
462 if (RC == &AMDGPU::VReg_160_Align2RegClass ||
463 RC == &AMDGPU::AReg_160_Align2RegClass)
464 return &AMDGPU::AV_160_Align2RegClass;
465 if (RC == &AMDGPU::VReg_192RegClass || RC == &AMDGPU::AReg_192RegClass)
466 return &AMDGPU::AV_192RegClass;
467 if (RC == &AMDGPU::VReg_192_Align2RegClass ||
468 RC == &AMDGPU::AReg_192_Align2RegClass)
469 return &AMDGPU::AV_192_Align2RegClass;
470 if (RC == &AMDGPU::VReg_256RegClass || RC == &AMDGPU::AReg_256RegClass)
471 return &AMDGPU::AV_256RegClass;
472 if (RC == &AMDGPU::VReg_256_Align2RegClass ||
473 RC == &AMDGPU::AReg_256_Align2RegClass)
474 return &AMDGPU::AV_256_Align2RegClass;
475 if (RC == &AMDGPU::VReg_512RegClass || RC == &AMDGPU::AReg_512RegClass)
476 return &AMDGPU::AV_512RegClass;
477 if (RC == &AMDGPU::VReg_512_Align2RegClass ||
478 RC == &AMDGPU::AReg_512_Align2RegClass)
479 return &AMDGPU::AV_512_Align2RegClass;
480 if (RC == &AMDGPU::VReg_1024RegClass || RC == &AMDGPU::AReg_1024RegClass)
481 return &AMDGPU::AV_1024RegClass;
482 if (RC == &AMDGPU::VReg_1024_Align2RegClass ||
483 RC == &AMDGPU::AReg_1024_Align2RegClass)
484 return &AMDGPU::AV_1024_Align2RegClass;
487 return TargetRegisterInfo::getLargestLegalSuperClass(RC, MF);
490 Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
491 const SIFrameLowering *TFI = ST.getFrameLowering();
492 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
493 // During ISel lowering we always reserve the stack pointer in entry
494 // functions, but never actually want to reference it when accessing our own
495 // frame. If we need a frame pointer we use it, but otherwise we can just use
496 // an immediate "0" which we represent by returning NoRegister.
497 if (FuncInfo->isEntryFunction()) {
498 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register();
500 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg()
501 : FuncInfo->getStackPtrOffsetReg();
504 bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
505 // When we need stack realignment, we can't reference off of the
506 // stack pointer, so we reserve a base pointer.
507 const MachineFrameInfo &MFI = MF.getFrameInfo();
508 return MFI.getNumFixedObjects() && shouldRealignStack(MF);
511 Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
513 const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const {
514 return AMDGPU_AllVGPRs_RegMask;
517 const uint32_t *SIRegisterInfo::getAllAGPRRegMask() const {
518 return AMDGPU_AllAGPRs_RegMask;
521 const uint32_t *SIRegisterInfo::getAllVectorRegMask() const {
522 return AMDGPU_AllVectorRegs_RegMask;
525 const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const {
526 return AMDGPU_AllAllocatableSRegs_RegMask;
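// Illustrative example (assuming the standard AMDGPU sub-register indices):
// getSubRegFromChannel(2, 2) selects the 2-DWORD row of
// SubRegFromChannelTable and returns the 64-bit sub-register index starting
// at channel 2, i.e. sub2_sub3.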
529 unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
531 assert(NumRegs < SubRegFromChannelTableWidthMap.size());
532 unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs];
533 assert(NumRegIndex && "Not implemented");
534 assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size());
535 return SubRegFromChannelTable[NumRegIndex - 1][Channel];
538 MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
539 const MachineFunction &MF) const {
540 unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
541 MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
542 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
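// For instance, with 102 addressable SGPRs this picks BaseIdx = 96 and
// reserves s[96:99] for the scratch buffer resource descriptor.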
545 BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
546 BitVector Reserved(getNumRegs());
547 Reserved.set(AMDGPU::MODE);
549 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
551 // Reserve special purpose registers.
553 // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
554 // this seems likely to result in bugs, so I'm marking them as reserved.
555 reserveRegisterTuples(Reserved, AMDGPU::EXEC);
556 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
558 // M0 has to be reserved so that llvm accepts it as a live-in into a block.
559 reserveRegisterTuples(Reserved, AMDGPU::M0);
561 // Reserve src_vccz, src_execz, src_scc.
562 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
563 reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
564 reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
566 // Reserve the memory aperture registers
567 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
568 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
569 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
570 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
572 // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
573 reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
575 // Reserve xnack_mask registers - support is not implemented in Codegen.
576 reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
578 // Reserve lds_direct register - support is not implemented in Codegen.
579 reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
581 // Reserve Trap Handler registers - support is not implemented in Codegen.
582 reserveRegisterTuples(Reserved, AMDGPU::TBA);
583 reserveRegisterTuples(Reserved, AMDGPU::TMA);
584 reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
585 reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
586 reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
587 reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
588 reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
589 reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
590 reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
591 reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
593 // Reserve null register - it shall never be allocated
594 reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL64);
596 // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
597 // will result in bugs.
599 Reserved.set(AMDGPU::VCC);
600 Reserved.set(AMDGPU::VCC_HI);
605 unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
606 unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
607 for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
608 unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
609 reserveRegisterTuples(Reserved, Reg);
612 for (auto Reg : AMDGPU::SReg_32RegClass) {
613 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
614 Register Low = getSubReg(Reg, AMDGPU::lo16);
615 // This is to prevent BB vcc liveness errors.
616 if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
620 Register ScratchRSrcReg = MFI->getScratchRSrcReg();
621 if (ScratchRSrcReg != AMDGPU::NoRegister) {
622 // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need to spill.
624 // TODO: May need to reserve a VGPR if doing LDS spilling.
625 reserveRegisterTuples(Reserved, ScratchRSrcReg);
628 // We have to assume the SP is needed in case there are calls in the function,
629 // which is detected after the function is lowered. If we aren't really going
630 // to need SP, don't bother reserving it.
631 MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
633 reserveRegisterTuples(Reserved, StackPtrReg);
634 assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
637 MCRegister FrameReg = MFI->getFrameOffsetReg();
639 reserveRegisterTuples(Reserved, FrameReg);
640 assert(!isSubRegister(ScratchRSrcReg, FrameReg));
643 if (hasBasePointer(MF)) {
644 MCRegister BasePtrReg = getBaseRegister();
645 reserveRegisterTuples(Reserved, BasePtrReg);
646 assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
649 // Reserve VGPRs/AGPRs.
651 unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
652 unsigned MaxNumAGPRs = MaxNumVGPRs;
653 unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
655 // Reserve all the AGPRs if there are no instructions to use them.
656 if (!ST.hasMAIInsts()) {
657 for (unsigned i = 0; i < MaxNumAGPRs; ++i) {
658 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
659 reserveRegisterTuples(Reserved, Reg);
663 for (auto Reg : AMDGPU::AGPR_32RegClass) {
664 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
667 // On GFX90A, the number of VGPRs and AGPRs need not be equal. Theoretically,
668 // a wave may have up to 512 total vector registers combining together both
669 // VGPRs and AGPRs. Hence, in an entry function without calls and without
670 // AGPRs used within it, it is possible to use the whole vector register budget for VGPRs.
673 // TODO: it shall be possible to estimate maximum AGPR/VGPR pressure and split
674 // register file accordingly.
675 if (ST.hasGFX90AInsts()) {
676 if (MFI->usesAGPRs(MF)) {
678 MaxNumAGPRs = MaxNumVGPRs;
680 if (MaxNumVGPRs > TotalNumVGPRs) {
681 MaxNumAGPRs = MaxNumVGPRs - TotalNumVGPRs;
682 MaxNumVGPRs = TotalNumVGPRs;
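// E.g. a 512-register budget on such a target keeps all 256 architectural
// VGPRs and treats the remaining 256 registers of the budget as AGPRs.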
688 for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
689 unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
690 reserveRegisterTuples(Reserved, Reg);
693 for (unsigned i = MaxNumAGPRs; i < TotalNumVGPRs; ++i) {
694 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
695 reserveRegisterTuples(Reserved, Reg);
698 // On GFX908, in order to guarantee copying between AGPRs, we need a scratch
699 // VGPR available at all times.
700 if (ST.hasMAIInsts() && !ST.hasGFX90AInsts()) {
701 reserveRegisterTuples(Reserved, MFI->getVGPRForAGPRCopy());
704 for (Register Reg : MFI->getWWMReservedRegs())
705 reserveRegisterTuples(Reserved, Reg);
707 // FIXME: Stop using reserved registers for this.
708 for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
709 reserveRegisterTuples(Reserved, Reg);
711 for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
712 reserveRegisterTuples(Reserved, Reg);
714 for (auto Reg : MFI->getSGPRSpillVGPRs())
715 reserveRegisterTuples(Reserved, Reg);
720 bool SIRegisterInfo::isAsmClobberable(const MachineFunction &MF,
721 MCRegister PhysReg) const {
722 return !MF.getRegInfo().isReserved(PhysReg);
725 bool SIRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
726 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
727 // On entry, the base address is 0, so it can't possibly need any more alignment.
730 // FIXME: Should be able to specify the entry frame alignment per calling
731 // convention instead.
732 if (Info->isEntryFunction())
735 return TargetRegisterInfo::shouldRealignStack(MF);
738 bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
739 const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
740 if (Info->isEntryFunction()) {
741 const MachineFrameInfo &MFI = Fn.getFrameInfo();
742 return MFI.hasStackObjects() || MFI.hasCalls();
745 // May need scavenger for dealing with callee saved registers.
749 bool SIRegisterInfo::requiresFrameIndexScavenging(
750 const MachineFunction &MF) const {
751 // Do not use frame virtual registers. They used to be used for SGPRs, but
752 // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
753 // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a spill.
758 bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
759 const MachineFunction &MF) const {
760 const MachineFrameInfo &MFI = MF.getFrameInfo();
761 return MFI.hasStackObjects();
764 bool SIRegisterInfo::requiresVirtualBaseRegisters(
765 const MachineFunction &) const {
766 // There are no special dedicated stack or frame pointers.
770 int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const {
771 assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI));
773 int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
774 AMDGPU::OpName::offset);
775 return MI->getOperand(OffIdx).getImm();
778 int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
780 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
783 assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
784 AMDGPU::OpName::vaddr) ||
785 (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
786 AMDGPU::OpName::saddr))) &&
787 "Should never see frame index on non-address operand");
789 return getScratchInstrOffset(MI);
792 bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
793 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
796 int64_t FullOffset = Offset + getScratchInstrOffset(MI);
798 if (SIInstrInfo::isMUBUF(*MI))
799 return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset);
801 const SIInstrInfo *TII = ST.getInstrInfo();
802 return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS,
803 SIInstrFlags::FlatScratch);
806 Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
808 int64_t Offset) const {
809 MachineBasicBlock::iterator Ins = MBB->begin();
810 DebugLoc DL; // Defaults to "unknown"
812 if (Ins != MBB->end())
813 DL = Ins->getDebugLoc();
815 MachineFunction *MF = MBB->getParent();
816 const SIInstrInfo *TII = ST.getInstrInfo();
817 MachineRegisterInfo &MRI = MF->getRegInfo();
818 unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32
819 : AMDGPU::V_MOV_B32_e32;
821 Register BaseReg = MRI.createVirtualRegister(
822 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass
823 : &AMDGPU::VGPR_32RegClass);
826 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg)
827 .addFrameIndex(FrameIdx);
831 Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
833 Register FIReg = MRI.createVirtualRegister(
834 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass
835 : &AMDGPU::VGPR_32RegClass);
837 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
839 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg)
840 .addFrameIndex(FrameIdx);
842 if (ST.enableFlatScratch() ) {
843 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_I32), BaseReg)
844 .addReg(OffsetReg, RegState::Kill)
849 TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
850 .addReg(OffsetReg, RegState::Kill)
852 .addImm(0); // clamp bit
857 void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
858 int64_t Offset) const {
859 const SIInstrInfo *TII = ST.getInstrInfo();
860 bool IsFlat = TII->isFLATScratch(MI);
863 // FIXME: Is it possible to be storing a frame index to itself?
865 for (const MachineOperand &MO: MI.operands()) {
868 llvm_unreachable("should not see multiple frame indices");
875 MachineOperand *FIOp =
876 TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr
877 : AMDGPU::OpName::vaddr);
879 MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
880 int64_t NewOffset = OffsetOp->getImm() + Offset;
882 assert(FIOp && FIOp->isFI() && "frame index must be address operand");
883 assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI));
886 assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
887 SIInstrFlags::FlatScratch) &&
888 "offset should be legal");
889 FIOp->ChangeToRegister(BaseReg, false);
890 OffsetOp->setImm(NewOffset);
895 MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
896 assert(SOffset->isImm() && SOffset->getImm() == 0);
899 assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
900 "offset should be legal");
902 FIOp->ChangeToRegister(BaseReg, false);
903 OffsetOp->setImm(NewOffset);
906 bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
908 int64_t Offset) const {
909 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
912 int64_t NewOffset = Offset + getScratchInstrOffset(MI);
914 if (SIInstrInfo::isMUBUF(*MI))
915 return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset);
917 const SIInstrInfo *TII = ST.getInstrInfo();
918 return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
919 SIInstrFlags::FlatScratch);
922 const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
923 const MachineFunction &MF, unsigned Kind) const {
924 // This is inaccurate. It depends on the instruction and address space. The
925 // only place where we should hit this is for dealing with frame indexes /
926 // private accesses, so this is correct in that case.
927 return &AMDGPU::VGPR_32RegClass;
930 const TargetRegisterClass *
931 SIRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
932 if (isAGPRClass(RC) && !ST.hasGFX90AInsts())
933 return getEquivalentVGPRClass(RC);
934 if (RC == &AMDGPU::SCC_CLASSRegClass)
935 return getWaveMaskRegClass();
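// Returns the number of 32-bit sub-registers saved or restored by the given
// spill pseudo, i.e. the spilled register's size in dwords (32 for the
// *_S1024/V1024/A1024 variants down to 1 for the 32-bit variants).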
940 static unsigned getNumSubRegsForSpillOp(unsigned Op) {
943 case AMDGPU::SI_SPILL_S1024_SAVE:
944 case AMDGPU::SI_SPILL_S1024_RESTORE:
945 case AMDGPU::SI_SPILL_V1024_SAVE:
946 case AMDGPU::SI_SPILL_V1024_RESTORE:
947 case AMDGPU::SI_SPILL_A1024_SAVE:
948 case AMDGPU::SI_SPILL_A1024_RESTORE:
949 case AMDGPU::SI_SPILL_AV1024_SAVE:
950 case AMDGPU::SI_SPILL_AV1024_RESTORE:
952 case AMDGPU::SI_SPILL_S512_SAVE:
953 case AMDGPU::SI_SPILL_S512_RESTORE:
954 case AMDGPU::SI_SPILL_V512_SAVE:
955 case AMDGPU::SI_SPILL_V512_RESTORE:
956 case AMDGPU::SI_SPILL_A512_SAVE:
957 case AMDGPU::SI_SPILL_A512_RESTORE:
958 case AMDGPU::SI_SPILL_AV512_SAVE:
959 case AMDGPU::SI_SPILL_AV512_RESTORE:
961 case AMDGPU::SI_SPILL_S384_SAVE:
962 case AMDGPU::SI_SPILL_S384_RESTORE:
963 case AMDGPU::SI_SPILL_V384_SAVE:
964 case AMDGPU::SI_SPILL_V384_RESTORE:
965 case AMDGPU::SI_SPILL_A384_SAVE:
966 case AMDGPU::SI_SPILL_A384_RESTORE:
967 case AMDGPU::SI_SPILL_AV384_SAVE:
968 case AMDGPU::SI_SPILL_AV384_RESTORE:
970 case AMDGPU::SI_SPILL_S352_SAVE:
971 case AMDGPU::SI_SPILL_S352_RESTORE:
972 case AMDGPU::SI_SPILL_V352_SAVE:
973 case AMDGPU::SI_SPILL_V352_RESTORE:
974 case AMDGPU::SI_SPILL_A352_SAVE:
975 case AMDGPU::SI_SPILL_A352_RESTORE:
976 case AMDGPU::SI_SPILL_AV352_SAVE:
977 case AMDGPU::SI_SPILL_AV352_RESTORE:
979 case AMDGPU::SI_SPILL_S320_SAVE:
980 case AMDGPU::SI_SPILL_S320_RESTORE:
981 case AMDGPU::SI_SPILL_V320_SAVE:
982 case AMDGPU::SI_SPILL_V320_RESTORE:
983 case AMDGPU::SI_SPILL_A320_SAVE:
984 case AMDGPU::SI_SPILL_A320_RESTORE:
985 case AMDGPU::SI_SPILL_AV320_SAVE:
986 case AMDGPU::SI_SPILL_AV320_RESTORE:
988 case AMDGPU::SI_SPILL_S288_SAVE:
989 case AMDGPU::SI_SPILL_S288_RESTORE:
990 case AMDGPU::SI_SPILL_V288_SAVE:
991 case AMDGPU::SI_SPILL_V288_RESTORE:
992 case AMDGPU::SI_SPILL_A288_SAVE:
993 case AMDGPU::SI_SPILL_A288_RESTORE:
994 case AMDGPU::SI_SPILL_AV288_SAVE:
995 case AMDGPU::SI_SPILL_AV288_RESTORE:
997 case AMDGPU::SI_SPILL_S256_SAVE:
998 case AMDGPU::SI_SPILL_S256_RESTORE:
999 case AMDGPU::SI_SPILL_V256_SAVE:
1000 case AMDGPU::SI_SPILL_V256_RESTORE:
1001 case AMDGPU::SI_SPILL_A256_SAVE:
1002 case AMDGPU::SI_SPILL_A256_RESTORE:
1003 case AMDGPU::SI_SPILL_AV256_SAVE:
1004 case AMDGPU::SI_SPILL_AV256_RESTORE:
1006 case AMDGPU::SI_SPILL_S224_SAVE:
1007 case AMDGPU::SI_SPILL_S224_RESTORE:
1008 case AMDGPU::SI_SPILL_V224_SAVE:
1009 case AMDGPU::SI_SPILL_V224_RESTORE:
1010 case AMDGPU::SI_SPILL_A224_SAVE:
1011 case AMDGPU::SI_SPILL_A224_RESTORE:
1012 case AMDGPU::SI_SPILL_AV224_SAVE:
1013 case AMDGPU::SI_SPILL_AV224_RESTORE:
1015 case AMDGPU::SI_SPILL_S192_SAVE:
1016 case AMDGPU::SI_SPILL_S192_RESTORE:
1017 case AMDGPU::SI_SPILL_V192_SAVE:
1018 case AMDGPU::SI_SPILL_V192_RESTORE:
1019 case AMDGPU::SI_SPILL_A192_SAVE:
1020 case AMDGPU::SI_SPILL_A192_RESTORE:
1021 case AMDGPU::SI_SPILL_AV192_SAVE:
1022 case AMDGPU::SI_SPILL_AV192_RESTORE:
1024 case AMDGPU::SI_SPILL_S160_SAVE:
1025 case AMDGPU::SI_SPILL_S160_RESTORE:
1026 case AMDGPU::SI_SPILL_V160_SAVE:
1027 case AMDGPU::SI_SPILL_V160_RESTORE:
1028 case AMDGPU::SI_SPILL_A160_SAVE:
1029 case AMDGPU::SI_SPILL_A160_RESTORE:
1030 case AMDGPU::SI_SPILL_AV160_SAVE:
1031 case AMDGPU::SI_SPILL_AV160_RESTORE:
1033 case AMDGPU::SI_SPILL_S128_SAVE:
1034 case AMDGPU::SI_SPILL_S128_RESTORE:
1035 case AMDGPU::SI_SPILL_V128_SAVE:
1036 case AMDGPU::SI_SPILL_V128_RESTORE:
1037 case AMDGPU::SI_SPILL_A128_SAVE:
1038 case AMDGPU::SI_SPILL_A128_RESTORE:
1039 case AMDGPU::SI_SPILL_AV128_SAVE:
1040 case AMDGPU::SI_SPILL_AV128_RESTORE:
1042 case AMDGPU::SI_SPILL_S96_SAVE:
1043 case AMDGPU::SI_SPILL_S96_RESTORE:
1044 case AMDGPU::SI_SPILL_V96_SAVE:
1045 case AMDGPU::SI_SPILL_V96_RESTORE:
1046 case AMDGPU::SI_SPILL_A96_SAVE:
1047 case AMDGPU::SI_SPILL_A96_RESTORE:
1048 case AMDGPU::SI_SPILL_AV96_SAVE:
1049 case AMDGPU::SI_SPILL_AV96_RESTORE:
1051 case AMDGPU::SI_SPILL_S64_SAVE:
1052 case AMDGPU::SI_SPILL_S64_RESTORE:
1053 case AMDGPU::SI_SPILL_V64_SAVE:
1054 case AMDGPU::SI_SPILL_V64_RESTORE:
1055 case AMDGPU::SI_SPILL_A64_SAVE:
1056 case AMDGPU::SI_SPILL_A64_RESTORE:
1057 case AMDGPU::SI_SPILL_AV64_SAVE:
1058 case AMDGPU::SI_SPILL_AV64_RESTORE:
1060 case AMDGPU::SI_SPILL_S32_SAVE:
1061 case AMDGPU::SI_SPILL_S32_RESTORE:
1062 case AMDGPU::SI_SPILL_V32_SAVE:
1063 case AMDGPU::SI_SPILL_V32_RESTORE:
1064 case AMDGPU::SI_SPILL_A32_SAVE:
1065 case AMDGPU::SI_SPILL_A32_RESTORE:
1066 case AMDGPU::SI_SPILL_AV32_SAVE:
1067 case AMDGPU::SI_SPILL_AV32_RESTORE:
1069 default: llvm_unreachable("Invalid spill opcode");
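// Map an _OFFEN (VGPR-addressed) MUBUF store opcode to its _OFFSET
// (immediate-only) counterpart, returning -1 if there is none. The three
// helpers that follow perform the analogous load and reverse mappings.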
1073 static int getOffsetMUBUFStore(unsigned Opc) {
1075 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
1076 return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1077 case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
1078 return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
1079 case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
1080 return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
1081 case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
1082 return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
1083 case AMDGPU::BUFFER_STORE_DWORDX3_OFFEN:
1084 return AMDGPU::BUFFER_STORE_DWORDX3_OFFSET;
1085 case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
1086 return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
1087 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
1088 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
1089 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
1090 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
1096 static int getOffsetMUBUFLoad(unsigned Opc) {
1098 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
1099 return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1100 case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
1101 return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
1102 case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
1103 return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
1104 case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
1105 return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
1106 case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
1107 return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
1108 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
1109 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
1110 case AMDGPU::BUFFER_LOAD_DWORDX3_OFFEN:
1111 return AMDGPU::BUFFER_LOAD_DWORDX3_OFFSET;
1112 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
1113 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
1114 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
1115 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
1116 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
1117 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
1118 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
1119 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
1120 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
1121 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
1122 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
1123 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
1124 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
1125 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
1131 static int getOffenMUBUFStore(unsigned Opc) {
1133 case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
1134 return AMDGPU::BUFFER_STORE_DWORD_OFFEN;
1135 case AMDGPU::BUFFER_STORE_BYTE_OFFSET:
1136 return AMDGPU::BUFFER_STORE_BYTE_OFFEN;
1137 case AMDGPU::BUFFER_STORE_SHORT_OFFSET:
1138 return AMDGPU::BUFFER_STORE_SHORT_OFFEN;
1139 case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
1140 return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
1141 case AMDGPU::BUFFER_STORE_DWORDX3_OFFSET:
1142 return AMDGPU::BUFFER_STORE_DWORDX3_OFFEN;
1143 case AMDGPU::BUFFER_STORE_DWORDX4_OFFSET:
1144 return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
1145 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET:
1146 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN;
1147 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET:
1148 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN;
1154 static int getOffenMUBUFLoad(unsigned Opc) {
1156 case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
1157 return AMDGPU::BUFFER_LOAD_DWORD_OFFEN;
1158 case AMDGPU::BUFFER_LOAD_UBYTE_OFFSET:
1159 return AMDGPU::BUFFER_LOAD_UBYTE_OFFEN;
1160 case AMDGPU::BUFFER_LOAD_SBYTE_OFFSET:
1161 return AMDGPU::BUFFER_LOAD_SBYTE_OFFEN;
1162 case AMDGPU::BUFFER_LOAD_USHORT_OFFSET:
1163 return AMDGPU::BUFFER_LOAD_USHORT_OFFEN;
1164 case AMDGPU::BUFFER_LOAD_SSHORT_OFFSET:
1165 return AMDGPU::BUFFER_LOAD_SSHORT_OFFEN;
1166 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET:
1167 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
1168 case AMDGPU::BUFFER_LOAD_DWORDX3_OFFSET:
1169 return AMDGPU::BUFFER_LOAD_DWORDX3_OFFEN;
1170 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET:
1171 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN;
1172 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET:
1173 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN;
1174 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET:
1175 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN;
1176 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET:
1177 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN;
1178 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET:
1179 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN;
1180 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET:
1181 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN;
1182 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET:
1183 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN;
1189 static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
1190 MachineBasicBlock &MBB,
1191 MachineBasicBlock::iterator MI,
1192 int Index, unsigned Lane,
1193 unsigned ValueReg, bool IsKill) {
1194 MachineFunction *MF = MBB.getParent();
1195 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1196 const SIInstrInfo *TII = ST.getInstrInfo();
1198 MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
1200 if (Reg == AMDGPU::NoRegister)
1201 return MachineInstrBuilder();
1203 bool IsStore = MI->mayStore();
1204 MachineRegisterInfo &MRI = MF->getRegInfo();
1205 auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
1207 unsigned Dst = IsStore ? Reg : ValueReg;
1208 unsigned Src = IsStore ? ValueReg : Reg;
1209 bool IsVGPR = TRI->isVGPR(MRI, Reg);
1210 DebugLoc DL = MI->getDebugLoc();
1211 if (IsVGPR == TRI->isVGPR(MRI, ValueReg)) {
1212 // During regalloc the spiller may restore a spilled register to its
1213 // superclass. That can result in AGPR spills restored to VGPRs or the other
1214 // way around, leaving the src and dst with identical register classes at
1215 // this point. A plain copy is enough in such cases.
1216 auto CopyMIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), Dst)
1217 .addReg(Src, getKillRegState(IsKill));
1218 CopyMIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1221 unsigned Opc = (IsStore ^ IsVGPR) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64
1222 : AMDGPU::V_ACCVGPR_READ_B32_e64;
1224 auto MIB = BuildMI(MBB, MI, DL, TII->get(Opc), Dst)
1225 .addReg(Src, getKillRegState(IsKill));
1226 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1230 // This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
1231 // need to handle the case where an SGPR may need to be spilled while spilling.
1232 static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
1233 MachineFrameInfo &MFI,
1234 MachineBasicBlock::iterator MI,
1237 const SIInstrInfo *TII = ST.getInstrInfo();
1238 MachineBasicBlock *MBB = MI->getParent();
1239 const DebugLoc &DL = MI->getDebugLoc();
1240 bool IsStore = MI->mayStore();
1242 unsigned Opc = MI->getOpcode();
1243 int LoadStoreOp = IsStore ?
1244 getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
1245 if (LoadStoreOp == -1)
1248 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
1249 if (spillVGPRtoAGPR(ST, *MBB, MI, Index, 0, Reg->getReg(), false).getInstr())
1252 MachineInstrBuilder NewMI =
1253 BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
1255 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
1256 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
1262 const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
1263 AMDGPU::OpName::vdata_in);
1265 NewMI.add(*VDataIn);
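// Pick the flat scratch spill opcode for the given element size: 4, 8, 12 and
// 16 bytes select the DWORD, DWORDX2, DWORDX3 and DWORDX4 SADDR forms, which
// are then rewritten to the SV form when the instruction takes a VGPR address
// or to the ST form when it takes no offset register at all.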
1269 static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII,
1270 unsigned LoadStoreOp,
1272 bool IsStore = TII->get(LoadStoreOp).mayStore();
1273 bool HasVAddr = AMDGPU::hasNamedOperand(LoadStoreOp, AMDGPU::OpName::vaddr);
1275 !HasVAddr && !AMDGPU::hasNamedOperand(LoadStoreOp, AMDGPU::OpName::saddr);
1279 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1280 : AMDGPU::SCRATCH_LOAD_DWORD_SADDR;
1283 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR
1284 : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR;
1287 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR
1288 : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR;
1291 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR
1292 : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR;
1295 llvm_unreachable("Unexpected spill load/store size!");
1299 LoadStoreOp = AMDGPU::getFlatScratchInstSVfromSS(LoadStoreOp);
1301 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1306 void SIRegisterInfo::buildSpillLoadStore(
1307 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL,
1308 unsigned LoadStoreOp, int Index, Register ValueReg, bool IsKill,
1309 MCRegister ScratchOffsetReg, int64_t InstOffset, MachineMemOperand *MMO,
1310 RegScavenger *RS, LivePhysRegs *LiveRegs) const {
1311 assert((!RS || !LiveRegs) && "Only RS or LiveRegs can be set but not both");
1313 MachineFunction *MF = MBB.getParent();
1314 const SIInstrInfo *TII = ST.getInstrInfo();
1315 const MachineFrameInfo &MFI = MF->getFrameInfo();
1316 const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
1318 const MCInstrDesc *Desc = &TII->get(LoadStoreOp);
1319 bool IsStore = Desc->mayStore();
1320 bool IsFlat = TII->isFLATScratch(LoadStoreOp);
1322 bool CanClobberSCC = false;
1323 bool Scavenged = false;
1324 MCRegister SOffset = ScratchOffsetReg;
1326 const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
1327 // On gfx90a+ AGPR is a regular VGPR acceptable for loads and stores.
1328 const bool IsAGPR = !ST.hasGFX90AInsts() && isAGPRClass(RC);
1329 const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8;
1331 // Always use 4 byte operations for AGPRs because we need to scavenge
1332 // a temporary VGPR.
1333 unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u;
1334 unsigned NumSubRegs = RegWidth / EltSize;
1335 unsigned Size = NumSubRegs * EltSize;
1336 unsigned RemSize = RegWidth - Size;
1337 unsigned NumRemSubRegs = RemSize ? 1 : 0;
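// E.g. a flat scratch spill of a 352-bit register (44 bytes) becomes two
// 16-byte accesses plus one remaining 12-byte access: NumSubRegs = 2,
// RemSize = 12 and NumRemSubRegs = 1.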
1338 int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
1339 int64_t MaterializedOffset = Offset;
1341 int64_t MaxOffset = Offset + Size + RemSize - EltSize;
1342 int64_t ScratchOffsetRegDelta = 0;
1344 if (IsFlat && EltSize > 4) {
1345 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1346 Desc = &TII->get(LoadStoreOp);
1349 Align Alignment = MFI.getObjectAlign(Index);
1350 const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
1352 assert((IsFlat || ((Offset % EltSize) == 0)) &&
1353 "unexpected VGPR spill offset");
1355 // Track a VGPR to use for a constant offset we need to materialize.
1356 Register TmpOffsetVGPR;
1358 // Track a VGPR to use as an intermediate value.
1359 Register TmpIntermediateVGPR;
1360 bool UseVGPROffset = false;
1362 // Materialize a VGPR offset required for the given SGPR/VGPR/Immediate offset.
1364 auto MaterializeVOffset = [&](Register SGPRBase, Register TmpVGPR,
1366 // We are using a VGPR offset
1367 if (IsFlat && SGPRBase) {
1368 // We only have 1 VGPR offset, or 1 SGPR offset. We don't have a free
1369 // SGPR, so perform the add as vector.
1370 // We don't need a base SGPR in the kernel.
1372 if (ST.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) >= 2) {
1373 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ADD_U32_e64), TmpVGPR)
1376 .addImm(0); // clamp
1378 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
1380 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ADD_U32_e32), TmpVGPR)
1382 .addReg(TmpOffsetVGPR);
1385 assert(TmpOffsetVGPR);
1386 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
1391 bool IsOffsetLegal =
1392 IsFlat ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS,
1393 SIInstrFlags::FlatScratch)
1394 : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset);
1395 if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) {
1396 SOffset = MCRegister();
1398 // We don't have access to the register scavenger if this function is called
1399 // during PEI::scavengeFrameVirtualRegs() so use LiveRegs in this case.
1400 // TODO: Clobbering SCC is not necessary for scratch instructions in the
1403 SOffset = RS->scavengeRegisterBackwards(AMDGPU::SGPR_32RegClass, MI, false, 0, false);
1405 // Piggy back on the liveness scan we just did to see if SCC is dead.
1406 CanClobberSCC = !RS->isRegUsed(AMDGPU::SCC);
1407 } else if (LiveRegs) {
1408 CanClobberSCC = !LiveRegs->contains(AMDGPU::SCC);
1409 for (MCRegister Reg : AMDGPU::SGPR_32RegClass) {
1410 if (LiveRegs->available(MF->getRegInfo(), Reg)) {
1417 if (ScratchOffsetReg != AMDGPU::NoRegister && !CanClobberSCC)
1418 SOffset = Register();
1421 UseVGPROffset = true;
1424 TmpOffsetVGPR = RS->scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, MI, false, 0);
1427 for (MCRegister Reg : AMDGPU::VGPR_32RegClass) {
1428 if (LiveRegs->available(MF->getRegInfo(), Reg)) {
1429 TmpOffsetVGPR = Reg;
1435 assert(TmpOffsetVGPR);
1436 } else if (!SOffset && CanClobberSCC) {
1437 // There are no free SGPRs, and we are already in the process of spilling
1438 // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true
1439 // on SI/CI and on VI it is true until we implement spilling using scalar
1440 // stores), we have no way to free up an SGPR. Our solution here is to
1441 // add the offset directly to the ScratchOffset or StackPtrOffset
1442 // register, and then subtract the offset after the spill to return the
1443 // register to its original value.
1445 // TODO: If we don't have to do an emergency stack slot spill, converting
1446 // to use the VGPR offset takes fewer instructions.
1447 if (!ScratchOffsetReg)
1448 ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
1449 SOffset = ScratchOffsetReg;
1450 ScratchOffsetRegDelta = Offset;
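// The matching subtract that restores the register is emitted after the main
// copy loop below, guarded by ScratchOffsetRegDelta != 0.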
1455 // We currently only support spilling VGPRs to EltSize boundaries, meaning
1456 // we can simplify the adjustment of Offset here to just scale with the wave size.
1458 if (!IsFlat && !UseVGPROffset)
1459 Offset *= ST.getWavefrontSize();
1461 if (!UseVGPROffset && !SOffset)
1462 report_fatal_error("could not scavenge SGPR to spill in entry function");
1464 if (UseVGPROffset) {
1465 // We are using a VGPR offset
1466 MaterializeVOffset(ScratchOffsetReg, TmpOffsetVGPR, Offset);
1467 } else if (ScratchOffsetReg == AMDGPU::NoRegister) {
1468 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset).addImm(Offset);
1470 assert(Offset != 0);
1471 auto Add = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1472 .addReg(ScratchOffsetReg)
1474 Add->getOperand(3).setIsDead(); // Mark SCC as dead.
1480 if (IsFlat && SOffset == AMDGPU::NoRegister) {
1481 assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0
1482 && "Unexpected vaddr for flat scratch with a FI operand");
1484 if (UseVGPROffset) {
1485 LoadStoreOp = AMDGPU::getFlatScratchInstSVfromSS(LoadStoreOp);
1487 assert(ST.hasFlatScratchSTMode());
1488 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1491 Desc = &TII->get(LoadStoreOp);
1494 for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e;
1495 ++i, RegOffset += EltSize) {
1496 if (i == NumSubRegs) {
1498 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1500 Desc = &TII->get(LoadStoreOp);
1502 if (!IsFlat && UseVGPROffset) {
1503 int NewLoadStoreOp = IsStore ? getOffenMUBUFStore(LoadStoreOp)
1504 : getOffenMUBUFLoad(LoadStoreOp);
1505 Desc = &TII->get(NewLoadStoreOp);
1508 if (UseVGPROffset && TmpOffsetVGPR == TmpIntermediateVGPR) {
1509 // If we are spilling an AGPR beyond the range of the memory instruction
1510 // offset and need to use a VGPR offset, we ideally have at least 2
1511 // scratch VGPRs. If we don't have a second free VGPR without spilling,
1512 // recycle the VGPR used for the offset which requires resetting after
1513 // each subregister.
1515 MaterializeVOffset(ScratchOffsetReg, TmpOffsetVGPR, MaterializedOffset);
1518 unsigned NumRegs = EltSize / 4;
1519 Register SubReg = e == 1
1521 : Register(getSubReg(ValueReg,
1522 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1524 unsigned SOffsetRegState = 0;
1525 unsigned SrcDstRegState = getDefRegState(!IsStore);
1526 const bool IsLastSubReg = i + 1 == e;
1527 const bool IsFirstSubReg = i == 0;
1529 SOffsetRegState |= getKillRegState(Scavenged);
1530 // The last implicit use carries the "Kill" flag.
1531 SrcDstRegState |= getKillRegState(IsKill);
1534 // Make sure the whole register is defined if there are undef components by
1535 // adding an implicit def of the super-reg on the first instruction.
1536 bool NeedSuperRegDef = e > 1 && IsStore && IsFirstSubReg;
1537 bool NeedSuperRegImpOperand = e > 1;
1539 // Remaining element size to spill into memory after some parts of it
1540 // have been spilled into either AGPRs or VGPRs.
1541 unsigned RemEltSize = EltSize;
1543 // AGPRs used to spill VGPRs (and vice versa) are allocated in reverse order,
1544 // starting from the last lane. If a register cannot be completely spilled
1545 // into another register, this ensures its alignment does not change. For
1546 // targets with a VGPR alignment requirement this is important when flat
1547 // scratch is used, as we might otherwise get a scratch_load or
1548 // scratch_store of an unaligned register.
1549 for (int LaneS = (RegOffset + EltSize) / 4 - 1, Lane = LaneS,
1550 LaneE = RegOffset / 4;
1551 Lane >= LaneE; --Lane) {
1552 bool IsSubReg = e > 1 || EltSize > 4;
1553 Register Sub = IsSubReg
1554 ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane)))
1556 auto MIB = spillVGPRtoAGPR(ST, MBB, MI, Index, Lane, Sub, IsKill);
1557 if (!MIB.getInstr())
1559 if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == LaneS && IsFirstSubReg)) {
1560 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1561 NeedSuperRegDef = false;
1563 if ((IsSubReg || NeedSuperRegImpOperand) && (IsFirstSubReg || IsLastSubReg)) {
1564 NeedSuperRegImpOperand = true;
1565 unsigned State = SrcDstRegState;
1566 if (!IsLastSubReg || (Lane != LaneE))
1567 State &= ~RegState::Kill;
1568 if (!IsFirstSubReg || (Lane != LaneS))
1569 State &= ~RegState::Define;
1570 MIB.addReg(ValueReg, RegState::Implicit | State);
1575 if (!RemEltSize) // Fully spilled into AGPRs.
1578 if (RemEltSize != EltSize) { // Partially spilled to AGPRs
1579 assert(IsFlat && EltSize > 4);
1581 unsigned NumRegs = RemEltSize / 4;
1582 SubReg = Register(getSubReg(ValueReg,
1583 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1584 unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize);
1585 Desc = &TII->get(Opc);
1588 unsigned FinalReg = SubReg;
1591 assert(EltSize == 4);
1593 if (!TmpIntermediateVGPR) {
1594 TmpIntermediateVGPR = FuncInfo->getVGPRForAGPRCopy();
1595 assert(MF->getRegInfo().isReserved(TmpIntermediateVGPR));
1598 auto AccRead = BuildMI(MBB, MI, DL,
1599 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64),
1600 TmpIntermediateVGPR)
1601 .addReg(SubReg, getKillRegState(IsKill));
1602 if (NeedSuperRegDef)
1603 AccRead.addReg(ValueReg, RegState::ImplicitDefine);
1604 AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1606 SubReg = TmpIntermediateVGPR;
1607 } else if (UseVGPROffset) {
1608 // FIXME: change to scavengeRegisterBackwards()
1609 if (!TmpOffsetVGPR) {
1610 TmpOffsetVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1611 RS->setRegUsed(TmpOffsetVGPR);
1615 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RegOffset);
1616 MachineMemOperand *NewMMO =
1617 MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize,
1618 commonAlignment(Alignment, RegOffset));
1621 BuildMI(MBB, MI, DL, *Desc)
1622 .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill));
1624 if (UseVGPROffset) {
1625 // For an AGPR spill, we reuse the same temp VGPR for the offset and the
1626 // intermediate accvgpr_write.
1627 MIB.addReg(TmpOffsetVGPR, getKillRegState(IsLastSubReg && !IsAGPR));
1631 MIB.addReg(FuncInfo->getScratchRSrcReg());
1633 if (SOffset == AMDGPU::NoRegister) {
1635 if (UseVGPROffset && ScratchOffsetReg) {
1636 MIB.addReg(ScratchOffsetReg);
1638 assert(FuncInfo->isEntryFunction());
1643 MIB.addReg(SOffset, SOffsetRegState);
1645 MIB.addImm(Offset + RegOffset)
1648 MIB.addImm(0); // swz
1649 MIB.addMemOperand(NewMMO);
1651 if (!IsAGPR && NeedSuperRegDef)
1652 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1654 if (!IsStore && IsAGPR && TmpIntermediateVGPR != AMDGPU::NoRegister) {
1655 MIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64),
1657 .addReg(TmpIntermediateVGPR, RegState::Kill);
1658 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1661 if (NeedSuperRegImpOperand && (IsFirstSubReg || IsLastSubReg))
1662 MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
1665 if (ScratchOffsetRegDelta != 0) {
1666 // Subtract the offset we added to the ScratchOffset register.
1667 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1669 .addImm(-ScratchOffsetRegDelta);
1673 void SIRegisterInfo::buildVGPRSpillLoadStore(SGPRSpillBuilder &SB, int Index,
1674 int Offset, bool IsLoad,
1675 bool IsKill) const {
1677 MachineFrameInfo &FrameInfo = SB.MF.getFrameInfo();
1678 assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
1681 FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(SB.MF)
1683 : getFrameRegister(SB.MF);
1685 Align Alignment = FrameInfo.getObjectAlign(Index);
1686 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SB.MF, Index);
1687 MachineMemOperand *MMO = SB.MF.getMachineMemOperand(
1688 PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore,
1689 SB.EltSize, Alignment);
1692 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1693 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1694 buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, false,
1695 FrameReg, Offset * SB.EltSize, MMO, SB.RS);
1697 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1698 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1699 buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, IsKill,
1700 FrameReg, Offset * SB.EltSize, MMO, SB.RS);
1701 // This only ever adds one VGPR spill
1702 SB.MFI.addToSpilledVGPRs(1);
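/// Lower an SGPR spill pseudo at \p MI for frame index \p Index. If VGPR lanes
/// have been assigned to this index, each 32-bit sub-register is written into
/// its lane with V_WRITELANE_B32; otherwise the sub-registers are staged
/// through SB.TmpVGPR and written out to the stack slot. Bails out when
/// \p OnlyToVGPR is set but no lanes are available.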
1706 bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, int Index,
1707 RegScavenger *RS, SlotIndexes *Indexes,
1708 LiveIntervals *LIS, bool OnlyToVGPR) const {
1709 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1711 ArrayRef<SpilledReg> VGPRSpills = SB.MFI.getSGPRSpillToVGPRLanes(Index);
1712 bool SpillToVGPR = !VGPRSpills.empty();
1713 if (OnlyToVGPR && !SpillToVGPR)
1716 assert(SpillToVGPR || (SB.SuperReg != SB.MFI.getStackPtrOffsetReg() &&
1717 SB.SuperReg != SB.MFI.getFrameOffsetReg()));
1721 assert(SB.NumSubRegs == VGPRSpills.size() &&
1722 "Num of VGPR lanes should be equal to num of SGPRs spilled");
1724 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1728 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1729 SpilledReg Spill = VGPRSpills[i];
1731 bool IsFirstSubreg = i == 0;
1732 bool IsLastSubreg = i == SB.NumSubRegs - 1;
1733 bool UseKill = SB.IsKill && IsLastSubreg;
1736 // Mark the "old value of vgpr" input undef only if this is the first sgpr
1737 // spill to this specific vgpr in the first basic block.
1738 auto MIB = BuildMI(*SB.MBB, MI, SB.DL,
1739 SB.TII.get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
1740 .addReg(SubReg, getKillRegState(UseKill))
1742 .addReg(Spill.VGPR);
1745 Indexes->replaceMachineInstrInMaps(*MI, *MIB);
1747 Indexes->insertMachineInstrInMaps(*MIB);
1750 if (IsFirstSubreg && SB.NumSubRegs > 1) {
1751 // We may be spilling a super-register which is only partially defined,
1752 // and need to ensure later spills think the value is defined.
1753 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1756 if (SB.NumSubRegs > 1 && (IsFirstSubreg || IsLastSubreg))
1757 MIB.addReg(SB.SuperReg, getKillRegState(UseKill) | RegState::Implicit);
1759 // FIXME: Since this spills to another register instead of an actual
1760 // frame index, we should delete the frame index when all references to it are fixed.
1766 // SubReg carries the "Kill" flag when SubReg == SB.SuperReg.
1767 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1769 // Per VGPR helper data
1770 auto PVD = SB.getPerVGPRData();
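// Each iteration below packs up to PVD.PerVGPR sub-registers into lanes of
// SB.TmpVGPR (lane index i % PVD.PerVGPR) and then writes the temporary VGPR
// out to the stack slot, so PVD.NumVGPRs round trips cover all SB.NumSubRegs
// sub-registers.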
1772 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1773 unsigned TmpVGPRFlags = RegState::Undef;
1775 // Write sub registers into the VGPR
1776 for (unsigned i = Offset * PVD.PerVGPR,
1777 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1782 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1784 MachineInstrBuilder WriteLane =
1785 BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1787 .addReg(SubReg, SubKillState)
1788 .addImm(i % PVD.PerVGPR)
1789 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1794 Indexes->replaceMachineInstrInMaps(*MI, *WriteLane);
1796 Indexes->insertMachineInstrInMaps(*WriteLane);
1799 // There could be undef components of a spilled super register.
1800 // TODO: Can we detect this and skip the spill?
1801 if (SB.NumSubRegs > 1) {
1802 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1803 unsigned SuperKillState = 0;
1804 if (i + 1 == SB.NumSubRegs)
1805 SuperKillState |= getKillRegState(SB.IsKill);
1806 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1811 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ false);
1817 MI->eraseFromParent();
1818 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1821 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
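/// Reload an SGPR spill at \p MI for frame index \p Index: the inverse of
/// spillSGPR. Each 32-bit sub-register is read back from its VGPR lane with
/// V_READLANE_B32, or, when no lanes were assigned, SB.TmpVGPR is reloaded
/// from the stack slot and its lanes are read out in PVD.PerVGPR-sized
/// batches.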
1826 bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, int Index,
1827 RegScavenger *RS, SlotIndexes *Indexes,
1828 LiveIntervals *LIS, bool OnlyToVGPR) const {
1829 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1831 ArrayRef<SpilledReg> VGPRSpills = SB.MFI.getSGPRSpillToVGPRLanes(Index);
1832 bool SpillToVGPR = !VGPRSpills.empty();
1833 if (OnlyToVGPR && !SpillToVGPR)
1837 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1841 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1843 SpilledReg Spill = VGPRSpills[i];
1844 auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32),
1847 .addImm(Spill.Lane);
1848 if (SB.NumSubRegs > 1 && i == 0)
1849 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1852 Indexes->replaceMachineInstrInMaps(*MI, *MIB);
1854 Indexes->insertMachineInstrInMaps(*MIB);
1860 // Per VGPR helper data
1861 auto PVD = SB.getPerVGPRData();
1863 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1864 // Load in VGPR data
1865 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ true);
1868 for (unsigned i = Offset * PVD.PerVGPR,
1869 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1874 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1876 bool LastSubReg = (i + 1 == e);
1877 auto MIB = BuildMI(*SB.MBB, MI, SB.DL,
1878 SB.TII.get(AMDGPU::V_READLANE_B32), SubReg)
1879 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1881 if (SB.NumSubRegs > 1 && i == 0)
1882 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1885 Indexes->replaceMachineInstrInMaps(*MI, *MIB);
1887 Indexes->insertMachineInstrInMaps(*MIB);
1895 MI->eraseFromParent();
1898 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
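/// Spill \p SGPR into lanes of SB.TmpVGPR at \p MI and read it back at the end
/// of \p RestoreMBB. The temporary VGPR itself is never written out to memory
/// on this path.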
1903 bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI,
1904 MachineBasicBlock &RestoreMBB,
1905 Register SGPR, RegScavenger *RS) const {
1906 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, SGPR, false, 0,
1909 // Generate the spill of SGPR to SB.TmpVGPR.
1910 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1911 auto PVD = SB.getPerVGPRData();
1912 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1913 unsigned TmpVGPRFlags = RegState::Undef;
1914 // Write sub registers into the VGPR
1915 for (unsigned i = Offset * PVD.PerVGPR,
1916 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1921 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1923 MachineInstrBuilder WriteLane =
1924 BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1926 .addReg(SubReg, SubKillState)
1927 .addImm(i % PVD.PerVGPR)
1928 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1930 // There could be undef components of a spilled super register.
1931 // TODO: Can we detect this and skip the spill?
1932 if (SB.NumSubRegs > 1) {
1933 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1934 unsigned SuperKillState = 0;
1935 if (i + 1 == SB.NumSubRegs)
1936 SuperKillState |= getKillRegState(SB.IsKill);
1937 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1940 // Don't need to write VGPR out.
1943 // Restore clobbered registers in the specified restore block.
1944 MI = RestoreMBB.end();
1945 SB.setMI(&RestoreMBB, MI);
1946 // Generate the restore of SGPR from SB.TmpVGPR.
1947 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1948 // Don't need to load VGPR in.
1950 for (unsigned i = Offset * PVD.PerVGPR,
1951 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1956 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1957 bool LastSubReg = (i + 1 == e);
1958 auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32),
1960 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1962 if (SB.NumSubRegs > 1 && i == 0)
1963 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1968 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1972 /// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
1973 /// a VGPR and the stack slot can be safely eliminated when all other users are handled.
1975 bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
1976 MachineBasicBlock::iterator MI, int FI, RegScavenger *RS,
1977 SlotIndexes *Indexes, LiveIntervals *LIS) const {
1978 switch (MI->getOpcode()) {
1979 case AMDGPU::SI_SPILL_S1024_SAVE:
1980 case AMDGPU::SI_SPILL_S512_SAVE:
1981 case AMDGPU::SI_SPILL_S384_SAVE:
1982 case AMDGPU::SI_SPILL_S352_SAVE:
1983 case AMDGPU::SI_SPILL_S320_SAVE:
1984 case AMDGPU::SI_SPILL_S288_SAVE:
1985 case AMDGPU::SI_SPILL_S256_SAVE:
1986 case AMDGPU::SI_SPILL_S224_SAVE:
1987 case AMDGPU::SI_SPILL_S192_SAVE:
1988 case AMDGPU::SI_SPILL_S160_SAVE:
1989 case AMDGPU::SI_SPILL_S128_SAVE:
1990 case AMDGPU::SI_SPILL_S96_SAVE:
1991 case AMDGPU::SI_SPILL_S64_SAVE:
1992 case AMDGPU::SI_SPILL_S32_SAVE:
1993 return spillSGPR(MI, FI, RS, Indexes, LIS, true);
1994 case AMDGPU::SI_SPILL_S1024_RESTORE:
1995 case AMDGPU::SI_SPILL_S512_RESTORE:
1996 case AMDGPU::SI_SPILL_S384_RESTORE:
1997 case AMDGPU::SI_SPILL_S352_RESTORE:
1998 case AMDGPU::SI_SPILL_S320_RESTORE:
1999 case AMDGPU::SI_SPILL_S288_RESTORE:
2000 case AMDGPU::SI_SPILL_S256_RESTORE:
2001 case AMDGPU::SI_SPILL_S224_RESTORE:
2002 case AMDGPU::SI_SPILL_S192_RESTORE:
2003 case AMDGPU::SI_SPILL_S160_RESTORE:
2004 case AMDGPU::SI_SPILL_S128_RESTORE:
2005 case AMDGPU::SI_SPILL_S96_RESTORE:
2006 case AMDGPU::SI_SPILL_S64_RESTORE:
2007 case AMDGPU::SI_SPILL_S32_RESTORE:
2008 return restoreSGPR(MI, FI, RS, Indexes, LIS, true);
2010 llvm_unreachable("not an SGPR spill instruction");
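/// Rewrite the frame-index operand \p FIOperandNum of \p MI. SGPR spill and
/// restore pseudos are expanded via spillSGPR/restoreSGPR, VGPR/AGPR/AV spill
/// pseudos via buildSpillLoadStore, and any other frame-index use is folded
/// into an immediate offset or materialized through the frame register.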
2014 bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
2015 int SPAdj, unsigned FIOperandNum,
2016 RegScavenger *RS) const {
2017 MachineFunction *MF = MI->getParent()->getParent();
2018 MachineBasicBlock *MBB = MI->getParent();
2019 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
2020 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
2021 const SIInstrInfo *TII = ST.getInstrInfo();
2022 DebugLoc DL = MI->getDebugLoc();
2024 assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
2026 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
2027 int Index = MI->getOperand(FIOperandNum).getIndex();
2029 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
2031 : getFrameRegister(*MF);
2033 switch (MI->getOpcode()) {
2034 // SGPR register spill
2035 case AMDGPU::SI_SPILL_S1024_SAVE:
2036 case AMDGPU::SI_SPILL_S512_SAVE:
2037 case AMDGPU::SI_SPILL_S384_SAVE:
2038 case AMDGPU::SI_SPILL_S352_SAVE:
2039 case AMDGPU::SI_SPILL_S320_SAVE:
2040 case AMDGPU::SI_SPILL_S288_SAVE:
2041 case AMDGPU::SI_SPILL_S256_SAVE:
2042 case AMDGPU::SI_SPILL_S224_SAVE:
2043 case AMDGPU::SI_SPILL_S192_SAVE:
2044 case AMDGPU::SI_SPILL_S160_SAVE:
2045 case AMDGPU::SI_SPILL_S128_SAVE:
2046 case AMDGPU::SI_SPILL_S96_SAVE:
2047 case AMDGPU::SI_SPILL_S64_SAVE:
2048 case AMDGPU::SI_SPILL_S32_SAVE: {
2049 return spillSGPR(MI, Index, RS);
2052 // SGPR register restore
2053 case AMDGPU::SI_SPILL_S1024_RESTORE:
2054 case AMDGPU::SI_SPILL_S512_RESTORE:
2055 case AMDGPU::SI_SPILL_S384_RESTORE:
2056 case AMDGPU::SI_SPILL_S352_RESTORE:
2057 case AMDGPU::SI_SPILL_S320_RESTORE:
2058 case AMDGPU::SI_SPILL_S288_RESTORE:
2059 case AMDGPU::SI_SPILL_S256_RESTORE:
2060 case AMDGPU::SI_SPILL_S224_RESTORE:
2061 case AMDGPU::SI_SPILL_S192_RESTORE:
2062 case AMDGPU::SI_SPILL_S160_RESTORE:
2063 case AMDGPU::SI_SPILL_S128_RESTORE:
2064 case AMDGPU::SI_SPILL_S96_RESTORE:
2065 case AMDGPU::SI_SPILL_S64_RESTORE:
2066 case AMDGPU::SI_SPILL_S32_RESTORE: {
2067 return restoreSGPR(MI, Index, RS);
2070 // VGPR register spill
2071 case AMDGPU::SI_SPILL_V1024_SAVE:
2072 case AMDGPU::SI_SPILL_V512_SAVE:
2073 case AMDGPU::SI_SPILL_V384_SAVE:
2074 case AMDGPU::SI_SPILL_V352_SAVE:
2075 case AMDGPU::SI_SPILL_V320_SAVE:
2076 case AMDGPU::SI_SPILL_V288_SAVE:
2077 case AMDGPU::SI_SPILL_V256_SAVE:
2078 case AMDGPU::SI_SPILL_V224_SAVE:
2079 case AMDGPU::SI_SPILL_V192_SAVE:
2080 case AMDGPU::SI_SPILL_V160_SAVE:
2081 case AMDGPU::SI_SPILL_V128_SAVE:
2082 case AMDGPU::SI_SPILL_V96_SAVE:
2083 case AMDGPU::SI_SPILL_V64_SAVE:
2084 case AMDGPU::SI_SPILL_V32_SAVE:
2085 case AMDGPU::SI_SPILL_A1024_SAVE:
2086 case AMDGPU::SI_SPILL_A512_SAVE:
2087 case AMDGPU::SI_SPILL_A384_SAVE:
2088 case AMDGPU::SI_SPILL_A352_SAVE:
2089 case AMDGPU::SI_SPILL_A320_SAVE:
2090 case AMDGPU::SI_SPILL_A288_SAVE:
2091 case AMDGPU::SI_SPILL_A256_SAVE:
2092 case AMDGPU::SI_SPILL_A224_SAVE:
2093 case AMDGPU::SI_SPILL_A192_SAVE:
2094 case AMDGPU::SI_SPILL_A160_SAVE:
2095 case AMDGPU::SI_SPILL_A128_SAVE:
2096 case AMDGPU::SI_SPILL_A96_SAVE:
2097 case AMDGPU::SI_SPILL_A64_SAVE:
2098 case AMDGPU::SI_SPILL_A32_SAVE:
2099 case AMDGPU::SI_SPILL_AV1024_SAVE:
2100 case AMDGPU::SI_SPILL_AV512_SAVE:
2101 case AMDGPU::SI_SPILL_AV384_SAVE:
2102 case AMDGPU::SI_SPILL_AV352_SAVE:
2103 case AMDGPU::SI_SPILL_AV320_SAVE:
2104 case AMDGPU::SI_SPILL_AV288_SAVE:
2105 case AMDGPU::SI_SPILL_AV256_SAVE:
2106 case AMDGPU::SI_SPILL_AV224_SAVE:
2107 case AMDGPU::SI_SPILL_AV192_SAVE:
2108 case AMDGPU::SI_SPILL_AV160_SAVE:
2109 case AMDGPU::SI_SPILL_AV128_SAVE:
2110 case AMDGPU::SI_SPILL_AV96_SAVE:
2111 case AMDGPU::SI_SPILL_AV64_SAVE:
2112 case AMDGPU::SI_SPILL_AV32_SAVE: {
2113 const MachineOperand *VData = TII->getNamedOperand(*MI,
2114 AMDGPU::OpName::vdata);
2115 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
2116 MFI->getStackPtrOffsetReg());
2118 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
2119 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
2120 auto *MBB = MI->getParent();
2121 buildSpillLoadStore(
2122 *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
2123 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
2124 *MI->memoperands_begin(), RS);
2125 MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
2126 MI->eraseFromParent();
2129 case AMDGPU::SI_SPILL_V32_RESTORE:
2130 case AMDGPU::SI_SPILL_V64_RESTORE:
2131 case AMDGPU::SI_SPILL_V96_RESTORE:
2132 case AMDGPU::SI_SPILL_V128_RESTORE:
2133 case AMDGPU::SI_SPILL_V160_RESTORE:
2134 case AMDGPU::SI_SPILL_V192_RESTORE:
2135 case AMDGPU::SI_SPILL_V224_RESTORE:
2136 case AMDGPU::SI_SPILL_V256_RESTORE:
2137 case AMDGPU::SI_SPILL_V288_RESTORE:
2138 case AMDGPU::SI_SPILL_V320_RESTORE:
2139 case AMDGPU::SI_SPILL_V352_RESTORE:
2140 case AMDGPU::SI_SPILL_V384_RESTORE:
2141 case AMDGPU::SI_SPILL_V512_RESTORE:
2142 case AMDGPU::SI_SPILL_V1024_RESTORE:
2143 case AMDGPU::SI_SPILL_A32_RESTORE:
2144 case AMDGPU::SI_SPILL_A64_RESTORE:
2145 case AMDGPU::SI_SPILL_A96_RESTORE:
2146 case AMDGPU::SI_SPILL_A128_RESTORE:
2147 case AMDGPU::SI_SPILL_A160_RESTORE:
2148 case AMDGPU::SI_SPILL_A192_RESTORE:
2149 case AMDGPU::SI_SPILL_A224_RESTORE:
2150 case AMDGPU::SI_SPILL_A256_RESTORE:
2151 case AMDGPU::SI_SPILL_A288_RESTORE:
2152 case AMDGPU::SI_SPILL_A320_RESTORE:
2153 case AMDGPU::SI_SPILL_A352_RESTORE:
2154 case AMDGPU::SI_SPILL_A384_RESTORE:
2155 case AMDGPU::SI_SPILL_A512_RESTORE:
2156 case AMDGPU::SI_SPILL_A1024_RESTORE:
2157 case AMDGPU::SI_SPILL_AV32_RESTORE:
2158 case AMDGPU::SI_SPILL_AV64_RESTORE:
2159 case AMDGPU::SI_SPILL_AV96_RESTORE:
2160 case AMDGPU::SI_SPILL_AV128_RESTORE:
2161 case AMDGPU::SI_SPILL_AV160_RESTORE:
2162 case AMDGPU::SI_SPILL_AV192_RESTORE:
2163 case AMDGPU::SI_SPILL_AV224_RESTORE:
2164 case AMDGPU::SI_SPILL_AV256_RESTORE:
2165 case AMDGPU::SI_SPILL_AV288_RESTORE:
2166 case AMDGPU::SI_SPILL_AV320_RESTORE:
2167 case AMDGPU::SI_SPILL_AV352_RESTORE:
2168 case AMDGPU::SI_SPILL_AV384_RESTORE:
2169 case AMDGPU::SI_SPILL_AV512_RESTORE:
2170 case AMDGPU::SI_SPILL_AV1024_RESTORE: {
2171 const MachineOperand *VData = TII->getNamedOperand(*MI,
2172 AMDGPU::OpName::vdata);
2173 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
2174 MFI->getStackPtrOffsetReg());
2176 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
2177 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
2178 auto *MBB = MI->getParent();
2179 buildSpillLoadStore(
2180 *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
2181 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
2182 *MI->memoperands_begin(), RS);
2183 MI->eraseFromParent();
2188 // Other access to frame index
2189 const DebugLoc &DL = MI->getDebugLoc();
2191 int64_t Offset = FrameInfo.getObjectOffset(Index);
2192 if (ST.enableFlatScratch()) {
2193 if (TII->isFLATScratch(*MI)) {
2194 assert((int16_t)FIOperandNum ==
2195 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
2196 AMDGPU::OpName::saddr));
2198 // The offset is always swizzled, just replace it
2200 FIOp.ChangeToRegister(FrameReg, false);
2205 MachineOperand *OffsetOp =
2206 TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
2207 int64_t NewOffset = Offset + OffsetOp->getImm();
2208 if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
2209 SIInstrFlags::FlatScratch)) {
2210 OffsetOp->setImm(NewOffset);
2217 unsigned Opc = MI->getOpcode();
2219 if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vaddr)) {
2220 NewOpc = AMDGPU::getFlatScratchInstSVfromSVS(Opc);
2221 } else if (ST.hasFlatScratchSTMode()) {
2222 // On GFX10 we have ST mode to use no registers for an address.
2223 // Otherwise we need to materialize 0 into an SGPR.
2224 NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc);
2228 // removeOperand doesn't fix up tied operand indexes as it goes, so
2229 // it asserts. Untie vdst_in for now and retie it afterwards.
2230 int VDstIn = AMDGPU::getNamedOperandIdx(Opc,
2231 AMDGPU::OpName::vdst_in);
2232 bool TiedVDst = VDstIn != -1 &&
2233 MI->getOperand(VDstIn).isReg() &&
2234 MI->getOperand(VDstIn).isTied();
2236 MI->untieRegOperand(VDstIn);
2239 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr));
2243 AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
2245 AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst_in);
2246 assert (NewVDst != -1 && NewVDstIn != -1 && "Must be tied!");
2247 MI->tieOperands(NewVDst, NewVDstIn);
2249 MI->setDesc(TII->get(NewOpc));
2256 FIOp.ChangeToImmediate(Offset);
2257 if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp))
2261 // We need to use a register here. Check if we can use an SGPR or need a VGPR.
2263 FIOp.ChangeToRegister(AMDGPU::M0, false);
2264 bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp);
2266 if (!Offset && FrameReg && UseSGPR) {
2267 FIOp.setReg(FrameReg);
2271 const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass
2272 : &AMDGPU::VGPR_32RegClass;
2274 Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR);
2275 FIOp.setReg(TmpReg);
2278 if ((!FrameReg || !Offset) && TmpReg) {
2279 unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2280 auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg);
2282 MIB.addReg(FrameReg);
2290 RS->isRegUsed(AMDGPU::SCC) && !MI->definesRegister(AMDGPU::SCC);
2294 : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0,
2297 // TODO: for flat scratch another attempt can be made with a VGPR index
2298 // if no SGPRs can be scavenged.
2299 if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR))
2300 report_fatal_error("Cannot scavenge register in FI elimination!");
2303 // Use frame register and restore it after.
2305 FIOp.setReg(FrameReg);
2306 FIOp.setIsKill(false);
2310 assert(!(Offset & 0x1) && "Flat scratch offset must be aligned!");
2311 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADDC_U32), TmpSReg)
2314 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_BITCMP1_B32))
2317 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_BITSET0_B32), TmpSReg)
2321 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), TmpSReg)
2327 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
2328 .addReg(TmpSReg, RegState::Kill);
2330 if (TmpSReg == FrameReg) {
2331 // Undo frame register modification.
2332 if (NeedSaveSCC && !MI->registerDefIsDead(AMDGPU::SCC)) {
2333 MachineBasicBlock::iterator I =
2334 BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADDC_U32),
2338 I = BuildMI(*MBB, std::next(I), DL, TII->get(AMDGPU::S_BITCMP1_B32))
2341 BuildMI(*MBB, std::next(I), DL, TII->get(AMDGPU::S_BITSET0_B32),
2346 BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADD_I32),
2356 bool IsMUBUF = TII->isMUBUF(*MI);
2358 if (!IsMUBUF && !MFI->isEntryFunction()) {
2359 // Convert to a swizzled stack address by scaling by the wave size.
2360 // In an entry function/kernel the offset is already swizzled.
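// In other words, the value materialized below is
// (FrameReg >> ST.getWavefrontSizeLog2()) + Offset, built either with VALU
// instructions or, failing that, entirely on the SALU.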
2361 bool IsSALU = isSGPRClass(TII->getOpRegClass(*MI, FIOperandNum));
2363 RS->isRegUsed(AMDGPU::SCC) && !MI->definesRegister(AMDGPU::SCC);
2364 const TargetRegisterClass *RC = IsSALU && !LiveSCC
2365 ? &AMDGPU::SReg_32RegClass
2366 : &AMDGPU::VGPR_32RegClass;
2367 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32 ||
2368 MI->getOpcode() == AMDGPU::V_MOV_B32_e64;
2369 Register ResultReg = IsCopy ? MI->getOperand(0).getReg()
2370 : RS->scavengeRegister(RC, MI, 0);
2372 int64_t Offset = FrameInfo.getObjectOffset(Index);
2374 unsigned OpCode = IsSALU && !LiveSCC ? AMDGPU::S_LSHR_B32
2375 : AMDGPU::V_LSHRREV_B32_e64;
2376 // XXX - This never happens because of emergency scavenging slot at 0?
2377 auto Shift = BuildMI(*MBB, MI, DL, TII->get(OpCode), ResultReg)
2378 .addImm(ST.getWavefrontSizeLog2())
2380 if (IsSALU && !LiveSCC)
2381 Shift.getInstr()->getOperand(3).setIsDead(); // Mark SCC as dead.
2382 if (IsSALU && LiveSCC) {
2384 RS->scavengeRegister(&AMDGPU::SReg_32RegClass, Shift, 0);
2385 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
2388 ResultReg = NewDest;
2391 MachineInstrBuilder MIB;
2393 if ((MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) !=
2395 // Reuse ResultReg in intermediate step.
2396 Register ScaledReg = ResultReg;
2398 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
2400 .addImm(ST.getWavefrontSizeLog2())
2403 const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
2405 // TODO: Fold if use instruction is another add of a constant.
2406 if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
2407 // FIXME: This can fail
2409 MIB.addReg(ScaledReg, RegState::Kill);
2411 MIB.addImm(0); // clamp bit
2413 assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 &&
2414 "Need to reuse carry out register");
2416 // Use scavenged unused carry out as offset register.
2417 Register ConstOffsetReg;
2419 ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
2421 ConstOffsetReg = MIB.getReg(1);
2423 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
2425 MIB.addReg(ConstOffsetReg, RegState::Kill);
2426 MIB.addReg(ScaledReg, RegState::Kill);
2427 MIB.addImm(0); // clamp bit
2431 if (!MIB || IsSALU) {
2432 // We have to produce a carry out, and there isn't a free SGPR pair
2433 // for it. We can keep the whole computation on the SALU to avoid
2434 // clobbering an additional register at the cost of an extra mov.
2436 // We may have 1 free scratch SGPR even though a carry out is
2437 // unavailable. Only one additional mov is needed.
2438 Register TmpScaledReg =
2439 RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
2440 Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg;
2442 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
2444 .addImm(ST.getWavefrontSizeLog2());
2445 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
2446 .addReg(ScaledReg, RegState::Kill)
2449 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
2450 .addReg(ScaledReg, RegState::Kill);
2452 ResultReg = ScaledReg;
2454 // If there were truly no free SGPRs, we need to undo everything.
2455 if (!TmpScaledReg.isValid()) {
2456 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
2457 .addReg(ScaledReg, RegState::Kill)
2459 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
2461 .addImm(ST.getWavefrontSizeLog2());
2466 // Don't introduce an extra copy if we're just materializing in a mov.
2468 MI->eraseFromParent();
2471 FIOp.ChangeToRegister(ResultReg, false, false, true);
2476 // Disable offen so we don't need a 0 vgpr base.
2477 assert(static_cast<int>(FIOperandNum) ==
2478 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
2479 AMDGPU::OpName::vaddr));
2481 auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
2482 assert((SOffset.isImm() && SOffset.getImm() == 0));
2484 if (FrameReg != AMDGPU::NoRegister)
2485 SOffset.ChangeToRegister(FrameReg, false);
2487 int64_t Offset = FrameInfo.getObjectOffset(Index);
2489 = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
2490 int64_t NewOffset = OldImm + Offset;
2492 if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
2493 buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
2494 MI->eraseFromParent();
2499 // If the offset is simply too big, don't convert to a scratch wave offset relative index.
2502 FIOp.ChangeToImmediate(Offset);
2503 if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
2504 Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
2505 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
2507 FIOp.ChangeToRegister(TmpReg, false, false, true);
2514 StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
2515 return AMDGPUInstPrinter::getRegisterName(Reg);
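// The helpers below map a bit width to the narrowest VGPR/AGPR/AV register
// class that can hold it, e.g. a 96-bit value uses VReg_96 and a 200-bit value
// rounds up to VReg_224. The _Align2 variants are used when
// ST.needsAlignedVGPRs() is set.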
2518 static const TargetRegisterClass *
2519 getAnyVGPRClassForBitWidth(unsigned BitWidth) {
2521 return &AMDGPU::VReg_64RegClass;
2523 return &AMDGPU::VReg_96RegClass;
2524 if (BitWidth <= 128)
2525 return &AMDGPU::VReg_128RegClass;
2526 if (BitWidth <= 160)
2527 return &AMDGPU::VReg_160RegClass;
2528 if (BitWidth <= 192)
2529 return &AMDGPU::VReg_192RegClass;
2530 if (BitWidth <= 224)
2531 return &AMDGPU::VReg_224RegClass;
2532 if (BitWidth <= 256)
2533 return &AMDGPU::VReg_256RegClass;
2534 if (BitWidth <= 288)
2535 return &AMDGPU::VReg_288RegClass;
2536 if (BitWidth <= 320)
2537 return &AMDGPU::VReg_320RegClass;
2538 if (BitWidth <= 352)
2539 return &AMDGPU::VReg_352RegClass;
2540 if (BitWidth <= 384)
2541 return &AMDGPU::VReg_384RegClass;
2542 if (BitWidth <= 512)
2543 return &AMDGPU::VReg_512RegClass;
2544 if (BitWidth <= 1024)
2545 return &AMDGPU::VReg_1024RegClass;
2550 static const TargetRegisterClass *
2551 getAlignedVGPRClassForBitWidth(unsigned BitWidth) {
2553 return &AMDGPU::VReg_64_Align2RegClass;
2555 return &AMDGPU::VReg_96_Align2RegClass;
2556 if (BitWidth <= 128)
2557 return &AMDGPU::VReg_128_Align2RegClass;
2558 if (BitWidth <= 160)
2559 return &AMDGPU::VReg_160_Align2RegClass;
2560 if (BitWidth <= 192)
2561 return &AMDGPU::VReg_192_Align2RegClass;
2562 if (BitWidth <= 224)
2563 return &AMDGPU::VReg_224_Align2RegClass;
2564 if (BitWidth <= 256)
2565 return &AMDGPU::VReg_256_Align2RegClass;
2566 if (BitWidth <= 288)
2567 return &AMDGPU::VReg_288_Align2RegClass;
2568 if (BitWidth <= 320)
2569 return &AMDGPU::VReg_320_Align2RegClass;
2570 if (BitWidth <= 352)
2571 return &AMDGPU::VReg_352_Align2RegClass;
2572 if (BitWidth <= 384)
2573 return &AMDGPU::VReg_384_Align2RegClass;
2574 if (BitWidth <= 512)
2575 return &AMDGPU::VReg_512_Align2RegClass;
2576 if (BitWidth <= 1024)
2577 return &AMDGPU::VReg_1024_Align2RegClass;
2582 const TargetRegisterClass *
2583 SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const {
2585 return &AMDGPU::VReg_1RegClass;
2587 return &AMDGPU::VGPR_LO16RegClass;
2589 return &AMDGPU::VGPR_32RegClass;
2590 return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth)
2591 : getAnyVGPRClassForBitWidth(BitWidth);
2594 static const TargetRegisterClass *
2595 getAnyAGPRClassForBitWidth(unsigned BitWidth) {
2597 return &AMDGPU::AReg_64RegClass;
2599 return &AMDGPU::AReg_96RegClass;
2600 if (BitWidth <= 128)
2601 return &AMDGPU::AReg_128RegClass;
2602 if (BitWidth <= 160)
2603 return &AMDGPU::AReg_160RegClass;
2604 if (BitWidth <= 192)
2605 return &AMDGPU::AReg_192RegClass;
2606 if (BitWidth <= 224)
2607 return &AMDGPU::AReg_224RegClass;
2608 if (BitWidth <= 256)
2609 return &AMDGPU::AReg_256RegClass;
2610 if (BitWidth <= 288)
2611 return &AMDGPU::AReg_288RegClass;
2612 if (BitWidth <= 320)
2613 return &AMDGPU::AReg_320RegClass;
2614 if (BitWidth <= 352)
2615 return &AMDGPU::AReg_352RegClass;
2616 if (BitWidth <= 384)
2617 return &AMDGPU::AReg_384RegClass;
2618 if (BitWidth <= 512)
2619 return &AMDGPU::AReg_512RegClass;
2620 if (BitWidth <= 1024)
2621 return &AMDGPU::AReg_1024RegClass;
2626 static const TargetRegisterClass *
2627 getAlignedAGPRClassForBitWidth(unsigned BitWidth) {
2629 return &AMDGPU::AReg_64_Align2RegClass;
2631 return &AMDGPU::AReg_96_Align2RegClass;
2632 if (BitWidth <= 128)
2633 return &AMDGPU::AReg_128_Align2RegClass;
2634 if (BitWidth <= 160)
2635 return &AMDGPU::AReg_160_Align2RegClass;
2636 if (BitWidth <= 192)
2637 return &AMDGPU::AReg_192_Align2RegClass;
2638 if (BitWidth <= 224)
2639 return &AMDGPU::AReg_224_Align2RegClass;
2640 if (BitWidth <= 256)
2641 return &AMDGPU::AReg_256_Align2RegClass;
2642 if (BitWidth <= 288)
2643 return &AMDGPU::AReg_288_Align2RegClass;
2644 if (BitWidth <= 320)
2645 return &AMDGPU::AReg_320_Align2RegClass;
2646 if (BitWidth <= 352)
2647 return &AMDGPU::AReg_352_Align2RegClass;
2648 if (BitWidth <= 384)
2649 return &AMDGPU::AReg_384_Align2RegClass;
2650 if (BitWidth <= 512)
2651 return &AMDGPU::AReg_512_Align2RegClass;
2652 if (BitWidth <= 1024)
2653 return &AMDGPU::AReg_1024_Align2RegClass;
2658 const TargetRegisterClass *
2659 SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const {
2661 return &AMDGPU::AGPR_LO16RegClass;
2663 return &AMDGPU::AGPR_32RegClass;
2664 return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth)
2665 : getAnyAGPRClassForBitWidth(BitWidth);
2668 static const TargetRegisterClass *
2669 getAnyVectorSuperClassForBitWidth(unsigned BitWidth) {
2671 return &AMDGPU::AV_64RegClass;
2673 return &AMDGPU::AV_96RegClass;
2674 if (BitWidth <= 128)
2675 return &AMDGPU::AV_128RegClass;
2676 if (BitWidth <= 160)
2677 return &AMDGPU::AV_160RegClass;
2678 if (BitWidth <= 192)
2679 return &AMDGPU::AV_192RegClass;
2680 if (BitWidth <= 224)
2681 return &AMDGPU::AV_224RegClass;
2682 if (BitWidth <= 256)
2683 return &AMDGPU::AV_256RegClass;
2684 if (BitWidth <= 288)
2685 return &AMDGPU::AV_288RegClass;
2686 if (BitWidth <= 320)
2687 return &AMDGPU::AV_320RegClass;
2688 if (BitWidth <= 352)
2689 return &AMDGPU::AV_352RegClass;
2690 if (BitWidth <= 384)
2691 return &AMDGPU::AV_384RegClass;
2692 if (BitWidth <= 512)
2693 return &AMDGPU::AV_512RegClass;
2694 if (BitWidth <= 1024)
2695 return &AMDGPU::AV_1024RegClass;
2700 static const TargetRegisterClass *
2701 getAlignedVectorSuperClassForBitWidth(unsigned BitWidth) {
2703 return &AMDGPU::AV_64_Align2RegClass;
2705 return &AMDGPU::AV_96_Align2RegClass;
2706 if (BitWidth <= 128)
2707 return &AMDGPU::AV_128_Align2RegClass;
2708 if (BitWidth <= 160)
2709 return &AMDGPU::AV_160_Align2RegClass;
2710 if (BitWidth <= 192)
2711 return &AMDGPU::AV_192_Align2RegClass;
2712 if (BitWidth <= 224)
2713 return &AMDGPU::AV_224_Align2RegClass;
2714 if (BitWidth <= 256)
2715 return &AMDGPU::AV_256_Align2RegClass;
2716 if (BitWidth <= 288)
2717 return &AMDGPU::AV_288_Align2RegClass;
2718 if (BitWidth <= 320)
2719 return &AMDGPU::AV_320_Align2RegClass;
2720 if (BitWidth <= 352)
2721 return &AMDGPU::AV_352_Align2RegClass;
2722 if (BitWidth <= 384)
2723 return &AMDGPU::AV_384_Align2RegClass;
2724 if (BitWidth <= 512)
2725 return &AMDGPU::AV_512_Align2RegClass;
2726 if (BitWidth <= 1024)
2727 return &AMDGPU::AV_1024_Align2RegClass;
2732 const TargetRegisterClass *
2733 SIRegisterInfo::getVectorSuperClassForBitWidth(unsigned BitWidth) const {
2735 return &AMDGPU::VGPR_LO16RegClass;
2737 return &AMDGPU::AV_32RegClass;
2738 return ST.needsAlignedVGPRs()
2739 ? getAlignedVectorSuperClassForBitWidth(BitWidth)
2740 : getAnyVectorSuperClassForBitWidth(BitWidth);
2743 const TargetRegisterClass *
2744 SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
2746 return &AMDGPU::SGPR_LO16RegClass;
2748 return &AMDGPU::SReg_32RegClass;
2750 return &AMDGPU::SReg_64RegClass;
2752 return &AMDGPU::SGPR_96RegClass;
2753 if (BitWidth <= 128)
2754 return &AMDGPU::SGPR_128RegClass;
2755 if (BitWidth <= 160)
2756 return &AMDGPU::SGPR_160RegClass;
2757 if (BitWidth <= 192)
2758 return &AMDGPU::SGPR_192RegClass;
2759 if (BitWidth <= 224)
2760 return &AMDGPU::SGPR_224RegClass;
2761 if (BitWidth <= 256)
2762 return &AMDGPU::SGPR_256RegClass;
2763 if (BitWidth <= 288)
2764 return &AMDGPU::SGPR_288RegClass;
2765 if (BitWidth <= 320)
2766 return &AMDGPU::SGPR_320RegClass;
2767 if (BitWidth <= 352)
2768 return &AMDGPU::SGPR_352RegClass;
2769 if (BitWidth <= 384)
2770 return &AMDGPU::SGPR_384RegClass;
2771 if (BitWidth <= 512)
2772 return &AMDGPU::SGPR_512RegClass;
2773 if (BitWidth <= 1024)
2774 return &AMDGPU::SGPR_1024RegClass;
2779 bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI,
2780 Register Reg) const {
2781 const TargetRegisterClass *RC;
2782 if (Reg.isVirtual())
2783 RC = MRI.getRegClass(Reg);
2785 RC = getPhysRegBaseClass(Reg);
2786 return RC ? isSGPRClass(RC) : false;
2789 const TargetRegisterClass *
2790 SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const {
2791 unsigned Size = getRegSizeInBits(*SRC);
2792 const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
2793 assert(VRC && "Invalid register class size");
2797 const TargetRegisterClass *
2798 SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const {
2799 unsigned Size = getRegSizeInBits(*SRC);
2800 const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
2801 assert(ARC && "Invalid register class size");
2805 const TargetRegisterClass *
2806 SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
2807 unsigned Size = getRegSizeInBits(*VRC);
2809 return &AMDGPU::SGPR_32RegClass;
2810 const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
2811 assert(SRC && "Invalid register class size");
2815 const TargetRegisterClass *
2816 SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC,
2817 const TargetRegisterClass *SubRC,
2818 unsigned SubIdx) const {
2819 // Ensure this subregister index is aligned in the super register.
2820 const TargetRegisterClass *MatchRC =
2821 getMatchingSuperRegClass(SuperRC, SubRC, SubIdx);
2822 return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr;
2825 bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
2826 if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2827 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
2828 return !ST.hasMFMAInlineLiteralBug();
2830 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
2831 OpType <= AMDGPU::OPERAND_SRC_LAST;
2834 bool SIRegisterInfo::shouldRewriteCopySrc(
2835 const TargetRegisterClass *DefRC,
2837 const TargetRegisterClass *SrcRC,
2838 unsigned SrcSubReg) const {
2839 // We want to prefer the smallest register class possible, so we don't want to
2840 // stop and rewrite on anything that looks like a subregister
2841 // extract. Operations mostly don't care about the super register class, so we
2842 // only want to stop on the most basic of copies between the same register class.
2845 // e.g. if we have something like
2848 // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
2849 // %3 = COPY %2, sub0
2851 // We want to look through the COPY to find:
2855 return getCommonSubClass(DefRC, SrcRC) != nullptr;
2858 bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
2859 // TODO: 64-bit operands have extending behavior from 32-bit literal.
2860 return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST &&
2861 OpType <= AMDGPU::OPERAND_REG_IMM_LAST;
2864 /// Returns the lowest register that is not used at any point in the function.
2865 /// If all registers are used, then this function will return
2866 /// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then return the
2867 /// highest unused register.
2868 MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
2869 const TargetRegisterClass *RC,
2870 const MachineFunction &MF,
2871 bool ReserveHighestVGPR) const {
2872 if (ReserveHighestVGPR) {
2873 for (MCRegister Reg : reverse(*RC))
2874 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2877 for (MCRegister Reg : *RC)
2878 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2881 return MCRegister();
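// getRegSplitParts returns the sub-register indices that split a register of
// class RC into EltSize-byte pieces. For example, a 256-bit class (8 dwords)
// split with EltSize == 8 (2 dwords) yields RegDWORDs / EltDWORDs == 4 parts.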
2884 ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
2885 unsigned EltSize) const {
2886 const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
2887 assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
2889 const unsigned RegDWORDs = RegBitWidth / 32;
2890 const unsigned EltDWORDs = EltSize / 4;
2891 assert(RegSplitParts.size() + 1 >= EltDWORDs);
2893 const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
2894 const unsigned NumParts = RegDWORDs / EltDWORDs;
2896 return ArrayRef(Parts.data(), NumParts);
2899 const TargetRegisterClass*
2900 SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
2901 Register Reg) const {
2902 return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegBaseClass(Reg);
2905 const TargetRegisterClass *
2906 SIRegisterInfo::getRegClassForOperandReg(const MachineRegisterInfo &MRI,
2907 const MachineOperand &MO) const {
2908 const TargetRegisterClass *SrcRC = getRegClassForReg(MRI, MO.getReg());
2909 return getSubRegisterClass(SrcRC, MO.getSubReg());
2912 bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
2913 Register Reg) const {
2914 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2915 // Registers without classes are unaddressable, SGPR-like registers.
2916 return RC && isVGPRClass(RC);
2919 bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
2920 Register Reg) const {
2921 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2923 // Registers without classes are unaddressable, SGPR-like registers.
2924 return RC && isAGPRClass(RC);
2927 bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
2928 const TargetRegisterClass *SrcRC,
2930 const TargetRegisterClass *DstRC,
2932 const TargetRegisterClass *NewRC,
2933 LiveIntervals &LIS) const {
2934 unsigned SrcSize = getRegSizeInBits(*SrcRC);
2935 unsigned DstSize = getRegSizeInBits(*DstRC);
2936 unsigned NewSize = getRegSizeInBits(*NewRC);
2938 // Do not increase the size of registers beyond a dword; we would need to
2939 // allocate adjacent registers and constrain regalloc more than needed.
2941 // Always allow dword coalescing.
2942 if (SrcSize <= 32 || DstSize <= 32)
2945 return NewSize <= DstSize || NewSize <= SrcSize;
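// For the 32-bit VGPR and SGPR classes the pressure limit is the per-wave
// register budget at the computed occupancy, additionally clamped by the
// function's own maximum (ST.getMaxNumVGPRs(MF) / ST.getMaxNumSGPRs(MF)).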
2948 unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
2949 MachineFunction &MF) const {
2950 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2952 unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
2954 switch (RC->getID()) {
2956 return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF);
2957 case AMDGPU::VGPR_32RegClassID:
2958 case AMDGPU::VGPR_LO16RegClassID:
2959 case AMDGPU::VGPR_HI16RegClassID:
2960 return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
2961 case AMDGPU::SGPR_32RegClassID:
2962 case AMDGPU::SGPR_LO16RegClassID:
2963 return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
2967 unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
2968 unsigned Idx) const {
2969 if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
2970 Idx == AMDGPU::RegisterPressureSets::AGPR_32)
2971 return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
2972 const_cast<MachineFunction &>(MF));
2974 if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
2975 return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
2976 const_cast<MachineFunction &>(MF));
2978 llvm_unreachable("Unexpected register pressure set!");
2981 const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
2982 static const int Empty[] = { -1 };
2984 if (RegPressureIgnoredUnits[RegUnit])
2987 return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
2990 MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
2991 // Not a callee saved register.
2992 return AMDGPU::SGPR30_SGPR31;
2995 const TargetRegisterClass *
2996 SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
2997 const RegisterBank &RB) const {
2998 switch (RB.getID()) {
2999 case AMDGPU::VGPRRegBankID:
3000 return getVGPRClassForBitWidth(std::max(32u, Size));
3001 case AMDGPU::VCCRegBankID:
3003 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
3004 : &AMDGPU::SReg_64_XEXECRegClass;
3005 case AMDGPU::SGPRRegBankID:
3006 return getSGPRClassForBitWidth(std::max(32u, Size));
3007 case AMDGPU::AGPRRegBankID:
3008 return getAGPRClassForBitWidth(std::max(32u, Size));
3010 llvm_unreachable("unknown register bank");
3014 const TargetRegisterClass *
3015 SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
3016 const MachineRegisterInfo &MRI) const {
3017 const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
3018 if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>())
3019 return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB);
3021 if (const auto *RC = RCOrRB.dyn_cast<const TargetRegisterClass *>())
3022 return getAllocatableClass(RC);
3027 MCRegister SIRegisterInfo::getVCC() const {
3028 return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
3031 MCRegister SIRegisterInfo::getExec() const {
3032 return isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
3035 const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const {
3036 // VGPR tuples have an alignment requirement on gfx90a variants.
3037 return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass
3038 : &AMDGPU::VReg_64RegClass;
3041 const TargetRegisterClass *
3042 SIRegisterInfo::getRegClass(unsigned RCID) const {
3043 switch ((int)RCID) {
3044 case AMDGPU::SReg_1RegClassID:
3046 case AMDGPU::SReg_1_XEXECRegClassID:
3047 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
3048 : &AMDGPU::SReg_64_XEXECRegClass;
3052 return AMDGPUGenRegisterInfo::getRegClass(RCID);
3056 // Find reaching register definition
3057 MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
3059 MachineRegisterInfo &MRI,
3060 LiveIntervals *LIS) const {
3061 auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
3062 SlotIndex UseIdx = LIS->getInstructionIndex(Use);
3065 if (Reg.isVirtual()) {
3066 if (!LIS->hasInterval(Reg))
3068 LiveInterval &LI = LIS->getInterval(Reg);
3069 LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
3070 : MRI.getMaxLaneMaskForVReg(Reg);
3071 VNInfo *V = nullptr;
3072 if (LI.hasSubRanges()) {
3073 for (auto &S : LI.subranges()) {
3074 if ((S.LaneMask & SubLanes) == SubLanes) {
3075 V = S.getVNInfoAt(UseIdx);
3080 V = LI.getVNInfoAt(UseIdx);
3087 for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid();
3089 LiveRange &LR = LIS->getRegUnit(*Units);
3090 if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
3091 if (!DefIdx.isValid() ||
3092 MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
3093 LIS->getInstructionFromIndex(V->def)))
3101 MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);
3103 if (!Def || !MDT.dominates(Def, &Use))
3106 assert(Def->modifiesRegister(Reg, this));
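/// Return the 32-bit register whose lo16 (or, for VGPRs, hi16) sub-register is
/// \p Reg, searching the VGPR_32, SReg_32 and AGPR_32 classes; returns
/// AMDGPU::NoRegister if there is none.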
3111 MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const {
3112 assert(getRegSizeInBits(*getPhysRegBaseClass(Reg)) <= 32);
3114 for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
3115 AMDGPU::SReg_32RegClass,
3116 AMDGPU::AGPR_32RegClass } ) {
3117 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
3120 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
3121 &AMDGPU::VGPR_32RegClass)) {
3125 return AMDGPU::NoRegister;
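/// Check whether \p RC already satisfies the subtarget's VGPR alignment
/// requirement: on targets where ST.needsAlignedVGPRs() is true, the class
/// must be (a subclass of) the aligned class for its bit width; otherwise any
/// class is acceptable.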
3128 bool SIRegisterInfo::isProperlyAlignedRC(const TargetRegisterClass &RC) const {
3129 if (!ST.needsAlignedVGPRs())
3132 if (isVGPRClass(&RC))
3133 return RC.hasSuperClassEq(getVGPRClassForBitWidth(getRegSizeInBits(RC)));
3134 if (isAGPRClass(&RC))
3135 return RC.hasSuperClassEq(getAGPRClassForBitWidth(getRegSizeInBits(RC)));
3136 if (isVectorSuperClass(&RC))
3137 return RC.hasSuperClassEq(
3138 getVectorSuperClassForBitWidth(getRegSizeInBits(RC)));
3143 const TargetRegisterClass *
3144 SIRegisterInfo::getProperlyAlignedRC(const TargetRegisterClass *RC) const {
3145 if (!RC || !ST.needsAlignedVGPRs())
3148 unsigned Size = getRegSizeInBits(*RC);
3152 if (isVGPRClass(RC))
3153 return getAlignedVGPRClassForBitWidth(Size);
3154 if (isAGPRClass(RC))
3155 return getAlignedAGPRClassForBitWidth(Size);
3156 if (isVectorSuperClass(RC))
3157 return getAlignedVectorSuperClassForBitWidth(Size);
3163 SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
3164 return ArrayRef(AMDGPU::SGPR_128RegClass.begin(), ST.getMaxNumSGPRs(MF) / 4);
3168 SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const {
3169 return ArrayRef(AMDGPU::SGPR_64RegClass.begin(), ST.getMaxNumSGPRs(MF) / 2);
3173 SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
3174 return ArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));