1 //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// SI implementation of the TargetRegisterInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "SIRegisterInfo.h"
16 #include "AMDGPURegisterBankInfo.h"
17 #include "GCNSubtarget.h"
18 #include "MCTargetDesc/AMDGPUInstPrinter.h"
19 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "llvm/CodeGen/LiveIntervals.h"
22 #include "llvm/CodeGen/MachineDominators.h"
23 #include "llvm/CodeGen/RegisterScavenging.h"
27 #define GET_REGINFO_TARGET_DESC
28 #include "AMDGPUGenRegisterInfo.inc"
30 static cl::opt<bool> EnableSpillSGPRToVGPR(
31 "amdgpu-spill-sgpr-to-vgpr",
32 cl::desc("Enable spilling SGPRs to VGPRs"),
36 std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
37 std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable;
39 // Map numbers of DWORDs to indexes in SubRegFromChannelTable.
40 // Valid indexes are shifted 1, such that a 0 mapping means unsupported.
41 // e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8,
42 // meaning index 7 in SubRegFromChannelTable.
43 static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = {
44 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9};
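// Illustrative example of the mapping above: a 64-bit access covers 2 DWORDs,
// so SubRegFromChannelTableWidthMap[2] == 2, which selects row 1 of
// SubRegFromChannelTable; unsupported widths (9-15 DWORDs) map to 0 and hit
// the "Not implemented" assert in getSubRegFromChannel().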
48 // A temporary struct to spill SGPRs.
49 // This is mostly to spill SGPRs to memory. Spilling SGPRs into VGPR lanes emits
50 // just v_writelane and v_readlane.
52 // When spilling to memory, the SGPRs are written into VGPR lanes and the VGPR
53 // is saved to scratch (or the other way around for loads).
54 // For this, a VGPR is required where the needed lanes can be clobbered. The
55 // RegScavenger can provide a VGPR where currently active lanes can be
56 // clobbered, but we still need to save inactive lanes.
57 // The high-level steps are:
58 // - Try to scavenge SGPR(s) to save exec
59 // - Try to scavenge VGPR
60 // - Save needed, all or inactive lanes of a TmpVGPR
61 // - Spill/Restore SGPRs using TmpVGPR
64 // To save all lanes of TmpVGPR, exec needs to be saved and modified. If we
65 // cannot scavenge temporary SGPRs to save exec, we use the following code:
66 // buffer_store_dword TmpVGPR ; only if active lanes need to be saved
67 // s_not_b64 exec, exec
68 // buffer_store_dword TmpVGPR ; save inactive lanes
70 struct SGPRSpillBuilder {
79 MachineBasicBlock::iterator MI;
80 ArrayRef<int16_t> SplitParts;
85 /* When spilling to stack */
86 // The SGPRs are written into this VGPR, which is then written to scratch
87 // (or vice versa for loads).
88 Register TmpVGPR = AMDGPU::NoRegister;
89 // Temporary spill slot to save TmpVGPR to.
91 // If TmpVGPR is live before the spill or if it is scavenged.
92 bool TmpVGPRLive = false;
93 // Scavenged SGPR to save EXEC.
94 Register SavedExecReg = AMDGPU::NoRegister;
95 // Stack index to write the SGPRs to.
100 MachineBasicBlock *MBB;
102 SIMachineFunctionInfo &MFI;
103 const SIInstrInfo &TII;
104 const SIRegisterInfo &TRI;
110 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
111 bool IsWave32, MachineBasicBlock::iterator MI, int Index,
113 : SGPRSpillBuilder(TRI, TII, IsWave32, MI, MI->getOperand(0).getReg(),
114 MI->getOperand(0).isKill(), Index, RS) {}
116 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
117 bool IsWave32, MachineBasicBlock::iterator MI, Register Reg,
118 bool IsKill, int Index, RegScavenger *RS)
119 : SuperReg(Reg), MI(MI), IsKill(IsKill), DL(MI->getDebugLoc()),
120 Index(Index), RS(RS), MBB(MI->getParent()), MF(*MBB->getParent()),
121 MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
123 const TargetRegisterClass *RC = TRI.getPhysRegClass(SuperReg);
124 SplitParts = TRI.getRegSplitParts(RC, EltSize);
125 NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
128 ExecReg = AMDGPU::EXEC_LO;
129 MovOpc = AMDGPU::S_MOV_B32;
130 NotOpc = AMDGPU::S_NOT_B32;
132 ExecReg = AMDGPU::EXEC;
133 MovOpc = AMDGPU::S_MOV_B64;
134 NotOpc = AMDGPU::S_NOT_B64;
137 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
138 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
139 SuperReg != AMDGPU::EXEC && "exec should never spill");
142 PerVGPRData getPerVGPRData() {
144 Data.PerVGPR = IsWave32 ? 32 : 64;
145 Data.NumVGPRs = (NumSubRegs + (Data.PerVGPR - 1)) / Data.PerVGPR;
146 Data.VGPRLanes = (1LL << std::min(Data.PerVGPR, NumSubRegs)) - 1LL;
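// Worked example (assuming wave64 and a 96-bit SGPR tuple, i.e. NumSubRegs == 3):
// PerVGPR == 64, NumVGPRs == (3 + 63) / 64 == 1, and
// VGPRLanes == (1 << 3) - 1 == 0b111, so only lanes 0-2 of the temporary VGPR
// are needed.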
150 // Tries to scavenge SGPRs to save EXEC and a VGPR. Uses v0 if no VGPR is
151 // free.
152 // Writes these instructions if an SGPR can be scavenged:
153 // s_mov_b64 s[6:7], exec ; Save exec
154 // s_mov_b64 exec, 3 ; Wanted lanemask
155 // buffer_store_dword v1 ; Write scavenged VGPR to emergency slot
157 // Writes these instructions if no SGPR can be scavenged:
158 // buffer_store_dword v0 ; Only if no free VGPR was found
159 // s_not_b64 exec, exec
160 // buffer_store_dword v0 ; Save inactive lanes
161 // ; exec stays inverted, it is flipped back in
164 // Scavenged temporary VGPR to use. It must be scavenged once for any number
165 // of spilled subregs.
166 // FIXME: The liveness analysis is limited and does not tell if a register
167 // is in use in lanes that are currently inactive. We can never be sure if
168 // a register is actually in use in another lane, so we need to save all
169 // used lanes of the chosen VGPR.
170 assert(RS && "Cannot spill SGPR to memory without RegScavenger");
171 TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0, false);
173 // Reserve temporary stack slot
174 TmpVGPRIndex = MFI.getScavengeFI(MF.getFrameInfo(), TRI);
176 // Found a register that is dead in the currently active lanes; we only
177 // need to spill inactive lanes.
180 // Pick v0 because it doesn't make a difference.
181 TmpVGPR = AMDGPU::VGPR0;
185 // Try to scavenge SGPRs to save exec
186 assert(!SavedExecReg && "Exec is already saved, refuse to save again");
187 const TargetRegisterClass &RC =
188 IsWave32 ? AMDGPU::SGPR_32RegClass : AMDGPU::SGPR_64RegClass;
189 RS->setRegUsed(SuperReg);
190 SavedExecReg = RS->scavengeRegister(&RC, MI, 0, false);
192 int64_t VGPRLanes = getPerVGPRData().VGPRLanes;
195 RS->setRegUsed(SavedExecReg);
196 // Set exec to needed lanes
197 BuildMI(*MBB, MI, DL, TII.get(MovOpc), SavedExecReg).addReg(ExecReg);
199 BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg).addImm(VGPRLanes);
201 I.addReg(TmpVGPR, RegState::ImplicitDefine);
202 // Spill needed lanes
203 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
205 // Spill active lanes
207 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false,
209 // Spill inactive lanes
210 auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
212 I.addReg(TmpVGPR, RegState::ImplicitDefine);
213 I->getOperand(2).setIsDead(true); // Mark SCC as dead.
214 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
218 // Writes these instructions if an SGPR can be scavenged:
219 // buffer_load_dword v1 ; Restore scavenged VGPR from emergency slot
220 // s_waitcnt vmcnt(0) ; If a free VGPR was found
221 // s_mov_b64 exec, s[6:7] ; Restore exec
223 // Writes these instructions if no SGPR can be scavenged:
224 // buffer_load_dword v0 ; Restore inactive lanes
225 // s_waitcnt vmcnt(0) ; If a free VGPR was found
226 // s_not_b64 exec, exec
227 // buffer_load_dword v0 ; Only if no free VGPR was found
230 // Restore used lanes
231 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
234 auto I = BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg)
235 .addReg(SavedExecReg, RegState::Kill);
236 // Add an implicit use of the load so it is not dead.
237 // FIXME This inserts an unnecessary waitcnt
239 I.addReg(TmpVGPR, RegState::ImplicitKill);
242 // Restore inactive lanes
243 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
245 auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
247 I.addReg(TmpVGPR, RegState::ImplicitKill);
248 I->getOperand(2).setIsDead(true); // Mark SCC as dead.
250 // Restore active lanes
252 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true);
256 // Write TmpVGPR to memory or read TmpVGPR from memory.
257 // Either using a single buffer_load/store if exec is set to the needed mask
263 void readWriteTmpVGPR(unsigned Offset, bool IsLoad) {
265 // Spill needed lanes
266 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
268 // Spill active lanes
269 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad,
271 // Spill inactive lanes
272 auto Not0 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
273 Not0->getOperand(2).setIsDead(); // Mark SCC as dead.
274 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
275 auto Not1 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
276 Not1->getOperand(2).setIsDead(); // Mark SCC as dead.
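// Taken together, the no-saved-exec path above emits roughly this sequence
// (store case shown; the load case is symmetric; vTmp stands for the
// scavenged TmpVGPR):
//   buffer_store_dword vTmp   ; active lanes
//   s_not_b64 exec, exec
//   buffer_store_dword vTmp   ; inactive lanes
//   s_not_b64 exec, exec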
280 void setMI(MachineBasicBlock *NewMBB, MachineBasicBlock::iterator NewMI) {
281 assert(MBB->getParent() == &MF);
289 SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
290 : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
291 SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
293 assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
294 getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
295 (getSubRegIndexLaneMask(AMDGPU::lo16) |
296 getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
297 getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
298 "getNumCoveredRegs() will not work with generated subreg masks!");
300 RegPressureIgnoredUnits.resize(getNumRegUnits());
301 RegPressureIgnoredUnits.set(
302 *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this));
303 for (auto Reg : AMDGPU::VGPR_HI16RegClass)
304 RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
306 // HACK: Until this is fully tablegen'd.
307 static llvm::once_flag InitializeRegSplitPartsFlag;
309 static auto InitializeRegSplitPartsOnce = [this]() {
310 for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
311 unsigned Size = getSubRegIdxSize(Idx);
314 std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
315 unsigned Pos = getSubRegIdxOffset(Idx);
320 unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
321 Vec.resize(MaxNumParts);
327 static llvm::once_flag InitializeSubRegFromChannelTableFlag;
329 static auto InitializeSubRegFromChannelTableOnce = [this]() {
330 for (auto &Row : SubRegFromChannelTable)
331 Row.fill(AMDGPU::NoSubRegister);
332 for (uint16_t Idx = 1; Idx < getNumSubRegIndices(); ++Idx) {
333 unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32;
334 unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32;
335 assert(Width < SubRegFromChannelTableWidthMap.size());
336 Width = SubRegFromChannelTableWidthMap[Width];
339 unsigned TableIdx = Width - 1;
340 assert(TableIdx < SubRegFromChannelTable.size());
341 assert(Offset < SubRegFromChannelTable[TableIdx].size());
342 SubRegFromChannelTable[TableIdx][Offset] = Idx;
346 llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
347 llvm::call_once(InitializeSubRegFromChannelTableFlag,
348 InitializeSubRegFromChannelTableOnce);
351 void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
352 MCRegister Reg) const {
353 MCRegAliasIterator R(Reg, this, true);
355 for (; R.isValid(); ++R)
359 // Forced to be here by one .inc
360 const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
361 const MachineFunction *MF) const {
362 CallingConv::ID CC = MF->getFunction().getCallingConv();
365 case CallingConv::Fast:
366 case CallingConv::Cold:
367 return MF->getSubtarget<GCNSubtarget>().hasGFX90AInsts()
368 ? CSR_AMDGPU_HighRegs_With_AGPRs_SaveList
369 : CSR_AMDGPU_HighRegs_SaveList;
370 case CallingConv::AMDGPU_Gfx:
371 return MF->getSubtarget<GCNSubtarget>().hasGFX90AInsts()
372 ? CSR_AMDGPU_SI_Gfx_With_AGPRs_SaveList
373 : CSR_AMDGPU_SI_Gfx_SaveList;
375 // Dummy to not crash RegisterClassInfo.
376 static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
377 return &NoCalleeSavedReg;
383 SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
387 const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
388 CallingConv::ID CC) const {
391 case CallingConv::Fast:
392 case CallingConv::Cold:
393 return MF.getSubtarget<GCNSubtarget>().hasGFX90AInsts()
394 ? CSR_AMDGPU_HighRegs_With_AGPRs_RegMask
395 : CSR_AMDGPU_HighRegs_RegMask;
396 case CallingConv::AMDGPU_Gfx:
397 return MF.getSubtarget<GCNSubtarget>().hasGFX90AInsts()
398 ? CSR_AMDGPU_SI_Gfx_With_AGPRs_RegMask
399 : CSR_AMDGPU_SI_Gfx_RegMask;
405 const uint32_t *SIRegisterInfo::getNoPreservedMask() const {
406 return CSR_AMDGPU_NoRegs_RegMask;
409 const TargetRegisterClass *
410 SIRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
411 const MachineFunction &MF) const {
412 // FIXME: Should have a helper function like getEquivalentVGPRClass to get the
413 // equivalent AV class. If one were used here, the verifier would crash after
414 // RegBankSelect in the GISel flow, because the aligned regclasses are not
415 // fully set up until instruction selection.
416 if (MF.getSubtarget<GCNSubtarget>().hasMAIInsts() &&
417 (isVGPRClass(RC) || isAGPRClass(RC))) {
418 if (RC == &AMDGPU::VGPR_32RegClass || RC == &AMDGPU::AGPR_32RegClass)
419 return &AMDGPU::AV_32RegClass;
420 if (RC == &AMDGPU::VReg_64RegClass || RC == &AMDGPU::AReg_64RegClass)
421 return &AMDGPU::AV_64RegClass;
422 if (RC == &AMDGPU::VReg_64_Align2RegClass ||
423 RC == &AMDGPU::AReg_64_Align2RegClass)
424 return &AMDGPU::AV_64_Align2RegClass;
425 if (RC == &AMDGPU::VReg_96RegClass || RC == &AMDGPU::AReg_96RegClass)
426 return &AMDGPU::AV_96RegClass;
427 if (RC == &AMDGPU::VReg_96_Align2RegClass ||
428 RC == &AMDGPU::AReg_96_Align2RegClass)
429 return &AMDGPU::AV_96_Align2RegClass;
430 if (RC == &AMDGPU::VReg_128RegClass || RC == &AMDGPU::AReg_128RegClass)
431 return &AMDGPU::AV_128RegClass;
432 if (RC == &AMDGPU::VReg_128_Align2RegClass ||
433 RC == &AMDGPU::AReg_128_Align2RegClass)
434 return &AMDGPU::AV_128_Align2RegClass;
435 if (RC == &AMDGPU::VReg_160RegClass || RC == &AMDGPU::AReg_160RegClass)
436 return &AMDGPU::AV_160RegClass;
437 if (RC == &AMDGPU::VReg_160_Align2RegClass ||
438 RC == &AMDGPU::AReg_160_Align2RegClass)
439 return &AMDGPU::AV_160_Align2RegClass;
440 if (RC == &AMDGPU::VReg_192RegClass || RC == &AMDGPU::AReg_192RegClass)
441 return &AMDGPU::AV_192RegClass;
442 if (RC == &AMDGPU::VReg_192_Align2RegClass ||
443 RC == &AMDGPU::AReg_192_Align2RegClass)
444 return &AMDGPU::AV_192_Align2RegClass;
445 if (RC == &AMDGPU::VReg_256RegClass || RC == &AMDGPU::AReg_256RegClass)
446 return &AMDGPU::AV_256RegClass;
447 if (RC == &AMDGPU::VReg_256_Align2RegClass ||
448 RC == &AMDGPU::AReg_256_Align2RegClass)
449 return &AMDGPU::AV_256_Align2RegClass;
450 if (RC == &AMDGPU::VReg_512RegClass || RC == &AMDGPU::AReg_512RegClass)
451 return &AMDGPU::AV_512RegClass;
452 if (RC == &AMDGPU::VReg_512_Align2RegClass ||
453 RC == &AMDGPU::AReg_512_Align2RegClass)
454 return &AMDGPU::AV_512_Align2RegClass;
455 if (RC == &AMDGPU::VReg_1024RegClass || RC == &AMDGPU::AReg_1024RegClass)
456 return &AMDGPU::AV_1024RegClass;
457 if (RC == &AMDGPU::VReg_1024_Align2RegClass ||
458 RC == &AMDGPU::AReg_1024_Align2RegClass)
459 return &AMDGPU::AV_1024_Align2RegClass;
462 return TargetRegisterInfo::getLargestLegalSuperClass(RC, MF);
465 Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
466 const SIFrameLowering *TFI =
467 MF.getSubtarget<GCNSubtarget>().getFrameLowering();
468 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
469 // During ISel lowering we always reserve the stack pointer in entry
470 // functions, but never actually want to reference it when accessing our own
471 // frame. If we need a frame pointer we use it, but otherwise we can just use
472 // an immediate "0" which we represent by returning NoRegister.
473 if (FuncInfo->isEntryFunction()) {
474 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register();
476 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg()
477 : FuncInfo->getStackPtrOffsetReg();
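// E.g. (illustrative): a non-entry function with no frame pointer yields the
// stack pointer offset register (conventionally s32 in the AMDGPU ABI, with
// s33 as the frame pointer), while an entry function without an FP returns a
// null Register and addresses its frame from an immediate 0.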
480 bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
481 // When we need stack realignment, we can't reference off of the
482 // stack pointer, so we reserve a base pointer.
483 const MachineFrameInfo &MFI = MF.getFrameInfo();
484 return MFI.getNumFixedObjects() && shouldRealignStack(MF);
487 Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
489 const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const {
490 return CSR_AMDGPU_AllVGPRs_RegMask;
493 const uint32_t *SIRegisterInfo::getAllAGPRRegMask() const {
494 return CSR_AMDGPU_AllAGPRs_RegMask;
497 const uint32_t *SIRegisterInfo::getAllVectorRegMask() const {
498 return CSR_AMDGPU_AllVectorRegs_RegMask;
501 const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const {
502 return CSR_AMDGPU_AllAllocatableSRegs_RegMask;
505 unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
507 assert(NumRegs < SubRegFromChannelTableWidthMap.size());
508 unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs];
509 assert(NumRegIndex && "Not implemented");
510 assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size());
511 return SubRegFromChannelTable[NumRegIndex - 1][Channel];
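// For example, getSubRegFromChannel(/*Channel=*/2, /*NumRegs=*/2) reads row 1,
// column 2 and returns the 64-bit sub-register index starting at channel 2
// (AMDGPU::sub2_sub3, assuming the generated sub-register index names).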
514 MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
515 const MachineFunction &MF) const {
516 unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
517 MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
518 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
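// Example (assuming ST.getMaxNumSGPRs(MF) == 102): BaseIdx == alignDown(102, 4)
// - 4 == 96, so the reserved private segment buffer is the SGPR_128 tuple
// starting at SGPR96.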
521 BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
522 BitVector Reserved(getNumRegs());
523 Reserved.set(AMDGPU::MODE);
525 // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
526 // this seems likely to result in bugs, so I'm marking them as reserved.
527 reserveRegisterTuples(Reserved, AMDGPU::EXEC);
528 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
530 // M0 has to be reserved so that llvm accepts it as a live-in into a block.
531 reserveRegisterTuples(Reserved, AMDGPU::M0);
533 // Reserve src_vccz, src_execz, src_scc.
534 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
535 reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
536 reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
538 // Reserve the memory aperture registers.
539 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
540 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
541 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
542 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
544 // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
545 reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
547 // Reserve xnack_mask registers - support is not implemented in Codegen.
548 reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
550 // Reserve lds_direct register - support is not implemented in Codegen.
551 reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
553 // Reserve Trap Handler registers - support is not implemented in Codegen.
554 reserveRegisterTuples(Reserved, AMDGPU::TBA);
555 reserveRegisterTuples(Reserved, AMDGPU::TMA);
556 reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
557 reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
558 reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
559 reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
560 reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
561 reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
562 reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
563 reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
565 // Reserve null register - it shall never be allocated
566 reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);
568 // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
569 // will result in bugs.
571 Reserved.set(AMDGPU::VCC);
572 Reserved.set(AMDGPU::VCC_HI);
575 unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
576 unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
577 for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
578 unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
579 reserveRegisterTuples(Reserved, Reg);
582 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
583 unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
584 unsigned MaxNumAGPRs = MaxNumVGPRs;
585 unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
587 if (ST.hasGFX90AInsts()) {
588 // In an entry function without calls and without AGPR usage it is possible to
589 // use the whole register budget for VGPRs.
591 // TODO: it shall be possible to estimate maximum AGPR/VGPR pressure and
592 // split register file accordingly.
593 if (MFI->usesAGPRs(MF)) {
595 MaxNumAGPRs = MaxNumVGPRs;
597 if (MaxNumVGPRs > TotalNumVGPRs) {
598 MaxNumAGPRs = MaxNumVGPRs - TotalNumVGPRs;
599 MaxNumVGPRs = TotalNumVGPRs;
605 for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
606 unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
607 reserveRegisterTuples(Reserved, Reg);
610 for (unsigned i = MaxNumAGPRs; i < TotalNumVGPRs; ++i) {
611 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
612 reserveRegisterTuples(Reserved, Reg);
615 for (auto Reg : AMDGPU::SReg_32RegClass) {
616 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
617 Register Low = getSubReg(Reg, AMDGPU::lo16);
618 // This is to prevent BB vcc liveness errors.
619 if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
623 for (auto Reg : AMDGPU::AGPR_32RegClass) {
624 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
627 // Reserve all remaining AGPRs if there are no instructions that can use them.
628 if (!ST.hasMAIInsts()) {
629 for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
630 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
631 reserveRegisterTuples(Reserved, Reg);
635 Register ScratchRSrcReg = MFI->getScratchRSrcReg();
636 if (ScratchRSrcReg != AMDGPU::NoRegister) {
637 // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need
638 // to spill.
639 // TODO: May need to reserve a VGPR if doing LDS spilling.
640 reserveRegisterTuples(Reserved, ScratchRSrcReg);
643 // We have to assume the SP is needed in case there are calls in the function,
644 // which is detected after the function is lowered. If we aren't really going
645 // to need SP, don't bother reserving it.
646 MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
649 reserveRegisterTuples(Reserved, StackPtrReg);
650 assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
653 MCRegister FrameReg = MFI->getFrameOffsetReg();
655 reserveRegisterTuples(Reserved, FrameReg);
656 assert(!isSubRegister(ScratchRSrcReg, FrameReg));
659 if (hasBasePointer(MF)) {
660 MCRegister BasePtrReg = getBaseRegister();
661 reserveRegisterTuples(Reserved, BasePtrReg);
662 assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
665 for (auto Reg : MFI->WWMReservedRegs) {
666 reserveRegisterTuples(Reserved, Reg.first);
669 // Reserve VGPRs used for SGPR spilling.
670 // Note we treat freezeReservedRegs unusually because we run register
671 // allocation in two phases. It's OK to re-freeze with new registers for the
674 for (auto &SpilledFI : MFI->sgpr_spill_vgprs()) {
675 for (auto &SpilledVGPR : SpilledFI.second)
676 reserveRegisterTuples(Reserved, SpilledVGPR.VGPR);
680 // FIXME: Stop using reserved registers for this.
681 for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
682 reserveRegisterTuples(Reserved, Reg);
684 for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
685 reserveRegisterTuples(Reserved, Reg);
687 for (auto SSpill : MFI->getSGPRSpillVGPRs())
688 reserveRegisterTuples(Reserved, SSpill.VGPR);
693 bool SIRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
694 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
695 // On entry, the base address is 0, so it can't possibly need any more
696 // alignment.
698 // FIXME: Should be able to specify the entry frame alignment per calling
699 // convention instead.
700 if (Info->isEntryFunction())
703 return TargetRegisterInfo::shouldRealignStack(MF);
706 bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
707 const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
708 if (Info->isEntryFunction()) {
709 const MachineFrameInfo &MFI = Fn.getFrameInfo();
710 return MFI.hasStackObjects() || MFI.hasCalls();
713 // May need scavenger for dealing with callee saved registers.
717 bool SIRegisterInfo::requiresFrameIndexScavenging(
718 const MachineFunction &MF) const {
719 // Do not use frame virtual registers. They used to be used for SGPRs, but
720 // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
721 // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
722 // frame index.
726 bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
727 const MachineFunction &MF) const {
728 const MachineFrameInfo &MFI = MF.getFrameInfo();
729 return MFI.hasStackObjects();
732 bool SIRegisterInfo::requiresVirtualBaseRegisters(
733 const MachineFunction &) const {
734 // There are no special dedicated stack or frame pointers.
738 int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const {
739 assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI));
741 int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
742 AMDGPU::OpName::offset);
743 return MI->getOperand(OffIdx).getImm();
746 int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
748 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
751 assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
752 AMDGPU::OpName::vaddr) ||
753 (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
754 AMDGPU::OpName::saddr))) &&
755 "Should never see frame index on non-address operand");
757 return getScratchInstrOffset(MI);
760 bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
761 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
764 int64_t FullOffset = Offset + getScratchInstrOffset(MI);
766 if (SIInstrInfo::isMUBUF(*MI))
767 return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset);
769 const SIInstrInfo *TII = ST.getInstrInfo();
770 return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS,
771 SIInstrFlags::FlatScratch);
774 Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
776 int64_t Offset) const {
777 MachineBasicBlock::iterator Ins = MBB->begin();
778 DebugLoc DL; // Defaults to "unknown"
780 if (Ins != MBB->end())
781 DL = Ins->getDebugLoc();
783 MachineFunction *MF = MBB->getParent();
784 const SIInstrInfo *TII = ST.getInstrInfo();
785 MachineRegisterInfo &MRI = MF->getRegInfo();
786 unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32
787 : AMDGPU::V_MOV_B32_e32;
789 Register BaseReg = MRI.createVirtualRegister(
790 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass
791 : &AMDGPU::VGPR_32RegClass);
794 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg)
795 .addFrameIndex(FrameIdx);
799 Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
801 Register FIReg = MRI.createVirtualRegister(
802 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass
803 : &AMDGPU::VGPR_32RegClass);
805 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
807 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg)
808 .addFrameIndex(FrameIdx);
810 if (ST.enableFlatScratch() ) {
811 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_I32), BaseReg)
812 .addReg(OffsetReg, RegState::Kill)
817 TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
818 .addReg(OffsetReg, RegState::Kill)
820 .addImm(0); // clamp bit
825 void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
826 int64_t Offset) const {
827 const SIInstrInfo *TII = ST.getInstrInfo();
828 bool IsFlat = TII->isFLATScratch(MI);
831 // FIXME: Is it possible to be storing a frame index to itself?
833 for (const MachineOperand &MO: MI.operands()) {
836 llvm_unreachable("should not see multiple frame indices");
843 MachineOperand *FIOp =
844 TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr
845 : AMDGPU::OpName::vaddr);
847 MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
848 int64_t NewOffset = OffsetOp->getImm() + Offset;
850 assert(FIOp && FIOp->isFI() && "frame index must be address operand");
851 assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI));
854 assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
855 SIInstrFlags::FlatScratch) &&
856 "offset should be legal");
857 FIOp->ChangeToRegister(BaseReg, false);
858 OffsetOp->setImm(NewOffset);
863 MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
864 assert(SOffset->isImm() && SOffset->getImm() == 0);
867 assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
868 "offset should be legal");
870 FIOp->ChangeToRegister(BaseReg, false);
871 OffsetOp->setImm(NewOffset);
874 bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
876 int64_t Offset) const {
877 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
880 int64_t NewOffset = Offset + getScratchInstrOffset(MI);
882 if (SIInstrInfo::isMUBUF(*MI))
883 return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset);
885 const SIInstrInfo *TII = ST.getInstrInfo();
886 return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
887 SIInstrFlags::FlatScratch);
890 const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
891 const MachineFunction &MF, unsigned Kind) const {
892 // This is inaccurate. It depends on the instruction and address space. The
893 // only place where we should hit this is for dealing with frame indexes /
894 // private accesses, so this is correct in that case.
895 return &AMDGPU::VGPR_32RegClass;
898 const TargetRegisterClass *
899 SIRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
900 if (isAGPRClass(RC) && !ST.hasGFX90AInsts())
901 return getEquivalentVGPRClass(RC);
906 static unsigned getNumSubRegsForSpillOp(unsigned Op) {
909 case AMDGPU::SI_SPILL_S1024_SAVE:
910 case AMDGPU::SI_SPILL_S1024_RESTORE:
911 case AMDGPU::SI_SPILL_V1024_SAVE:
912 case AMDGPU::SI_SPILL_V1024_RESTORE:
913 case AMDGPU::SI_SPILL_A1024_SAVE:
914 case AMDGPU::SI_SPILL_A1024_RESTORE:
915 case AMDGPU::SI_SPILL_AV1024_SAVE:
916 case AMDGPU::SI_SPILL_AV1024_RESTORE:
918 case AMDGPU::SI_SPILL_S512_SAVE:
919 case AMDGPU::SI_SPILL_S512_RESTORE:
920 case AMDGPU::SI_SPILL_V512_SAVE:
921 case AMDGPU::SI_SPILL_V512_RESTORE:
922 case AMDGPU::SI_SPILL_A512_SAVE:
923 case AMDGPU::SI_SPILL_A512_RESTORE:
924 case AMDGPU::SI_SPILL_AV512_SAVE:
925 case AMDGPU::SI_SPILL_AV512_RESTORE:
927 case AMDGPU::SI_SPILL_S256_SAVE:
928 case AMDGPU::SI_SPILL_S256_RESTORE:
929 case AMDGPU::SI_SPILL_V256_SAVE:
930 case AMDGPU::SI_SPILL_V256_RESTORE:
931 case AMDGPU::SI_SPILL_A256_SAVE:
932 case AMDGPU::SI_SPILL_A256_RESTORE:
933 case AMDGPU::SI_SPILL_AV256_SAVE:
934 case AMDGPU::SI_SPILL_AV256_RESTORE:
936 case AMDGPU::SI_SPILL_S224_SAVE:
937 case AMDGPU::SI_SPILL_S224_RESTORE:
938 case AMDGPU::SI_SPILL_V224_SAVE:
939 case AMDGPU::SI_SPILL_V224_RESTORE:
940 case AMDGPU::SI_SPILL_A224_SAVE:
941 case AMDGPU::SI_SPILL_A224_RESTORE:
942 case AMDGPU::SI_SPILL_AV224_SAVE:
943 case AMDGPU::SI_SPILL_AV224_RESTORE:
945 case AMDGPU::SI_SPILL_S192_SAVE:
946 case AMDGPU::SI_SPILL_S192_RESTORE:
947 case AMDGPU::SI_SPILL_V192_SAVE:
948 case AMDGPU::SI_SPILL_V192_RESTORE:
949 case AMDGPU::SI_SPILL_A192_SAVE:
950 case AMDGPU::SI_SPILL_A192_RESTORE:
951 case AMDGPU::SI_SPILL_AV192_SAVE:
952 case AMDGPU::SI_SPILL_AV192_RESTORE:
954 case AMDGPU::SI_SPILL_S160_SAVE:
955 case AMDGPU::SI_SPILL_S160_RESTORE:
956 case AMDGPU::SI_SPILL_V160_SAVE:
957 case AMDGPU::SI_SPILL_V160_RESTORE:
958 case AMDGPU::SI_SPILL_A160_SAVE:
959 case AMDGPU::SI_SPILL_A160_RESTORE:
960 case AMDGPU::SI_SPILL_AV160_SAVE:
961 case AMDGPU::SI_SPILL_AV160_RESTORE:
963 case AMDGPU::SI_SPILL_S128_SAVE:
964 case AMDGPU::SI_SPILL_S128_RESTORE:
965 case AMDGPU::SI_SPILL_V128_SAVE:
966 case AMDGPU::SI_SPILL_V128_RESTORE:
967 case AMDGPU::SI_SPILL_A128_SAVE:
968 case AMDGPU::SI_SPILL_A128_RESTORE:
969 case AMDGPU::SI_SPILL_AV128_SAVE:
970 case AMDGPU::SI_SPILL_AV128_RESTORE:
972 case AMDGPU::SI_SPILL_S96_SAVE:
973 case AMDGPU::SI_SPILL_S96_RESTORE:
974 case AMDGPU::SI_SPILL_V96_SAVE:
975 case AMDGPU::SI_SPILL_V96_RESTORE:
976 case AMDGPU::SI_SPILL_A96_SAVE:
977 case AMDGPU::SI_SPILL_A96_RESTORE:
978 case AMDGPU::SI_SPILL_AV96_SAVE:
979 case AMDGPU::SI_SPILL_AV96_RESTORE:
981 case AMDGPU::SI_SPILL_S64_SAVE:
982 case AMDGPU::SI_SPILL_S64_RESTORE:
983 case AMDGPU::SI_SPILL_V64_SAVE:
984 case AMDGPU::SI_SPILL_V64_RESTORE:
985 case AMDGPU::SI_SPILL_A64_SAVE:
986 case AMDGPU::SI_SPILL_A64_RESTORE:
987 case AMDGPU::SI_SPILL_AV64_SAVE:
988 case AMDGPU::SI_SPILL_AV64_RESTORE:
990 case AMDGPU::SI_SPILL_S32_SAVE:
991 case AMDGPU::SI_SPILL_S32_RESTORE:
992 case AMDGPU::SI_SPILL_V32_SAVE:
993 case AMDGPU::SI_SPILL_V32_RESTORE:
994 case AMDGPU::SI_SPILL_A32_SAVE:
995 case AMDGPU::SI_SPILL_A32_RESTORE:
996 case AMDGPU::SI_SPILL_AV32_SAVE:
997 case AMDGPU::SI_SPILL_AV32_RESTORE:
999 default: llvm_unreachable("Invalid spill opcode");
1003 static int getOffsetMUBUFStore(unsigned Opc) {
1005 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
1006 return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1007 case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
1008 return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
1009 case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
1010 return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
1011 case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
1012 return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
1013 case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
1014 return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
1015 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
1016 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
1017 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
1018 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
1024 static int getOffsetMUBUFLoad(unsigned Opc) {
1026 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
1027 return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1028 case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
1029 return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
1030 case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
1031 return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
1032 case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
1033 return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
1034 case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
1035 return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
1036 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
1037 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
1038 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
1039 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
1040 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
1041 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
1042 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
1043 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
1044 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
1045 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
1046 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
1047 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
1048 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
1049 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
1050 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
1051 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
1057 static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
1058 MachineBasicBlock &MBB,
1059 MachineBasicBlock::iterator MI,
1060 int Index, unsigned Lane,
1061 unsigned ValueReg, bool IsKill) {
1062 MachineFunction *MF = MBB.getParent();
1063 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1064 const SIInstrInfo *TII = ST.getInstrInfo();
1066 MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
1068 if (Reg == AMDGPU::NoRegister)
1069 return MachineInstrBuilder();
1071 bool IsStore = MI->mayStore();
1072 MachineRegisterInfo &MRI = MF->getRegInfo();
1073 auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
1075 unsigned Dst = IsStore ? Reg : ValueReg;
1076 unsigned Src = IsStore ? ValueReg : Reg;
1077 bool IsVGPR = TRI->isVGPR(MRI, Reg);
1078 DebugLoc DL = MI->getDebugLoc();
1079 if (IsVGPR == TRI->isVGPR(MRI, ValueReg)) {
1080 // Spiller during regalloc may restore a spilled register to its superclass.
1081 // It could result in AGPR spills restored to VGPRs or the other way around,
1082 // leaving the src and dst with identical regclasses at this point. It just
1083 // needs a copy in such cases.
1084 auto CopyMIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), Dst)
1085 .addReg(Src, getKillRegState(IsKill));
1086 CopyMIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1089 unsigned Opc = (IsStore ^ IsVGPR) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64
1090 : AMDGPU::V_ACCVGPR_READ_B32_e64;
1092 auto MIB = BuildMI(MBB, MI, DL, TII->get(Opc), Dst)
1093 .addReg(Src, getKillRegState(IsKill));
1094 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
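// In effect (sketch; aN/vN are placeholder register names): a VGPR spilled to
// an AGPR lane is stored with
//   v_accvgpr_write_b32 aN, vN
// and reloaded with
//   v_accvgpr_read_b32  vN, aN
// with both instructions carrying the ReloadReuse asm-printer flag.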
1098 // This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
1099 // need to handle the case where an SGPR may need to be spilled while spilling.
1100 static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
1101 MachineFrameInfo &MFI,
1102 MachineBasicBlock::iterator MI,
1105 const SIInstrInfo *TII = ST.getInstrInfo();
1106 MachineBasicBlock *MBB = MI->getParent();
1107 const DebugLoc &DL = MI->getDebugLoc();
1108 bool IsStore = MI->mayStore();
1110 unsigned Opc = MI->getOpcode();
1111 int LoadStoreOp = IsStore ?
1112 getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
1113 if (LoadStoreOp == -1)
1116 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
1117 if (spillVGPRtoAGPR(ST, *MBB, MI, Index, 0, Reg->getReg(), false).getInstr())
1120 MachineInstrBuilder NewMI =
1121 BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
1123 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
1124 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
1131 const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
1132 AMDGPU::OpName::vdata_in);
1134 NewMI.add(*VDataIn);
1138 static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII,
1139 unsigned LoadStoreOp,
1141 bool IsStore = TII->get(LoadStoreOp).mayStore();
1143 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0 &&
1144 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::saddr) < 0;
1148 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1149 : AMDGPU::SCRATCH_LOAD_DWORD_SADDR;
1152 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR
1153 : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR;
1156 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR
1157 : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR;
1160 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR
1161 : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR;
1164 llvm_unreachable("Unexpected spill load/store size!");
1168 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
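// E.g. (illustrative) EltSize == 16 selects SCRATCH_STORE_DWORDX4_SADDR or
// SCRATCH_LOAD_DWORDX4_SADDR, and getFlatScratchInstSTfromSS converts that to
// the ST form when the instruction has neither a vaddr nor a saddr operand.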
1173 void SIRegisterInfo::buildSpillLoadStore(
1174 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL,
1175 unsigned LoadStoreOp, int Index, Register ValueReg, bool IsKill,
1176 MCRegister ScratchOffsetReg, int64_t InstOffset, MachineMemOperand *MMO,
1177 RegScavenger *RS, LivePhysRegs *LiveRegs) const {
1178 assert((!RS || !LiveRegs) && "Only RS or LiveRegs can be set but not both");
1180 MachineFunction *MF = MBB.getParent();
1181 const SIInstrInfo *TII = ST.getInstrInfo();
1182 const MachineFrameInfo &MFI = MF->getFrameInfo();
1183 const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
1185 const MCInstrDesc *Desc = &TII->get(LoadStoreOp);
1186 bool IsStore = Desc->mayStore();
1187 bool IsFlat = TII->isFLATScratch(LoadStoreOp);
1189 bool Scavenged = false;
1190 MCRegister SOffset = ScratchOffsetReg;
1192 const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
1193 // On gfx90a+ AGPR is a regular VGPR acceptable for loads and stores.
1194 const bool IsAGPR = !ST.hasGFX90AInsts() && isAGPRClass(RC);
1195 const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8;
1197 // Always use 4 byte operations for AGPRs because we need to scavenge
1198 // a temporary VGPR.
1199 unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u;
1200 unsigned NumSubRegs = RegWidth / EltSize;
1201 unsigned Size = NumSubRegs * EltSize;
1202 unsigned RemSize = RegWidth - Size;
1203 unsigned NumRemSubRegs = RemSize ? 1 : 0;
1204 int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
1205 int64_t MaxOffset = Offset + Size + RemSize - EltSize;
1206 int64_t ScratchOffsetRegDelta = 0;
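// Worked example (illustrative): a 256-bit VGPR tuple (RegWidth == 32 bytes)
// spilled via flat scratch for a non-AGPR value gives EltSize == 16,
// NumSubRegs == 2, Size == 32 and RemSize == 0, i.e. two 4-dword accesses and
// no remainder element.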
1208 if (IsFlat && EltSize > 4) {
1209 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1210 Desc = &TII->get(LoadStoreOp);
1213 Align Alignment = MFI.getObjectAlign(Index);
1214 const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
1216 assert((IsFlat || ((Offset % EltSize) == 0)) &&
1217 "unexpected VGPR spill offset");
1219 bool IsOffsetLegal =
1220 IsFlat ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS,
1221 SIInstrFlags::FlatScratch)
1222 : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset);
1223 if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) {
1224 SOffset = MCRegister();
1226 // We currently only support spilling VGPRs to EltSize boundaries, meaning
1227 // we can simplify the adjustment of Offset here to just scale with
1228 // the wavefront size.
1230 Offset *= ST.getWavefrontSize();
1232 // We don't have access to the register scavenger if this function is called
1233 // during PEI::scavengeFrameVirtualRegs() so use LiveRegs in this case.
1235 SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
1236 } else if (LiveRegs) {
1237 for (MCRegister Reg : AMDGPU::SGPR_32RegClass) {
1238 if (LiveRegs->available(MF->getRegInfo(), Reg)) {
1246 // There are no free SGPRs, and we are in the process of spilling
1247 // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true
1248 // on SI/CI and on VI it is true until we implement spilling using scalar
1249 // stores), we have no way to free up an SGPR. Our solution here is to
1250 // add the offset directly to the ScratchOffset or StackPtrOffset
1251 // register, and then subtract the offset after the spill to return the
1252 // register to its original value.
1253 if (!ScratchOffsetReg)
1254 ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
1255 SOffset = ScratchOffsetReg;
1256 ScratchOffsetRegDelta = Offset;
1262 report_fatal_error("could not scavenge SGPR to spill in entry function");
1264 if (ScratchOffsetReg == AMDGPU::NoRegister) {
1265 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset).addImm(Offset);
1267 auto Add = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1268 .addReg(ScratchOffsetReg)
1270 Add->getOperand(3).setIsDead(); // Mark SCC as dead.
1276 if (IsFlat && SOffset == AMDGPU::NoRegister) {
1277 assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0
1278 && "Unexpected vaddr for flat scratch with a FI operand");
1280 assert(ST.hasFlatScratchSTMode());
1281 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1282 Desc = &TII->get(LoadStoreOp);
1287 for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e;
1288 ++i, RegOffset += EltSize) {
1289 if (i == NumSubRegs) {
1291 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1293 Desc = &TII->get(LoadStoreOp);
1295 unsigned NumRegs = EltSize / 4;
1296 Register SubReg = e == 1
1298 : Register(getSubReg(ValueReg,
1299 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1301 unsigned SOffsetRegState = 0;
1302 unsigned SrcDstRegState = getDefRegState(!IsStore);
1304 SOffsetRegState |= getKillRegState(Scavenged);
1305 // The last implicit use carries the "Kill" flag.
1306 SrcDstRegState |= getKillRegState(IsKill);
1309 // Make sure the whole register is defined if there are undef components by
1310 // adding an implicit def of the super-reg on the first instruction.
1311 bool NeedSuperRegDef = e > 1 && IsStore && i == 0;
1312 bool NeedSuperRegImpOperand = e > 1;
1314 // Remaining element size to spill into memory after some parts of it
1315 // spilled into either AGPRs or VGPRs.
1316 unsigned RemEltSize = EltSize;
1318 // AGPRs to spill VGPRs and vice versa are allocated in a reverse order,
1319 // starting from the last lane. If a register cannot be completely spilled
1320 // into another register, this ordering ensures its alignment does not
1321 // change. For targets with a VGPR alignment requirement this is important
1322 // with flat scratch usage, as we might otherwise get a scratch_load or
1323 // scratch_store of an unaligned register.
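// E.g. with EltSize == 16 and RegOffset == 0 (illustrative), the loop below
// visits lanes 3, 2, 1, 0; if only the upper lanes fit into AGPRs, the
// remaining lower, aligned part is left for the memory access.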
1324 for (int LaneS = (RegOffset + EltSize) / 4 - 1, Lane = LaneS,
1325 LaneE = RegOffset / 4;
1326 Lane >= LaneE; --Lane) {
1327 bool IsSubReg = e > 1 || EltSize > 4;
1328 Register Sub = IsSubReg
1329 ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane)))
1331 auto MIB = spillVGPRtoAGPR(ST, MBB, MI, Index, Lane, Sub, IsKill);
1332 if (!MIB.getInstr())
1334 if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == LaneS && !i)) {
1335 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1336 NeedSuperRegDef = false;
1338 if (IsSubReg || NeedSuperRegImpOperand) {
1339 NeedSuperRegImpOperand = true;
1340 unsigned State = SrcDstRegState;
1342 State &= ~RegState::Kill;
1343 MIB.addReg(ValueReg, RegState::Implicit | State);
1348 if (!RemEltSize) // Fully spilled into AGPRs.
1351 if (RemEltSize != EltSize) { // Partially spilled to AGPRs
1352 assert(IsFlat && EltSize > 4);
1354 unsigned NumRegs = RemEltSize / 4;
1355 SubReg = Register(getSubReg(ValueReg,
1356 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1357 unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize);
1358 Desc = &TII->get(Opc);
1361 unsigned FinalReg = SubReg;
1364 assert(EltSize == 4);
1367 assert(RS && "Needs to have RegScavenger to spill an AGPR!");
1368 // FIXME: change to scavengeRegisterBackwards()
1369 TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1370 RS->setRegUsed(TmpReg);
1373 auto AccRead = BuildMI(MBB, MI, DL,
1374 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TmpReg)
1375 .addReg(SubReg, getKillRegState(IsKill));
1376 if (NeedSuperRegDef)
1377 AccRead.addReg(ValueReg, RegState::ImplicitDefine);
1378 AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1383 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RegOffset);
1384 MachineMemOperand *NewMMO =
1385 MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize,
1386 commonAlignment(Alignment, RegOffset));
1389 BuildMI(MBB, MI, DL, *Desc)
1390 .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill));
1392 MIB.addReg(FuncInfo->getScratchRSrcReg());
1394 if (SOffset == AMDGPU::NoRegister) {
1398 MIB.addReg(SOffset, SOffsetRegState);
1400 MIB.addImm(Offset + RegOffset)
1403 MIB.addImm(0) // tfe
1405 MIB.addMemOperand(NewMMO);
1407 if (!IsAGPR && NeedSuperRegDef)
1408 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1410 if (!IsStore && TmpReg != AMDGPU::NoRegister) {
1411 MIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64),
1413 .addReg(TmpReg, RegState::Kill);
1414 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1417 if (NeedSuperRegImpOperand)
1418 MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
1421 if (ScratchOffsetRegDelta != 0) {
1422 // Subtract the offset we added to the ScratchOffset register.
1423 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1425 .addImm(-ScratchOffsetRegDelta);
1429 void SIRegisterInfo::buildVGPRSpillLoadStore(SGPRSpillBuilder &SB, int Index,
1430 int Offset, bool IsLoad,
1431 bool IsKill) const {
1433 MachineFrameInfo &FrameInfo = SB.MF.getFrameInfo();
1434 assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
1437 FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(SB.MF)
1439 : getFrameRegister(SB.MF);
1441 Align Alignment = FrameInfo.getObjectAlign(Index);
1442 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SB.MF, Index);
1443 MachineMemOperand *MMO = SB.MF.getMachineMemOperand(
1444 PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore,
1445 SB.EltSize, Alignment);
1448 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1449 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1450 buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, false,
1451 FrameReg, Offset * SB.EltSize, MMO, SB.RS);
1453 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1454 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1455 buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, IsKill,
1456 FrameReg, Offset * SB.EltSize, MMO, SB.RS);
1457 // This only ever adds one VGPR spill
1458 SB.MFI.addToSpilledVGPRs(1);
1462 bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
1466 bool OnlyToVGPR) const {
1467 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1469 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills =
1470 SB.MFI.getSGPRToVGPRSpills(Index);
1471 bool SpillToVGPR = !VGPRSpills.empty();
1472 if (OnlyToVGPR && !SpillToVGPR)
1475 assert(SpillToVGPR || (SB.SuperReg != SB.MFI.getStackPtrOffsetReg() &&
1476 SB.SuperReg != SB.MFI.getFrameOffsetReg()));
1480 assert(SB.NumSubRegs == VGPRSpills.size() &&
1481 "Num of VGPR lanes should be equal to num of SGPRs spilled");
1483 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1487 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1488 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1490 bool UseKill = SB.IsKill && i == SB.NumSubRegs - 1;
1492 // Mark the "old value of vgpr" input undef only if this is the first sgpr
1493 // spill to this specific vgpr in the first basic block.
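// Sketch of the code this loop emits (register assignments are assumptions):
// spilling s[4:5] whose two parts were assigned lanes 0 and 1 of the same
// reserved VGPR looks like
//   v_writelane_b32 vSpill, s4, 0
//   v_writelane_b32 vSpill, s5, 1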
1494 auto MIB = BuildMI(*SB.MBB, MI, SB.DL,
1495 SB.TII.get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
1496 .addReg(SubReg, getKillRegState(UseKill))
1498 .addReg(Spill.VGPR);
1501 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1503 LIS->InsertMachineInstrInMaps(*MIB);
1506 if (i == 0 && SB.NumSubRegs > 1) {
1507 // We may be spilling a super-register which is only partially defined,
1508 // and need to ensure later spills think the value is defined.
1509 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1512 if (SB.NumSubRegs > 1)
1513 MIB.addReg(SB.SuperReg, getKillRegState(UseKill) | RegState::Implicit);
1515 // FIXME: Since this spills to another register instead of an actual
1516 // frame index, we should delete the frame index when all references to
1517 // it are fixed.
1522 // SubReg carries the "Kill" flag when SubReg == SB.SuperReg.
1523 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1525 // Per VGPR helper data
1526 auto PVD = SB.getPerVGPRData();
1528 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1529 unsigned TmpVGPRFlags = RegState::Undef;
1531 // Write sub registers into the VGPR
1532 for (unsigned i = Offset * PVD.PerVGPR,
1533 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1538 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1540 MachineInstrBuilder WriteLane =
1541 BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1543 .addReg(SubReg, SubKillState)
1544 .addImm(i % PVD.PerVGPR)
1545 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1550 LIS->ReplaceMachineInstrInMaps(*MI, *WriteLane);
1552 LIS->InsertMachineInstrInMaps(*WriteLane);
1555 // There could be undef components of a spilled super register.
1556 // TODO: Can we detect this and skip the spill?
1557 if (SB.NumSubRegs > 1) {
1558 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1559 unsigned SuperKillState = 0;
1560 if (i + 1 == SB.NumSubRegs)
1561 SuperKillState |= getKillRegState(SB.IsKill);
1562 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1567 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ false);
1573 MI->eraseFromParent();
1574 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1577 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
1582 bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
1586 bool OnlyToVGPR) const {
1587 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1589 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills =
1590 SB.MFI.getSGPRToVGPRSpills(Index);
1591 bool SpillToVGPR = !VGPRSpills.empty();
1592 if (OnlyToVGPR && !SpillToVGPR)
1596 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1600 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1602 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1603 auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32),
1606 .addImm(Spill.Lane);
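// Sketch of the corresponding restore (mirroring the writelane example in
// spillSGPR; register assignments are assumptions):
//   v_readlane_b32 s4, vSpill, 0
//   v_readlane_b32 s5, vSpill, 1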
1607 if (SB.NumSubRegs > 1 && i == 0)
1608 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1611 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1613 LIS->InsertMachineInstrInMaps(*MIB);
1620 // Per VGPR helper data
1621 auto PVD = SB.getPerVGPRData();
1623 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1624 // Load in VGPR data
1625 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ true);
1628 for (unsigned i = Offset * PVD.PerVGPR,
1629 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1634 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1636 bool LastSubReg = (i + 1 == e);
1637 auto MIB = BuildMI(*SB.MBB, MI, SB.DL,
1638 SB.TII.get(AMDGPU::V_READLANE_B32), SubReg)
1639 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1641 if (SB.NumSubRegs > 1 && i == 0)
1642 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1645 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1647 LIS->InsertMachineInstrInMaps(*MIB);
1655 MI->eraseFromParent();
1658 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
1663 bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI,
1664 MachineBasicBlock &RestoreMBB,
1665 Register SGPR, RegScavenger *RS) const {
1666 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, SGPR, false, 0,
1669 // Generate the spill of SGPR to SB.TmpVGPR.
1670 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1671 auto PVD = SB.getPerVGPRData();
1672 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1673 unsigned TmpVGPRFlags = RegState::Undef;
1674 // Write sub registers into the VGPR
1675 for (unsigned i = Offset * PVD.PerVGPR,
1676 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1681 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1683 MachineInstrBuilder WriteLane =
1684 BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1686 .addReg(SubReg, SubKillState)
1687 .addImm(i % PVD.PerVGPR)
1688 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1690 // There could be undef components of a spilled super register.
1691 // TODO: Can we detect this and skip the spill?
1692 if (SB.NumSubRegs > 1) {
1693 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1694 unsigned SuperKillState = 0;
1695 if (i + 1 == SB.NumSubRegs)
1696 SuperKillState |= getKillRegState(SB.IsKill);
1697 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1700 // Don't need to write VGPR out.
1703 // Restore clobbered registers in the specified restore block.
1704 MI = RestoreMBB.end();
1705 SB.setMI(&RestoreMBB, MI);
1706 // Generate the restore of SGPR from SB.TmpVGPR.
1707 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1708 // Don't need to load VGPR in.
1710 for (unsigned i = Offset * PVD.PerVGPR,
1711 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1716 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1717 bool LastSubReg = (i + 1 == e);
1718 auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32),
1720 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1722 if (SB.NumSubRegs > 1 && i == 0)
1723 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1728 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1732 /// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
1733 /// a VGPR and the stack slot can be safely eliminated when all other users are
1734 /// handled.
1735 bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
1736 MachineBasicBlock::iterator MI,
1739 LiveIntervals *LIS) const {
1740 switch (MI->getOpcode()) {
1741 case AMDGPU::SI_SPILL_S1024_SAVE:
1742 case AMDGPU::SI_SPILL_S512_SAVE:
1743 case AMDGPU::SI_SPILL_S256_SAVE:
1744 case AMDGPU::SI_SPILL_S224_SAVE:
1745 case AMDGPU::SI_SPILL_S192_SAVE:
1746 case AMDGPU::SI_SPILL_S160_SAVE:
1747 case AMDGPU::SI_SPILL_S128_SAVE:
1748 case AMDGPU::SI_SPILL_S96_SAVE:
1749 case AMDGPU::SI_SPILL_S64_SAVE:
1750 case AMDGPU::SI_SPILL_S32_SAVE:
1751 return spillSGPR(MI, FI, RS, LIS, true);
1752 case AMDGPU::SI_SPILL_S1024_RESTORE:
1753 case AMDGPU::SI_SPILL_S512_RESTORE:
1754 case AMDGPU::SI_SPILL_S256_RESTORE:
1755 case AMDGPU::SI_SPILL_S224_RESTORE:
1756 case AMDGPU::SI_SPILL_S192_RESTORE:
1757 case AMDGPU::SI_SPILL_S160_RESTORE:
1758 case AMDGPU::SI_SPILL_S128_RESTORE:
1759 case AMDGPU::SI_SPILL_S96_RESTORE:
1760 case AMDGPU::SI_SPILL_S64_RESTORE:
1761 case AMDGPU::SI_SPILL_S32_RESTORE:
1762 return restoreSGPR(MI, FI, RS, LIS, true);
1763   default:
1764     llvm_unreachable("not an SGPR spill instruction");
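  // Note: the width suffix in the SI_SPILL_S<N>_SAVE/RESTORE opcodes is the
  // spilled size in bits, so e.g. SI_SPILL_S256_SAVE covers an eight-dword
  // (256-bit) SGPR tuple.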
1768 void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
1769 int SPAdj, unsigned FIOperandNum,
1770 RegScavenger *RS) const {
1771 MachineFunction *MF = MI->getParent()->getParent();
1772 MachineBasicBlock *MBB = MI->getParent();
1773 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1774 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1775 const SIInstrInfo *TII = ST.getInstrInfo();
1776 DebugLoc DL = MI->getDebugLoc();
1778 assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1780 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
1781 int Index = MI->getOperand(FIOperandNum).getIndex();
1783 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
1784                           ? getBaseRegister()
1785                           : getFrameRegister(*MF);
1787 switch (MI->getOpcode()) {
1788 // SGPR register spill
1789 case AMDGPU::SI_SPILL_S1024_SAVE:
1790 case AMDGPU::SI_SPILL_S512_SAVE:
1791 case AMDGPU::SI_SPILL_S256_SAVE:
1792 case AMDGPU::SI_SPILL_S224_SAVE:
1793 case AMDGPU::SI_SPILL_S192_SAVE:
1794 case AMDGPU::SI_SPILL_S160_SAVE:
1795 case AMDGPU::SI_SPILL_S128_SAVE:
1796 case AMDGPU::SI_SPILL_S96_SAVE:
1797 case AMDGPU::SI_SPILL_S64_SAVE:
1798 case AMDGPU::SI_SPILL_S32_SAVE: {
1799 spillSGPR(MI, Index, RS);
1803 // SGPR register restore
1804 case AMDGPU::SI_SPILL_S1024_RESTORE:
1805 case AMDGPU::SI_SPILL_S512_RESTORE:
1806 case AMDGPU::SI_SPILL_S256_RESTORE:
1807 case AMDGPU::SI_SPILL_S224_RESTORE:
1808 case AMDGPU::SI_SPILL_S192_RESTORE:
1809 case AMDGPU::SI_SPILL_S160_RESTORE:
1810 case AMDGPU::SI_SPILL_S128_RESTORE:
1811 case AMDGPU::SI_SPILL_S96_RESTORE:
1812 case AMDGPU::SI_SPILL_S64_RESTORE:
1813 case AMDGPU::SI_SPILL_S32_RESTORE: {
1814 restoreSGPR(MI, Index, RS);
1818 // VGPR register spill
1819 case AMDGPU::SI_SPILL_V1024_SAVE:
1820 case AMDGPU::SI_SPILL_V512_SAVE:
1821 case AMDGPU::SI_SPILL_V256_SAVE:
1822 case AMDGPU::SI_SPILL_V224_SAVE:
1823 case AMDGPU::SI_SPILL_V192_SAVE:
1824 case AMDGPU::SI_SPILL_V160_SAVE:
1825 case AMDGPU::SI_SPILL_V128_SAVE:
1826 case AMDGPU::SI_SPILL_V96_SAVE:
1827 case AMDGPU::SI_SPILL_V64_SAVE:
1828 case AMDGPU::SI_SPILL_V32_SAVE:
1829 case AMDGPU::SI_SPILL_A1024_SAVE:
1830 case AMDGPU::SI_SPILL_A512_SAVE:
1831 case AMDGPU::SI_SPILL_A256_SAVE:
1832 case AMDGPU::SI_SPILL_A224_SAVE:
1833 case AMDGPU::SI_SPILL_A192_SAVE:
1834 case AMDGPU::SI_SPILL_A160_SAVE:
1835 case AMDGPU::SI_SPILL_A128_SAVE:
1836 case AMDGPU::SI_SPILL_A96_SAVE:
1837 case AMDGPU::SI_SPILL_A64_SAVE:
1838 case AMDGPU::SI_SPILL_A32_SAVE:
1839 case AMDGPU::SI_SPILL_AV1024_SAVE:
1840 case AMDGPU::SI_SPILL_AV512_SAVE:
1841 case AMDGPU::SI_SPILL_AV256_SAVE:
1842 case AMDGPU::SI_SPILL_AV224_SAVE:
1843 case AMDGPU::SI_SPILL_AV192_SAVE:
1844 case AMDGPU::SI_SPILL_AV160_SAVE:
1845 case AMDGPU::SI_SPILL_AV128_SAVE:
1846 case AMDGPU::SI_SPILL_AV96_SAVE:
1847 case AMDGPU::SI_SPILL_AV64_SAVE:
1848 case AMDGPU::SI_SPILL_AV32_SAVE: {
1849 const MachineOperand *VData = TII->getNamedOperand(*MI,
1850 AMDGPU::OpName::vdata);
1851 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1852 MFI->getStackPtrOffsetReg());
1854 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1855 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1856 auto *MBB = MI->getParent();
1857 buildSpillLoadStore(
1858 *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
1859 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1860 *MI->memoperands_begin(), RS);
1861 MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
1862 MI->eraseFromParent();
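    // buildSpillLoadStore expands this pseudo into however many real scratch
    // or buffer accesses are needed to cover the whole tuple, bumping the
    // immediate offset for each piece and inserting AGPR<->VGPR copies where
    // needed (a rough sketch of the behavior, not an exact listing of the
    // expansion).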
1865 case AMDGPU::SI_SPILL_V32_RESTORE:
1866 case AMDGPU::SI_SPILL_V64_RESTORE:
1867 case AMDGPU::SI_SPILL_V96_RESTORE:
1868 case AMDGPU::SI_SPILL_V128_RESTORE:
1869 case AMDGPU::SI_SPILL_V160_RESTORE:
1870 case AMDGPU::SI_SPILL_V192_RESTORE:
1871 case AMDGPU::SI_SPILL_V224_RESTORE:
1872 case AMDGPU::SI_SPILL_V256_RESTORE:
1873 case AMDGPU::SI_SPILL_V512_RESTORE:
1874 case AMDGPU::SI_SPILL_V1024_RESTORE:
1875 case AMDGPU::SI_SPILL_A32_RESTORE:
1876 case AMDGPU::SI_SPILL_A64_RESTORE:
1877 case AMDGPU::SI_SPILL_A96_RESTORE:
1878 case AMDGPU::SI_SPILL_A128_RESTORE:
1879 case AMDGPU::SI_SPILL_A160_RESTORE:
1880 case AMDGPU::SI_SPILL_A192_RESTORE:
1881 case AMDGPU::SI_SPILL_A224_RESTORE:
1882 case AMDGPU::SI_SPILL_A256_RESTORE:
1883 case AMDGPU::SI_SPILL_A512_RESTORE:
1884 case AMDGPU::SI_SPILL_A1024_RESTORE:
1885 case AMDGPU::SI_SPILL_AV32_RESTORE:
1886 case AMDGPU::SI_SPILL_AV64_RESTORE:
1887 case AMDGPU::SI_SPILL_AV96_RESTORE:
1888 case AMDGPU::SI_SPILL_AV128_RESTORE:
1889 case AMDGPU::SI_SPILL_AV160_RESTORE:
1890 case AMDGPU::SI_SPILL_AV192_RESTORE:
1891 case AMDGPU::SI_SPILL_AV224_RESTORE:
1892 case AMDGPU::SI_SPILL_AV256_RESTORE:
1893 case AMDGPU::SI_SPILL_AV512_RESTORE:
1894 case AMDGPU::SI_SPILL_AV1024_RESTORE: {
1895 const MachineOperand *VData = TII->getNamedOperand(*MI,
1896 AMDGPU::OpName::vdata);
1897 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1898 MFI->getStackPtrOffsetReg());
1900 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1901 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1902 auto *MBB = MI->getParent();
1903 buildSpillLoadStore(
1904 *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
1905 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1906 *MI->memoperands_begin(), RS);
1907 MI->eraseFromParent();
1912 // Other access to frame index
1913 const DebugLoc &DL = MI->getDebugLoc();
1915 int64_t Offset = FrameInfo.getObjectOffset(Index);
1916 if (ST.enableFlatScratch()) {
1917 if (TII->isFLATScratch(*MI)) {
1918 assert((int16_t)FIOperandNum ==
1919 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1920 AMDGPU::OpName::saddr));
1922 // The offset is always swizzled, just replace it
1924 FIOp.ChangeToRegister(FrameReg, false);
1929 MachineOperand *OffsetOp =
1930 TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
1931 int64_t NewOffset = Offset + OffsetOp->getImm();
1932 if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
1933 SIInstrFlags::FlatScratch)) {
1934 OffsetOp->setImm(NewOffset);
1940 assert(!TII->getNamedOperand(*MI, AMDGPU::OpName::vaddr) &&
1941 "Unexpected vaddr for flat scratch with a FI operand");
1943 // On GFX10 we have ST mode to use no registers for an address.
1944 // Otherwise we need to materialize 0 into an SGPR.
1945 if (!Offset && ST.hasFlatScratchSTMode()) {
1946 unsigned Opc = MI->getOpcode();
1947 unsigned NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc);
1948           MI->RemoveOperand(
1949               AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr));
1950 MI->setDesc(TII->get(NewOpc));
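          // After this rewrite the instruction uses the ST (no-saddr) scratch
          // form, so nothing has to be materialized into an SGPR just to hold
          // a zero base; this path is only reached when the folded offset is 0.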
1956 FIOp.ChangeToImmediate(Offset);
1957       if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp))
1958         return;
1961     // We need to use a register here. Check if we can use an SGPR or need
1962     // a VGPR.
1963 FIOp.ChangeToRegister(AMDGPU::M0, false);
1964 bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp);
1966 if (!Offset && FrameReg && UseSGPR) {
1967 FIOp.setReg(FrameReg);
1971 const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass
1972 : &AMDGPU::VGPR_32RegClass;
1974 Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR);
1975 FIOp.setReg(TmpReg);
1976 FIOp.setIsKill(true);
1978 if ((!FrameReg || !Offset) && TmpReg) {
1979 unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1980 auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg);
1982 MIB.addReg(FrameReg);
1991 : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0,
1994 // TODO: for flat scratch another attempt can be made with a VGPR index
1995 // if no SGPRs can be scavenged.
1996 if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR))
1997 report_fatal_error("Cannot scavenge register in FI elimination!");
2000 // Use frame register and restore it after.
2002 FIOp.setReg(FrameReg);
2003 FIOp.setIsKill(false);
2006 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), TmpSReg)
2011 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
2012 .addReg(TmpSReg, RegState::Kill);
2014 if (TmpSReg == FrameReg) {
2015 // Undo frame register modification.
2016 BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADD_I32),
2025 bool IsMUBUF = TII->isMUBUF(*MI);
2027 if (!IsMUBUF && !MFI->isEntryFunction()) {
2028 // Convert to a swizzled stack address by scaling by the wave size.
2030 // In an entry function/kernel the offset is already swizzled.
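    // Worked example (illustrative): with a wave64 subtarget an unswizzled
    // frame-register value of 1024 bytes corresponds to a per-lane (swizzled)
    // offset of 1024 >> log2(64) = 16 bytes, which is what the shift-right by
    // ST.getWavefrontSizeLog2() below computes before the object offset is
    // added.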
2032 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
2033 Register ResultReg =
2034 IsCopy ? MI->getOperand(0).getReg()
2035 : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
2037 int64_t Offset = FrameInfo.getObjectOffset(Index);
2039 // XXX - This never happens because of emergency scavenging slot at 0?
2040 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
2041             .addImm(ST.getWavefrontSizeLog2())
2042             .addReg(FrameReg);
2044 if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
2045 // Reuse ResultReg in intermediate step.
2046 Register ScaledReg = ResultReg;
2048           BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
2049                   ScaledReg)
2050               .addImm(ST.getWavefrontSizeLog2())
2051               .addReg(FrameReg);
2053 const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
2055 // TODO: Fold if use instruction is another add of a constant.
2056 if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
2057           // FIXME: This can fail
2058           MIB.addImm(Offset);
2059           MIB.addReg(ScaledReg, RegState::Kill);
2060           if (!IsVOP2)
2061             MIB.addImm(0); // clamp bit
2063 assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 &&
2064 "Need to reuse carry out register");
2066 // Use scavenged unused carry out as offset register.
2067 Register ConstOffsetReg;
2068           if (isWave32)
2069             ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
2070           else
2071             ConstOffsetReg = MIB.getReg(1);
2072
2073           BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
2074               .addImm(Offset);
2075 MIB.addReg(ConstOffsetReg, RegState::Kill);
2076 MIB.addReg(ScaledReg, RegState::Kill);
2077 MIB.addImm(0); // clamp bit
2080 // We have to produce a carry out, and there isn't a free SGPR pair
2081 // for it. We can keep the whole computation on the SALU to avoid
2082 // clobbering an additional register at the cost of an extra mov.
2084 // We may have 1 free scratch SGPR even though a carry out is
2085 // unavailable. Only one additional mov is needed.
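      // A minimal sketch of this SALU fallback (placeholder registers,
      // wave64):
      //   s_lshr_b32 s_tmp, s_frame, 6    ; scale to a per-lane address
      //   s_add_i32  s_tmp, s_tmp, <offset>
      //   <copy s_tmp into the result VGPR>
      // If no scratch SGPR is available, s_tmp aliases the frame register and
      // the add and shift are undone again afterwards.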
2086 Register TmpScaledReg =
2087 RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
2088 Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg;
2090 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
2091               .addReg(FrameReg)
2092               .addImm(ST.getWavefrontSizeLog2());
2093 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
2094               .addReg(ScaledReg, RegState::Kill)
2095               .addImm(Offset);
2096 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
2097 .addReg(ScaledReg, RegState::Kill);
2099 // If there were truly no free SGPRs, we need to undo everything.
2100 if (!TmpScaledReg.isValid()) {
2101 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
2102               .addReg(ScaledReg, RegState::Kill)
2103               .addImm(-Offset);
2104 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
2105               .addReg(ScaledReg, RegState::Kill)
2106               .addImm(ST.getWavefrontSizeLog2());
2111 // Don't introduce an extra copy if we're just materializing in a mov.
2113 MI->eraseFromParent();
2115 FIOp.ChangeToRegister(ResultReg, false, false, true);
2120 // Disable offen so we don't need a 0 vgpr base.
2121 assert(static_cast<int>(FIOperandNum) ==
2122 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
2123 AMDGPU::OpName::vaddr));
2125 auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
2126 assert((SOffset.isImm() && SOffset.getImm() == 0));
2128 if (FrameReg != AMDGPU::NoRegister)
2129 SOffset.ChangeToRegister(FrameReg, false);
2131 int64_t Offset = FrameInfo.getObjectOffset(Index);
2133 = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
2134 int64_t NewOffset = OldImm + Offset;
2136 if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
2137 buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
2138 MI->eraseFromParent();
2143       // If the offset is simply too big, don't convert to a scratch wave offset
2144       // relative index.
2146 FIOp.ChangeToImmediate(Offset);
2147 if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
2148 Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
2149 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
2151 FIOp.ChangeToRegister(TmpReg, false, false, true);
2157 StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
2158 return AMDGPUInstPrinter::getRegisterName(Reg);
2161 static const TargetRegisterClass *
2162 getAnyVGPRClassForBitWidth(unsigned BitWidth) {
2164 return &AMDGPU::VReg_64RegClass;
2166 return &AMDGPU::VReg_96RegClass;
2167 if (BitWidth <= 128)
2168 return &AMDGPU::VReg_128RegClass;
2169 if (BitWidth <= 160)
2170 return &AMDGPU::VReg_160RegClass;
2171 if (BitWidth <= 192)
2172 return &AMDGPU::VReg_192RegClass;
2173 if (BitWidth <= 224)
2174 return &AMDGPU::VReg_224RegClass;
2175 if (BitWidth <= 256)
2176 return &AMDGPU::VReg_256RegClass;
2177 if (BitWidth <= 512)
2178 return &AMDGPU::VReg_512RegClass;
2179 if (BitWidth <= 1024)
2180 return &AMDGPU::VReg_1024RegClass;
2185 static const TargetRegisterClass *
2186 getAlignedVGPRClassForBitWidth(unsigned BitWidth) {
2188 return &AMDGPU::VReg_64_Align2RegClass;
2190 return &AMDGPU::VReg_96_Align2RegClass;
2191 if (BitWidth <= 128)
2192 return &AMDGPU::VReg_128_Align2RegClass;
2193 if (BitWidth <= 160)
2194 return &AMDGPU::VReg_160_Align2RegClass;
2195 if (BitWidth <= 192)
2196 return &AMDGPU::VReg_192_Align2RegClass;
2197 if (BitWidth <= 224)
2198 return &AMDGPU::VReg_224_Align2RegClass;
2199 if (BitWidth <= 256)
2200 return &AMDGPU::VReg_256_Align2RegClass;
2201 if (BitWidth <= 512)
2202 return &AMDGPU::VReg_512_Align2RegClass;
2203 if (BitWidth <= 1024)
2204 return &AMDGPU::VReg_1024_Align2RegClass;
2209 const TargetRegisterClass *
2210 SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const {
2212 return &AMDGPU::VReg_1RegClass;
2214 return &AMDGPU::VGPR_LO16RegClass;
2216 return &AMDGPU::VGPR_32RegClass;
2217 return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth)
2218 : getAnyVGPRClassForBitWidth(BitWidth);
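// For example (illustrative), a 70-bit value rounds up to the next supported
// tuple size, so getVGPRClassForBitWidth(70) yields VReg_96 (or the Align2
// variant on subtargets that require even-aligned VGPR tuples); widths beyond
// 1024 bits have no VGPR class and the helpers above return nullptr.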
2221 static const TargetRegisterClass *
2222 getAnyAGPRClassForBitWidth(unsigned BitWidth) {
2224 return &AMDGPU::AReg_64RegClass;
2226 return &AMDGPU::AReg_96RegClass;
2227 if (BitWidth <= 128)
2228 return &AMDGPU::AReg_128RegClass;
2229 if (BitWidth <= 160)
2230 return &AMDGPU::AReg_160RegClass;
2231 if (BitWidth <= 192)
2232 return &AMDGPU::AReg_192RegClass;
2233 if (BitWidth <= 224)
2234 return &AMDGPU::AReg_224RegClass;
2235 if (BitWidth <= 256)
2236 return &AMDGPU::AReg_256RegClass;
2237 if (BitWidth <= 512)
2238 return &AMDGPU::AReg_512RegClass;
2239 if (BitWidth <= 1024)
2240 return &AMDGPU::AReg_1024RegClass;
2245 static const TargetRegisterClass *
2246 getAlignedAGPRClassForBitWidth(unsigned BitWidth) {
2248 return &AMDGPU::AReg_64_Align2RegClass;
2250 return &AMDGPU::AReg_96_Align2RegClass;
2251 if (BitWidth <= 128)
2252 return &AMDGPU::AReg_128_Align2RegClass;
2253 if (BitWidth <= 160)
2254 return &AMDGPU::AReg_160_Align2RegClass;
2255 if (BitWidth <= 192)
2256 return &AMDGPU::AReg_192_Align2RegClass;
2257 if (BitWidth <= 224)
2258 return &AMDGPU::AReg_224_Align2RegClass;
2259 if (BitWidth <= 256)
2260 return &AMDGPU::AReg_256_Align2RegClass;
2261 if (BitWidth <= 512)
2262 return &AMDGPU::AReg_512_Align2RegClass;
2263 if (BitWidth <= 1024)
2264 return &AMDGPU::AReg_1024_Align2RegClass;
2269 const TargetRegisterClass *
2270 SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const {
2272 return &AMDGPU::AGPR_LO16RegClass;
2274 return &AMDGPU::AGPR_32RegClass;
2275 return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth)
2276 : getAnyAGPRClassForBitWidth(BitWidth);
2279 static const TargetRegisterClass *
2280 getAnyVectorSuperClassForBitWidth(unsigned BitWidth) {
2282 return &AMDGPU::AV_64RegClass;
2284 return &AMDGPU::AV_96RegClass;
2285 if (BitWidth <= 128)
2286 return &AMDGPU::AV_128RegClass;
2287 if (BitWidth <= 160)
2288 return &AMDGPU::AV_160RegClass;
2289 if (BitWidth <= 192)
2290 return &AMDGPU::AV_192RegClass;
2291 if (BitWidth <= 224)
2292 return &AMDGPU::AV_224RegClass;
2293 if (BitWidth <= 256)
2294 return &AMDGPU::AV_256RegClass;
2295 if (BitWidth <= 512)
2296 return &AMDGPU::AV_512RegClass;
2297 if (BitWidth <= 1024)
2298 return &AMDGPU::AV_1024RegClass;
2303 static const TargetRegisterClass *
2304 getAlignedVectorSuperClassForBitWidth(unsigned BitWidth) {
2306 return &AMDGPU::AV_64_Align2RegClass;
2308 return &AMDGPU::AV_96_Align2RegClass;
2309 if (BitWidth <= 128)
2310 return &AMDGPU::AV_128_Align2RegClass;
2311 if (BitWidth <= 160)
2312 return &AMDGPU::AV_160_Align2RegClass;
2313 if (BitWidth <= 192)
2314 return &AMDGPU::AV_192_Align2RegClass;
2315 if (BitWidth <= 224)
2316 return &AMDGPU::AV_224_Align2RegClass;
2317 if (BitWidth <= 256)
2318 return &AMDGPU::AV_256_Align2RegClass;
2319 if (BitWidth <= 512)
2320 return &AMDGPU::AV_512_Align2RegClass;
2321 if (BitWidth <= 1024)
2322 return &AMDGPU::AV_1024_Align2RegClass;
2327 const TargetRegisterClass *
2328 SIRegisterInfo::getVectorSuperClassForBitWidth(unsigned BitWidth) const {
2330 return &AMDGPU::VGPR_LO16RegClass;
2332 return &AMDGPU::AV_32RegClass;
2333 return ST.needsAlignedVGPRs()
2334 ? getAlignedVectorSuperClassForBitWidth(BitWidth)
2335 : getAnyVectorSuperClassForBitWidth(BitWidth);
2338 const TargetRegisterClass *
2339 SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
2341 return &AMDGPU::SGPR_LO16RegClass;
2343 return &AMDGPU::SReg_32RegClass;
2345 return &AMDGPU::SReg_64RegClass;
2347 return &AMDGPU::SGPR_96RegClass;
2348 if (BitWidth <= 128)
2349 return &AMDGPU::SGPR_128RegClass;
2350 if (BitWidth <= 160)
2351 return &AMDGPU::SGPR_160RegClass;
2352 if (BitWidth <= 192)
2353 return &AMDGPU::SGPR_192RegClass;
2354 if (BitWidth <= 224)
2355 return &AMDGPU::SGPR_224RegClass;
2356 if (BitWidth <= 256)
2357 return &AMDGPU::SGPR_256RegClass;
2358 if (BitWidth <= 512)
2359 return &AMDGPU::SGPR_512RegClass;
2360 if (BitWidth <= 1024)
2361 return &AMDGPU::SGPR_1024RegClass;
2366 // FIXME: This is very slow. It might be worth creating a map from physreg to
2367 // register class.
2368 const TargetRegisterClass *
2369 SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
2370 static const TargetRegisterClass *const BaseClasses[] = {
2371 &AMDGPU::VGPR_LO16RegClass,
2372 &AMDGPU::VGPR_HI16RegClass,
2373 &AMDGPU::SReg_LO16RegClass,
2374 &AMDGPU::AGPR_LO16RegClass,
2375 &AMDGPU::VGPR_32RegClass,
2376 &AMDGPU::SReg_32RegClass,
2377 &AMDGPU::AGPR_32RegClass,
2379 &AMDGPU::VReg_64_Align2RegClass,
2380 &AMDGPU::VReg_64RegClass,
2381 &AMDGPU::SReg_64RegClass,
2382 &AMDGPU::AReg_64_Align2RegClass,
2383 &AMDGPU::AReg_64RegClass,
2384 &AMDGPU::VReg_96_Align2RegClass,
2385 &AMDGPU::VReg_96RegClass,
2386 &AMDGPU::SReg_96RegClass,
2387 &AMDGPU::AReg_96_Align2RegClass,
2388 &AMDGPU::AReg_96RegClass,
2389 &AMDGPU::VReg_128_Align2RegClass,
2390 &AMDGPU::VReg_128RegClass,
2391 &AMDGPU::SReg_128RegClass,
2392 &AMDGPU::AReg_128_Align2RegClass,
2393 &AMDGPU::AReg_128RegClass,
2394 &AMDGPU::VReg_160_Align2RegClass,
2395 &AMDGPU::VReg_160RegClass,
2396 &AMDGPU::SReg_160RegClass,
2397 &AMDGPU::AReg_160_Align2RegClass,
2398 &AMDGPU::AReg_160RegClass,
2399 &AMDGPU::VReg_192_Align2RegClass,
2400 &AMDGPU::VReg_192RegClass,
2401 &AMDGPU::SReg_192RegClass,
2402 &AMDGPU::AReg_192_Align2RegClass,
2403 &AMDGPU::AReg_192RegClass,
2404 &AMDGPU::VReg_224_Align2RegClass,
2405 &AMDGPU::VReg_224RegClass,
2406 &AMDGPU::SReg_224RegClass,
2407 &AMDGPU::AReg_224_Align2RegClass,
2408 &AMDGPU::AReg_224RegClass,
2409 &AMDGPU::VReg_256_Align2RegClass,
2410 &AMDGPU::VReg_256RegClass,
2411 &AMDGPU::SReg_256RegClass,
2412 &AMDGPU::AReg_256_Align2RegClass,
2413 &AMDGPU::AReg_256RegClass,
2414 &AMDGPU::VReg_512_Align2RegClass,
2415 &AMDGPU::VReg_512RegClass,
2416 &AMDGPU::SReg_512RegClass,
2417 &AMDGPU::AReg_512_Align2RegClass,
2418 &AMDGPU::AReg_512RegClass,
2419 &AMDGPU::SReg_1024RegClass,
2420 &AMDGPU::VReg_1024_Align2RegClass,
2421 &AMDGPU::VReg_1024RegClass,
2422 &AMDGPU::AReg_1024_Align2RegClass,
2423 &AMDGPU::AReg_1024RegClass,
2424 &AMDGPU::SCC_CLASSRegClass,
2425 &AMDGPU::Pseudo_SReg_32RegClass,
2426 &AMDGPU::Pseudo_SReg_128RegClass,
2429 for (const TargetRegisterClass *BaseClass : BaseClasses) {
2430     if (BaseClass->contains(Reg)) {
2431       return BaseClass;
2437 bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI,
2438 Register Reg) const {
2439 const TargetRegisterClass *RC;
2440 if (Reg.isVirtual())
2441 RC = MRI.getRegClass(Reg);
2443 RC = getPhysRegClass(Reg);
2444 return isSGPRClass(RC);
2447 const TargetRegisterClass *
2448 SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const {
2449 unsigned Size = getRegSizeInBits(*SRC);
2450 const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
2451 assert(VRC && "Invalid register class size");
2455 const TargetRegisterClass *
2456 SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const {
2457 unsigned Size = getRegSizeInBits(*SRC);
2458 const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
2459 assert(ARC && "Invalid register class size");
2463 const TargetRegisterClass *
2464 SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
2465 unsigned Size = getRegSizeInBits(*VRC);
2467 return &AMDGPU::SGPR_32RegClass;
2468 const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
2469 assert(SRC && "Invalid register class size");
2473 const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
2474 const TargetRegisterClass *RC, unsigned SubIdx) const {
2475 if (SubIdx == AMDGPU::NoSubRegister)
2478 // We can assume that each lane corresponds to one 32-bit register.
2479 unsigned Size = getNumChannelsFromSubReg(SubIdx) * 32;
2480 if (isAGPRClass(RC)) {
2481 RC = getAGPRClassForBitWidth(Size);
2482 } else if (isVGPRClass(RC)) {
2483 RC = getVGPRClassForBitWidth(Size);
2484 } else if (isVectorSuperClass(RC)) {
2485 RC = getVectorSuperClassForBitWidth(Size);
2487 RC = getSGPRClassForBitWidth(Size);
2489 assert(RC && "Invalid sub-register class size");
2493 const TargetRegisterClass *
2494 SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC,
2495 const TargetRegisterClass *SubRC,
2496 unsigned SubIdx) const {
2497 // Ensure this subregister index is aligned in the super register.
2498 const TargetRegisterClass *MatchRC =
2499 getMatchingSuperRegClass(SuperRC, SubRC, SubIdx);
2500 return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr;
2503 bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
2504 if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2505 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
2506 return !ST.hasMFMAInlineLiteralBug();
2508 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
2509 OpType <= AMDGPU::OPERAND_SRC_LAST;
2512 bool SIRegisterInfo::shouldRewriteCopySrc(
2513 const TargetRegisterClass *DefRC,
2515 const TargetRegisterClass *SrcRC,
2516 unsigned SrcSubReg) const {
2517 // We want to prefer the smallest register class possible, so we don't want to
2518 // stop and rewrite on anything that looks like a subregister
2519 // extract. Operations mostly don't care about the super register class, so we
2520   // only want to stop on the most basic of copies between the same register
2521   // class.
2522   //
2523   // e.g. if we have something like
2524   // %0 = ...
2525   // %1 = ...
2526   // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
2527   // %3 = COPY %2, sub0
2528   //
2529   // We want to look through the COPY to find:
2530   // %3 = COPY %0
2533 return getCommonSubClass(DefRC, SrcRC) != nullptr;
2536 bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
2537 // TODO: 64-bit operands have extending behavior from 32-bit literal.
2538 return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST &&
2539 OpType <= AMDGPU::OPERAND_REG_IMM_LAST;
2542 /// Returns the lowest register that is not used at any point in the function.
2543 /// If all registers are used, then this function will return
2544 /// AMDGPU::NoRegister. If \p ReserveHighestVGPR is true, then return the
2545 /// highest unused register.
2546 MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
2547 const TargetRegisterClass *RC,
2548 const MachineFunction &MF,
2549 bool ReserveHighestVGPR) const {
2550 if (ReserveHighestVGPR) {
2551 for (MCRegister Reg : reverse(*RC))
2552       if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2553         return Reg;
2554   } else {
2555 for (MCRegister Reg : *RC)
2556       if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2557         return Reg;
2558   }
2559 return MCRegister();
2562 ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
2563 unsigned EltSize) const {
2564 const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
2565 assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
2567 const unsigned RegDWORDs = RegBitWidth / 32;
2568 const unsigned EltDWORDs = EltSize / 4;
2569 assert(RegSplitParts.size() + 1 >= EltDWORDs);
2571 const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
2572 const unsigned NumParts = RegDWORDs / EltDWORDs;
2574 return makeArrayRef(Parts.data(), NumParts);
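  // Worked example (illustrative): for a 128-bit register class and
  // EltSize == 8 this returns two entries, describing the sub0_sub1 and
  // sub2_sub3 sub-register indices that split the register into 64-bit
  // pieces.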
2577 const TargetRegisterClass*
2578 SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
2579 Register Reg) const {
2580 return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg);
2583 bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
2584 Register Reg) const {
2585 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2586 // Registers without classes are unaddressable, SGPR-like registers.
2587 return RC && isVGPRClass(RC);
2590 bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
2591 Register Reg) const {
2592 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2594 // Registers without classes are unaddressable, SGPR-like registers.
2595 return RC && isAGPRClass(RC);
2598 bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
2599 const TargetRegisterClass *SrcRC,
2601 const TargetRegisterClass *DstRC,
2603 const TargetRegisterClass *NewRC,
2604 LiveIntervals &LIS) const {
2605 unsigned SrcSize = getRegSizeInBits(*SrcRC);
2606 unsigned DstSize = getRegSizeInBits(*DstRC);
2607 unsigned NewSize = getRegSizeInBits(*NewRC);
2609   // Do not increase size of registers beyond a dword; we would need to allocate
2610   // adjacent registers and constrain regalloc more than needed.
2612 // Always allow dword coalescing.
2613   if (SrcSize <= 32 || DstSize <= 32)
2614     return true;
2616 return NewSize <= DstSize || NewSize <= SrcSize;
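  // For example, coalescing that only involves a 32-bit source or destination
  // is always allowed, while merging two 64-bit registers into a 128-bit
  // NewRC is rejected because NewSize exceeds both of the original sizes.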
2619 unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
2620 MachineFunction &MF) const {
2621 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2623   unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
2624                                                        MF.getFunction());
2625 switch (RC->getID()) {
2626   default:
2627     return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF);
2628 case AMDGPU::VGPR_32RegClassID:
2629 case AMDGPU::VGPR_LO16RegClassID:
2630 case AMDGPU::VGPR_HI16RegClassID:
2631 return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
2632 case AMDGPU::SGPR_32RegClassID:
2633 case AMDGPU::SGPR_LO16RegClassID:
2634 return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
2638 unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
2639 unsigned Idx) const {
2640 if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
2641 Idx == AMDGPU::RegisterPressureSets::AGPR_32)
2642 return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
2643 const_cast<MachineFunction &>(MF));
2645 if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
2646 return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
2647 const_cast<MachineFunction &>(MF));
2649 llvm_unreachable("Unexpected register pressure set!");
2652 const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
2653 static const int Empty[] = { -1 };
2655   if (RegPressureIgnoredUnits[RegUnit])
2656     return Empty;
2658 return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
2661 MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
2662   // Not a callee-saved register.
2663 return AMDGPU::SGPR30_SGPR31;
2666 const TargetRegisterClass *
2667 SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
2668 const RegisterBank &RB,
2669 const MachineRegisterInfo &MRI) const {
2670 switch (RB.getID()) {
2671 case AMDGPU::VGPRRegBankID:
2672 return getVGPRClassForBitWidth(std::max(32u, Size));
2673 case AMDGPU::VCCRegBankID:
2675 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2676 : &AMDGPU::SReg_64_XEXECRegClass;
2677 case AMDGPU::SGPRRegBankID:
2678 return getSGPRClassForBitWidth(std::max(32u, Size));
2679 case AMDGPU::AGPRRegBankID:
2680 return getAGPRClassForBitWidth(std::max(32u, Size));
2682 llvm_unreachable("unknown register bank");
2686 const TargetRegisterClass *
2687 SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
2688 const MachineRegisterInfo &MRI) const {
2689 const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
2690 if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>())
2691 return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);
2693 if (const auto *RC = RCOrRB.dyn_cast<const TargetRegisterClass *>())
2694 return getAllocatableClass(RC);
2699 MCRegister SIRegisterInfo::getVCC() const {
2700 return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
2703 const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const {
2704 // VGPR tuples have an alignment requirement on gfx90a variants.
2705 return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass
2706 : &AMDGPU::VReg_64RegClass;
2709 const TargetRegisterClass *
2710 SIRegisterInfo::getRegClass(unsigned RCID) const {
2711 switch ((int)RCID) {
2712 case AMDGPU::SReg_1RegClassID:
2714 case AMDGPU::SReg_1_XEXECRegClassID:
2715 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2716 : &AMDGPU::SReg_64_XEXECRegClass;
2720 return AMDGPUGenRegisterInfo::getRegClass(RCID);
2724 // Find reaching register definition
2725 MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
2727 MachineRegisterInfo &MRI,
2728 LiveIntervals *LIS) const {
2729 auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
2730 SlotIndex UseIdx = LIS->getInstructionIndex(Use);
2733 if (Reg.isVirtual()) {
2734     if (!LIS->hasInterval(Reg))
2735       return nullptr;
2736 LiveInterval &LI = LIS->getInterval(Reg);
2737 LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
2738 : MRI.getMaxLaneMaskForVReg(Reg);
2739 VNInfo *V = nullptr;
2740 if (LI.hasSubRanges()) {
2741 for (auto &S : LI.subranges()) {
2742 if ((S.LaneMask & SubLanes) == SubLanes) {
2743 V = S.getVNInfoAt(UseIdx);
2748 V = LI.getVNInfoAt(UseIdx);
2755 for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid();
2757 LiveRange &LR = LIS->getRegUnit(*Units);
2758 if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
2759 if (!DefIdx.isValid() ||
2760 MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
2761 LIS->getInstructionFromIndex(V->def)))
2769 MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);
2771   if (!Def || !MDT.dominates(Def, &Use))
2772     return nullptr;
2774 assert(Def->modifiesRegister(Reg, this));
2779 MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const {
2780 assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);
2782 for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
2783 AMDGPU::SReg_32RegClass,
2784 AMDGPU::AGPR_32RegClass } ) {
2785     if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
2786       return Super;
2787   }
2788 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
2789                                             &AMDGPU::VGPR_32RegClass)) {
2790     return Super;
2791   }
2793 return AMDGPU::NoRegister;
2796 bool SIRegisterInfo::isProperlyAlignedRC(const TargetRegisterClass &RC) const {
2797   if (!ST.needsAlignedVGPRs())
2798     return true;
2800 if (isVGPRClass(&RC))
2801 return RC.hasSuperClassEq(getVGPRClassForBitWidth(getRegSizeInBits(RC)));
2802 if (isAGPRClass(&RC))
2803 return RC.hasSuperClassEq(getAGPRClassForBitWidth(getRegSizeInBits(RC)));
2804 if (isVectorSuperClass(&RC))
2805 return RC.hasSuperClassEq(
2806 getVectorSuperClassForBitWidth(getRegSizeInBits(RC)));
2811 bool SIRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
2812   switch (PhysReg) {
2813   case AMDGPU::SGPR_NULL:
2814 case AMDGPU::SRC_SHARED_BASE:
2815 case AMDGPU::SRC_PRIVATE_BASE:
2816 case AMDGPU::SRC_SHARED_LIMIT:
2817 case AMDGPU::SRC_PRIVATE_LIMIT:
2824 ArrayRef<MCPhysReg>
2825 SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
2826 return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
2827 ST.getMaxNumSGPRs(MF) / 4);
2830 ArrayRef<MCPhysReg>
2831 SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const {
2832 return makeArrayRef(AMDGPU::SGPR_64RegClass.begin(),
2833 ST.getMaxNumSGPRs(MF) / 2);
2836 ArrayRef<MCPhysReg>
2837 SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
2838 return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));