1 //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// SI implementation of the TargetRegisterInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "SIRegisterInfo.h"
15 #include "AMDGPURegisterBankInfo.h"
16 #include "AMDGPUSubtarget.h"
17 #include "SIInstrInfo.h"
18 #include "SIMachineFunctionInfo.h"
19 #include "MCTargetDesc/AMDGPUInstPrinter.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "llvm/CodeGen/LiveIntervals.h"
22 #include "llvm/CodeGen/MachineDominators.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
26 #include "llvm/CodeGen/SlotIndexes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/LLVMContext.h"
33 #define GET_REGINFO_TARGET_DESC
34 #include "AMDGPUGenRegisterInfo.inc"
36 static cl::opt<bool> EnableSpillSGPRToVGPR(
37 "amdgpu-spill-sgpr-to-vgpr",
38 cl::desc("Enable spilling VGPRs to SGPRs"),
42 std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
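// RegSplitParts[N-1] caches, in order, the sub-register indices that carve a
// register into N-dword pieces; for instance RegSplitParts[1] would begin with
// sub0_sub1, sub2_sub3, ... (illustrative contents). The table is filled
// lazily by the once-initialized lambda in the constructor below and is
// consumed by getRegSplitParts().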
44 SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
45 : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
46 SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
48 assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
49 getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
50 (getSubRegIndexLaneMask(AMDGPU::lo16) |
51 getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
52 getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
53 "getNumCoveredRegs() will not work with generated subreg masks!");
55 RegPressureIgnoredUnits.resize(getNumRegUnits());
56 RegPressureIgnoredUnits.set(*MCRegUnitIterator(AMDGPU::M0, this));
57 for (auto Reg : AMDGPU::VGPR_HI16RegClass)
58 RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
60 // HACK: Until this is fully tablegen'd.
61 static llvm::once_flag InitializeRegSplitPartsFlag;
63 static auto InitializeRegSplitPartsOnce = [this]() {
64 for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
65 unsigned Size = getSubRegIdxSize(Idx);
68 std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
69 unsigned Pos = getSubRegIdxOffset(Idx);
74 unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
75 Vec.resize(MaxNumParts);
82 llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
85 void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
86 MCRegister Reg) const {
87 MCRegAliasIterator R(Reg, this, true);
89 for (; R.isValid(); ++R)
93 // Forced to be here by one .inc
94 const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
95 const MachineFunction *MF) const {
96 CallingConv::ID CC = MF->getFunction().getCallingConv();
99 case CallingConv::Fast:
100 case CallingConv::Cold:
101 return CSR_AMDGPU_HighRegs_SaveList;
103 // Dummy to not crash RegisterClassInfo.
104 static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
105 return &NoCalleeSavedReg;
111 SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
115 const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
116 CallingConv::ID CC) const {
119 case CallingConv::Fast:
120 case CallingConv::Cold:
121 return CSR_AMDGPU_HighRegs_RegMask;
127 Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
128 const SIFrameLowering *TFI =
129 MF.getSubtarget<GCNSubtarget>().getFrameLowering();
130 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
131 // During ISel lowering we always reserve the stack pointer in entry
132 // functions, but never actually want to reference it when accessing our own
133 // frame. If we need a frame pointer we use it, but otherwise we can just use
134 // an immediate "0" which we represent by returning NoRegister.
135 if (FuncInfo->isEntryFunction()) {
136 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register();
138 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg()
139 : FuncInfo->getStackPtrOffsetReg();
142 bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
143 // When we need stack realignment, we can't reference off of the
144 // stack pointer, so we reserve a base pointer.
145 const MachineFrameInfo &MFI = MF.getFrameInfo();
146 return MFI.getNumFixedObjects() && needsStackRealignment(MF);
149 Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
151 const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const {
152 return CSR_AMDGPU_AllVGPRs_RegMask;
155 const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const {
156 return CSR_AMDGPU_AllAllocatableSRegs_RegMask;
159 // FIXME: TableGen should generate something to make this manageable for all
160 // register classes. At a minimum we could use the opposite of
161 // composeSubRegIndices and go up from the base 32-bit subreg.
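// Illustrative uses of the mapping implemented below (channel/NumRegs values
// chosen for the example): getSubRegFromChannel(2, 2) yields AMDGPU::sub2_sub3,
// and getSubRegFromChannel(0, 4) yields AMDGPU::sub0_sub1_sub2_sub3.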
162 unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
164 // Table of NumRegs sized pieces at every 32-bit offset.
165 static const uint16_t SubRegFromChannelTable[][32] = {
166 {AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
167 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
168 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
169 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
170 AMDGPU::sub16, AMDGPU::sub17, AMDGPU::sub18, AMDGPU::sub19,
171 AMDGPU::sub20, AMDGPU::sub21, AMDGPU::sub22, AMDGPU::sub23,
172 AMDGPU::sub24, AMDGPU::sub25, AMDGPU::sub26, AMDGPU::sub27,
173 AMDGPU::sub28, AMDGPU::sub29, AMDGPU::sub30, AMDGPU::sub31},
174 {AMDGPU::sub0_sub1, AMDGPU::sub1_sub2, AMDGPU::sub2_sub3,
175 AMDGPU::sub3_sub4, AMDGPU::sub4_sub5, AMDGPU::sub5_sub6,
176 AMDGPU::sub6_sub7, AMDGPU::sub7_sub8, AMDGPU::sub8_sub9,
177 AMDGPU::sub9_sub10, AMDGPU::sub10_sub11, AMDGPU::sub11_sub12,
178 AMDGPU::sub12_sub13, AMDGPU::sub13_sub14, AMDGPU::sub14_sub15,
179 AMDGPU::sub15_sub16, AMDGPU::sub16_sub17, AMDGPU::sub17_sub18,
180 AMDGPU::sub18_sub19, AMDGPU::sub19_sub20, AMDGPU::sub20_sub21,
181 AMDGPU::sub21_sub22, AMDGPU::sub22_sub23, AMDGPU::sub23_sub24,
182 AMDGPU::sub24_sub25, AMDGPU::sub25_sub26, AMDGPU::sub26_sub27,
183 AMDGPU::sub27_sub28, AMDGPU::sub28_sub29, AMDGPU::sub29_sub30,
184 AMDGPU::sub30_sub31, AMDGPU::NoSubRegister},
185 {AMDGPU::sub0_sub1_sub2, AMDGPU::sub1_sub2_sub3,
186 AMDGPU::sub2_sub3_sub4, AMDGPU::sub3_sub4_sub5,
187 AMDGPU::sub4_sub5_sub6, AMDGPU::sub5_sub6_sub7,
188 AMDGPU::sub6_sub7_sub8, AMDGPU::sub7_sub8_sub9,
189 AMDGPU::sub8_sub9_sub10, AMDGPU::sub9_sub10_sub11,
190 AMDGPU::sub10_sub11_sub12, AMDGPU::sub11_sub12_sub13,
191 AMDGPU::sub12_sub13_sub14, AMDGPU::sub13_sub14_sub15,
192 AMDGPU::sub14_sub15_sub16, AMDGPU::sub15_sub16_sub17,
193 AMDGPU::sub16_sub17_sub18, AMDGPU::sub17_sub18_sub19,
194 AMDGPU::sub18_sub19_sub20, AMDGPU::sub19_sub20_sub21,
195 AMDGPU::sub20_sub21_sub22, AMDGPU::sub21_sub22_sub23,
196 AMDGPU::sub22_sub23_sub24, AMDGPU::sub23_sub24_sub25,
197 AMDGPU::sub24_sub25_sub26, AMDGPU::sub25_sub26_sub27,
198 AMDGPU::sub26_sub27_sub28, AMDGPU::sub27_sub28_sub29,
199 AMDGPU::sub28_sub29_sub30, AMDGPU::sub29_sub30_sub31,
200 AMDGPU::NoSubRegister, AMDGPU::NoSubRegister},
201 {AMDGPU::sub0_sub1_sub2_sub3, AMDGPU::sub1_sub2_sub3_sub4,
202 AMDGPU::sub2_sub3_sub4_sub5, AMDGPU::sub3_sub4_sub5_sub6,
203 AMDGPU::sub4_sub5_sub6_sub7, AMDGPU::sub5_sub6_sub7_sub8,
204 AMDGPU::sub6_sub7_sub8_sub9, AMDGPU::sub7_sub8_sub9_sub10,
205 AMDGPU::sub8_sub9_sub10_sub11, AMDGPU::sub9_sub10_sub11_sub12,
206 AMDGPU::sub10_sub11_sub12_sub13, AMDGPU::sub11_sub12_sub13_sub14,
207 AMDGPU::sub12_sub13_sub14_sub15, AMDGPU::sub13_sub14_sub15_sub16,
208 AMDGPU::sub14_sub15_sub16_sub17, AMDGPU::sub15_sub16_sub17_sub18,
209 AMDGPU::sub16_sub17_sub18_sub19, AMDGPU::sub17_sub18_sub19_sub20,
210 AMDGPU::sub18_sub19_sub20_sub21, AMDGPU::sub19_sub20_sub21_sub22,
211 AMDGPU::sub20_sub21_sub22_sub23, AMDGPU::sub21_sub22_sub23_sub24,
212 AMDGPU::sub22_sub23_sub24_sub25, AMDGPU::sub23_sub24_sub25_sub26,
213 AMDGPU::sub24_sub25_sub26_sub27, AMDGPU::sub25_sub26_sub27_sub28,
214 AMDGPU::sub26_sub27_sub28_sub29, AMDGPU::sub27_sub28_sub29_sub30,
215 AMDGPU::sub28_sub29_sub30_sub31, AMDGPU::NoSubRegister,
216 AMDGPU::NoSubRegister, AMDGPU::NoSubRegister}};
218 const unsigned NumRegIndex = NumRegs - 1;
220 assert(NumRegIndex < array_lengthof(SubRegFromChannelTable) &&
222 assert(Channel < array_lengthof(SubRegFromChannelTable[0]));
223 return SubRegFromChannelTable[NumRegIndex][Channel];
226 MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
227 const MachineFunction &MF) const {
228 unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
229 MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
230 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
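// Worked example (SGPR budget assumed for illustration): with
// getMaxNumSGPRs(MF) == 104, BaseIdx = alignDown(104, 4) - 4 = 100, so the
// reserved descriptor would be SGPR100_SGPR101_SGPR102_SGPR103.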
233 BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
234 BitVector Reserved(getNumRegs());
235 Reserved.set(AMDGPU::MODE);
237 // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
238 // this seems likely to result in bugs, so mark them as reserved.
239 reserveRegisterTuples(Reserved, AMDGPU::EXEC);
240 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
242 // M0 has to be reserved so that LLVM accepts it as a live-in to a block.
243 reserveRegisterTuples(Reserved, AMDGPU::M0);
245 // Reserve src_vccz, src_execz, src_scc.
246 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
247 reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
248 reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
250 // Reserve the memory aperture registers.
251 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
252 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
253 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
254 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
256 // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
257 reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
259 // Reserve xnack_mask registers - support is not implemented in Codegen.
260 reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
262 // Reserve lds_direct register - support is not implemented in Codegen.
263 reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
265 // Reserve Trap Handler registers - support is not implemented in Codegen.
266 reserveRegisterTuples(Reserved, AMDGPU::TBA);
267 reserveRegisterTuples(Reserved, AMDGPU::TMA);
268 reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
269 reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
270 reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
271 reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
272 reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
273 reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
274 reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
275 reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
277 // Reserve null register - it shall never be allocated
278 reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);
280 // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
281 // will result in bugs.
283 Reserved.set(AMDGPU::VCC);
284 Reserved.set(AMDGPU::VCC_HI);
287 unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
288 unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
289 for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
290 unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
291 reserveRegisterTuples(Reserved, Reg);
294 unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
295 unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
296 for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
297 unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
298 reserveRegisterTuples(Reserved, Reg);
299 Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
300 reserveRegisterTuples(Reserved, Reg);
303 for (auto Reg : AMDGPU::SReg_32RegClass) {
304 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
305 Register Low = getSubReg(Reg, AMDGPU::lo16);
306 // This is to prevent BB vcc liveness errors.
307 if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
311 for (auto Reg : AMDGPU::AGPR_32RegClass) {
312 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
315 // Reserve all remaining AGPRs if there are no instructions that can use them.
316 if (!ST.hasMAIInsts()) {
317 for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
318 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
319 reserveRegisterTuples(Reserved, Reg);
323 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
325 unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
326 if (ScratchRSrcReg != AMDGPU::NoRegister) {
327 // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need to spill.
329 // TODO: May need to reserve a VGPR if doing LDS spilling.
330 reserveRegisterTuples(Reserved, ScratchRSrcReg);
333 // We have to assume the SP is needed in case there are calls in the function,
334 // which is detected after the function is lowered. If we aren't really going
335 // to need SP, don't bother reserving it.
336 MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
339 reserveRegisterTuples(Reserved, StackPtrReg);
340 assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
343 MCRegister FrameReg = MFI->getFrameOffsetReg();
345 reserveRegisterTuples(Reserved, FrameReg);
346 assert(!isSubRegister(ScratchRSrcReg, FrameReg));
349 if (hasBasePointer(MF)) {
350 MCRegister BasePtrReg = getBaseRegister();
351 reserveRegisterTuples(Reserved, BasePtrReg);
352 assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
355 for (MCRegister Reg : MFI->WWMReservedRegs) {
356 reserveRegisterTuples(Reserved, Reg);
359 // FIXME: Stop using reserved registers for this.
360 for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
361 reserveRegisterTuples(Reserved, Reg);
363 for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
364 reserveRegisterTuples(Reserved, Reg);
366 if (MFI->VGPRReservedForSGPRSpill)
367 for (auto SSpill : MFI->getSGPRSpillVGPRs())
368 reserveRegisterTuples(Reserved, SSpill.VGPR);
373 bool SIRegisterInfo::canRealignStack(const MachineFunction &MF) const {
374 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
375 // On entry, the base address is 0, so it can't possibly need any more alignment.
378 // FIXME: Should be able to specify the entry frame alignment per calling
379 // convention instead.
380 if (Info->isEntryFunction())
383 return TargetRegisterInfo::canRealignStack(MF);
386 bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
387 const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
388 if (Info->isEntryFunction()) {
389 const MachineFrameInfo &MFI = Fn.getFrameInfo();
390 return MFI.hasStackObjects() || MFI.hasCalls();
393 // May need scavenger for dealing with callee saved registers.
397 bool SIRegisterInfo::requiresFrameIndexScavenging(
398 const MachineFunction &MF) const {
399 // Do not use frame virtual registers. They used to be used for SGPRs, but
400 // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
401 // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
406 bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
407 const MachineFunction &MF) const {
408 const MachineFrameInfo &MFI = MF.getFrameInfo();
409 return MFI.hasStackObjects();
412 bool SIRegisterInfo::requiresVirtualBaseRegisters(
413 const MachineFunction &) const {
414 // There are no special dedicated stack or frame pointers.
418 int64_t SIRegisterInfo::getMUBUFInstrOffset(const MachineInstr *MI) const {
419 assert(SIInstrInfo::isMUBUF(*MI));
421 int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
422 AMDGPU::OpName::offset);
423 return MI->getOperand(OffIdx).getImm();
426 int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
428 if (!SIInstrInfo::isMUBUF(*MI))
431 assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
432 AMDGPU::OpName::vaddr) &&
433 "Should never see frame index on non-address operand");
435 return getMUBUFInstrOffset(MI);
438 bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
439 if (!MI->mayLoadOrStore())
442 int64_t FullOffset = Offset + getMUBUFInstrOffset(MI);
444 return !isUInt<12>(FullOffset);
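// The MUBUF immediate offset checked here is a 12-bit unsigned field, so
// combined offsets of 0..4095 are encodable; e.g. a full offset of 4096
// (value chosen for illustration) requires materializing a frame base register.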
447 void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
450 int64_t Offset) const {
451 MachineBasicBlock::iterator Ins = MBB->begin();
452 DebugLoc DL; // Defaults to "unknown"
454 if (Ins != MBB->end())
455 DL = Ins->getDebugLoc();
457 MachineFunction *MF = MBB->getParent();
458 const SIInstrInfo *TII = ST.getInstrInfo();
461 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
462 .addFrameIndex(FrameIdx);
466 MachineRegisterInfo &MRI = MF->getRegInfo();
467 Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
469 Register FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
471 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
473 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
474 .addFrameIndex(FrameIdx);
476 TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
477 .addReg(OffsetReg, RegState::Kill)
479 .addImm(0); // clamp bit
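// The sequence built above is roughly (sketch only; the exact add opcode
// depends on the subtarget's carry-less add support):
//   s_mov_b32  s_tmp, <Offset>
//   v_mov_b32  v_fi, <FrameIdx>
//   v_add_u32  BaseReg, s_tmp, v_fi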
482 void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
483 int64_t Offset) const {
484 const SIInstrInfo *TII = ST.getInstrInfo();
487 // FIXME: Is it possible to be storing a frame index to itself?
489 for (const MachineOperand &MO: MI.operands()) {
492 llvm_unreachable("should not see multiple frame indices");
499 MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
501 MachineBasicBlock *MBB = MI.getParent();
502 MachineFunction *MF = MBB->getParent();
504 assert(FIOp && FIOp->isFI() && "frame index must be address operand");
505 assert(TII->isMUBUF(MI));
506 assert(TII->getNamedOperand(MI, AMDGPU::OpName::soffset)->getReg() ==
507 MF->getInfo<SIMachineFunctionInfo>()->getStackPtrOffsetReg() &&
508 "should only be seeing stack pointer offset relative FrameIndex");
510 MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
511 int64_t NewOffset = OffsetOp->getImm() + Offset;
512 assert(isUInt<12>(NewOffset) && "offset should be legal");
514 FIOp->ChangeToRegister(BaseReg, false);
515 OffsetOp->setImm(NewOffset);
518 bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
520 int64_t Offset) const {
521 if (!SIInstrInfo::isMUBUF(*MI))
524 int64_t NewOffset = Offset + getMUBUFInstrOffset(MI);
526 return isUInt<12>(NewOffset);
529 const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
530 const MachineFunction &MF, unsigned Kind) const {
531 // This is inaccurate. It depends on the instruction and address space. The
532 // only place where we should hit this is for dealing with frame indexes /
533 // private accesses, so this is correct in that case.
534 return &AMDGPU::VGPR_32RegClass;
537 static unsigned getNumSubRegsForSpillOp(unsigned Op) {
540 case AMDGPU::SI_SPILL_S1024_SAVE:
541 case AMDGPU::SI_SPILL_S1024_RESTORE:
542 case AMDGPU::SI_SPILL_V1024_SAVE:
543 case AMDGPU::SI_SPILL_V1024_RESTORE:
544 case AMDGPU::SI_SPILL_A1024_SAVE:
545 case AMDGPU::SI_SPILL_A1024_RESTORE:
547 case AMDGPU::SI_SPILL_S512_SAVE:
548 case AMDGPU::SI_SPILL_S512_RESTORE:
549 case AMDGPU::SI_SPILL_V512_SAVE:
550 case AMDGPU::SI_SPILL_V512_RESTORE:
551 case AMDGPU::SI_SPILL_A512_SAVE:
552 case AMDGPU::SI_SPILL_A512_RESTORE:
554 case AMDGPU::SI_SPILL_S256_SAVE:
555 case AMDGPU::SI_SPILL_S256_RESTORE:
556 case AMDGPU::SI_SPILL_V256_SAVE:
557 case AMDGPU::SI_SPILL_V256_RESTORE:
559 case AMDGPU::SI_SPILL_S192_SAVE:
560 case AMDGPU::SI_SPILL_S192_RESTORE:
561 case AMDGPU::SI_SPILL_V192_SAVE:
562 case AMDGPU::SI_SPILL_V192_RESTORE:
564 case AMDGPU::SI_SPILL_S160_SAVE:
565 case AMDGPU::SI_SPILL_S160_RESTORE:
566 case AMDGPU::SI_SPILL_V160_SAVE:
567 case AMDGPU::SI_SPILL_V160_RESTORE:
569 case AMDGPU::SI_SPILL_S128_SAVE:
570 case AMDGPU::SI_SPILL_S128_RESTORE:
571 case AMDGPU::SI_SPILL_V128_SAVE:
572 case AMDGPU::SI_SPILL_V128_RESTORE:
573 case AMDGPU::SI_SPILL_A128_SAVE:
574 case AMDGPU::SI_SPILL_A128_RESTORE:
576 case AMDGPU::SI_SPILL_S96_SAVE:
577 case AMDGPU::SI_SPILL_S96_RESTORE:
578 case AMDGPU::SI_SPILL_V96_SAVE:
579 case AMDGPU::SI_SPILL_V96_RESTORE:
581 case AMDGPU::SI_SPILL_S64_SAVE:
582 case AMDGPU::SI_SPILL_S64_RESTORE:
583 case AMDGPU::SI_SPILL_V64_SAVE:
584 case AMDGPU::SI_SPILL_V64_RESTORE:
585 case AMDGPU::SI_SPILL_A64_SAVE:
586 case AMDGPU::SI_SPILL_A64_RESTORE:
588 case AMDGPU::SI_SPILL_S32_SAVE:
589 case AMDGPU::SI_SPILL_S32_RESTORE:
590 case AMDGPU::SI_SPILL_V32_SAVE:
591 case AMDGPU::SI_SPILL_V32_RESTORE:
592 case AMDGPU::SI_SPILL_A32_SAVE:
593 case AMDGPU::SI_SPILL_A32_RESTORE:
595 default: llvm_unreachable("Invalid spill opcode");
599 static int getOffsetMUBUFStore(unsigned Opc) {
601 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
602 return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
603 case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
604 return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
605 case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
606 return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
607 case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
608 return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
609 case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
610 return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
611 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
612 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
613 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
614 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
620 static int getOffsetMUBUFLoad(unsigned Opc) {
622 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
623 return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
624 case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
625 return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
626 case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
627 return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
628 case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
629 return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
630 case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
631 return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
632 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
633 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
634 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
635 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
636 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
637 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
638 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
639 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
640 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
641 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
642 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
643 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
644 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
645 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
646 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
647 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
653 static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
654 MachineBasicBlock::iterator MI,
659 MachineBasicBlock *MBB = MI->getParent();
660 MachineFunction *MF = MI->getParent()->getParent();
661 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
662 const SIInstrInfo *TII = ST.getInstrInfo();
664 MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
666 if (Reg == AMDGPU::NoRegister)
667 return MachineInstrBuilder();
669 bool IsStore = MI->mayStore();
670 MachineRegisterInfo &MRI = MF->getRegInfo();
671 auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
673 unsigned Dst = IsStore ? Reg : ValueReg;
674 unsigned Src = IsStore ? ValueReg : Reg;
675 unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32
676 : AMDGPU::V_ACCVGPR_READ_B32;
678 return BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst)
679 .addReg(Src, getKillRegState(IsKill));
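// Illustrative result (register numbers assumed): spilling lane data from v5
// into AGPR a3 emits "v_accvgpr_write_b32 a3, v5", while the matching reload
// emits "v_accvgpr_read_b32 v5, a3".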
682 // This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
683 // need to handle the case where an SGPR may need to be spilled while spilling.
684 static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
685 MachineFrameInfo &MFI,
686 MachineBasicBlock::iterator MI,
689 const SIInstrInfo *TII = ST.getInstrInfo();
690 MachineBasicBlock *MBB = MI->getParent();
691 const DebugLoc &DL = MI->getDebugLoc();
692 bool IsStore = MI->mayStore();
694 unsigned Opc = MI->getOpcode();
695 int LoadStoreOp = IsStore ?
696 getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
697 if (LoadStoreOp == -1)
700 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
701 if (spillVGPRtoAGPR(ST, MI, Index, 0, Reg->getReg(), false).getInstr())
704 MachineInstrBuilder NewMI =
705 BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
707 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
708 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
717 const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
718 AMDGPU::OpName::vdata_in);
724 void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
725 unsigned LoadStoreOp,
729 MCRegister ScratchRsrcReg,
730 MCRegister ScratchOffsetReg,
732 MachineMemOperand *MMO,
733 RegScavenger *RS) const {
734 MachineBasicBlock *MBB = MI->getParent();
735 MachineFunction *MF = MI->getParent()->getParent();
736 const SIInstrInfo *TII = ST.getInstrInfo();
737 const MachineFrameInfo &MFI = MF->getFrameInfo();
738 const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
740 const MCInstrDesc &Desc = TII->get(LoadStoreOp);
741 const DebugLoc &DL = MI->getDebugLoc();
742 bool IsStore = Desc.mayStore();
744 bool Scavenged = false;
745 MCRegister SOffset = ScratchOffsetReg;
747 const unsigned EltSize = 4;
748 const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
749 unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT);
750 unsigned Size = NumSubRegs * EltSize;
751 int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
752 int64_t ScratchOffsetRegDelta = 0;
754 Align Alignment = MFI.getObjectAlign(Index);
755 const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
758 hasAGPRs(RC) ? TII->getNamedOperand(*MI, AMDGPU::OpName::tmp)->getReg()
761 assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset");
763 if (!isUInt<12>(Offset + Size - EltSize)) {
764 SOffset = MCRegister();
766 // We currently only support spilling VGPRs to EltSize boundaries, meaning
767 // we can simplify the adjustment of Offset here to just scale with the wave size.
769 Offset *= ST.getWavefrontSize();
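// Example of the scaling (wave size assumed): with a 64-wide wave, a per-lane
// offset of 16 bytes becomes a linear scratch offset of 16 * 64 = 1024 bytes.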
771 // We don't have access to the register scavenger if this function is called
772 // during PEI::scavengeFrameVirtualRegs().
774 SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
777 // There are no free SGPRs, and we are in the process of spilling
778 // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true
779 // on SI/CI, and on VI it is true until we implement spilling using scalar
780 // stores), we have no way to free up an SGPR. Our solution here is to
781 // add the offset directly to the ScratchOffset or StackPtrOffset
782 // register, and then subtract the offset after the spill to return the
783 // register to its original value.
784 if (!ScratchOffsetReg)
785 ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
786 SOffset = ScratchOffsetReg;
787 ScratchOffsetRegDelta = Offset;
793 report_fatal_error("could not scavenge SGPR to spill in entry function");
795 if (ScratchOffsetReg == AMDGPU::NoRegister) {
796 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset)
799 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
800 .addReg(ScratchOffsetReg)
807 for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
808 Register SubReg = NumSubRegs == 1
810 : getSubReg(ValueReg, getSubRegFromChannel(i));
812 unsigned SOffsetRegState = 0;
813 unsigned SrcDstRegState = getDefRegState(!IsStore);
815 SOffsetRegState |= getKillRegState(Scavenged);
816 // The last implicit use carries the "Kill" flag.
817 SrcDstRegState |= getKillRegState(IsKill);
820 auto MIB = spillVGPRtoAGPR(ST, MI, Index, i, SubReg, IsKill);
822 if (!MIB.getInstr()) {
823 unsigned FinalReg = SubReg;
824 if (TmpReg != AMDGPU::NoRegister) {
826 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_READ_B32), TmpReg)
827 .addReg(SubReg, getKillRegState(IsKill));
831 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
832 MachineMemOperand *NewMMO =
833 MF->getMachineMemOperand(PInfo, MMO->getFlags(), EltSize,
834 commonAlignment(Alignment, EltSize * i));
836 MIB = BuildMI(*MBB, MI, DL, Desc)
838 getDefRegState(!IsStore) | getKillRegState(IsKill))
839 .addReg(ScratchRsrcReg);
840 if (SOffset == AMDGPU::NoRegister) {
843 MIB.addReg(SOffset, SOffsetRegState);
851 .addMemOperand(NewMMO);
853 if (!IsStore && TmpReg != AMDGPU::NoRegister)
854 MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32),
856 .addReg(TmpReg, RegState::Kill);
860 MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
863 if (ScratchOffsetRegDelta != 0) {
864 // Subtract the offset we added to the ScratchOffset register.
865 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
867 .addImm(ScratchOffsetRegDelta);
871 // Generate a VMEM access which loads or stores the VGPR containing an SGPR
872 // spill such that all the lanes set in VGPRLanes are loaded or stored.
873 // This generates exec mask manipulation and will use SGPRs available in MI
874 // or lanes of the spill VGPR itself to save and restore the exec mask.
875 void SIRegisterInfo::buildSGPRSpillLoadStore(MachineBasicBlock::iterator MI,
876 int Index, int Offset,
877 unsigned EltSize, Register VGPR,
881 MachineBasicBlock *MBB = MI->getParent();
882 MachineFunction *MF = MBB->getParent();
883 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
884 const SIInstrInfo *TII = ST.getInstrInfo();
886 Register SuperReg = MI->getOperand(0).getReg();
887 const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
888 ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
889 unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
890 unsigned FirstPart = Offset * 32;
891 unsigned ExecLane = 0;
893 bool IsKill = MI->getOperand(0).isKill();
894 const DebugLoc &DL = MI->getDebugLoc();
896 // Cannot handle load/store to EXEC
897 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
898 SuperReg != AMDGPU::EXEC && "exec should never spill");
900 // On Wave32 only handle EXEC_LO.
901 // On Wave64 only update EXEC_HI if there is sufficient space for a copy.
902 bool OnlyExecLo = isWave32 || NumSubRegs == 1 || SuperReg == AMDGPU::EXEC_HI;
904 unsigned ExecMovOpc = OnlyExecLo ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
905 Register ExecReg = OnlyExecLo ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
906 Register SavedExecReg;
910 SavedExecReg = NumSubRegs == 1
912 : getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]);
914 // If src/dst is an odd size it is possible subreg0 is not aligned.
915 for (; ExecLane < (NumSubRegs - 1); ++ExecLane) {
916 SavedExecReg = getMatchingSuperReg(
917 getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]), AMDGPU::sub0,
918 &AMDGPU::SReg_64_XEXECRegClass);
923 assert(SavedExecReg);
924 BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), SavedExecReg).addReg(ExecReg);
927 BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), ExecReg).addImm(VGPRLanes);
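// For example (lane count assumed), transferring 4 SGPR dwords sets
// VGPRLanes = 0xF here, so only lanes 0-3 of the spill VGPR participate in
// the buffer access built below.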
930 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
931 assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
933 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
935 : getFrameRegister(*MF);
937 Align Alignment = FrameInfo.getObjectAlign(Index);
938 MachinePointerInfo PtrInfo =
939 MachinePointerInfo::getFixedStack(*MF, Index);
940 MachineMemOperand *MMO = MF->getMachineMemOperand(
941 PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore,
945 buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
948 MFI->getScratchRSrcReg(), FrameReg,
949 Offset * EltSize, MMO,
952 buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET, Index, VGPR,
953 IsKill, MFI->getScratchRSrcReg(), FrameReg,
954 Offset * EltSize, MMO, RS);
955 // This only ever adds one VGPR spill
956 MFI->addToSpilledVGPRs(1);
960 BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), ExecReg)
961 .addReg(SavedExecReg, getKillRegState(IsLoad || IsKill));
963 // Restore clobbered SGPRs
965 // Nothing to do; register will be overwritten
966 } else if (!IsKill) {
967 // Restore SGPRs from appropriate VGPR lanes
969 BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
970 getSubReg(SuperReg, SplitParts[FirstPart + ExecLane + 1]))
972 .addImm(ExecLane + 1);
974 BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
977 : getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]))
978 .addReg(VGPR, RegState::Kill)
983 bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
986 bool OnlyToVGPR) const {
987 MachineBasicBlock *MBB = MI->getParent();
988 MachineFunction *MF = MBB->getParent();
989 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
990 DenseSet<Register> SGPRSpillVGPRDefinedSet;
992 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
993 = MFI->getSGPRToVGPRSpills(Index);
994 bool SpillToVGPR = !VGPRSpills.empty();
995 if (OnlyToVGPR && !SpillToVGPR)
998 const SIInstrInfo *TII = ST.getInstrInfo();
1000 Register SuperReg = MI->getOperand(0).getReg();
1001 bool IsKill = MI->getOperand(0).isKill();
1002 const DebugLoc &DL = MI->getDebugLoc();
1004 assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
1005 SuperReg != MFI->getFrameOffsetReg()));
1007 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1008 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1009 SuperReg != AMDGPU::EXEC && "exec should never spill");
1011 unsigned EltSize = 4;
1012 const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
1014 ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
1015 unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
1018 for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
1020 NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
1021 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1023 // During SGPR spilling to VGPR, determine if the VGPR is defined. The
1024 // only circumstance in which we say it is undefined is when it is the
1025 // first spill to this VGPR in the first basic block.
1026 bool VGPRDefined = true;
1027 if (MBB == &MF->front())
1028 VGPRDefined = !SGPRSpillVGPRDefinedSet.insert(Spill.VGPR).second;
1030 // Mark the "old value of vgpr" input undef only if this is the first sgpr
1031 // spill to this specific vgpr in the first basic block.
1032 BuildMI(*MBB, MI, DL,
1033 TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
1035 .addReg(SubReg, getKillRegState(IsKill))
1037 .addReg(Spill.VGPR, VGPRDefined ? 0 : RegState::Undef);
1039 // FIXME: Since this spills to another register instead of an actual
1040 // frame index, we should delete the frame index when all references to
1044 // Scavenged temporary VGPR to use. It must be scavenged once for any number
1045 // of spilled subregs.
1046 Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1047 RS->setRegUsed(TmpVGPR);
1049 // SubReg carries the "Kill" flag when SubReg == SuperReg.
1050 unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
1052 unsigned PerVGPR = 32;
1053 unsigned NumVGPRs = (NumSubRegs + (PerVGPR - 1)) / PerVGPR;
1054 int64_t VGPRLanes = (1LL << std::min(PerVGPR, NumSubRegs)) - 1LL;
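// Worked example (tuple sizes assumed): a 256-bit SGPR tuple has
// NumSubRegs = 8, so NumVGPRs = 1 and VGPRLanes = 0xFF; a 1024-bit tuple uses
// all 32 lanes of a single batch (VGPRLanes = 0xFFFFFFFF).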
1056 for (unsigned Offset = 0; Offset < NumVGPRs; ++Offset) {
1057 unsigned TmpVGPRFlags = RegState::Undef;
1059 // Write sub registers into the VGPR
1060 for (unsigned i = Offset * PerVGPR,
1061 e = std::min((Offset + 1) * PerVGPR, NumSubRegs);
1064 NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
1066 MachineInstrBuilder WriteLane =
1067 BuildMI(*MBB, MI, DL,
1068 TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
1070 .addReg(SubReg, SubKillState)
1071 .addImm(i % PerVGPR)
1072 .addReg(TmpVGPR, TmpVGPRFlags);
1075 // There could be undef components of a spilled super register.
1076 // TODO: Can we detect this and skip the spill?
1077 if (NumSubRegs > 1) {
1078 // The last implicit use of the SuperReg carries the "Kill" flag.
1079 unsigned SuperKillState = 0;
1080 if (i + 1 == NumSubRegs)
1081 SuperKillState |= getKillRegState(IsKill);
1082 WriteLane.addReg(SuperReg, RegState::Implicit | SuperKillState);
1087 buildSGPRSpillLoadStore(MI, Index, Offset, EltSize, TmpVGPR, VGPRLanes,
1092 MI->eraseFromParent();
1093 MFI->addToSpilledSGPRs(NumSubRegs);
1097 bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
1100 bool OnlyToVGPR) const {
1101 MachineFunction *MF = MI->getParent()->getParent();
1102 MachineBasicBlock *MBB = MI->getParent();
1103 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1105 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
1106 = MFI->getSGPRToVGPRSpills(Index);
1107 bool SpillToVGPR = !VGPRSpills.empty();
1108 if (OnlyToVGPR && !SpillToVGPR)
1111 const SIInstrInfo *TII = ST.getInstrInfo();
1112 const DebugLoc &DL = MI->getDebugLoc();
1114 Register SuperReg = MI->getOperand(0).getReg();
1116 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1117 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1118 SuperReg != AMDGPU::EXEC && "exec should never spill");
1120 unsigned EltSize = 4;
1122 const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
1124 ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
1125 unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
1128 for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
1130 NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
1132 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1134 BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
1137 .addImm(Spill.Lane);
1138 if (NumSubRegs > 1 && i == 0)
1139 MIB.addReg(SuperReg, RegState::ImplicitDefine);
1142 Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1143 RS->setRegUsed(TmpVGPR);
1145 unsigned PerVGPR = 32;
1146 unsigned NumVGPRs = (NumSubRegs + (PerVGPR - 1)) / PerVGPR;
1147 int64_t VGPRLanes = (1LL << std::min(PerVGPR, NumSubRegs)) - 1LL;
1149 for (unsigned Offset = 0; Offset < NumVGPRs; ++Offset) {
1150 // Load in VGPR data
1151 buildSGPRSpillLoadStore(MI, Index, Offset, EltSize, TmpVGPR, VGPRLanes,
1155 for (unsigned i = Offset * PerVGPR,
1156 e = std::min((Offset + 1) * PerVGPR, NumSubRegs);
1159 NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);
1161 bool LastSubReg = (i + 1 == e);
1163 BuildMI(*MBB, MI, DL,
1164 TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32), SubReg)
1165 .addReg(TmpVGPR, getKillRegState(LastSubReg))
1167 if (NumSubRegs > 1 && i == 0)
1168 MIB.addReg(SuperReg, RegState::ImplicitDefine);
1173 MI->eraseFromParent();
1177 /// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
1178 /// a VGPR and the stack slot can be safely eliminated when all other users are handled.
1180 bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
1181 MachineBasicBlock::iterator MI,
1183 RegScavenger *RS) const {
1184 switch (MI->getOpcode()) {
1185 case AMDGPU::SI_SPILL_S1024_SAVE:
1186 case AMDGPU::SI_SPILL_S512_SAVE:
1187 case AMDGPU::SI_SPILL_S256_SAVE:
1188 case AMDGPU::SI_SPILL_S192_SAVE:
1189 case AMDGPU::SI_SPILL_S160_SAVE:
1190 case AMDGPU::SI_SPILL_S128_SAVE:
1191 case AMDGPU::SI_SPILL_S96_SAVE:
1192 case AMDGPU::SI_SPILL_S64_SAVE:
1193 case AMDGPU::SI_SPILL_S32_SAVE:
1194 return spillSGPR(MI, FI, RS, true);
1195 case AMDGPU::SI_SPILL_S1024_RESTORE:
1196 case AMDGPU::SI_SPILL_S512_RESTORE:
1197 case AMDGPU::SI_SPILL_S256_RESTORE:
1198 case AMDGPU::SI_SPILL_S192_RESTORE:
1199 case AMDGPU::SI_SPILL_S160_RESTORE:
1200 case AMDGPU::SI_SPILL_S128_RESTORE:
1201 case AMDGPU::SI_SPILL_S96_RESTORE:
1202 case AMDGPU::SI_SPILL_S64_RESTORE:
1203 case AMDGPU::SI_SPILL_S32_RESTORE:
1204 return restoreSGPR(MI, FI, RS, true);
1206 llvm_unreachable("not an SGPR spill instruction");
1210 void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
1211 int SPAdj, unsigned FIOperandNum,
1212 RegScavenger *RS) const {
1213 MachineFunction *MF = MI->getParent()->getParent();
1214 MachineBasicBlock *MBB = MI->getParent();
1215 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1216 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1217 const SIInstrInfo *TII = ST.getInstrInfo();
1218 DebugLoc DL = MI->getDebugLoc();
1220 assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1222 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
1223 int Index = MI->getOperand(FIOperandNum).getIndex();
1225 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
1227 : getFrameRegister(*MF);
1229 switch (MI->getOpcode()) {
1230 // SGPR register spill
1231 case AMDGPU::SI_SPILL_S1024_SAVE:
1232 case AMDGPU::SI_SPILL_S512_SAVE:
1233 case AMDGPU::SI_SPILL_S256_SAVE:
1234 case AMDGPU::SI_SPILL_S192_SAVE:
1235 case AMDGPU::SI_SPILL_S160_SAVE:
1236 case AMDGPU::SI_SPILL_S128_SAVE:
1237 case AMDGPU::SI_SPILL_S96_SAVE:
1238 case AMDGPU::SI_SPILL_S64_SAVE:
1239 case AMDGPU::SI_SPILL_S32_SAVE: {
1240 spillSGPR(MI, Index, RS);
1244 // SGPR register restore
1245 case AMDGPU::SI_SPILL_S1024_RESTORE:
1246 case AMDGPU::SI_SPILL_S512_RESTORE:
1247 case AMDGPU::SI_SPILL_S256_RESTORE:
1248 case AMDGPU::SI_SPILL_S192_RESTORE:
1249 case AMDGPU::SI_SPILL_S160_RESTORE:
1250 case AMDGPU::SI_SPILL_S128_RESTORE:
1251 case AMDGPU::SI_SPILL_S96_RESTORE:
1252 case AMDGPU::SI_SPILL_S64_RESTORE:
1253 case AMDGPU::SI_SPILL_S32_RESTORE: {
1254 restoreSGPR(MI, Index, RS);
1258 // VGPR register spill
1259 case AMDGPU::SI_SPILL_V1024_SAVE:
1260 case AMDGPU::SI_SPILL_V512_SAVE:
1261 case AMDGPU::SI_SPILL_V256_SAVE:
1262 case AMDGPU::SI_SPILL_V160_SAVE:
1263 case AMDGPU::SI_SPILL_V128_SAVE:
1264 case AMDGPU::SI_SPILL_V96_SAVE:
1265 case AMDGPU::SI_SPILL_V64_SAVE:
1266 case AMDGPU::SI_SPILL_V32_SAVE:
1267 case AMDGPU::SI_SPILL_A1024_SAVE:
1268 case AMDGPU::SI_SPILL_A512_SAVE:
1269 case AMDGPU::SI_SPILL_A128_SAVE:
1270 case AMDGPU::SI_SPILL_A64_SAVE:
1271 case AMDGPU::SI_SPILL_A32_SAVE: {
1272 const MachineOperand *VData = TII->getNamedOperand(*MI,
1273 AMDGPU::OpName::vdata);
1274 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1275 MFI->getStackPtrOffsetReg());
1277 buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
1279 VData->getReg(), VData->isKill(),
1280 TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
1282 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1283 *MI->memoperands_begin(),
1285 MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
1286 MI->eraseFromParent();
1289 case AMDGPU::SI_SPILL_V32_RESTORE:
1290 case AMDGPU::SI_SPILL_V64_RESTORE:
1291 case AMDGPU::SI_SPILL_V96_RESTORE:
1292 case AMDGPU::SI_SPILL_V128_RESTORE:
1293 case AMDGPU::SI_SPILL_V160_RESTORE:
1294 case AMDGPU::SI_SPILL_V256_RESTORE:
1295 case AMDGPU::SI_SPILL_V512_RESTORE:
1296 case AMDGPU::SI_SPILL_V1024_RESTORE:
1297 case AMDGPU::SI_SPILL_A32_RESTORE:
1298 case AMDGPU::SI_SPILL_A64_RESTORE:
1299 case AMDGPU::SI_SPILL_A128_RESTORE:
1300 case AMDGPU::SI_SPILL_A512_RESTORE:
1301 case AMDGPU::SI_SPILL_A1024_RESTORE: {
1302 const MachineOperand *VData = TII->getNamedOperand(*MI,
1303 AMDGPU::OpName::vdata);
1304 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1305 MFI->getStackPtrOffsetReg());
1307 buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
1309 VData->getReg(), VData->isKill(),
1310 TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
1312 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1313 *MI->memoperands_begin(),
1315 MI->eraseFromParent();
1320 const DebugLoc &DL = MI->getDebugLoc();
1321 bool IsMUBUF = TII->isMUBUF(*MI);
1323 if (!IsMUBUF && !MFI->isEntryFunction()) {
1324 // Convert to a swizzled stack address by scaling by the wave size.
1326 // In an entry function/kernel the offset is already swizzled.
1328 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
1329 Register ResultReg =
1330 IsCopy ? MI->getOperand(0).getReg()
1331 : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1333 int64_t Offset = FrameInfo.getObjectOffset(Index);
1335 // XXX - This never happens because of emergency scavenging slot at 0?
1336 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
1337 .addImm(ST.getWavefrontSizeLog2())
1340 if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
1341 // Reuse ResultReg in intermediate step.
1342 Register ScaledReg = ResultReg;
1344 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
1346 .addImm(ST.getWavefrontSizeLog2())
1349 const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
1351 // TODO: Fold if use instruction is another add of a constant.
1352 if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
1353 // FIXME: This can fail
1355 MIB.addReg(ScaledReg, RegState::Kill);
1357 MIB.addImm(0); // clamp bit
1359 assert(MIB->getOpcode() == AMDGPU::V_ADD_I32_e64 &&
1360 "Need to reuse carry out register");
1362 // Use scavenged unused carry out as offset register.
1363 Register ConstOffsetReg;
1365 ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
1367 ConstOffsetReg = MIB.getReg(1);
1369 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
1371 MIB.addReg(ConstOffsetReg, RegState::Kill);
1372 MIB.addReg(ScaledReg, RegState::Kill);
1373 MIB.addImm(0); // clamp bit
1376 // We have to produce a carry out, and there isn't a free SGPR pair
1377 // for it. We can keep the whole computation on the SALU to avoid
1378 // clobbering an additional register at the cost of an extra mov.
1380 // We may have 1 free scratch SGPR even though a carry out is
1381 // unavailable. Only one additional mov is needed.
1382 Register TmpScaledReg =
1383 RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
1384 Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg;
1386 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
1388 .addImm(ST.getWavefrontSizeLog2());
1389 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), ScaledReg)
1390 .addReg(ScaledReg, RegState::Kill)
1392 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
1393 .addReg(ScaledReg, RegState::Kill);
1395 // If there were truly no free SGPRs, we need to undo everything.
1396 if (!TmpScaledReg.isValid()) {
1397 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScaledReg)
1398 .addReg(ScaledReg, RegState::Kill)
1400 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
1402 .addImm(ST.getWavefrontSizeLog2());
1407 // Don't introduce an extra copy if we're just materializing in a mov.
1409 MI->eraseFromParent();
1411 FIOp.ChangeToRegister(ResultReg, false, false, true);
1416 // Disable offen so we don't need a 0 vgpr base.
1417 assert(static_cast<int>(FIOperandNum) ==
1418 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1419 AMDGPU::OpName::vaddr));
1421 auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
1422 assert((SOffset.isReg() &&
1423 SOffset.getReg() == MFI->getStackPtrOffsetReg()) ||
1424 (SOffset.isImm() && SOffset.getImm() == 0));
1425 if (SOffset.isReg()) {
1426 if (FrameReg == AMDGPU::NoRegister) {
1427 SOffset.ChangeToImmediate(0);
1429 SOffset.setReg(FrameReg);
1433 int64_t Offset = FrameInfo.getObjectOffset(Index);
1435 = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
1436 int64_t NewOffset = OldImm + Offset;
1438 if (isUInt<12>(NewOffset) &&
1439 buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
1440 MI->eraseFromParent();
1445 // If the offset is simply too big, don't convert to a scratch wave offset relative index.
1448 int64_t Offset = FrameInfo.getObjectOffset(Index);
1449 FIOp.ChangeToImmediate(Offset);
1450 if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
1451 Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1452 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
1454 FIOp.ChangeToRegister(TmpReg, false, false, true);
1460 StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
1461 return AMDGPUInstPrinter::getRegisterName(Reg);
1464 const TargetRegisterClass *
1465 SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) {
1467 return &AMDGPU::VReg_1RegClass;
1469 return &AMDGPU::VGPR_LO16RegClass;
1471 return &AMDGPU::VGPR_32RegClass;
1473 return &AMDGPU::VReg_64RegClass;
1475 return &AMDGPU::VReg_96RegClass;
1476 if (BitWidth <= 128)
1477 return &AMDGPU::VReg_128RegClass;
1478 if (BitWidth <= 160)
1479 return &AMDGPU::VReg_160RegClass;
1480 if (BitWidth <= 192)
1481 return &AMDGPU::VReg_192RegClass;
1482 if (BitWidth <= 256)
1483 return &AMDGPU::VReg_256RegClass;
1484 if (BitWidth <= 512)
1485 return &AMDGPU::VReg_512RegClass;
1486 if (BitWidth <= 1024)
1487 return &AMDGPU::VReg_1024RegClass;
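// Requested widths round up to the next available class; e.g. (widths chosen
// for illustration) 96 bits maps to VReg_96 while 200 bits maps to VReg_256.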
1492 const TargetRegisterClass *
1493 SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) {
1495 return &AMDGPU::AGPR_LO16RegClass;
1497 return &AMDGPU::AGPR_32RegClass;
1499 return &AMDGPU::AReg_64RegClass;
1501 return &AMDGPU::AReg_96RegClass;
1502 if (BitWidth <= 128)
1503 return &AMDGPU::AReg_128RegClass;
1504 if (BitWidth <= 160)
1505 return &AMDGPU::AReg_160RegClass;
1506 if (BitWidth <= 192)
1507 return &AMDGPU::AReg_192RegClass;
1508 if (BitWidth <= 256)
1509 return &AMDGPU::AReg_256RegClass;
1510 if (BitWidth <= 512)
1511 return &AMDGPU::AReg_512RegClass;
1512 if (BitWidth <= 1024)
1513 return &AMDGPU::AReg_1024RegClass;
1518 const TargetRegisterClass *
1519 SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
1521 return &AMDGPU::SGPR_LO16RegClass;
1523 return &AMDGPU::SReg_32RegClass;
1525 return &AMDGPU::SReg_64RegClass;
1527 return &AMDGPU::SGPR_96RegClass;
1528 if (BitWidth <= 128)
1529 return &AMDGPU::SGPR_128RegClass;
1530 if (BitWidth <= 160)
1531 return &AMDGPU::SGPR_160RegClass;
1532 if (BitWidth <= 192)
1533 return &AMDGPU::SGPR_192RegClass;
1534 if (BitWidth <= 256)
1535 return &AMDGPU::SGPR_256RegClass;
1536 if (BitWidth <= 512)
1537 return &AMDGPU::SGPR_512RegClass;
1538 if (BitWidth <= 1024)
1539 return &AMDGPU::SGPR_1024RegClass;
1544 // FIXME: This is very slow. It might be worth creating a map from physreg to register class.
1546 const TargetRegisterClass *
1547 SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
1548 static const TargetRegisterClass *const BaseClasses[] = {
1549 &AMDGPU::VGPR_LO16RegClass,
1550 &AMDGPU::VGPR_HI16RegClass,
1551 &AMDGPU::SReg_LO16RegClass,
1552 &AMDGPU::AGPR_LO16RegClass,
1553 &AMDGPU::VGPR_32RegClass,
1554 &AMDGPU::SReg_32RegClass,
1555 &AMDGPU::AGPR_32RegClass,
1556 &AMDGPU::VReg_64RegClass,
1557 &AMDGPU::SReg_64RegClass,
1558 &AMDGPU::AReg_64RegClass,
1559 &AMDGPU::VReg_96RegClass,
1560 &AMDGPU::SReg_96RegClass,
1561 &AMDGPU::AReg_96RegClass,
1562 &AMDGPU::VReg_128RegClass,
1563 &AMDGPU::SReg_128RegClass,
1564 &AMDGPU::AReg_128RegClass,
1565 &AMDGPU::VReg_160RegClass,
1566 &AMDGPU::SReg_160RegClass,
1567 &AMDGPU::AReg_160RegClass,
1568 &AMDGPU::VReg_192RegClass,
1569 &AMDGPU::SReg_192RegClass,
1570 &AMDGPU::AReg_192RegClass,
1571 &AMDGPU::VReg_256RegClass,
1572 &AMDGPU::SReg_256RegClass,
1573 &AMDGPU::AReg_256RegClass,
1574 &AMDGPU::VReg_512RegClass,
1575 &AMDGPU::SReg_512RegClass,
1576 &AMDGPU::AReg_512RegClass,
1577 &AMDGPU::SReg_1024RegClass,
1578 &AMDGPU::VReg_1024RegClass,
1579 &AMDGPU::AReg_1024RegClass,
1580 &AMDGPU::SCC_CLASSRegClass,
1581 &AMDGPU::Pseudo_SReg_32RegClass,
1582 &AMDGPU::Pseudo_SReg_128RegClass,
1585 for (const TargetRegisterClass *BaseClass : BaseClasses) {
1586 if (BaseClass->contains(Reg)) {
1593 // TODO: It might be helpful to have some target specific flags in
1594 // TargetRegisterClass to mark which classes are VGPRs to make this trivial.
1595 bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
1596 unsigned Size = getRegSizeInBits(*RC);
1598 return getCommonSubClass(&AMDGPU::VGPR_LO16RegClass, RC) != nullptr ||
1599 getCommonSubClass(&AMDGPU::VGPR_HI16RegClass, RC) != nullptr;
1601 const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
1603 assert(Size < 32 && "Invalid register class size");
1606 return getCommonSubClass(VRC, RC) != nullptr;
1609 bool SIRegisterInfo::hasAGPRs(const TargetRegisterClass *RC) const {
1610 unsigned Size = getRegSizeInBits(*RC);
1613 const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
1615 assert(getVGPRClassForBitWidth(Size) && "Invalid register class size");
1618 return getCommonSubClass(ARC, RC) != nullptr;
1621 const TargetRegisterClass *
1622 SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const {
1623 unsigned Size = getRegSizeInBits(*SRC);
1624 const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
1625 assert(VRC && "Invalid register class size");
1629 const TargetRegisterClass *
1630 SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const {
1631 unsigned Size = getRegSizeInBits(*SRC);
1632 const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
1633 assert(ARC && "Invalid register class size");
1637 const TargetRegisterClass *
1638 SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
1639 unsigned Size = getRegSizeInBits(*VRC);
1641 return &AMDGPU::SGPR_32RegClass;
1642 const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
1643 assert(SRC && "Invalid register class size");
1647 const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
1648 const TargetRegisterClass *RC, unsigned SubIdx) const {
1649 if (SubIdx == AMDGPU::NoSubRegister)
1652 // We can assume that each lane corresponds to one 32-bit register.
1653 unsigned Size = getNumChannelsFromSubReg(SubIdx) * 32;
1654 if (isSGPRClass(RC)) {
1656 RC = &AMDGPU::SGPR_32RegClass;
1658 RC = getSGPRClassForBitWidth(Size);
1659 } else if (hasAGPRs(RC)) {
1660 RC = getAGPRClassForBitWidth(Size);
1662 RC = getVGPRClassForBitWidth(Size);
1664 assert(RC && "Invalid sub-register class size");
1668 bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
1669 if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
1670 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
1671 return !ST.hasMFMAInlineLiteralBug();
1673 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
1674 OpType <= AMDGPU::OPERAND_SRC_LAST;
1677 bool SIRegisterInfo::shouldRewriteCopySrc(
1678 const TargetRegisterClass *DefRC,
1680 const TargetRegisterClass *SrcRC,
1681 unsigned SrcSubReg) const {
1682 // We want to prefer the smallest register class possible, so we don't want to
1683 // stop and rewrite on anything that looks like a subregister
1684 // extract. Operations mostly don't care about the super register class, so we
1685 // only want to stop on the most basic of copies between the same register class.
1688 // e.g. if we have something like
1691 // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
1692 // %3 = COPY %2, sub0
1694 // We want to look through the COPY to find:
// %3 = COPY %0
1698 return getCommonSubClass(DefRC, SrcRC) != nullptr;
1701 /// Returns the lowest register that is not used at any point in the function.
1702 /// If all registers are used, then this function will return
1703 /// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then return the
1704 /// highest unused register.
1705 MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
1706 const TargetRegisterClass *RC,
1707 const MachineFunction &MF,
1708 bool ReserveHighestVGPR) const {
1709 if (ReserveHighestVGPR) {
1710 for (MCRegister Reg : reverse(*RC))
1711 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
1714 for (MCRegister Reg : *RC)
1715 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
1718 return MCRegister();
1721 ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
1722 unsigned EltSize) const {
1723 const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
1724 assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
1726 const unsigned RegDWORDs = RegBitWidth / 32;
1727 const unsigned EltDWORDs = EltSize / 4;
1728 assert(RegSplitParts.size() + 1 >= EltDWORDs);
1730 const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
1731 const unsigned NumParts = RegDWORDs / EltDWORDs;
1733 return makeArrayRef(Parts.data(), NumParts);
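// Illustrative usage (class and element size assumed): for a 128-bit register
// class with EltSize == 8, RegDWORDs = 4 and EltDWORDs = 2, so the result
// would hold two entries such as {sub0_sub1, sub2_sub3}.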
1736 const TargetRegisterClass*
1737 SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
1738 Register Reg) const {
1739 return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg);
1742 bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
1743 Register Reg) const {
1744 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
1745 // Registers without classes are unaddressable, SGPR-like registers.
1746 return RC && hasVGPRs(RC);
1749 bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
1750 Register Reg) const {
1751 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
1753 // Registers without classes are unaddressable, SGPR-like registers.
1754 return RC && hasAGPRs(RC);
1757 bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
1758 const TargetRegisterClass *SrcRC,
1760 const TargetRegisterClass *DstRC,
1762 const TargetRegisterClass *NewRC,
1763 LiveIntervals &LIS) const {
1764 unsigned SrcSize = getRegSizeInBits(*SrcRC);
1765 unsigned DstSize = getRegSizeInBits(*DstRC);
1766 unsigned NewSize = getRegSizeInBits(*NewRC);
1768 // Do not increase the size of registers beyond a dword; we would need to
1769 // allocate adjacent registers and constrain regalloc more than needed.
1771 // Always allow dword coalescing.
1772 if (SrcSize <= 32 || DstSize <= 32)
1775 return NewSize <= DstSize || NewSize <= SrcSize;
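// Example (sizes assumed): coalescing two 64-bit registers into a 64-bit
// result is allowed, but merging them into a single 128-bit register is
// rejected here, since that would force allocation of adjacent register pairs.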
1778 unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
1779 MachineFunction &MF) const {
1780 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1782 unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
1784 switch (RC->getID()) {
1786 return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF);
1787 case AMDGPU::VGPR_32RegClassID:
1788 case AMDGPU::VGPR_LO16RegClassID:
1789 case AMDGPU::VGPR_HI16RegClassID:
1790 return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
1791 case AMDGPU::SGPR_32RegClassID:
1792 case AMDGPU::SGPR_LO16RegClassID:
1793 return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
1797 unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
1798 unsigned Idx) const {
1799 if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
1800 Idx == AMDGPU::RegisterPressureSets::AGPR_32)
1801 return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
1802 const_cast<MachineFunction &>(MF));
1804 if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
1805 return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
1806 const_cast<MachineFunction &>(MF));
1808 llvm_unreachable("Unexpected register pressure set!");
1811 const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
1812 static const int Empty[] = { -1 };
1814 if (RegPressureIgnoredUnits[RegUnit])
1817 return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
1820 MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
1821 // Not a callee saved register.
1822 return AMDGPU::SGPR30_SGPR31;
1825 const TargetRegisterClass *
1826 SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
1827 const RegisterBank &RB,
1828 const MachineRegisterInfo &MRI) const {
1829 switch (RB.getID()) {
1830 case AMDGPU::VGPRRegBankID:
1831 return getVGPRClassForBitWidth(std::max(32u, Size));
1832 case AMDGPU::VCCRegBankID:
1834 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
1835 : &AMDGPU::SReg_64_XEXECRegClass;
1836 case AMDGPU::SGPRRegBankID:
1837 return getSGPRClassForBitWidth(std::max(32u, Size));
1838 case AMDGPU::AGPRRegBankID:
1839 return getAGPRClassForBitWidth(std::max(32u, Size));
1841 llvm_unreachable("unknown register bank");
1845 const TargetRegisterClass *
1846 SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
1847 const MachineRegisterInfo &MRI) const {
1848 const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
1849 if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>())
1850 return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);
1852 const TargetRegisterClass *RC = RCOrRB.get<const TargetRegisterClass*>();
1853 return getAllocatableClass(RC);
1856 MCRegister SIRegisterInfo::getVCC() const {
1857 return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
1860 const TargetRegisterClass *
1861 SIRegisterInfo::getRegClass(unsigned RCID) const {
1862 switch ((int)RCID) {
1863 case AMDGPU::SReg_1RegClassID:
1865 case AMDGPU::SReg_1_XEXECRegClassID:
1866 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
1867 : &AMDGPU::SReg_64_XEXECRegClass;
1871 return AMDGPUGenRegisterInfo::getRegClass(RCID);
1875 // Find reaching register definition
1876 MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
1878 MachineRegisterInfo &MRI,
1879 LiveIntervals *LIS) const {
1880 auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
1881 SlotIndex UseIdx = LIS->getInstructionIndex(Use);
1884 if (Reg.isVirtual()) {
1885 if (!LIS->hasInterval(Reg))
1887 LiveInterval &LI = LIS->getInterval(Reg);
1888 LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
1889 : MRI.getMaxLaneMaskForVReg(Reg);
1890 VNInfo *V = nullptr;
1891 if (LI.hasSubRanges()) {
1892 for (auto &S : LI.subranges()) {
1893 if ((S.LaneMask & SubLanes) == SubLanes) {
1894 V = S.getVNInfoAt(UseIdx);
1899 V = LI.getVNInfoAt(UseIdx);
1906 for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units) {
1907 LiveRange &LR = LIS->getRegUnit(*Units);
1908 if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
1909 if (!DefIdx.isValid() ||
1910 MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
1911 LIS->getInstructionFromIndex(V->def)))
1919 MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);
1921 if (!Def || !MDT.dominates(Def, &Use))
1924 assert(Def->modifiesRegister(Reg, this));
1929 MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const {
1930 assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);
1932 for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
1933 AMDGPU::SReg_32RegClass,
1934 AMDGPU::AGPR_32RegClass } ) {
1935 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
1938 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
1939 &AMDGPU::VGPR_32RegClass)) {
1943 return AMDGPU::NoRegister;
1946 bool SIRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
1948 case AMDGPU::SGPR_NULL:
1949 case AMDGPU::SRC_SHARED_BASE:
1950 case AMDGPU::SRC_PRIVATE_BASE:
1951 case AMDGPU::SRC_SHARED_LIMIT:
1952 case AMDGPU::SRC_PRIVATE_LIMIT:
1960 SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
1961 return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
1962 ST.getMaxNumSGPRs(MF) / 4);
1966 SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
1967 return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
1971 SIRegisterInfo::getAllVGPR32(const MachineFunction &MF) const {
1972 return makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), ST.getMaxNumVGPRs(MF));