//===----------------------- SIFrameLowering.cpp --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;

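// Both helpers below return the low-numbered prefix of the SGPR file that the
// subtarget lets this function use. For the 128-bit class the count is
// getMaxNumSGPRs() / 4, since each SGPR_128 tuple spans four consecutive
// 32-bit SGPRs.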
static ArrayRef<MCPhysReg> getAllSGPR128(const SISubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const SISubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

void SIFrameLowering::emitFlatScratchInit(const SISubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo* TRI = &TII->getRegisterInfo();

  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.

  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = TRI->getPreloadedValue(MF, SIRegisterInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
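  // (For instance, a combined base + wave offset of 0x1000 bytes becomes
  // 0x1000 >> 8 == 0x10 in FLAT_SCR_HI, i.e. the same address expressed in
  // 256-byte granules, since 256 == 1 << 8.)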
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift the descriptor down to the
  // end of those which were actually used.

  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. The only
  // ones we cannot eliminate are the resources required for scratch access.
  // For now we skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

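  // For example, with 10 preloaded SGPRs this computes NumPreloaded =
  // (10 + 3) / 4 == 3, skipping the first three 4-SGPR tuples (s[0:3],
  // s[4:7], s[8:11]) that are at least partially occupied by preloaded
  // values.
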
  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down registers reserved for the scratch wave offset and stack pointer
// registers.
std::pair<unsigned, unsigned>
SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchWaveOffsetReg)) {
    assert(MFI->getStackPtrOffsetReg() == AMDGPU::SP_REG);
    return std::make_pair(AMDGPU::NoRegister, AMDGPU::NoRegister);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (ST.hasSGPRInitBug())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 because s102 and s103 do not exist on VI.
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for scratch resource register
  // + 1 for register reserved for scratch wave offset.  (By excluding this
  //     register from the list to consider, it means that when this
  //     register is being used for the scratch wave offset and there
  //     are no other free SGPRs, then the value will stay in this register.)
  // + 1 if stack pointer is used.
  // ----
  //  13 (+1)
  unsigned ReservedRegCount = 13;
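  // Note that ReservedRegCount covers only the guaranteed reservations
  // (2 + 2 + 2 + 2 + 4 + 1 == 13); the "+ 1 if stack pointer is used" case is
  // not folded into the constant.
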
  if (AllSGPRs.size() < ReservedRegCount)
    return std::make_pair(ScratchWaveOffsetReg, SPReg);

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        MFI->setScratchWaveOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
        break;
      }
    }
  }

  return std::make_pair(ScratchWaveOffsetReg, SPReg);
}

void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  // Emit debugger prologue if "amdgpu-debugger-emit-prologue" attribute was
  // specified.
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  auto AMDGPUASI = ST.getAMDGPUAS();
  if (ST.debuggerEmitPrologue())
    emitDebuggerPrologue(MF, MBB);

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to do the replacement of the private segment buffer and wave offset
  // register even if there are no stack objects. There could be stores to undef
  // or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  if (SPReg != AMDGPU::SP_REG) {
    assert(MRI.isReserved(SPReg) && "SPReg used but not reserved");

    DebugLoc DL;
    const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
    int64_t StackSize = FrameInfo.getStackSize();
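
    // Note: StackSize from MachineFrameInfo is in per-lane bytes, while SPReg
    // tracks a per-wave scratch byte offset, hence the multiplication by
    // ST.getWavefrontSize() (64 lanes on these GCN subtargets) below.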
    if (StackSize == 0) {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, MBB.begin(), DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg;
  std::tie(ScratchWaveOffsetReg, SPReg)
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // It's possible to have uses of only ScratchWaveOffsetReg without
  // ScratchRsrcReg if it's only used for the initialization of flat_scratch,
  // but the inverse is not true.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister) {
    assert(ScratchRsrcReg == AMDGPU::NoRegister);
    return;
  }

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = TRI->getPreloadedValue(
    MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdCodeObjectV2(MF)) {
    PreloadedPrivateBufferReg = TRI->getPreloadedValue(
      MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the registers we selected live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdCodeObjectV2(MF) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final
  // destination. Usually the offset should be copied first.
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
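
  // Example of the hazard: if the chosen ScratchWaveOffsetReg aliases one of
  // the four SGPRs of PreloadedPrivateBufferReg, copying the wave offset
  // first would clobber part of the not-yet-copied buffer descriptor, so in
  // that case the buffer copy must be emitted first.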
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg,
              MRI.isPhysRegUsed(ScratchWaveOffsetReg) ? 0 : RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed &&
      (ST.isMesaGfxShader(MF) ||
       (PreloadedPrivateBufferReg == AMDGPU::NoRegister))) {
    assert(!ST.isAmdCodeObjectV2(MF));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();
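    // Rsrc23 packs words 2 and 3 of the scratch buffer resource descriptor;
    // the low 32 bits go to Rsrc2 and the high 32 bits to Rsrc3 below (the
    // exact field encoding lives in getScratchRsrcWords23()).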

    if (MFI->hasImplicitBufferPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy = PointerType::get(
            Type::getInt64Ty(MF.getFunction()->getContext()),
            AMDGPUASI.CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           0, 0);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addImm(0) // offset
          .addImm(0) // glc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  unsigned FramePtrReg = FuncInfo->getFrameOffsetReg();

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool NeedFP = hasFP(MF);
  if (NeedFP) {
    // If we need a base pointer, set it up here. It's whatever the value of
    // the stack pointer is at this point. Any variable size objects will be
    // allocated after this, so we can still use the base pointer to reference
    // locals.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
      .addReg(StackPtrReg)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  uint32_t NumBytes = MFI.getStackSize();
  if (NumBytes != 0 && hasSP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(NumBytes * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  if (StackPtrReg == AMDGPU::NoRegister)
    return;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;

  // FIXME: Clarify the distinction between not setting SP and setting it. For
  // callee functions, it's really whether we need SP to be accurate or not.

  if (NumBytes != 0 && hasSP(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(NumBytes * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameDestroy);
  }
}

static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<SISubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally SGPRs
    // are spilled to VGPRs, in which case we can eliminate the stack usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills are
    // users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be to
    // merge allocas with spill slots, but for now according to
    // MachineFrameInfo isSpillSlot can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else {
            AllSGPRSpilledToVGPRs = false;
          }
        }
      }
    }

    FuncInfo->removeSGPRToVGPRFrameIndices(MFI);
  }

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // We force this to be at offset 0 so no user object ever has 0 as an
    // address, so we may use 0 as an invalid pointer value. This is because
    // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
    // is required to be address space 0, we are forced to accept this for
    // now.
    //
    // Ideally we could have the stack in another address space with 0 as a
    // valid pointer, and -1 as the null value.
    //
    // This will also waste additional space when user stack objects require > 4
    // byte alignment.
    //
    // The main cost here is losing the offset for addressing modes. However
    // this also ensures we shouldn't need a register for the offset when
    // emergency scavenging.
    int ScavengeFI = MFI.CreateFixedObject(
      TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Get work group ID SGPR, and make it live-in again.
    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
    MBB.addLiveIn(WorkGroupIDSGPR);

    // Since SGPRs are spilled into VGPRs, copy work group ID SGPR to VGPR in
    // order to spill it to scratch.
    unsigned WorkGroupIDVGPR =
      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
      .addReg(WorkGroupIDSGPR);

    // Spill work group ID.
    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
                             WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);

    // Get work item ID VGPR, and make it live-in again.
    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
    MBB.addLiveIn(WorkItemIDVGPR);

    // Spill work item ID.
    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
                             WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);
  }
}

bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  // All stack operations are relative to the frame offset SGPR.
  // TODO: Still want to eliminate sometimes.
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // XXX - Is this only called after frame is finalized? Should be able to check
  // frame size.
  return MFI.hasStackObjects() && !allStackObjectsAreDead(MFI);
}

bool SIFrameLowering::hasSP(const MachineFunction &MF) const {
  // All stack operations are relative to the frame offset SGPR.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.hasCalls() || MFI.hasVarSizedObjects();
}