//===- SIMachineFunctionInfo.cpp - SI Machine Function Info ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDGPUSubtarget.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include <cassert>
#include <optional>
#include <vector>

#define MAX_LANES 64

using namespace llvm;

const GCNTargetMachine &getTM(const GCNSubtarget *STI) {
  const SITargetLowering *TLI = STI->getTargetLowering();
  return static_cast<const GCNTargetMachine &>(TLI->getTargetMachine());
}

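// Seed the function info from IR-level facts alone: the calling convention
// and function attributes decide which implicit arguments and special
// registers the function will need, before any codegen has run.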
SIMachineFunctionInfo::SIMachineFunctionInfo(const Function &F,
                                             const GCNSubtarget *STI)
    : AMDGPUMachineFunction(F, *STI),
      Mode(F, *STI),
      GWSResourcePSV(getTM(STI)),
      PrivateSegmentBuffer(false),
      DispatchPtr(false),
      QueuePtr(false),
      KernargSegmentPtr(false),
      DispatchID(false),
      FlatScratchInit(false),
      WorkGroupIDX(false),
      WorkGroupIDY(false),
      WorkGroupIDZ(false),
      WorkGroupInfo(false),
      LDSKernelId(false),
      PrivateSegmentWaveByteOffset(false),
      WorkItemIDX(false),
      WorkItemIDY(false),
      WorkItemIDZ(false),
      ImplicitBufferPtr(false),
      ImplicitArgPtr(false),
      GITPtrHigh(0xffffffff),
      HighBitsOf32BitAddress(0) {
  const GCNSubtarget &ST = *static_cast<const GCNSubtarget *>(STI);
  FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
  WavesPerEU = ST.getWavesPerEU(F);

  Occupancy = ST.computeOccupancy(F, getLDSSize());
  CallingConv::ID CC = F.getCallingConv();

  VRegFlags.reserve(1024);

  // FIXME: Should have analysis or something rather than attribute to detect
  // calls.
  const bool HasCalls = F.hasFnAttribute("amdgpu-calls");

  const bool IsKernel = CC == CallingConv::AMDGPU_KERNEL ||
                        CC == CallingConv::SPIR_KERNEL;

  if (IsKernel) {
    if (!F.arg_empty() || ST.getImplicitArgNumBytes(F) != 0)
      KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  } else if (CC == CallingConv::AMDGPU_PS) {
    PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
  }

  MayNeedAGPRs = ST.hasMAIInsts();

  if (!isEntryFunction()) {
    if (CC != CallingConv::AMDGPU_Gfx)
      ArgInfo = AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

    // TODO: Pick a high register, and shift down, similar to a kernel.
    FrameOffsetReg = AMDGPU::SGPR33;
    StackPtrOffsetReg = AMDGPU::SGPR32;

    if (!ST.enableFlatScratch()) {
      // Non-entry functions have no special inputs for now; other registers
      // are required for scratch access.
      ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;

      ArgInfo.PrivateSegmentBuffer =
        ArgDescriptor::createRegister(ScratchRSrcReg);
    }

    if (!F.hasFnAttribute("amdgpu-no-implicitarg-ptr"))
      ImplicitArgPtr = true;
  } else {
    ImplicitArgPtr = false;
    MaxKernArgAlign = std::max(ST.getAlignmentForImplicitArgPtr(),
                               MaxKernArgAlign);

    if (ST.hasGFX90AInsts() &&
        ST.getMaxNumVGPRs(F) <= AMDGPU::VGPR_32RegClass.getNumRegs() &&
        !mayUseAGPRs(F))
      MayNeedAGPRs = false; // We will select all MAI with VGPR operands.
  }

  bool isAmdHsaOrMesa = ST.isAmdHsaOrMesa(F);
  if (isAmdHsaOrMesa && !ST.enableFlatScratch())
    PrivateSegmentBuffer = true;
  else if (ST.isMesaGfxShader(F))
    ImplicitBufferPtr = true;

  if (!AMDGPU::isGraphics(CC) ||
      (CC == CallingConv::AMDGPU_CS && ST.hasArchitectedSGPRs())) {
    if (IsKernel || !F.hasFnAttribute("amdgpu-no-workgroup-id-x"))
      WorkGroupIDX = true;

    if (!F.hasFnAttribute("amdgpu-no-workgroup-id-y"))
      WorkGroupIDY = true;

    if (!F.hasFnAttribute("amdgpu-no-workgroup-id-z"))
      WorkGroupIDZ = true;
  }

  if (!AMDGPU::isGraphics(CC)) {
    if (IsKernel || !F.hasFnAttribute("amdgpu-no-workitem-id-x"))
      WorkItemIDX = true;

    if (!F.hasFnAttribute("amdgpu-no-workitem-id-y") &&
        ST.getMaxWorkitemID(F, 1) != 0)
      WorkItemIDY = true;

    if (!F.hasFnAttribute("amdgpu-no-workitem-id-z") &&
        ST.getMaxWorkitemID(F, 2) != 0)
      WorkItemIDZ = true;

    if (!F.hasFnAttribute("amdgpu-no-dispatch-ptr"))
      DispatchPtr = true;

    if (!F.hasFnAttribute("amdgpu-no-queue-ptr"))
      QueuePtr = true;

    if (!F.hasFnAttribute("amdgpu-no-dispatch-id"))
      DispatchID = true;

    if (!IsKernel && !F.hasFnAttribute("amdgpu-no-lds-kernel-id"))
      LDSKernelId = true;
  }

  // FIXME: This attribute is a hack, we just need an analysis on the function
  // to look for allocas.
  bool HasStackObjects = F.hasFnAttribute("amdgpu-stack-objects");

  // TODO: This could be refined a lot. The attribute is a poor way of
  // detecting calls or stack objects that may require it before argument
  // lowering.
  if (ST.hasFlatAddressSpace() && isEntryFunction() &&
      (isAmdHsaOrMesa || ST.enableFlatScratch()) &&
      (HasCalls || HasStackObjects || ST.enableFlatScratch()) &&
      !ST.flatScratchIsArchitected()) {
    FlatScratchInit = true;
  }

  if (isEntryFunction()) {
    // X, XY, and XYZ are the only supported combinations, so make sure Y is
    // enabled if Z is.
    if (WorkItemIDZ)
      WorkItemIDY = true;

    if (!ST.flatScratchIsArchitected()) {
      PrivateSegmentWaveByteOffset = true;

      // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
      if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
          (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
        ArgInfo.PrivateSegmentWaveByteOffset =
            ArgDescriptor::createRegister(AMDGPU::SGPR5);
    }
  }

  Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
  StringRef S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GITPtrHigh);

  A = F.getFnAttribute("amdgpu-32bit-address-high-bits");
  S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, HighBitsOf32BitAddress);

  // On GFX908, in order to guarantee copying between AGPRs, we need a scratch
  // VGPR available at all times. For now, reserve the highest available VGPR.
  // After RA, shift it to the lowest available unused VGPR if one exists.
  if (ST.hasMAIInsts() && !ST.hasGFX90AInsts()) {
    VGPRForAGPRCopy =
        AMDGPU::VGPR_32RegClass.getRegister(ST.getMaxNumVGPRs(F) - 1);
  }
}

MachineFunctionInfo *SIMachineFunctionInfo::clone(
    BumpPtrAllocator &Allocator, MachineFunction &DestMF,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
    const {
  return DestMF.cloneInfo<SIMachineFunctionInfo>(*this);
}

void SIMachineFunctionInfo::limitOccupancy(const MachineFunction &MF) {
  limitOccupancy(getMaxWavesPerEU());
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  limitOccupancy(ST.getOccupancyWithLocalMemSize(getLDSSize(),
                 MF.getFunction()));
}

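// The add* methods below bind the next free user SGPRs to a preloaded
// implicit kernel argument and record the choice in ArgInfo: four SGPRs for
// the private segment buffer resource descriptor, an SGPR pair for each
// 64-bit pointer, and a single SGPR for the LDS kernel ID.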
Register SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  ArgInfo.PrivateSegmentBuffer =
    ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
      getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SGPR_128RegClass));
  NumUserSGPRs += 4;
  return ArgInfo.PrivateSegmentBuffer.getRegister();
}

Register SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchPtr.getRegister();
}

Register SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  ArgInfo.QueuePtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.QueuePtr.getRegister();
}

Register SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  ArgInfo.KernargSegmentPtr
    = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
      getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.KernargSegmentPtr.getRegister();
}

Register SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchID = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchID.getRegister();
}

Register SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  ArgInfo.FlatScratchInit = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.FlatScratchInit.getRegister();
}

Register SIMachineFunctionInfo::addImplicitBufferPtr(const SIRegisterInfo &TRI) {
  ArgInfo.ImplicitBufferPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.ImplicitBufferPtr.getRegister();
}

Register SIMachineFunctionInfo::addLDSKernelId() {
  ArgInfo.LDSKernelId = ArgDescriptor::createRegister(getNextUserSGPR());
  NumUserSGPRs += 1;
  return ArgInfo.LDSKernelId.getRegister();
}

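// WWM (whole wave mode) values are live in all lanes, including lanes
// disabled by EXEC, so a register holding one must be spilled and restored
// around the whole function body rather than per lane.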
void SIMachineFunctionInfo::allocateWWMSpill(MachineFunction &MF, Register VGPR,
                                             uint64_t Size, Align Alignment) {
  // Skip if it is an entry function or the register is already added.
  if (isEntryFunction() || WWMSpills.count(VGPR))
    return;

  WWMSpills.insert(std::make_pair(
      VGPR, MF.getFrameInfo().CreateSpillStackObject(Size, Alignment)));
}

// Separate out the callee-saved and scratch registers.
void SIMachineFunctionInfo::splitWWMSpillRegisters(
    MachineFunction &MF,
    SmallVectorImpl<std::pair<Register, int>> &CalleeSavedRegs,
    SmallVectorImpl<std::pair<Register, int>> &ScratchRegs) const {
  const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
  for (auto &Reg : WWMSpills) {
    if (isCalleeSavedReg(CSRegs, Reg.first))
      CalleeSavedRegs.push_back(Reg);
    else
      ScratchRegs.push_back(Reg);
  }
}

bool SIMachineFunctionInfo::isCalleeSavedReg(const MCPhysReg *CSRegs,
                                             MCPhysReg Reg) const {
  for (unsigned I = 0; CSRegs[I]; ++I) {
    if (CSRegs[I] == Reg)
      return true;
  }

  return false;
}

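// Map one lane of an SGPR spill to a virtual VGPR: lane 0 creates a fresh
// virtual register, and subsequent lanes pack into the most recently created
// one.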
bool SIMachineFunctionInfo::allocateVirtualVGPRForSGPRSpills(
    MachineFunction &MF, int FI, unsigned LaneIndex) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LaneVGPR;
  if (!LaneIndex) {
    LaneVGPR = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    SpillVGPRs.push_back(LaneVGPR);
  } else {
    LaneVGPR = SpillVGPRs.back();
  }

  SGPRSpillsToVirtualVGPRLanes[FI].push_back(
      SIRegisterInfo::SpilledReg(LaneVGPR, LaneIndex));
  return true;
}

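// The physical-VGPR variant used for prolog/epilog (CSR) spills: pick an
// unused VGPR, reserve it as a WWM register, and make it a live-in of every
// block so it survives the whole function.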
bool SIMachineFunctionInfo::allocatePhysicalVGPRForSGPRSpills(
    MachineFunction &MF, int FI, unsigned LaneIndex) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LaneVGPR;
  if (!LaneIndex) {
    LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
    if (LaneVGPR == AMDGPU::NoRegister) {
      // We have no VGPRs left for spilling SGPRs. Reset because we will not
      // partially spill the SGPR to VGPRs.
      SGPRSpillsToPhysicalVGPRLanes.erase(FI);
      return false;
    }

    allocateWWMSpill(MF, LaneVGPR);
    reserveWWMRegister(LaneVGPR);
    for (MachineBasicBlock &MBB : MF) {
      MBB.addLiveIn(LaneVGPR);
      MBB.sortUniqueLiveIns();
    }
  } else {
    LaneVGPR = WWMReservedRegs.back();
  }

  SGPRSpillsToPhysicalVGPRLanes[FI].push_back(
      SIRegisterInfo::SpilledReg(LaneVGPR, LaneIndex));
  return true;
}

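// Reserve a VGPR lane for each 4 bytes of the SGPR spill slot \p FI, failing
// (and rolling back the lane count) if the slot cannot be fully covered.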
bool SIMachineFunctionInfo::allocateSGPRSpillToVGPRLane(MachineFunction &MF,
                                                        int FI,
                                                        bool IsPrologEpilog) {
  std::vector<SIRegisterInfo::SpilledReg> &SpillLanes =
      IsPrologEpilog ? SGPRSpillsToPhysicalVGPRLanes[FI]
                     : SGPRSpillsToVirtualVGPRLanes[FI];

  // This has already been allocated.
  if (!SpillLanes.empty())
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  unsigned WaveSize = ST.getWavefrontSize();

  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;

  if (NumLanes > WaveSize)
    return false;

  assert(Size >= 4 && "invalid sgpr spill size");
  assert(ST.getRegisterInfo()->spillSGPRToVGPR() &&
         "not spilling SGPRs to VGPRs");

  unsigned &NumSpillLanes =
      IsPrologEpilog ? NumPhysicalVGPRSpillLanes : NumVirtualVGPRSpillLanes;

  for (unsigned I = 0; I < NumLanes; ++I, ++NumSpillLanes) {
    unsigned LaneIndex = (NumSpillLanes % WaveSize);

    bool Allocated = IsPrologEpilog
                         ? allocatePhysicalVGPRForSGPRSpills(MF, FI, LaneIndex)
                         : allocateVirtualVGPRForSGPRSpills(MF, FI, LaneIndex);
    if (!Allocated) {
      NumSpillLanes -= I;
      return false;
    }
  }

  return true;
}

/// Reserve AGPRs or VGPRs to support spilling for FrameIndex \p FI.
/// Either AGPR is spilled to VGPR or vice versa.
/// Returns true if \p FI can be eliminated completely.
bool SIMachineFunctionInfo::allocateVGPRSpillToAGPR(MachineFunction &MF,
                                                    int FI,
                                                    bool isAGPRtoVGPR) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  assert(ST.hasMAIInsts() && FrameInfo.isSpillSlotObjectIndex(FI));

  auto &Spill = VGPRToAGPRSpills[FI];

  // This has already been allocated.
  if (!Spill.Lanes.empty())
    return Spill.FullyAllocated;

  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;
  Spill.Lanes.resize(NumLanes, AMDGPU::NoRegister);

  const TargetRegisterClass &RC =
      isAGPRtoVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::AGPR_32RegClass;
  auto Regs = RC.getRegisters();

  auto &SpillRegs = isAGPRtoVGPR ? SpillAGPR : SpillVGPR;
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  Spill.FullyAllocated = true;

  // FIXME: Move allocation logic out of MachineFunctionInfo and initialize
  // once.
  BitVector OtherUsedRegs;
  OtherUsedRegs.resize(TRI->getNumRegs());

  const uint32_t *CSRMask =
      TRI->getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (CSRMask)
    OtherUsedRegs.setBitsInMask(CSRMask);

  // TODO: Should include register tuples, but doesn't matter with current
  // usage.
  for (MCPhysReg Reg : SpillAGPR)
    OtherUsedRegs.set(Reg);
  for (MCPhysReg Reg : SpillVGPR)
    OtherUsedRegs.set(Reg);

  SmallVectorImpl<MCPhysReg>::const_iterator NextSpillReg = Regs.begin();
  for (int I = NumLanes - 1; I >= 0; --I) {
    NextSpillReg = std::find_if(
        NextSpillReg, Regs.end(), [&MRI, &OtherUsedRegs](MCPhysReg Reg) {
          return MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg) &&
                 !OtherUsedRegs[Reg];
        });

    if (NextSpillReg == Regs.end()) { // Registers exhausted
      Spill.FullyAllocated = false;
      break;
    }

    OtherUsedRegs.set(*NextSpillReg);
    SpillRegs.push_back(*NextSpillReg);
    MRI.reserveReg(*NextSpillReg, TRI);
    Spill.Lanes[I] = *NextSpillReg++;
  }

  return Spill.FullyAllocated;
}

bool SIMachineFunctionInfo::removeDeadFrameIndices(
    MachineFrameInfo &MFI, bool ResetSGPRSpillStackIDs) {
  // Remove dead frame indices from the function frame, but keep FP & BP since
  // spills for them haven't been inserted yet. Also make sure to remove the
  // frame indices from the `SGPRSpillsToVirtualVGPRLanes` data structure;
  // otherwise a later pass such as "stack slot coloring" could re-map the
  // freed frame indices and cause unexpected side effects and bugs.
  for (auto &R : make_early_inc_range(SGPRSpillsToVirtualVGPRLanes)) {
    MFI.RemoveStackObject(R.first);
    SGPRSpillsToVirtualVGPRLanes.erase(R.first);
  }

  // Remove the dead frame indices of CSR SGPRs which are spilled to physical
  // VGPR lanes during the SILowerSGPRSpills pass.
  if (!ResetSGPRSpillStackIDs) {
    for (auto &R : make_early_inc_range(SGPRSpillsToPhysicalVGPRLanes)) {
      MFI.RemoveStackObject(R.first);
      SGPRSpillsToPhysicalVGPRLanes.erase(R.first);
    }
  }
  bool HaveSGPRToMemory = false;

  if (ResetSGPRSpillStackIDs) {
    // All other SGPRs must be allocated on the default stack, so reset the
    // stack ID.
    for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd(); I != E;
         ++I) {
      if (!checkIndexInPrologEpilogSGPRSpills(I)) {
        if (MFI.getStackID(I) == TargetStackID::SGPRSpill) {
          MFI.setStackID(I, TargetStackID::Default);
          HaveSGPRToMemory = true;
        }
      }
    }
  }

  for (auto &R : VGPRToAGPRSpills) {
    if (R.second.IsDead)
      MFI.RemoveStackObject(R.first);
  }

  return HaveSGPRToMemory;
}

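// Lazily create the emergency stack slot used by the register scavenger:
// a fixed object at offset 0 for entry functions, an ordinary stack object
// otherwise.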
int SIMachineFunctionInfo::getScavengeFI(MachineFrameInfo &MFI,
                                         const SIRegisterInfo &TRI) {
  if (ScavengeFI)
    return *ScavengeFI;
  if (isEntryFunction()) {
    ScavengeFI = MFI.CreateFixedObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
  } else {
    ScavengeFI = MFI.CreateStackObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass),
        TRI.getSpillAlign(AMDGPU::SGPR_32RegClass), false);
  }
  return *ScavengeFI;
}

MCPhysReg SIMachineFunctionInfo::getNextUserSGPR() const {
  assert(NumSystemSGPRs == 0 && "System SGPRs must be added after user SGPRs");
  return AMDGPU::SGPR0 + NumUserSGPRs;
}

MCPhysReg SIMachineFunctionInfo::getNextSystemSGPR() const {
  return AMDGPU::SGPR0 + NumUserSGPRs + NumSystemSGPRs;
}

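// MachineRegisterInfo delegate callbacks, used to keep the per-virtual-
// register flags in sync as registers are created or cloned.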
void SIMachineFunctionInfo::MRI_NoteNewVirtualRegister(Register Reg) {
  VRegFlags.grow(Reg);
}

void SIMachineFunctionInfo::MRI_NoteCloneVirtualRegister(Register NewReg,
                                                         Register SrcReg) {
  VRegFlags.grow(NewReg);
  VRegFlags[NewReg] = VRegFlags[SrcReg];
}

Register
SIMachineFunctionInfo::getGITPtrLoReg(const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (!ST.isAmdPalOS())
    return Register();
  Register GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
  if (ST.hasMergedShaders()) {
    switch (MF.getFunction().getCallingConv()) {
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_GS:
      // Low GIT address is passed in s8 rather than s0 for an LS+HS or
      // ES+GS merged shader on gfx9+.
      GitPtrLo = AMDGPU::SGPR8;
      return GitPtrLo;
    default:
      return GitPtrLo;
    }
  }
  return GitPtrLo;
}

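// MIR serialization helpers: convert registers and argument descriptors into
// their YAML string form.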
static yaml::StringValue regToString(Register Reg,
                                     const TargetRegisterInfo &TRI) {
  yaml::StringValue Dest;
  {
    raw_string_ostream OS(Dest.Value);
    OS << printReg(Reg, &TRI);
  }
  return Dest;
}

static std::optional<yaml::SIArgumentInfo>
convertArgumentInfo(const AMDGPUFunctionArgInfo &ArgInfo,
                    const TargetRegisterInfo &TRI) {
  yaml::SIArgumentInfo AI;

  auto convertArg = [&](std::optional<yaml::SIArgument> &A,
                        const ArgDescriptor &Arg) {
    if (!Arg)
      return false;

    // Create a register or stack argument.
    yaml::SIArgument SA = yaml::SIArgument::createArgument(Arg.isRegister());
    if (Arg.isRegister()) {
      raw_string_ostream OS(SA.RegisterName.Value);
      OS << printReg(Arg.getRegister(), &TRI);
    } else
      SA.StackOffset = Arg.getStackOffset();
    // Check and update the optional mask.
    if (Arg.isMasked())
      SA.Mask = Arg.getMask();

    A = SA;
    return true;
  };

  bool Any = false;
  Any |= convertArg(AI.PrivateSegmentBuffer, ArgInfo.PrivateSegmentBuffer);
  Any |= convertArg(AI.DispatchPtr, ArgInfo.DispatchPtr);
  Any |= convertArg(AI.QueuePtr, ArgInfo.QueuePtr);
  Any |= convertArg(AI.KernargSegmentPtr, ArgInfo.KernargSegmentPtr);
  Any |= convertArg(AI.DispatchID, ArgInfo.DispatchID);
  Any |= convertArg(AI.FlatScratchInit, ArgInfo.FlatScratchInit);
  Any |= convertArg(AI.LDSKernelId, ArgInfo.LDSKernelId);
  Any |= convertArg(AI.PrivateSegmentSize, ArgInfo.PrivateSegmentSize);
  Any |= convertArg(AI.WorkGroupIDX, ArgInfo.WorkGroupIDX);
  Any |= convertArg(AI.WorkGroupIDY, ArgInfo.WorkGroupIDY);
  Any |= convertArg(AI.WorkGroupIDZ, ArgInfo.WorkGroupIDZ);
  Any |= convertArg(AI.WorkGroupInfo, ArgInfo.WorkGroupInfo);
  Any |= convertArg(AI.PrivateSegmentWaveByteOffset,
                    ArgInfo.PrivateSegmentWaveByteOffset);
  Any |= convertArg(AI.ImplicitArgPtr, ArgInfo.ImplicitArgPtr);
  Any |= convertArg(AI.ImplicitBufferPtr, ArgInfo.ImplicitBufferPtr);
  Any |= convertArg(AI.WorkItemIDX, ArgInfo.WorkItemIDX);
  Any |= convertArg(AI.WorkItemIDY, ArgInfo.WorkItemIDY);
  Any |= convertArg(AI.WorkItemIDZ, ArgInfo.WorkItemIDZ);

  if (Any)
    return AI;

  return std::nullopt;
}

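// Snapshot the in-memory function info into its YAML representation for MIR
// printing.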
yaml::SIMachineFunctionInfo::SIMachineFunctionInfo(
    const llvm::SIMachineFunctionInfo &MFI, const TargetRegisterInfo &TRI,
    const llvm::MachineFunction &MF)
    : ExplicitKernArgSize(MFI.getExplicitKernArgSize()),
      MaxKernArgAlign(MFI.getMaxKernArgAlign()), LDSSize(MFI.getLDSSize()),
      GDSSize(MFI.getGDSSize()),
      DynLDSAlign(MFI.getDynLDSAlign()), IsEntryFunction(MFI.isEntryFunction()),
      NoSignedZerosFPMath(MFI.hasNoSignedZerosFPMath()),
      MemoryBound(MFI.isMemoryBound()), WaveLimiter(MFI.needsWaveLimiter()),
      HasSpilledSGPRs(MFI.hasSpilledSGPRs()),
      HasSpilledVGPRs(MFI.hasSpilledVGPRs()),
      HighBitsOf32BitAddress(MFI.get32BitAddressHighBits()),
      Occupancy(MFI.getOccupancy()),
      ScratchRSrcReg(regToString(MFI.getScratchRSrcReg(), TRI)),
      FrameOffsetReg(regToString(MFI.getFrameOffsetReg(), TRI)),
      StackPtrOffsetReg(regToString(MFI.getStackPtrOffsetReg(), TRI)),
      BytesInStackArgArea(MFI.getBytesInStackArgArea()),
      ReturnsVoid(MFI.returnsVoid()),
      ArgInfo(convertArgumentInfo(MFI.getArgInfo(), TRI)),
      PSInputAddr(MFI.getPSInputAddr()),
      PSInputEnable(MFI.getPSInputEnable()),
      Mode(MFI.getMode()) {
  for (Register Reg : MFI.getWWMReservedRegs())
    WWMReservedRegs.push_back(regToString(Reg, TRI));

  if (MFI.getLongBranchReservedReg())
    LongBranchReservedReg = regToString(MFI.getLongBranchReservedReg(), TRI);
  if (MFI.getVGPRForAGPRCopy())
    VGPRForAGPRCopy = regToString(MFI.getVGPRForAGPRCopy(), TRI);

  if (MFI.getSGPRForEXECCopy())
    SGPRForEXECCopy = regToString(MFI.getSGPRForEXECCopy(), TRI);

  auto SFI = MFI.getOptionalScavengeFI();
  if (SFI)
    ScavengeFI = yaml::FrameIndex(*SFI, MF.getFrameInfo());
}

void yaml::SIMachineFunctionInfo::mappingImpl(yaml::IO &YamlIO) {
  MappingTraits<SIMachineFunctionInfo>::mapping(YamlIO, *this);
}

bool SIMachineFunctionInfo::initializeBaseYamlFields(
    const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF,
    PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) {
  ExplicitKernArgSize = YamlMFI.ExplicitKernArgSize;
  MaxKernArgAlign = YamlMFI.MaxKernArgAlign;
  LDSSize = YamlMFI.LDSSize;
  GDSSize = YamlMFI.GDSSize;
  DynLDSAlign = YamlMFI.DynLDSAlign;
  PSInputAddr = YamlMFI.PSInputAddr;
  PSInputEnable = YamlMFI.PSInputEnable;
  HighBitsOf32BitAddress = YamlMFI.HighBitsOf32BitAddress;
  Occupancy = YamlMFI.Occupancy;
  IsEntryFunction = YamlMFI.IsEntryFunction;
  NoSignedZerosFPMath = YamlMFI.NoSignedZerosFPMath;
  MemoryBound = YamlMFI.MemoryBound;
  WaveLimiter = YamlMFI.WaveLimiter;
  HasSpilledSGPRs = YamlMFI.HasSpilledSGPRs;
  HasSpilledVGPRs = YamlMFI.HasSpilledVGPRs;
  BytesInStackArgArea = YamlMFI.BytesInStackArgArea;
  ReturnsVoid = YamlMFI.ReturnsVoid;

  if (YamlMFI.ScavengeFI) {
    auto FIOrErr = YamlMFI.ScavengeFI->getFI(MF.getFrameInfo());
    if (!FIOrErr) {
      // Create a diagnostic for the frame index.
      const MemoryBuffer &Buffer =
          *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());

      Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, 1,
                           SourceMgr::DK_Error, toString(FIOrErr.takeError()),
                           "", std::nullopt, std::nullopt);
      SourceRange = YamlMFI.ScavengeFI->SourceRange;
      return true;
    }
    ScavengeFI = *FIOrErr;
  } else {
    ScavengeFI = std::nullopt;
  }
  return false;
}

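// Conservative IR-level scan for possible AGPR uses: inline asm mentioning an
// "a" register constraint, or any call whose callee cannot be proven to be a
// known intrinsic, forces the assumption that AGPRs may be needed.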
bool SIMachineFunctionInfo::mayUseAGPRs(const Function &F) const {
  for (const BasicBlock &BB : F) {
    for (const Instruction &I : BB) {
      const auto *CB = dyn_cast<CallBase>(&I);
      if (!CB)
        continue;

      if (CB->isInlineAsm()) {
        const InlineAsm *IA = dyn_cast<InlineAsm>(CB->getCalledOperand());
        for (const auto &CI : IA->ParseConstraints()) {
          for (StringRef Code : CI.Codes) {
            Code.consume_front("{");
            if (Code.startswith("a"))
              return true;
          }
        }
        continue;
      }

      const Function *Callee =
          dyn_cast<Function>(CB->getCalledOperand()->stripPointerCasts());
      if (!Callee)
        return true;

      if (Callee->getIntrinsicID() == Intrinsic::not_intrinsic)
        return true;
    }
  }

  return false;
}

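// Post-instruction-selection check of whether the function really uses AGPRs,
// caching the answer in UsesAGPRs once it is guaranteed to be final.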
bool SIMachineFunctionInfo::usesAGPRs(const MachineFunction &MF) const {
  if (UsesAGPRs)
    return *UsesAGPRs;

  if (!mayNeedAGPRs()) {
    UsesAGPRs = false;
    return false;
  }

  if (!AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv()) ||
      MF.getFrameInfo().hasCalls()) {
    UsesAGPRs = true;
    return true;
  }

  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    const Register Reg = Register::index2VirtReg(I);
    const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg);
    if (RC && SIRegisterInfo::isAGPRClass(RC)) {
      UsesAGPRs = true;
      return true;
    } else if (!RC && !MRI.use_empty(Reg) && MRI.getType(Reg).isValid()) {
      // Defer caching UsesAGPRs; the function might not have been regbank
      // selected yet.
      return true;
    }
  }

  for (MCRegister Reg : AMDGPU::AGPR_32RegClass) {
    if (MRI.isPhysRegUsed(Reg)) {
      UsesAGPRs = true;
      return true;
    }
  }

  UsesAGPRs = false;
  return false;
}