//===-- AMDGPUAsmPrinter.cpp - AMDGPU assembly printer --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// The AMDGPUAsmPrinter is used to print both assembly string and also binary
/// code. When passed an MCAsmStreamer it prints assembly and when passed
/// an MCObjectStreamer it outputs binary code.
//
//===----------------------------------------------------------------------===//
//
#include "AMDGPUAsmPrinter.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "R600AsmPrinter.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/AMDGPUMetadata.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"

using namespace llvm;
using namespace llvm::AMDGPU;
using namespace llvm::AMDGPU::HSAMD;

// We need to tell the runtime some amount ahead of time if we don't know the
// true stack size. Assume a smaller number if this is only due to dynamic /
// non-entry block allocas.
static cl::opt<uint32_t> AssumedStackSizeForExternalCall(
  "amdgpu-assume-external-call-stack-size",
  cl::desc("Assumed stack use of any external call (in bytes)"),
  cl::Hidden,
  cl::init(16384));

static cl::opt<uint32_t> AssumedStackSizeForDynamicSizeObjects(
  "amdgpu-assume-dynamic-stack-object-size",
  cl::desc("Assumed extra stack use if there are any "
           "variable sized objects (in bytes)"),
  cl::Hidden,
  cl::init(4096));

// This should get the default rounding mode from the kernel. We just set the
// default here, but this could change if the OpenCL rounding mode pragmas are
// used.
//
// The denormal mode here should match what is reported by the OpenCL runtime
// for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but
// can also be overridden to flush with the -cl-denorms-are-zero compiler flag.
//
// AMD OpenCL only sets flush none and reports CL_FP_DENORM for double
// precision, and leaves single precision to flush all and does not report
// CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports
// CL_FP_DENORM for both.
//
// FIXME: It seems some instructions do not support single precision denormals
// regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, sqrt_f32, and
// sin_f32, cos_f32 on most parts).
//
// We want to use these instructions, and using fp32 denormals also causes
// instructions to run at the double precision rate for the device so it's
// probably best to just report no single precision denormals.
static uint32_t getFPMode(AMDGPU::SIModeRegisterDefaults Mode) {
  return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_DENORM_MODE_SP(Mode.fpDenormModeSPValue()) |
         FP_DENORM_MODE_DP(Mode.fpDenormModeDPValue());
}

static AsmPrinter *
createAMDGPUAsmPrinterPass(TargetMachine &tm,
                           std::unique_ptr<MCStreamer> &&Streamer) {
  return new AMDGPUAsmPrinter(tm, std::move(Streamer));
}

extern "C" void LLVM_EXTERNAL_VISIBILITY LLVMInitializeAMDGPUAsmPrinter() {
  TargetRegistry::RegisterAsmPrinter(getTheAMDGPUTarget(),
                                     llvm::createR600AsmPrinterPass);
  TargetRegistry::RegisterAsmPrinter(getTheGCNTarget(),
                                     createAMDGPUAsmPrinterPass);
}

AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM,
                                   std::unique_ptr<MCStreamer> Streamer)
  : AsmPrinter(TM, std::move(Streamer)) {
  if (IsaInfo::hasCodeObjectV3(getGlobalSTI()))
    HSAMetadataStream.reset(new MetadataStreamerV3());
  else
    HSAMetadataStream.reset(new MetadataStreamerV2());
}

StringRef AMDGPUAsmPrinter::getPassName() const {
  return "AMDGPU Assembly Printer";
}

const MCSubtargetInfo *AMDGPUAsmPrinter::getGlobalSTI() const {
  return TM.getMCSubtargetInfo();
}

AMDGPUTargetStreamer* AMDGPUAsmPrinter::getTargetStreamer() const {
  if (!OutStreamer)
    return nullptr;
  return static_cast<AMDGPUTargetStreamer*>(OutStreamer->getTargetStreamer());
}

void AMDGPUAsmPrinter::emitStartOfAsmFile(Module &M) {
  if (IsaInfo::hasCodeObjectV3(getGlobalSTI())) {
    std::string ExpectedTarget;
    raw_string_ostream ExpectedTargetOS(ExpectedTarget);
    IsaInfo::streamIsaVersion(getGlobalSTI(), ExpectedTargetOS);

    getTargetStreamer()->EmitDirectiveAMDGCNTarget(ExpectedTarget);
  }

  if (TM.getTargetTriple().getOS() != Triple::AMDHSA &&
      TM.getTargetTriple().getOS() != Triple::AMDPAL)
    return;

  if (TM.getTargetTriple().getOS() == Triple::AMDHSA)
    HSAMetadataStream->begin(M);

  if (TM.getTargetTriple().getOS() == Triple::AMDPAL)
    getTargetStreamer()->getPALMetadata()->readFromIR(M);

  if (IsaInfo::hasCodeObjectV3(getGlobalSTI()))
    return;

  // HSA emits NT_AMDGPU_HSA_CODE_OBJECT_VERSION for code objects v2.
  if (TM.getTargetTriple().getOS() == Triple::AMDHSA)
    getTargetStreamer()->EmitDirectiveHSACodeObjectVersion(2, 1);

  // HSA and PAL emit NT_AMDGPU_HSA_ISA for code objects v2.
  IsaVersion Version = getIsaVersion(getGlobalSTI()->getCPU());
  getTargetStreamer()->EmitDirectiveHSACodeObjectISA(
      Version.Major, Version.Minor, Version.Stepping, "AMD", "AMDGPU");
}

void AMDGPUAsmPrinter::emitEndOfAsmFile(Module &M) {
  // Following code requires TargetStreamer to be present.
  if (!getTargetStreamer())
    return;

  if (!IsaInfo::hasCodeObjectV3(getGlobalSTI())) {
    // Emit ISA Version (NT_AMD_AMDGPU_ISA).
    std::string ISAVersionString;
    raw_string_ostream ISAVersionStream(ISAVersionString);
    IsaInfo::streamIsaVersion(getGlobalSTI(), ISAVersionStream);
    getTargetStreamer()->EmitISAVersion(ISAVersionStream.str());
  }

  // Emit HSA Metadata (NT_AMD_AMDGPU_HSA_METADATA).
  if (TM.getTargetTriple().getOS() == Triple::AMDHSA) {
    HSAMetadataStream->end();
    bool Success = HSAMetadataStream->emitTo(*getTargetStreamer());
    (void)Success;
    assert(Success && "Malformed HSA Metadata");
  }
}

bool AMDGPUAsmPrinter::isBlockOnlyReachableByFallthrough(
    const MachineBasicBlock *MBB) const {
  if (!AsmPrinter::isBlockOnlyReachableByFallthrough(MBB))
    return false;

  if (MBB->empty())
    return true;

  // If this is a block implementing a long branch, an expression relative to
  // the start of the block is needed.
  // XXX - Is there a smarter way to check this?
  return (MBB->back().getOpcode() != AMDGPU::S_SETPC_B64);
}

void AMDGPUAsmPrinter::emitFunctionBodyStart() {
  const SIMachineFunctionInfo &MFI = *MF->getInfo<SIMachineFunctionInfo>();
  if (!MFI.isEntryFunction())
    return;

  const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();
  const Function &F = MF->getFunction();
  if (!STM.hasCodeObjectV3() && STM.isAmdHsaOrMesa(F) &&
      (F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
       F.getCallingConv() == CallingConv::SPIR_KERNEL)) {
    amd_kernel_code_t KernelCode;
    getAmdKernelCode(KernelCode, CurrentProgramInfo, *MF);
    getTargetStreamer()->EmitAMDKernelCodeT(KernelCode);
  }

  if (STM.isAmdHsaOS())
    HSAMetadataStream->emitKernel(*MF, CurrentProgramInfo);
}

void AMDGPUAsmPrinter::emitFunctionBodyEnd() {
  const SIMachineFunctionInfo &MFI = *MF->getInfo<SIMachineFunctionInfo>();
  if (!MFI.isEntryFunction())
    return;

  if (!IsaInfo::hasCodeObjectV3(getGlobalSTI()) ||
      TM.getTargetTriple().getOS() != Triple::AMDHSA)
    return;

  auto &Streamer = getTargetStreamer()->getStreamer();
  auto &Context = Streamer.getContext();
  auto &ObjectFileInfo = *Context.getObjectFileInfo();
  auto &ReadOnlySection = *ObjectFileInfo.getReadOnlySection();

  Streamer.PushSection();
  Streamer.SwitchSection(&ReadOnlySection);

  // CP microcode requires the kernel descriptor to be allocated on 64 byte
  // alignment.
  Streamer.emitValueToAlignment(64, 0, 1, 0);
  if (ReadOnlySection.getAlignment() < 64)
    ReadOnlySection.setAlignment(Align(64));

  const MCSubtargetInfo &STI = MF->getSubtarget();

  SmallString<128> KernelName;
  getNameWithPrefix(KernelName, &MF->getFunction());
  getTargetStreamer()->EmitAmdhsaKernelDescriptor(
      STI, KernelName, getAmdhsaKernelDescriptor(*MF, CurrentProgramInfo),
      CurrentProgramInfo.NumVGPRsForWavesPerEU,
      CurrentProgramInfo.NumSGPRsForWavesPerEU -
          IsaInfo::getNumExtraSGPRs(&STI,
                                    CurrentProgramInfo.VCCUsed,
                                    CurrentProgramInfo.FlatUsed),
      CurrentProgramInfo.VCCUsed, CurrentProgramInfo.FlatUsed,
      hasXNACK(STI));

  Streamer.PopSection();
}

void AMDGPUAsmPrinter::emitFunctionEntryLabel() {
  if (IsaInfo::hasCodeObjectV3(getGlobalSTI()) &&
      TM.getTargetTriple().getOS() == Triple::AMDHSA) {
    AsmPrinter::emitFunctionEntryLabel();
    return;
  }

  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();
  if (MFI->isEntryFunction() && STM.isAmdHsaOrMesa(MF->getFunction())) {
    SmallString<128> SymbolName;
    getNameWithPrefix(SymbolName, &MF->getFunction()),
    getTargetStreamer()->EmitAMDGPUSymbolType(
        SymbolName, ELF::STT_AMDGPU_HSA_KERNEL);
  }

  if (DumpCodeInstEmitter) {
    // Disassemble function name label to text.
    DisasmLines.push_back(MF->getName().str() + ":");
    DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
    HexLines.push_back("");
  }

  AsmPrinter::emitFunctionEntryLabel();
}

void AMDGPUAsmPrinter::emitBasicBlockStart(const MachineBasicBlock &MBB) {
  if (DumpCodeInstEmitter && !isBlockOnlyReachableByFallthrough(&MBB)) {
    // Write a line for the basic block label if it is not only fallthrough.
    DisasmLines.push_back(
        (Twine("BB") + Twine(getFunctionNumber())
         + "_" + Twine(MBB.getNumber()) + ":").str());
    DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
    HexLines.push_back("");
  }
  AsmPrinter::emitBasicBlockStart(MBB);
}

void AMDGPUAsmPrinter::emitGlobalVariable(const GlobalVariable *GV) {
  if (GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
    if (GV->hasInitializer() && !isa<UndefValue>(GV->getInitializer())) {
      OutContext.reportError({},
                             Twine(GV->getName()) +
                                 ": unsupported initializer for address space");
      return;
    }

    // LDS variables aren't emitted in HSA or PAL yet.
    const Triple::OSType OS = TM.getTargetTriple().getOS();
    if (OS == Triple::AMDHSA || OS == Triple::AMDPAL)
      return;

    MCSymbol *GVSym = getSymbol(GV);

    GVSym->redefineIfPossible();
    if (GVSym->isDefined() || GVSym->isVariable())
      report_fatal_error("symbol '" + Twine(GVSym->getName()) +
                         "' is already defined");

    const DataLayout &DL = GV->getParent()->getDataLayout();
    uint64_t Size = DL.getTypeAllocSize(GV->getValueType());
    Align Alignment = GV->getAlign().getValueOr(Align(4));

    emitVisibility(GVSym, GV->getVisibility(), !GV->isDeclaration());
    emitLinkage(GV, GVSym);
    if (auto TS = getTargetStreamer())
      TS->emitAMDGPULDS(GVSym, Size, Alignment);
    return;
  }

  AsmPrinter::emitGlobalVariable(GV);
}

bool AMDGPUAsmPrinter::doFinalization(Module &M) {
  CallGraphResourceInfo.clear();

  // Pad with s_code_end to help tools and guard against instruction prefetch
  // causing stale data in caches. Arguably this should be done by the linker,
  // which is why this isn't done for Mesa.
  const MCSubtargetInfo &STI = *getGlobalSTI();
  if (AMDGPU::isGFX10(STI) &&
      (STI.getTargetTriple().getOS() == Triple::AMDHSA ||
       STI.getTargetTriple().getOS() == Triple::AMDPAL)) {
    OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
    getTargetStreamer()->EmitCodeEnd();
  }

  return AsmPrinter::doFinalization(M);
}

// Print comments that apply to both callable functions and entry points.
void AMDGPUAsmPrinter::emitCommonFunctionComments(
  uint32_t NumVGPR,
  Optional<uint32_t> NumAGPR,
  uint32_t TotalNumVGPR,
  uint32_t NumSGPR,
  uint64_t ScratchSize,
  uint64_t CodeSize,
  const AMDGPUMachineFunction *MFI) {
  OutStreamer->emitRawComment(" codeLenInByte = " + Twine(CodeSize), false);
  OutStreamer->emitRawComment(" NumSgprs: " + Twine(NumSGPR), false);
  OutStreamer->emitRawComment(" NumVgprs: " + Twine(NumVGPR), false);
  if (NumAGPR) {
    OutStreamer->emitRawComment(" NumAgprs: " + Twine(*NumAGPR), false);
    OutStreamer->emitRawComment(" TotalNumVgprs: " + Twine(TotalNumVGPR),
                                false);
  }
  OutStreamer->emitRawComment(" ScratchSize: " + Twine(ScratchSize), false);
  OutStreamer->emitRawComment(" MemoryBound: " + Twine(MFI->isMemoryBound()),
                              false);
}
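
// Collect the amdhsa::KERNEL_CODE_PROPERTY_* flags for a kernel from the
// preloaded user-SGPR inputs recorded in SIMachineFunctionInfo and from the
// subtarget wavefront size.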
uint16_t AMDGPUAsmPrinter::getAmdhsaKernelCodeProperties(
    const MachineFunction &MF) const {
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
  uint16_t KernelCodeProperties = 0;

  if (MFI.hasPrivateSegmentBuffer()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
  }
  if (MFI.hasDispatchPtr()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;
  }
  if (MFI.hasQueuePtr()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR;
  }
  if (MFI.hasKernargSegmentPtr()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR;
  }
  if (MFI.hasDispatchID()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID;
  }
  if (MFI.hasFlatScratchInit()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;
  }
  if (MF.getSubtarget<GCNSubtarget>().isWave32()) {
    KernelCodeProperties |=
        amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
  }

  return KernelCodeProperties;
}

amdhsa::kernel_descriptor_t AMDGPUAsmPrinter::getAmdhsaKernelDescriptor(
    const MachineFunction &MF,
    const SIProgramInfo &PI) const {
  amdhsa::kernel_descriptor_t KernelDescriptor;
  memset(&KernelDescriptor, 0x0, sizeof(KernelDescriptor));

  assert(isUInt<32>(PI.ScratchSize));
  assert(isUInt<32>(PI.ComputePGMRSrc1));
  assert(isUInt<32>(PI.ComputePGMRSrc2));

  KernelDescriptor.group_segment_fixed_size = PI.LDSSize;
  KernelDescriptor.private_segment_fixed_size = PI.ScratchSize;
  KernelDescriptor.compute_pgm_rsrc1 = PI.ComputePGMRSrc1;
  KernelDescriptor.compute_pgm_rsrc2 = PI.ComputePGMRSrc2;
  KernelDescriptor.kernel_code_properties = getAmdhsaKernelCodeProperties(MF);

  return KernelDescriptor;
}

bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
  CurrentProgramInfo = SIProgramInfo();

  const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();

  // The starting address of all shader programs must be 256 bytes aligned.
  // Regular functions just need the basic required instruction alignment.
  MF.setAlignment(MFI->isEntryFunction() ? Align(256) : Align(4));

  SetupMachineFunction(MF);

  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  MCContext &Context = getObjFileLowering().getContext();
  // FIXME: This should be an explicit check for Mesa.
  if (!STM.isAmdHsaOS() && !STM.isAmdPalOS()) {
    MCSectionELF *ConfigSection =
        Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0);
    OutStreamer->SwitchSection(ConfigSection);
  }

  if (MFI->isEntryFunction()) {
    getSIProgramInfo(CurrentProgramInfo, MF);
  } else {
    auto I = CallGraphResourceInfo.insert(
        std::make_pair(&MF.getFunction(), SIFunctionResourceInfo()));
    SIFunctionResourceInfo &Info = I.first->second;
    assert(I.second && "should only be called once per function");
    Info = analyzeResourceUsage(MF);
  }

  if (STM.isAmdPalOS())
    EmitPALMetadata(MF, CurrentProgramInfo);
  else if (!STM.isAmdHsaOS()) {
    EmitProgramInfoSI(MF, CurrentProgramInfo);
  }

  DumpCodeInstEmitter = nullptr;
  if (STM.dumpCode()) {
    // For -dumpcode, get the assembler out of the streamer, even if it does
    // not really want to let us have it. This only works with -filetype=obj.
    bool SaveFlag = OutStreamer->getUseAssemblerInfoForParsing();
    OutStreamer->setUseAssemblerInfoForParsing(true);
    MCAssembler *Assembler = OutStreamer->getAssemblerPtr();
    OutStreamer->setUseAssemblerInfoForParsing(SaveFlag);
    if (Assembler)
      DumpCodeInstEmitter = Assembler->getEmitterPtr();
  }

  DisasmLines.clear();
  HexLines.clear();
  DisasmLineMaxLen = 0;

  emitFunctionBody();

  if (isVerbose()) {
    MCSectionELF *CommentSection =
        Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0);
    OutStreamer->SwitchSection(CommentSection);

    if (!MFI->isEntryFunction()) {
      OutStreamer->emitRawComment(" Function info:", false);
      SIFunctionResourceInfo &Info = CallGraphResourceInfo[&MF.getFunction()];
      emitCommonFunctionComments(
        Info.NumVGPR,
        STM.hasMAIInsts() ? Info.NumAGPR : Optional<uint32_t>(),
        Info.getTotalNumVGPRs(STM),
        Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
        Info.PrivateSegmentSize,
        getFunctionCodeSize(MF), MFI);
      return false;
    }

    OutStreamer->emitRawComment(" Kernel info:", false);
    emitCommonFunctionComments(CurrentProgramInfo.NumArchVGPR,
                               STM.hasMAIInsts()
                                 ? CurrentProgramInfo.NumAccVGPR
                                 : Optional<uint32_t>(),
                               CurrentProgramInfo.NumVGPR,
                               CurrentProgramInfo.NumSGPR,
                               CurrentProgramInfo.ScratchSize,
                               getFunctionCodeSize(MF), MFI);

    OutStreamer->emitRawComment(
      " FloatMode: " + Twine(CurrentProgramInfo.FloatMode), false);
    OutStreamer->emitRawComment(
      " IeeeMode: " + Twine(CurrentProgramInfo.IEEEMode), false);
    OutStreamer->emitRawComment(
      " LDSByteSize: " + Twine(CurrentProgramInfo.LDSSize) +
      " bytes/workgroup (compile time only)", false);

    OutStreamer->emitRawComment(
      " SGPRBlocks: " + Twine(CurrentProgramInfo.SGPRBlocks), false);
    OutStreamer->emitRawComment(
      " VGPRBlocks: " + Twine(CurrentProgramInfo.VGPRBlocks), false);

    OutStreamer->emitRawComment(
      " NumSGPRsForWavesPerEU: " +
      Twine(CurrentProgramInfo.NumSGPRsForWavesPerEU), false);
    OutStreamer->emitRawComment(
      " NumVGPRsForWavesPerEU: " +
      Twine(CurrentProgramInfo.NumVGPRsForWavesPerEU), false);

    OutStreamer->emitRawComment(
      " Occupancy: " +
      Twine(CurrentProgramInfo.Occupancy), false);

    OutStreamer->emitRawComment(
      " WaveLimiterHint : " + Twine(MFI->needsWaveLimiter()), false);

    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:USER_SGPR: " +
      Twine(G_00B84C_USER_SGPR(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TRAP_HANDLER: " +
      Twine(G_00B84C_TRAP_HANDLER(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TGID_X_EN: " +
      Twine(G_00B84C_TGID_X_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TGID_Y_EN: " +
      Twine(G_00B84C_TGID_Y_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TGID_Z_EN: " +
      Twine(G_00B84C_TGID_Z_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: " +
      Twine(G_00B84C_TIDIG_COMP_CNT(CurrentProgramInfo.ComputePGMRSrc2)),
      false);
  }

  if (DumpCodeInstEmitter) {
    OutStreamer->SwitchSection(
        Context.getELFSection(".AMDGPU.disasm", ELF::SHT_PROGBITS, 0));

    for (size_t i = 0; i < DisasmLines.size(); ++i) {
      std::string Comment = "\n";
      if (!HexLines[i].empty()) {
        Comment = std::string(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
        Comment += " ; " + HexLines[i] + "\n";
      }

      OutStreamer->emitBytes(StringRef(DisasmLines[i]));
      OutStreamer->emitBytes(StringRef(Comment));
    }
  }

  return false;
}
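
// Add up the encoded sizes of all instructions in the function; debug
// instructions are skipped and do not contribute to the total.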
uint64_t AMDGPUAsmPrinter::getFunctionCodeSize(const MachineFunction &MF) const {
  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = STM.getInstrInfo();

  uint64_t CodeSize = 0;

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      // TODO: CodeSize should account for multiple functions.

      // TODO: Should we count size of debug info?
      if (MI.isDebugInstr())
        continue;

      CodeSize += TII->getInstSizeInBytes(MI);
    }
  }

  return CodeSize;
}
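
// Return true if Reg has any use that is not an implicit operand of a FLAT
// instruction; the caller uses this to decide whether flat scratch is really
// needed.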
static bool hasAnyNonFlatUseOfReg(const MachineRegisterInfo &MRI,
                                  const SIInstrInfo &TII,
                                  unsigned Reg) {
  for (const MachineOperand &UseOp : MRI.reg_operands(Reg)) {
    if (!UseOp.isImplicit() || !TII.isFLAT(*UseOp.getParent()))
      return true;
  }

  return false;
}

int32_t AMDGPUAsmPrinter::SIFunctionResourceInfo::getTotalNumSGPRs(
  const GCNSubtarget &ST) const {
  return NumExplicitSGPR + IsaInfo::getNumExtraSGPRs(&ST,
                                                     UsesVCC, UsesFlatScratch);
}

int32_t AMDGPUAsmPrinter::SIFunctionResourceInfo::getTotalNumVGPRs(
  const GCNSubtarget &ST) const {
  return std::max(NumVGPR, NumAGPR);
}
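
// Return the statically known callee of a call pseudo, or nullptr when the
// callee operand is an immediate (which must be 0) and nothing is known.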
static const Function *getCalleeFunction(const MachineOperand &Op) {
  if (Op.isImm()) {
    assert(Op.getImm() == 0);
    return nullptr;
  }

  return cast<Function>(Op.getGlobal());
}
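
// Scan every instruction of the function to compute SGPR/VGPR/AGPR high water
// marks, the private segment (scratch) size, and VCC / flat scratch usage.
// Callee results come from CallGraphResourceInfo, which is populated in SCC
// order; unknown external callees fall back to the conservative
// amdgpu-assume-* estimates above.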
AMDGPUAsmPrinter::SIFunctionResourceInfo AMDGPUAsmPrinter::analyzeResourceUsage(
  const MachineFunction &MF) const {
  SIFunctionResourceInfo Info;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  Info.UsesFlatScratch = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) ||
                         MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI);

  // Even if FLAT_SCRATCH is implicitly used, it has no effect if flat
  // instructions aren't used to access the scratch buffer. Inline assembly may
  // need it though.
  //
  // If we only have implicit uses of flat_scr on flat instructions, it is not
  // really needed.
  if (Info.UsesFlatScratch && !MFI->hasFlatScratchInit() &&
      (!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) &&
       !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_LO) &&
       !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_HI))) {
    Info.UsesFlatScratch = false;
  }

  Info.PrivateSegmentSize = FrameInfo.getStackSize();

  // Assume a big number if there are any unknown sized objects.
  Info.HasDynamicallySizedStack = FrameInfo.hasVarSizedObjects();
  if (Info.HasDynamicallySizedStack)
    Info.PrivateSegmentSize += AssumedStackSizeForDynamicSizeObjects;

  if (MFI->isStackRealigned())
    Info.PrivateSegmentSize += FrameInfo.getMaxAlign().value();

  Info.UsesVCC = MRI.isPhysRegUsed(AMDGPU::VCC_LO) ||
                 MRI.isPhysRegUsed(AMDGPU::VCC_HI);

  // If there are no calls, MachineRegisterInfo can tell us the used register
  // count easily.
  // A tail call isn't considered a call for MachineFrameInfo's purposes.
  if (!FrameInfo.hasCalls() && !FrameInfo.hasTailCall()) {
    MCPhysReg HighestVGPRReg = AMDGPU::NoRegister;
    for (MCPhysReg Reg : reverse(AMDGPU::VGPR_32RegClass.getRegisters())) {
      if (MRI.isPhysRegUsed(Reg)) {
        HighestVGPRReg = Reg;
        break;
      }
    }

    if (ST.hasMAIInsts()) {
      MCPhysReg HighestAGPRReg = AMDGPU::NoRegister;
      for (MCPhysReg Reg : reverse(AMDGPU::AGPR_32RegClass.getRegisters())) {
        if (MRI.isPhysRegUsed(Reg)) {
          HighestAGPRReg = Reg;
          break;
        }
      }
      Info.NumAGPR = HighestAGPRReg == AMDGPU::NoRegister ? 0 :
        TRI.getHWRegIndex(HighestAGPRReg) + 1;
    }

    MCPhysReg HighestSGPRReg = AMDGPU::NoRegister;
    for (MCPhysReg Reg : reverse(AMDGPU::SGPR_32RegClass.getRegisters())) {
      if (MRI.isPhysRegUsed(Reg)) {
        HighestSGPRReg = Reg;
        break;
      }
    }

    // We found the maximum register index. They start at 0, so add one to get
    // the number of registers.
    Info.NumVGPR = HighestVGPRReg == AMDGPU::NoRegister ? 0 :
      TRI.getHWRegIndex(HighestVGPRReg) + 1;
    Info.NumExplicitSGPR = HighestSGPRReg == AMDGPU::NoRegister ? 0 :
      TRI.getHWRegIndex(HighestSGPRReg) + 1;

    return Info;
  }

  int32_t MaxVGPR = -1;
  int32_t MaxAGPR = -1;
  int32_t MaxSGPR = -1;
  uint64_t CalleeFrameSize = 0;

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      // TODO: Check regmasks? Do they occur anywhere except calls?
      for (const MachineOperand &MO : MI.operands()) {
        unsigned Width = 0;
        bool IsSGPR = false;
        bool IsAGPR = false;

        if (!MO.isReg())
          continue;

        Register Reg = MO.getReg();
        switch (Reg) {
        case AMDGPU::EXEC:
        case AMDGPU::EXEC_LO:
        case AMDGPU::EXEC_HI:
        case AMDGPU::SCC:
        case AMDGPU::M0:
        case AMDGPU::SRC_SHARED_BASE:
        case AMDGPU::SRC_SHARED_LIMIT:
        case AMDGPU::SRC_PRIVATE_BASE:
        case AMDGPU::SRC_PRIVATE_LIMIT:
        case AMDGPU::SGPR_NULL:
        case AMDGPU::MODE:
          continue;

        case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
          llvm_unreachable("src_pops_exiting_wave_id should not be used");

        case AMDGPU::NoRegister:
          assert(MI.isDebugInstr());
          continue;

        case AMDGPU::VCC:
        case AMDGPU::VCC_LO:
        case AMDGPU::VCC_HI:
        case AMDGPU::VCC_LO_LO16:
        case AMDGPU::VCC_LO_HI16:
        case AMDGPU::VCC_HI_LO16:
        case AMDGPU::VCC_HI_HI16:
          Info.UsesVCC = true;
          continue;

        case AMDGPU::FLAT_SCR:
        case AMDGPU::FLAT_SCR_LO:
        case AMDGPU::FLAT_SCR_HI:
          continue;

        case AMDGPU::XNACK_MASK:
        case AMDGPU::XNACK_MASK_LO:
        case AMDGPU::XNACK_MASK_HI:
          llvm_unreachable("xnack_mask registers should not be used");

        case AMDGPU::LDS_DIRECT:
          llvm_unreachable("lds_direct register should not be used");

        case AMDGPU::TBA:
        case AMDGPU::TBA_LO:
        case AMDGPU::TBA_HI:
        case AMDGPU::TMA:
        case AMDGPU::TMA_LO:
        case AMDGPU::TMA_HI:
          llvm_unreachable("trap handler registers should not be used");

        case AMDGPU::SRC_VCCZ:
          llvm_unreachable("src_vccz register should not be used");

        case AMDGPU::SRC_EXECZ:
          llvm_unreachable("src_execz register should not be used");

        case AMDGPU::SRC_SCC:
          llvm_unreachable("src_scc register should not be used");

        default:
          break;
        }
        if (AMDGPU::SReg_32RegClass.contains(Reg) ||
            AMDGPU::SReg_LO16RegClass.contains(Reg) ||
            AMDGPU::SGPR_HI16RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_32RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 1;
        } else if (AMDGPU::VGPR_32RegClass.contains(Reg) ||
                   AMDGPU::VGPR_LO16RegClass.contains(Reg) ||
                   AMDGPU::VGPR_HI16RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 1;
        } else if (AMDGPU::AGPR_32RegClass.contains(Reg) ||
                   AMDGPU::AGPR_LO16RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 1;
        } else if (AMDGPU::SReg_64RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_64RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 2;
        } else if (AMDGPU::VReg_64RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 2;
        } else if (AMDGPU::AReg_64RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 2;
        } else if (AMDGPU::VReg_96RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 3;
        } else if (AMDGPU::SReg_96RegClass.contains(Reg)) {
          IsSGPR = true;
          Width = 3;
        } else if (AMDGPU::AReg_96RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 3;
        } else if (AMDGPU::SReg_128RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_128RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 4;
        } else if (AMDGPU::VReg_128RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 4;
        } else if (AMDGPU::AReg_128RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 4;
        } else if (AMDGPU::VReg_160RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 5;
        } else if (AMDGPU::SReg_160RegClass.contains(Reg)) {
          IsSGPR = true;
          Width = 5;
        } else if (AMDGPU::AReg_160RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 5;
        } else if (AMDGPU::VReg_192RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 6;
        } else if (AMDGPU::SReg_192RegClass.contains(Reg)) {
          IsSGPR = true;
          Width = 6;
        } else if (AMDGPU::AReg_192RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 6;
        } else if (AMDGPU::SReg_256RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_256RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 8;
        } else if (AMDGPU::VReg_256RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 8;
        } else if (AMDGPU::AReg_256RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 8;
        } else if (AMDGPU::SReg_512RegClass.contains(Reg)) {
          assert(!AMDGPU::TTMP_512RegClass.contains(Reg) &&
                 "trap handler registers should not be used");
          IsSGPR = true;
          Width = 16;
        } else if (AMDGPU::VReg_512RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 16;
        } else if (AMDGPU::AReg_512RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 16;
        } else if (AMDGPU::SReg_1024RegClass.contains(Reg)) {
          IsSGPR = true;
          Width = 32;
        } else if (AMDGPU::VReg_1024RegClass.contains(Reg)) {
          IsSGPR = false;
          Width = 32;
        } else if (AMDGPU::AReg_1024RegClass.contains(Reg)) {
          IsSGPR = false;
          IsAGPR = true;
          Width = 32;
        } else {
          llvm_unreachable("Unknown register class");
        }
        unsigned HWReg = TRI.getHWRegIndex(Reg);
        int MaxUsed = HWReg + Width - 1;
        if (IsSGPR) {
          MaxSGPR = MaxUsed > MaxSGPR ? MaxUsed : MaxSGPR;
        } else if (IsAGPR) {
          MaxAGPR = MaxUsed > MaxAGPR ? MaxUsed : MaxAGPR;
        } else {
          MaxVGPR = MaxUsed > MaxVGPR ? MaxUsed : MaxVGPR;
        }
      }

      if (MI.isCall()) {
        // Pseudo used just to encode the underlying global. Is there a better
        // way to track this?
        const MachineOperand *CalleeOp
          = TII->getNamedOperand(MI, AMDGPU::OpName::callee);

        const Function *Callee = getCalleeFunction(*CalleeOp);
        if (!Callee || Callee->isDeclaration()) {
          // If this is a call to an external function, we can't do much. Make
          // conservative guesses.

          // 48 SGPRs - vcc, - flat_scr, -xnack
          int MaxSGPRGuess =
            47 - IsaInfo::getNumExtraSGPRs(&ST, true, ST.hasFlatAddressSpace());
          MaxSGPR = std::max(MaxSGPR, MaxSGPRGuess);
          MaxVGPR = std::max(MaxVGPR, 23);
          MaxAGPR = std::max(MaxAGPR, 23);

          CalleeFrameSize = std::max(CalleeFrameSize,
            static_cast<uint64_t>(AssumedStackSizeForExternalCall));

          Info.UsesVCC = true;
          Info.UsesFlatScratch = ST.hasFlatAddressSpace();
          Info.HasDynamicallySizedStack = true;
        } else {
          // We force CodeGen to run in SCC order, so the callee's register
          // usage etc. should be the cumulative usage of all callees.

          auto I = CallGraphResourceInfo.find(Callee);
          if (I == CallGraphResourceInfo.end()) {
            // Avoid crashing on undefined behavior with an illegal call to a
            // kernel. If a callsite's calling convention doesn't match the
            // function's, it's undefined behavior. If the callsite calling
            // convention does match, that would have errored earlier.
            // FIXME: The verifier shouldn't allow this.
            if (AMDGPU::isEntryFunctionCC(Callee->getCallingConv()))
              report_fatal_error("invalid call to entry function");

            llvm_unreachable("callee should have been handled before caller");
          }

          MaxSGPR = std::max(I->second.NumExplicitSGPR - 1, MaxSGPR);
          MaxVGPR = std::max(I->second.NumVGPR - 1, MaxVGPR);
          MaxAGPR = std::max(I->second.NumAGPR - 1, MaxAGPR);
          CalleeFrameSize
            = std::max(I->second.PrivateSegmentSize, CalleeFrameSize);
          Info.UsesVCC |= I->second.UsesVCC;
          Info.UsesFlatScratch |= I->second.UsesFlatScratch;
          Info.HasDynamicallySizedStack |= I->second.HasDynamicallySizedStack;
          Info.HasRecursion |= I->second.HasRecursion;
        }

        // FIXME: Call site could have norecurse on it
        if (!Callee || !Callee->doesNotRecurse())
          Info.HasRecursion = true;
      }
    }
  }

  Info.NumExplicitSGPR = MaxSGPR + 1;
  Info.NumVGPR = MaxVGPR + 1;
  Info.NumAGPR = MaxAGPR + 1;
  Info.PrivateSegmentSize += CalleeFrameSize;

  return Info;
}
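
// Derive the SIProgramInfo for an entry function from the analyzed resource
// usage: register counts and blocks, scratch and LDS sizes, the
// COMPUTE_PGM_RSRC1/2 encodings, and the resulting occupancy.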
void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
                                        const MachineFunction &MF) {
  SIFunctionResourceInfo Info = analyzeResourceUsage(MF);
  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();

  ProgInfo.NumArchVGPR = Info.NumVGPR;
  ProgInfo.NumAccVGPR = Info.NumAGPR;
  ProgInfo.NumVGPR = Info.getTotalNumVGPRs(STM);
  ProgInfo.NumSGPR = Info.NumExplicitSGPR;
  ProgInfo.ScratchSize = Info.PrivateSegmentSize;
  ProgInfo.VCCUsed = Info.UsesVCC;
  ProgInfo.FlatUsed = Info.UsesFlatScratch;
  ProgInfo.DynamicCallStack = Info.HasDynamicallySizedStack || Info.HasRecursion;

  if (!isUInt<32>(ProgInfo.ScratchSize)) {
    DiagnosticInfoStackSize DiagStackSize(MF.getFunction(),
                                          ProgInfo.ScratchSize, DS_Error);
    MF.getFunction().getContext().diagnose(DiagStackSize);
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // TODO(scott.linder): The calculations related to SGPR/VGPR blocks are
  // duplicated in part in AMDGPUAsmParser::calculateGPRBlocks, and could be
  // unified.
  unsigned ExtraSGPRs = IsaInfo::getNumExtraSGPRs(
      &STM, ProgInfo.VCCUsed, ProgInfo.FlatUsed);

  // Check the addressable register limit before we add ExtraSGPRs.
  if (STM.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
      !STM.hasSGPRInitBug()) {
    unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
    if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
      // This can happen due to a compiler bug or when using inline asm.
      LLVMContext &Ctx = MF.getFunction().getContext();
      DiagnosticInfoResourceLimit Diag(MF.getFunction(),
                                       "addressable scalar registers",
                                       ProgInfo.NumSGPR, DS_Error,
                                       DK_ResourceLimit,
                                       MaxAddressableNumSGPRs);
      Ctx.diagnose(Diag);
      ProgInfo.NumSGPR = MaxAddressableNumSGPRs - 1;
    }
  }

  // Account for extra SGPRs and VGPRs reserved for debugger use.
  ProgInfo.NumSGPR += ExtraSGPRs;

  // Ensure there are enough SGPRs and VGPRs for wave dispatch, where wave
  // dispatch registers are function args.
  unsigned WaveDispatchNumSGPR = 0, WaveDispatchNumVGPR = 0;
  for (auto &Arg : MF.getFunction().args()) {
    unsigned NumRegs = (Arg.getType()->getPrimitiveSizeInBits() + 31) / 32;
    if (Arg.hasAttribute(Attribute::InReg))
      WaveDispatchNumSGPR += NumRegs;
    else
      WaveDispatchNumVGPR += NumRegs;
  }
  ProgInfo.NumSGPR = std::max(ProgInfo.NumSGPR, WaveDispatchNumSGPR);
  ProgInfo.NumVGPR = std::max(ProgInfo.NumVGPR, WaveDispatchNumVGPR);
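
  // For example, a kernel with two i32 'inreg' arguments and one <4 x float>
  // argument requires at least 2 SGPRs and 4 VGPRs here, since each argument
  // in the loop above needs ceil(size-in-bits / 32) registers of its kind.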

  // Adjust number of registers used to meet default/requested minimum/maximum
  // number of waves per execution unit request.
  ProgInfo.NumSGPRsForWavesPerEU = std::max(
    std::max(ProgInfo.NumSGPR, 1u), STM.getMinNumSGPRs(MFI->getMaxWavesPerEU()));
  ProgInfo.NumVGPRsForWavesPerEU = std::max(
    std::max(ProgInfo.NumVGPR, 1u), STM.getMinNumVGPRs(MFI->getMaxWavesPerEU()));

  if (STM.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS ||
      STM.hasSGPRInitBug()) {
    unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
    if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
      // This can happen due to a compiler bug or when using inline asm to use
      // the registers which are usually reserved for vcc etc.
      LLVMContext &Ctx = MF.getFunction().getContext();
      DiagnosticInfoResourceLimit Diag(MF.getFunction(),
                                       "scalar registers",
                                       ProgInfo.NumSGPR, DS_Error,
                                       DK_ResourceLimit,
                                       MaxAddressableNumSGPRs);
      Ctx.diagnose(Diag);
      ProgInfo.NumSGPR = MaxAddressableNumSGPRs;
      ProgInfo.NumSGPRsForWavesPerEU = MaxAddressableNumSGPRs;
    }
  }

  if (STM.hasSGPRInitBug()) {
    ProgInfo.NumSGPR =
        AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
    ProgInfo.NumSGPRsForWavesPerEU =
        AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
  }

  if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) {
    LLVMContext &Ctx = MF.getFunction().getContext();
    DiagnosticInfoResourceLimit Diag(MF.getFunction(), "user SGPRs",
                                     MFI->getNumUserSGPRs(), DS_Error);
    Ctx.diagnose(Diag);
  }

  if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) {
    LLVMContext &Ctx = MF.getFunction().getContext();
    DiagnosticInfoResourceLimit Diag(MF.getFunction(), "local memory",
                                     MFI->getLDSSize(), DS_Error);
    Ctx.diagnose(Diag);
  }

  ProgInfo.SGPRBlocks = IsaInfo::getNumSGPRBlocks(
      &STM, ProgInfo.NumSGPRsForWavesPerEU);
  ProgInfo.VGPRBlocks = IsaInfo::getNumVGPRBlocks(
      &STM, ProgInfo.NumVGPRsForWavesPerEU);

  const SIModeRegisterDefaults Mode = MFI->getMode();

  // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
  // register.
  ProgInfo.FloatMode = getFPMode(Mode);

  ProgInfo.IEEEMode = Mode.IEEE;

  // Make the clamp modifier on NaN input return 0.
  ProgInfo.DX10Clamp = Mode.DX10Clamp;

  unsigned LDSAlignShift;
  if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    // LDS is allocated in 64 dword blocks.
    LDSAlignShift = 8;
  } else {
    // LDS is allocated in 128 dword blocks.
    LDSAlignShift = 9;
  }

  unsigned LDSSpillSize =
    MFI->getLDSWaveSpillSize() * MFI->getMaxFlatWorkGroupSize();

  ProgInfo.LDSSize = MFI->getLDSSize() + LDSSpillSize;
  ProgInfo.LDSBlocks =
      alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift;

  // Scratch is allocated in 256 dword blocks.
  unsigned ScratchAlignShift = 10;
  // We need to program the hardware with the amount of scratch memory that
  // is used by the entire wave. ProgInfo.ScratchSize is the amount of
  // scratch memory used per thread.
  ProgInfo.ScratchBlocks =
      alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
              1ULL << ScratchAlignShift) >>
      ScratchAlignShift;
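
  // For example, 64 bytes of scratch per thread in a wave64 kernel is 4096
  // bytes per wave, which rounds up to four 1024-byte (256 dword) blocks.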

  if (getIsaVersion(getGlobalSTI()->getCPU()).Major >= 10) {
    ProgInfo.WgpMode = STM.isCuModeEnabled() ? 0 : 1;
    ProgInfo.MemOrdered = 1;
  }

  ProgInfo.ComputePGMRSrc1 =
      S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
      S_00B848_SGPRS(ProgInfo.SGPRBlocks) |
      S_00B848_PRIORITY(ProgInfo.Priority) |
      S_00B848_FLOAT_MODE(ProgInfo.FloatMode) |
      S_00B848_PRIV(ProgInfo.Priv) |
      S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp) |
      S_00B848_DEBUG_MODE(ProgInfo.DebugMode) |
      S_00B848_IEEE_MODE(ProgInfo.IEEEMode) |
      S_00B848_WGP_MODE(ProgInfo.WgpMode) |
      S_00B848_MEM_ORDERED(ProgInfo.MemOrdered);

  // 0 = X, 1 = XY, 2 = XYZ
  unsigned TIDIGCompCnt = 0;
  if (MFI->hasWorkItemIDZ())
    TIDIGCompCnt = 2;
  else if (MFI->hasWorkItemIDY())
    TIDIGCompCnt = 1;

  ProgInfo.ComputePGMRSrc2 =
      S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) |
      S_00B84C_USER_SGPR(MFI->getNumUserSGPRs()) |
      // For AMDHSA, TRAP_HANDLER must be zero, as it is populated by the CP.
      S_00B84C_TRAP_HANDLER(STM.isAmdHsaOS() ? 0 : STM.isTrapHandlerEnabled()) |
      S_00B84C_TGID_X_EN(MFI->hasWorkGroupIDX()) |
      S_00B84C_TGID_Y_EN(MFI->hasWorkGroupIDY()) |
      S_00B84C_TGID_Z_EN(MFI->hasWorkGroupIDZ()) |
      S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) |
      S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) |
      S_00B84C_EXCP_EN_MSB(0) |
      // For AMDHSA, LDS_SIZE must be zero, as it is populated by the CP.
      S_00B84C_LDS_SIZE(STM.isAmdHsaOS() ? 0 : ProgInfo.LDSBlocks) |
      S_00B84C_EXCP_EN(0);

  ProgInfo.Occupancy = STM.computeOccupancy(MF.getFunction(), ProgInfo.LDSSize,
                                            ProgInfo.NumSGPRsForWavesPerEU,
                                            ProgInfo.NumVGPRsForWavesPerEU);
}
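
// Map a calling convention to the hardware register that receives the RSRC1
// program settings for the corresponding shader stage; compute shaders use
// COMPUTE_PGM_RSRC1.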
static unsigned getRsrcReg(CallingConv::ID CallConv) {
  switch (CallConv) {
  default: LLVM_FALLTHROUGH;
  case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1;
  case CallingConv::AMDGPU_LS: return R_00B528_SPI_SHADER_PGM_RSRC1_LS;
  case CallingConv::AMDGPU_HS: return R_00B428_SPI_SHADER_PGM_RSRC1_HS;
  case CallingConv::AMDGPU_ES: return R_00B328_SPI_SHADER_PGM_RSRC1_ES;
  case CallingConv::AMDGPU_GS: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
  case CallingConv::AMDGPU_VS: return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
  case CallingConv::AMDGPU_PS: return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
  }
}
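
// Emit register/value pairs into the .AMDGPU.config section for targets that
// are neither AMDHSA nor AMDPAL (i.e. Mesa): the *_PGM_RSRC values, scratch
// wave size, and SGPR/VGPR spill counts.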
void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
                                         const SIProgramInfo &CurrentProgramInfo) {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned RsrcReg = getRsrcReg(MF.getFunction().getCallingConv());

  if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
    OutStreamer->emitInt32(R_00B848_COMPUTE_PGM_RSRC1);
    OutStreamer->emitInt32(CurrentProgramInfo.ComputePGMRSrc1);

    OutStreamer->emitInt32(R_00B84C_COMPUTE_PGM_RSRC2);
    OutStreamer->emitInt32(CurrentProgramInfo.ComputePGMRSrc2);

    OutStreamer->emitInt32(R_00B860_COMPUTE_TMPRING_SIZE);
    OutStreamer->emitInt32(S_00B860_WAVESIZE(CurrentProgramInfo.ScratchBlocks));
  } else {
    // TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 =
    // 0" comment but I don't see a corresponding field in the register spec.

    OutStreamer->emitInt32(RsrcReg);
    OutStreamer->emitIntValue(S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) |
                              S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks), 4);
    OutStreamer->emitInt32(R_0286E8_SPI_TMPRING_SIZE);
    OutStreamer->emitIntValue(
        S_0286E8_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4);
  }

  if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
    OutStreamer->emitInt32(R_00B02C_SPI_SHADER_PGM_RSRC2_PS);
    OutStreamer->emitInt32(
        S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks));
    OutStreamer->emitInt32(R_0286CC_SPI_PS_INPUT_ENA);
    OutStreamer->emitInt32(MFI->getPSInputEnable());
    OutStreamer->emitInt32(R_0286D0_SPI_PS_INPUT_ADDR);
    OutStreamer->emitInt32(MFI->getPSInputAddr());
  }

  OutStreamer->emitInt32(R_SPILLED_SGPRS);
  OutStreamer->emitInt32(MFI->getNumSpilledSGPRs());
  OutStreamer->emitInt32(R_SPILLED_VGPRS);
  OutStreamer->emitInt32(MFI->getNumSpilledVGPRs());
}

// This is the equivalent of EmitProgramInfoSI above, but for when the OS type
// is AMDPAL. It stores each compute/SPI register setting and other PAL
// metadata items into the PALMD::Metadata, combining with any provided by the
// frontend as LLVM metadata. Once all functions are written, the PAL metadata
// is then written as a single block in the .note section.
void AMDGPUAsmPrinter::EmitPALMetadata(const MachineFunction &MF,
                                       const SIProgramInfo &CurrentProgramInfo) {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  auto CC = MF.getFunction().getCallingConv();
  auto MD = getTargetStreamer()->getPALMetadata();

  MD->setEntryPoint(CC, MF.getFunction().getName());
  MD->setNumUsedVgprs(CC, CurrentProgramInfo.NumVGPRsForWavesPerEU);
  MD->setNumUsedSgprs(CC, CurrentProgramInfo.NumSGPRsForWavesPerEU);
  if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
    MD->setRsrc1(CC, CurrentProgramInfo.ComputePGMRSrc1);
    MD->setRsrc2(CC, CurrentProgramInfo.ComputePGMRSrc2);
  } else {
    MD->setRsrc1(CC, S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) |
                     S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks));
    if (CurrentProgramInfo.ScratchBlocks > 0)
      MD->setRsrc2(CC, S_00B84C_SCRATCH_EN(1));
  }
  // ScratchSize is in bytes, 16 aligned.
  MD->setScratchSize(CC, alignTo(CurrentProgramInfo.ScratchSize, 16));
  if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
    MD->setRsrc2(CC, S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks));
    MD->setSpiPsInputEna(MFI->getPSInputEnable());
    MD->setSpiPsInputAddr(MFI->getPSInputAddr());
  }

  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  if (STM.isWave32())
    MD->setWave32(MF.getFunction().getCallingConv());
}

// This is supposed to be log2(Size)
static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) {
  switch (Size) {
  case 4:
    return AMD_ELEMENT_4_BYTES;
  case 8:
    return AMD_ELEMENT_8_BYTES;
  case 16:
    return AMD_ELEMENT_16_BYTES;
  default:
    llvm_unreachable("invalid private_element_size");
  }
}
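
// Fill in the amd_kernel_code_t header emitted before code object v2 kernels,
// copying in the program resource registers, code properties, and segment
// sizes computed in CurrentProgramInfo.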
void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
                                        const SIProgramInfo &CurrentProgramInfo,
                                        const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  assert(F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
         F.getCallingConv() == CallingConv::SPIR_KERNEL);

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();

  AMDGPU::initDefaultAMDKernelCodeT(Out, &STM);

  Out.compute_pgm_resource_registers =
      CurrentProgramInfo.ComputePGMRSrc1 |
      (CurrentProgramInfo.ComputePGMRSrc2 << 32);
  Out.code_properties |= AMD_CODE_PROPERTY_IS_PTR64;

  if (CurrentProgramInfo.DynamicCallStack)
    Out.code_properties |= AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK;

  AMD_HSA_BITS_SET(Out.code_properties,
                   AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE,
                   getElementByteSizeValue(STM.getMaxPrivateElementSize()));

  if (MFI->hasPrivateSegmentBuffer()) {
    Out.code_properties |=
        AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
  }

  if (MFI->hasDispatchPtr())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;

  if (MFI->hasQueuePtr())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR;

  if (MFI->hasKernargSegmentPtr())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR;

  if (MFI->hasDispatchID())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID;

  if (MFI->hasFlatScratchInit())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;

  if (MFI->hasDispatchPtr())
    Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;

  if (STM.isXNACKEnabled())
    Out.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED;

  Align MaxKernArgAlign;
  Out.kernarg_segment_byte_size = STM.getKernArgSegmentSize(F, MaxKernArgAlign);
  Out.wavefront_sgpr_count = CurrentProgramInfo.NumSGPR;
  Out.workitem_vgpr_count = CurrentProgramInfo.NumVGPR;
  Out.workitem_private_segment_byte_size = CurrentProgramInfo.ScratchSize;
  Out.workgroup_group_segment_byte_size = CurrentProgramInfo.LDSSize;

  // kernarg_segment_alignment is specified as log of the alignment.
  // The minimum alignment is 16.
  Out.kernarg_segment_alignment = Log2(std::max(Align(16), MaxKernArgAlign));
}

bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                                       const char *ExtraCode, raw_ostream &O) {
  // First try the generic code, which knows about modifiers like 'c' and 'n'.
  if (!AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, O))
    return false;

  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    case 'r':
      break;
    default:
      return true;
    }
  }

  // TODO: Should be able to support other operand types like globals.
  const MachineOperand &MO = MI->getOperand(OpNo);
  if (MO.isReg()) {
    AMDGPUInstPrinter::printRegOperand(MO.getReg(), O,
                                       *MF->getSubtarget().getRegisterInfo());
    return false;
  } else if (MO.isImm()) {
    int64_t Val = MO.getImm();
    if (AMDGPU::isInlinableIntLiteral(Val)) {
      O << Val;
    } else if (isUInt<16>(Val)) {
      O << format("0x%" PRIx16, static_cast<uint16_t>(Val));
    } else if (isUInt<32>(Val)) {
      O << format("0x%" PRIx32, static_cast<uint32_t>(Val));
    } else {
      O << format("0x%" PRIx64, static_cast<uint64_t>(Val));