//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPUAsmUtils.h"
#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/TargetParser/TargetParser.h"
#include <optional>

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

static llvm::cl::opt<unsigned>
    AmdhsaCodeObjectVersion("amdhsa-code-object-version", llvm::cl::Hidden,
                            llvm::cl::desc("AMDHSA Code Object Version"),
                            llvm::cl::init(4));

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  return ((Src << Shift) & Mask) | (Dst & ~Mask);
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
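
// Worked example for the helpers above (illustrative values, not from the
// original file): with Shift = 4 and Width = 3, getBitMask(4, 3) == 0x70.
// Packing Src = 5 into Dst = 0xFF gives packBits(5, 0xFF, 4, 3) == 0xDF
// (field [6:4] now holds 5), and unpackBits(0xDF, 4, 3) == 5 recovers it.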

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 10 : 0;
}

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 6 : 4;
}

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 0 : 4;
}

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 4 : 8;
}

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 10 ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
}
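
// Note (illustrative, derived from the helpers above): on gfx9/gfx10 the
// vmcnt field is split, with 4 low bits at [3:0] and 2 high bits at [15:14],
// giving a 6-bit counter even though the field is not contiguous;
// packBits/unpackBits handle each half separately.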

/// \returns VmVsrc bit width
inline unsigned getVmVsrcBitWidth() { return 3; }

/// \returns VmVsrc bit shift
inline unsigned getVmVsrcBitShift() { return 2; }

/// \returns VaVdst bit width
inline unsigned getVaVdstBitWidth() { return 4; }

/// \returns VaVdst bit shift
inline unsigned getVaVdstBitShift() { return 12; }

/// \returns SaSdst bit width
inline unsigned getSaSdstBitWidth() { return 1; }

/// \returns SaSdst bit shift
inline unsigned getSaSdstBitShift() { return 0; }

} // end namespace anonymous

namespace llvm {

namespace AMDGPU {

std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
  if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
    return std::nullopt;

  switch (AmdhsaCodeObjectVersion) {
  case 2:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V2;
  case 3:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V3;
  case 4:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  case 5:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  default:
    report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") +
                       Twine(AmdhsaCodeObjectVersion));
  }
}

bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
  return false;
}

bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
  return false;
}

bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  return false;
}

bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
  if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  return false;
}

bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) {
  return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) ||
         isHsaAbiVersion5(STI);
}

unsigned getAmdhsaCodeObjectVersion() {
  return AmdhsaCodeObjectVersion;
}

unsigned getCodeObjectVersion(const Module &M) {
  if (auto Ver = mdconst::extract_or_null<ConstantInt>(
      M.getModuleFlag("amdgpu_code_object_version"))) {
    return (unsigned)Ver->getZExtValue() / 100;
  }

  // Default code object version.
  return AMDHSA_COV4;
}

unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV2:
  case AMDHSA_COV3:
  case AMDHSA_COV4:
    return 48;
  case AMDHSA_COV5:
  default:
    return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
  }
}

// FIXME: All such magic numbers about the ABI should be in a
// central TD file.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV2:
  case AMDHSA_COV3:
  case AMDHSA_COV4:
    return 24;
  case AMDHSA_COV5:
  default:
    return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
  }
}

unsigned getDefaultQueueImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV2:
  case AMDHSA_COV3:
  case AMDHSA_COV4:
    return 32;
  case AMDHSA_COV5:
  default:
    return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET;
  }
}

unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
  switch (CodeObjectVersion) {
  case AMDHSA_COV2:
  case AMDHSA_COV3:
  case AMDHSA_COV4:
    return 40;
  case AMDHSA_COV5:
  default:
    return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET;
  }
}

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#define GET_MIMGBiasMappingTable_IMPL
#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#define GET_MAIInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
                           const MIMGDimInfo *Dim, bool IsA16,
                           bool IsG16Supported) {
  unsigned AddrWords = BaseOpcode->NumExtraArgs;
  unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
                            (BaseOpcode->LodOrClampOrMip ? 1 : 0);
  if (IsA16)
    AddrWords += divideCeil(AddrComponents, 2);
  else
    AddrWords += AddrComponents;

  // Note: For subtargets that support A16 but not G16, enabling A16 also
  // enables 16 bit gradients.
  // For subtargets that support A16 (operand) and G16 (done with a different
  // instruction encoding), they are independent.

  if (BaseOpcode->Gradients) {
    if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
      // There are two gradients per coordinate, we pack them separately.
      // For the 3d case,
      // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
      AddrWords += alignTo<2>(Dim->NumGradients / 2);
    else
      AddrWords += Dim->NumGradients;
  }
  return AddrWords;
}
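
// Worked example (illustrative): a 2-D sample with no extra args, no
// gradients, and an LOD clamp has AddrComponents = 2 coords + 1 clamp = 3,
// so AddrWords is 3 with full-size addresses; with IsA16 the three 16-bit
// components pack into divideCeil(3, 2) == 2 dwords instead.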

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
  bool IsBufferInv;
};

struct MTBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

struct SMInfo {
  uint16_t Opcode;
  bool IsBuffer;
};

struct VOPInfo {
  uint16_t Opcode;
  bool IsSingle;
};

struct VOPC64DPPInfo {
  uint16_t Opcode;
};

struct VOPDComponentInfo {
  uint16_t BaseVOP;
  uint16_t VOPDOp;
  bool CanBeVOPDX;
};

struct VOPDInfo {
  uint16_t Opcode;
  uint16_t OpX;
  uint16_t OpY;
};

struct VOPTrue16Info {
  uint16_t Opcode;
  bool IsTrue16;
};

#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
#define GET_VOP1InfoTable_IMPL
#define GET_VOP2InfoTable_DECL
#define GET_VOP2InfoTable_IMPL
#define GET_VOP3InfoTable_DECL
#define GET_VOP3InfoTable_IMPL
#define GET_VOPC64DPPTable_DECL
#define GET_VOPC64DPPTable_IMPL
#define GET_VOPC64DPP8Table_DECL
#define GET_VOPC64DPP8Table_IMPL
#define GET_VOPDComponentTable_DECL
#define GET_VOPDComponentTable_IMPL
#define GET_VOPDPairs_DECL
#define GET_VOPDPairs_IMPL
#define GET_VOPTrue16Table_DECL
#define GET_VOPTrue16Table_IMPL
#define GET_WMMAOpcode2AddrMappingTable_DECL
#define GET_WMMAOpcode2AddrMappingTable_IMPL
#define GET_WMMAOpcode3AddrMappingTable_DECL
#define GET_WMMAOpcode3AddrMappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMTBUFBaseOpcode(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMTBUFElements(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMTBUFHasVAddr(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMTBUFHasSrsrc(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMTBUFHasSoffset(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMUBUFElements(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

bool getMUBUFIsBufferInv(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->IsBufferInv : false;
}

bool getSMEMIsBuffer(unsigned Opc) {
  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
  return Info ? Info->IsBuffer : false;
}

bool getVOP1IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool getVOP2IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool getVOP3IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool isVOPC64DPP(unsigned Opc) {
  return isVOPC64DPPOpcodeHelper(Opc) || isVOPC64DPP8OpcodeHelper(Opc);
}

bool getMAIIsDGEMM(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info ? Info->is_dgemm : false;
}

bool getMAIIsGFX940XDL(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info ? Info->is_gfx940_xdl : false;
}

CanBeVOPD getCanBeVOPD(unsigned Opc) {
  const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
  if (Info)
    return {Info->CanBeVOPDX, true};
  else
    return {false, false};
}

unsigned getVOPDOpcode(unsigned Opc) {
  const VOPDComponentInfo *Info = getVOPDComponentHelper(Opc);
  return Info ? Info->VOPDOp : ~0u;
}

bool isVOPD(unsigned Opc) {
  return AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0X);
}

bool isMAC(unsigned Opc) {
  return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F32_e64_vi ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
         Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_MAC_F16_e64_vi ||
         Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F32_e64_vi ||
         Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_DX9_ZERO_F32_e64_gfx11 ||
         Opc == AMDGPU::V_FMAC_F16_e64_gfx10 ||
         Opc == AMDGPU::V_FMAC_F16_t16_e64_gfx11 ||
         Opc == AMDGPU::V_DOT2C_F32_F16_e64_vi ||
         Opc == AMDGPU::V_DOT2C_I32_I16_e64_vi ||
         Opc == AMDGPU::V_DOT4C_I32_I8_e64_vi ||
         Opc == AMDGPU::V_DOT8C_I32_I4_e64_vi;
}

bool isPermlane16(unsigned Opc) {
  return Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_gfx10 ||
         Opc == AMDGPU::V_PERMLANE16_B32_e64_gfx11 ||
         Opc == AMDGPU::V_PERMLANEX16_B32_e64_gfx11;
}

bool isTrue16Inst(unsigned Opc) {
  const VOPTrue16Info *Info = getTrue16OpcodeHelper(Opc);
  return Info ? Info->IsTrue16 : false;
}

unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc) {
  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom2AddrOpcode(Opc);
  return Info ? Info->Opcode3Addr : ~0u;
}

unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc) {
  const WMMAOpcodeMappingInfo *Info = getWMMAMappingInfoFrom3AddrOpcode(Opc);
  return Info ? Info->Opcode2Addr : ~0u;
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

int getVOPDFull(unsigned OpX, unsigned OpY) {
  const VOPDInfo *Info = getVOPDInfoFromComponentOpcodes(OpX, OpY);
  return Info ? Info->Opcode : -1;
}

std::pair<unsigned, unsigned> getVOPDComponents(unsigned VOPDOpcode) {
  const VOPDInfo *Info = getVOPDOpcodeHelper(VOPDOpcode);
  assert(Info);
  auto OpX = getVOPDBaseFromComponent(Info->OpX);
  auto OpY = getVOPDBaseFromComponent(Info->OpY);
  assert(OpX && OpY);
  return {OpX->BaseVOP, OpY->BaseVOP};
}

namespace VOPD {

ComponentProps::ComponentProps(const MCInstrDesc &OpDesc) {
  assert(OpDesc.getNumDefs() == Component::DST_NUM);

  assert(OpDesc.getOperandConstraint(Component::SRC0, MCOI::TIED_TO) == -1);
  assert(OpDesc.getOperandConstraint(Component::SRC1, MCOI::TIED_TO) == -1);
  auto TiedIdx = OpDesc.getOperandConstraint(Component::SRC2, MCOI::TIED_TO);
  assert(TiedIdx == -1 || TiedIdx == Component::DST);
  HasSrc2Acc = TiedIdx != -1;

  SrcOperandsNum = OpDesc.getNumOperands() - OpDesc.getNumDefs();
  assert(SrcOperandsNum <= Component::MAX_SRC_NUM);

  auto OperandsNum = OpDesc.getNumOperands();
  unsigned CompOprIdx;
  for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
    if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
      MandatoryLiteralIdx = CompOprIdx;
      break;
    }
  }
}

unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
  assert(CompOprIdx < Component::MAX_OPR_NUM);

  if (CompOprIdx == Component::DST)
    return getIndexOfDstInParsedOperands();

  auto CompSrcIdx = CompOprIdx - Component::DST_NUM;
  if (CompSrcIdx < getCompParsedSrcOperandsNum())
    return getIndexOfSrcInParsedOperands(CompSrcIdx);

  // The specified operand does not exist.
  return 0;
}

std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
    std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {

  auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx);
  auto OpYRegs = getRegIndices(ComponentIndex::Y, GetRegIdx);

  unsigned CompOprIdx;
  for (CompOprIdx = 0; CompOprIdx < Component::MAX_OPR_NUM; ++CompOprIdx) {
    unsigned BanksMasks = VOPD_VGPR_BANK_MASKS[CompOprIdx];
    if (OpXRegs[CompOprIdx] && OpYRegs[CompOprIdx] &&
        ((OpXRegs[CompOprIdx] & BanksMasks) ==
         (OpYRegs[CompOprIdx] & BanksMasks)))
      return CompOprIdx;
  }

  return {};
}
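
// Illustrative example (not from the original source): if the X and Y
// components read src0 from VGPRs whose bank bits match under
// VOPD_VGPR_BANK_MASKS (say v0 and v4, when the src0 mask covers the low two
// register bits), the function returns that operand's index and the VOPD
// pair is rejected; registers that land in different banks pass the check.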

// Return an array of VGPR registers [DST,SRC0,SRC1,SRC2] used
// by the specified component. If an operand is unused
// or is not a VGPR, the corresponding value is 0.
//
// GetRegIdx(Component, MCOperandIdx) must return a VGPR register index
// for the specified component and MC operand. The callback must return 0
// if the operand is not a register or not a VGPR.
InstInfo::RegIndices InstInfo::getRegIndices(
    unsigned CompIdx,
    std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {
  assert(CompIdx < COMPONENTS_NUM);

  const auto &Comp = CompInfo[CompIdx];
  InstInfo::RegIndices RegIndices;

  RegIndices[DST] = GetRegIdx(CompIdx, Comp.getIndexOfDstInMCOperands());

  for (unsigned CompOprIdx : {SRC0, SRC1, SRC2}) {
    unsigned CompSrcIdx = CompOprIdx - DST_NUM;
    RegIndices[CompOprIdx] =
        Comp.hasRegSrcOperand(CompSrcIdx)
            ? GetRegIdx(CompIdx, Comp.getIndexOfSrcInMCOperands(CompSrcIdx))
            : 0;
  }
  return RegIndices;
}

} // namespace VOPD

VOPD::InstInfo getVOPDInstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY) {
  return VOPD::InstInfo(OpX, OpY);
}

VOPD::InstInfo getVOPDInstInfo(unsigned VOPDOpcode,
                               const MCInstrInfo *InstrInfo) {
  auto [OpX, OpY] = getVOPDComponents(VOPDOpcode);
  const auto &OpXDesc = InstrInfo->get(OpX);
  const auto &OpYDesc = InstrInfo->get(OpY);
  VOPD::ComponentInfo OpXInfo(OpXDesc, VOPD::ComponentKind::COMPONENT_X);
  VOPD::ComponentInfo OpYInfo(OpYDesc, OpXInfo);
  return VOPD::InstInfo(OpXInfo, OpYInfo);
}

namespace IsaInfo {

AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
    : STI(STI), XnackSetting(TargetIDSetting::Any),
      SramEccSetting(TargetIDSetting::Any), CodeObjectVersion(0) {
  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
    XnackSetting = TargetIDSetting::Unsupported;
  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
    SramEccSetting = TargetIDSetting::Unsupported;
}

void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
  // Check if xnack or sramecc is explicitly enabled or disabled. In the
  // absence of the target features we assume we must generate code that can run
  // in any environment.
  SubtargetFeatures Features(FS);
  std::optional<bool> XnackRequested;
  std::optional<bool> SramEccRequested;

  for (const std::string &Feature : Features.getFeatures()) {
    if (Feature == "+xnack")
      XnackRequested = true;
    else if (Feature == "-xnack")
      XnackRequested = false;
    else if (Feature == "+sramecc")
      SramEccRequested = true;
    else if (Feature == "-sramecc")
      SramEccRequested = false;
  }

  bool XnackSupported = isXnackSupported();
  bool SramEccSupported = isSramEccSupported();

  if (XnackRequested) {
    if (XnackSupported) {
      XnackSetting =
          *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
    } else {
      // If a specific xnack setting was requested and this GPU does not support
      // xnack emit a warning. Setting will remain set to "Unsupported".
      if (*XnackRequested) {
        errs() << "warning: xnack 'On' was requested for a processor that does "
                  "not support it!\n";
      } else {
        errs() << "warning: xnack 'Off' was requested for a processor that "
                  "does not support it!\n";
      }
    }
  }

  if (SramEccRequested) {
    if (SramEccSupported) {
      SramEccSetting =
          *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
    } else {
      // If a specific sramecc setting was requested and this GPU does not
      // support sramecc emit a warning. Setting will remain set to
      // "Unsupported".
      if (*SramEccRequested) {
        errs() << "warning: sramecc 'On' was requested for a processor that "
                  "does not support it!\n";
      } else {
        errs() << "warning: sramecc 'Off' was requested for a processor that "
                  "does not support it!\n";
      }
    }
  }
}

static TargetIDSetting
getTargetIDSettingFromFeatureString(StringRef FeatureString) {
  if (FeatureString.endswith("-"))
    return TargetIDSetting::Off;
  if (FeatureString.endswith("+"))
    return TargetIDSetting::On;

  llvm_unreachable("Malformed feature string");
}

void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
  SmallVector<StringRef, 3> TargetIDSplit;
  TargetID.split(TargetIDSplit, ':');

  for (const auto &FeatureString : TargetIDSplit) {
    if (FeatureString.startswith("xnack"))
      XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
    if (FeatureString.startswith("sramecc"))
      SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
  }
}

std::string AMDGPUTargetID::toString() const {
  std::string StringRep;
  raw_string_ostream StreamRep(StringRep);

  auto TargetTriple = STI.getTargetTriple();
  auto Version = getIsaVersion(STI.getCPU());

  StreamRep << TargetTriple.getArchName() << '-'
            << TargetTriple.getVendorName() << '-'
            << TargetTriple.getOSName() << '-'
            << TargetTriple.getEnvironmentName() << '-';

  std::string Processor;
  // TODO: Following else statement is present here because we used various
  // alias names for GPUs up until GFX9 (e.g. 'fiji' is same as 'gfx803').
  // Remove once all aliases are removed from GCNProcessors.td.
  if (Version.Major >= 9)
    Processor = STI.getCPU().str();
  else
    Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
                 Twine(Version.Stepping))
                    .str();

  std::string Features;
  if (STI.getTargetTriple().getOS() == Triple::AMDHSA) {
    switch (CodeObjectVersion) {
    case AMDGPU::AMDHSA_COV2:
      // Code object V2 only supported specific processors and had fixed
      // settings for the XNACK.
      if (Processor == "gfx600") {
      } else if (Processor == "gfx601") {
      } else if (Processor == "gfx602") {
      } else if (Processor == "gfx700") {
      } else if (Processor == "gfx701") {
      } else if (Processor == "gfx702") {
      } else if (Processor == "gfx703") {
      } else if (Processor == "gfx704") {
      } else if (Processor == "gfx705") {
      } else if (Processor == "gfx801") {
        if (!isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " without XNACK");
      } else if (Processor == "gfx802") {
      } else if (Processor == "gfx803") {
      } else if (Processor == "gfx805") {
      } else if (Processor == "gfx810") {
        if (!isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " without XNACK");
      } else if (Processor == "gfx900") {
        if (isXnackOnOrAny())
          Processor = "gfx901";
      } else if (Processor == "gfx902") {
        if (isXnackOnOrAny())
          Processor = "gfx903";
      } else if (Processor == "gfx904") {
        if (isXnackOnOrAny())
          Processor = "gfx905";
      } else if (Processor == "gfx906") {
        if (isXnackOnOrAny())
          Processor = "gfx907";
      } else if (Processor == "gfx90c") {
        if (isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " with XNACK being ON or ANY");
      } else {
        report_fatal_error(
            "AMD GPU code object V2 does not support processor " +
            Twine(Processor));
      }
      break;
    case AMDGPU::AMDHSA_COV3:
      // xnack.
      if (isXnackOnOrAny())
        Features += "+xnack";
      // In code object v2 and v3, "sramecc" feature was spelled with a
      // hyphen ("sram-ecc").
      if (isSramEccOnOrAny())
        Features += "+sram-ecc";
      break;
    case AMDGPU::AMDHSA_COV4:
    case AMDGPU::AMDHSA_COV5:
      // sramecc.
      if (getSramEccSetting() == TargetIDSetting::Off)
        Features += ":sramecc-";
      else if (getSramEccSetting() == TargetIDSetting::On)
        Features += ":sramecc+";
      // xnack.
      if (getXnackSetting() == TargetIDSetting::Off)
        Features += ":xnack-";
      else if (getXnackSetting() == TargetIDSetting::On)
        Features += ":xnack+";
      break;
    default:
      break;
    }
  }

  StreamRep << Processor << Features;

  StreamRep.flush();
  return StringRep;
}
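
// Example output (illustrative): for an amdgcn-amd-amdhsa target with code
// object v4+, a gfx90a with SRAMECC enabled and XNACK disabled prints as
// "amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-".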

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  unsigned BytesPerCU = 0;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    BytesPerCU = 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    BytesPerCU = 65536;

  // "Per CU" really means "per whatever functional block the waves of a
  // workgroup must share". So the effective local memory size is doubled in
  // WGP mode on gfx10.
  if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
    BytesPerCU *= 2;

  return BytesPerCU;
}

unsigned getAddressableLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    return 65536;
  return 0;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  // "Per CU" really means "per whatever functional block the waves of a
  // workgroup must share". For gfx10 in CU mode this is the CU, which contains
  // two SIMDs.
  if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
    return 2;
  // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains
  // two CUs, so a total of four SIMDs.
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
    return 8;
  unsigned MaxWaves = getMaxWavesPerEU(STI) * getEUsPerCU(STI);
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1) {
    // Single-wave workgroups don't consume barrier resources.
    return MaxWaves;
  }

  unsigned MaxBarriers = 16;
  if (isGFX10Plus(*STI) && !STI->getFeatureBits().test(FeatureCuMode))
    MaxBarriers = 32;

  return std::min(MaxWaves / N, MaxBarriers);
}
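
// Worked example (illustrative): on a pre-gfx10 part with 10 waves per EU
// and 4 EUs per CU, a 256-item workgroup at wave64 has N = 4 waves, so the
// result is min(40 / 4, 16) == 10 workgroups per CU.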

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
  // FIXME: Need to take scratch memory into account.
  if (isGFX90A(*STI))
    return 8;
  if (!isGFX10Plus(*STI))
    return 10;
  return hasGFX10_3Insts(*STI) ? 16 : 20;
}

unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
                                   unsigned FlatWorkGroupSize) {
  return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
                    getEUsPerCU(STI));
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  // Some subtargets allow encoding 2048, but this isn't tested or supported.
  return 1024;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
}

unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
}

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU(STI))
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed ||
        STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
}
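
// Worked example (illustrative): with an encoding granule of 8, a kernel
// using 30 SGPRs is first rounded up to 32, and the encoded block count is
// 32 / 8 - 1 == 3.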

unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             std::optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);

  if (STI->getFeatureBits().test(FeatureGFX11FullVGPRs))
    return IsWave32 ? 24 : 12;

  if (hasGFX10_3Insts(*STI))
    return IsWave32 ? 16 : 8;

  return IsWave32 ? 8 : 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                std::optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);

  return IsWave32 ? 8 : 4;
}

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 512;
  if (!isGFX10Plus(*STI))
    return 256;
  bool IsWave32 = STI->getFeatureBits().test(FeatureWavefrontSize32);
  if (STI->getFeatureBits().test(FeatureGFX11FullVGPRs))
    return IsWave32 ? 1536 : 768;
  return IsWave32 ? 1024 : 512;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 512;
  return 256;
}

unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
                                      unsigned NumVGPRs) {
  unsigned MaxWaves = getMaxWavesPerEU(STI);
  unsigned Granule = getVGPRAllocGranule(STI);
  if (NumVGPRs < Granule)
    return MaxWaves;
  unsigned RoundedRegs = alignTo(NumVGPRs, Granule);
  return std::min(std::max(getTotalNumVGPRs(STI) / RoundedRegs, 1u), MaxWaves);
}

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxWavesPerEU = getMaxWavesPerEU(STI);
  if (WavesPerEU >= MaxWavesPerEU)
    return 0;

  unsigned TotNumVGPRs = getTotalNumVGPRs(STI);
  unsigned AddrsableNumVGPRs = getAddressableNumVGPRs(STI);
  unsigned Granule = getVGPRAllocGranule(STI);
  unsigned MaxNumVGPRs = alignDown(TotNumVGPRs / WavesPerEU, Granule);

  if (MaxNumVGPRs == alignDown(TotNumVGPRs / MaxWavesPerEU, Granule))
    return 0;

  unsigned MinWavesPerEU = getNumWavesPerEUWithNumVGPRs(STI, AddrsableNumVGPRs);
  if (WavesPerEU < MinWavesPerEU)
    return getMinNumVGPRs(STI, MinWavesPerEU);

  unsigned MaxNumVGPRsNext = alignDown(TotNumVGPRs / (WavesPerEU + 1), Granule);
  unsigned MinNumVGPRs = 1 + std::min(MaxNumVGPRs - Granule, MaxNumVGPRsNext);
  return std::min(MinNumVGPRs, AddrsableNumVGPRs);
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                                   getVGPRAllocGranule(STI));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                          std::optional<bool> EnableWavefrontSize32) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                     getVGPREncodingGranule(STI, EnableWavefrontSize32));
  // VGPRBlocks is actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
}

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = Version.Major;
  Header.amd_machine_version_minor = Version.Minor;
  Header.amd_machine_version_stepping = Version.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value must
  // be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
      Header.wavefront_size = 5;
      Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
    }
    Header.compute_pgm_resource_registers |=
        S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
        S_00B848_MEM_ORDERED(1);
  }
}

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));

  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  if (Version.Major >= 10) {
    AMDHSA_BITS_SET(KD.kernel_code_properties,
                    amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
                    STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
                    STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
  }
  if (AMDGPU::isGFX90A(*STI)) {
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc3,
                    amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
                    STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0);
  }
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  unsigned AS = GV->getAddressSpace();
  return AS == AMDGPUAS::CONSTANT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getArch() == Triple::r600;
}

std::pair<unsigned, unsigned>
getIntegerPairAttribute(const Function &F, StringRef Name,
                        std::pair<unsigned, unsigned> Default,
                        bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<unsigned, unsigned> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}

unsigned getVmcntBitMask(const IsaVersion &Version) {
  return (1 << (getVmcntBitWidthLo(Version.Major) +
                getVmcntBitWidthHi(Version.Major))) -
         1;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth(Version.Major)) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
                               getExpcntBitWidth(Version.Major));
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
                                getLgkmcntBitWidth(Version.Major));
  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
                    getExpcntBitWidth(Version.Major));
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
                     getVmcntBitWidthLo(Version.Major));
  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
                  getVmcntBitShiftHi(Version.Major),
                  getVmcntBitWidthHi(Version.Major));
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
                  getExpcntBitWidth(Version.Major));
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
}
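
// Round-trip example (illustrative, using the gfx9 field layout defined
// above): encodeWaitcnt(Version, 1, 2, 3) packs vmcnt=1 into bits [3:0] (the
// high vmcnt bits [15:14] stay clear for small counts), expcnt=2 into [6:4],
// and lgkmcnt=3 into [11:8]; decodeWaitcnt on the result returns the same
// three values.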

//===----------------------------------------------------------------------===//
// Custom Operands.
//
// A table of custom operands shall describe "primary" operand names
// first followed by aliases if any. It is not required but recommended
// to arrange operands so that operand encoding match operand position
// in the table. This will make disassembly a bit more efficient.
// Unused slots in the table shall have an empty name.
//
//===----------------------------------------------------------------------===//

template <class T>
static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize,
                       T Context) {
  return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() &&
         (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context));
}

template <class T>
static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test,
                     const CustomOperand<T> OpInfo[], int OpInfoSize,
                     T Context) {
  int InvalidIdx = OPR_ID_UNKNOWN;
  for (int Idx = 0; Idx < OpInfoSize; ++Idx) {
    if (Test(OpInfo[Idx])) {
      if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context))
        return Idx;
      InvalidIdx = OPR_ID_UNSUPPORTED;
    }
  }
  return InvalidIdx;
}

template <class T>
static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[],
                     int OpInfoSize, T Context) {
  auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; };
  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
}

template <class T>
static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize,
                     T Context, bool QuickCheck = true) {
  auto Test = [=](const CustomOperand<T> &Op) {
    return Op.Encoding == Id && !Op.Name.empty();
  };
  // This is an optimization that should work in most cases.
  // As a side effect, it may cause selection of an alias
  // instead of a primary operand name in case of sparse tables.
  if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) &&
      OpInfo[Id].Encoding == Id) {
    return Id;
  }
  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
}

//===----------------------------------------------------------------------===//
// Custom Operand Values
//===----------------------------------------------------------------------===//

static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
                                                int Size,
                                                const MCSubtargetInfo &STI) {
  unsigned Enc = 0;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.isSupported(STI))
      Enc |= Op.encode(Op.Default);
  }
  return Enc;
}

static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
                                            int Size, unsigned Code,
                                            bool &HasNonDefaultVal,
                                            const MCSubtargetInfo &STI) {
  unsigned UsedOprMask = 0;
  HasNonDefaultVal = false;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (!Op.isSupported(STI))
      continue;
    UsedOprMask |= Op.getMask();
    unsigned Val = Op.decode(Code);
    if (!Op.isValid(Val))
      return false;
    HasNonDefaultVal |= (Val != Op.Default);
  }
  return (Code & ~UsedOprMask) == 0;
}

static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
                                unsigned Code, int &Idx, StringRef &Name,
                                unsigned &Val, bool &IsDefault,
                                const MCSubtargetInfo &STI) {
  while (Idx < Size) {
    const auto &Op = Opr[Idx++];
    if (Op.isSupported(STI)) {
      Name = Op.Name;
      Val = Op.decode(Code);
      IsDefault = (Val == Op.Default);
      return true;
    }
  }

  return false;
}

static int encodeCustomOperandVal(const CustomOperandVal &Op,
                                  int64_t InputVal) {
  if (InputVal < 0 || InputVal > Op.Max)
    return OPR_VAL_INVALID;
  return Op.encode(InputVal);
}

static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
                               const StringRef Name, int64_t InputVal,
                               unsigned &UsedOprMask,
                               const MCSubtargetInfo &STI) {
  int InvalidId = OPR_ID_UNKNOWN;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.Name == Name) {
      if (!Op.isSupported(STI)) {
        InvalidId = OPR_ID_UNSUPPORTED;
        continue;
      }
      auto OprMask = Op.getMask();
      if (OprMask & UsedOprMask)
        return OPR_ID_DUPLICATE;
      UsedOprMask |= OprMask;
      return encodeCustomOperandVal(Op, InputVal);
    }
  }
  return InvalidId;
}

//===----------------------------------------------------------------------===//
// DepCtr
//===----------------------------------------------------------------------===//

namespace DepCtr {

int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
  static int Default = -1;
  if (Default == -1)
    Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
  return Default;
}

bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
                              const MCSubtargetInfo &STI) {
  return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
                                         HasNonDefaultVal, STI);
}

bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
                  bool &IsDefault, const MCSubtargetInfo &STI) {
  return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
                             IsDefault, STI);
}

int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
                 const MCSubtargetInfo &STI) {
  return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
                             STI);
}

unsigned decodeFieldVmVsrc(unsigned Encoded) {
  return unpackBits(Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
}

unsigned decodeFieldVaVdst(unsigned Encoded) {
  return unpackBits(Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
}

unsigned decodeFieldSaSdst(unsigned Encoded) {
  return unpackBits(Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
}

unsigned encodeFieldVmVsrc(unsigned Encoded, unsigned VmVsrc) {
  return packBits(VmVsrc, Encoded, getVmVsrcBitShift(), getVmVsrcBitWidth());
}

unsigned encodeFieldVmVsrc(unsigned VmVsrc) {
  return encodeFieldVmVsrc(0xffff, VmVsrc);
}

unsigned encodeFieldVaVdst(unsigned Encoded, unsigned VaVdst) {
  return packBits(VaVdst, Encoded, getVaVdstBitShift(), getVaVdstBitWidth());
}

unsigned encodeFieldVaVdst(unsigned VaVdst) {
  return encodeFieldVaVdst(0xffff, VaVdst);
}

unsigned encodeFieldSaSdst(unsigned Encoded, unsigned SaSdst) {
  return packBits(SaSdst, Encoded, getSaSdstBitShift(), getSaSdstBitWidth());
}

unsigned encodeFieldSaSdst(unsigned SaSdst) {
  return encodeFieldSaSdst(0xffff, SaSdst);
}

} // namespace DepCtr

//===----------------------------------------------------------------------===//
// hwreg
//===----------------------------------------------------------------------===//

namespace Hwreg {

int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
  int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI);
  return (Idx < 0) ? Idx : Opr[Idx].Encoding;
}

bool isValidHwreg(int64_t Id) {
  return 0 <= Id && isUInt<ID_WIDTH_>(Id);
}

bool isValidHwregOffset(int64_t Offset) {
  return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
}

bool isValidHwregWidth(int64_t Width) {
  return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
}

uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
  return (Id << ID_SHIFT_) |
         (Offset << OFFSET_SHIFT_) |
         ((Width - 1) << WIDTH_M1_SHIFT_);
}

StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
  int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI);
  return (Idx < 0) ? "" : Opr[Idx].Name;
}

void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
  Id = (Val & ID_MASK_) >> ID_SHIFT_;
  Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
  Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
}
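
// Worked example (illustrative, assuming the usual simm16 layout with the id
// in bits [5:0], the offset in [10:6], and width-1 in [15:11]): a full 32-bit
// read of register id 1 encodes as encodeHwreg(1, 0, 32) == 0xF801, and
// decodeHwreg(0xF801, ...) recovers Id = 1, Offset = 0, Width = 32.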

} // namespace Hwreg

//===----------------------------------------------------------------------===//
// exp tgt
//===----------------------------------------------------------------------===//

namespace Exp {

struct ExpTgt {
  StringLiteral Name;
  unsigned Tgt;
  unsigned MaxIndex;
};

static constexpr ExpTgt ExpTgtInfo[] = {
  {{"null"},           ET_NULL,            ET_NULL_MAX_IDX},
  {{"mrtz"},           ET_MRTZ,            ET_MRTZ_MAX_IDX},
  {{"prim"},           ET_PRIM,            ET_PRIM_MAX_IDX},
  {{"mrt"},            ET_MRT0,            ET_MRT_MAX_IDX},
  {{"pos"},            ET_POS0,            ET_POS_MAX_IDX},
  {{"dual_src_blend"}, ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
  {{"param"},          ET_PARAM0,          ET_PARAM_MAX_IDX},
};

bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
  for (const ExpTgt &Val : ExpTgtInfo) {
    if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
      Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
      Name = Val.Name;
      return true;
    }
  }
  return false;
}

unsigned getTgtId(const StringRef Name) {

  for (const ExpTgt &Val : ExpTgtInfo) {
    if (Val.MaxIndex == 0 && Name == Val.Name)
      return Val.Tgt;

    if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) {
      StringRef Suffix = Name.drop_front(Val.Name.size());

      unsigned Id;
      if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
        return ET_INVALID;

      // Disable leading zeroes
      if (Suffix.size() > 1 && Suffix[0] == '0')
        return ET_INVALID;

      return Val.Tgt + Id;
    }
  }
  return ET_INVALID;
}
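
// Illustrative examples: getTgtId("pos3") resolves through the "pos" entry
// to ET_POS0 + 3; getTgtId("mrt08") is rejected for the leading zero; and
// getTgtId("param32") is rejected because 32 exceeds ET_PARAM_MAX_IDX.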

bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
  switch (Id) {
  case ET_NULL:
    return !isGFX11Plus(STI);
  case ET_POS4:
  case ET_PRIM:
    return isGFX10Plus(STI);
  case ET_DUAL_SRC_BLEND0:
  case ET_DUAL_SRC_BLEND1:
    return isGFX11Plus(STI);
  default:
    if (Id >= ET_PARAM0 && Id <= ET_PARAM31)
      return !isGFX11Plus(STI);
    return true;
  }
}

} // namespace Exp

//===----------------------------------------------------------------------===//
// MTBUFFormat
//===----------------------------------------------------------------------===//

namespace MTBUFFormat {

int64_t getDfmt(const StringRef Name) {
  for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
    if (Name == DfmtSymbolic[Id])
      return Id;
  }
  return DFMT_UNDEF;
}

StringRef getDfmtName(unsigned Id) {
  assert(Id <= DFMT_MAX);
  return DfmtSymbolic[Id];
}

static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
  if (isSI(STI) || isCI(STI))
    return NfmtSymbolicSICI;
  if (isVI(STI) || isGFX9(STI))
    return NfmtSymbolicVI;
  return NfmtSymbolicGFX10;
}

int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
  auto lookupTable = getNfmtLookupTable(STI);
  for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
    if (Name == lookupTable[Id])
      return Id;
  }
  return NFMT_UNDEF;
}

StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
  assert(Id <= NFMT_MAX);
  return getNfmtLookupTable(STI)[Id];
}

bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
  unsigned Dfmt;
  unsigned Nfmt;
  decodeDfmtNfmt(Id, Dfmt, Nfmt);
  return isValidNfmt(Nfmt, STI);
}

bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
  return !getNfmtName(Id, STI).empty();
}

int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
  return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
}

void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
  Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
  Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
}
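
// Worked example (illustrative, assuming DFMT_SHIFT == 0 and NFMT_SHIFT == 4):
// encodeDfmtNfmt(10, 7) yields 10 | (7 << 4) == 0x7A, and
// decodeDfmtNfmt(0x7A, Dfmt, Nfmt) splits it back into Dfmt = 10, Nfmt = 7.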

int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
  if (isGFX11Plus(STI)) {
    for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
      if (Name == UfmtSymbolicGFX11[Id])
        return Id;
    }
  } else {
    for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
      if (Name == UfmtSymbolicGFX10[Id])
        return Id;
    }
  }
  return UFMT_UNDEF;
}

StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
  if (isValidUnifiedFormat(Id, STI))
    return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
  return "";
}

bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
  return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
}

int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
                             const MCSubtargetInfo &STI) {
  int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
  if (isGFX11Plus(STI)) {
    for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
      if (Fmt == DfmtNfmt2UFmtGFX11[Id])
        return Id;
    }
  } else {
    for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
      if (Fmt == DfmtNfmt2UFmtGFX10[Id])
        return Id;
    }
  }
  return UFMT_UNDEF;
}

bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
  return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
}

unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
  if (isGFX10Plus(STI))
    return UFMT_DEFAULT;
  return DFMT_NFMT_DEFAULT;
}

} // namespace MTBUFFormat

//===----------------------------------------------------------------------===//
// SendMsg
//===----------------------------------------------------------------------===//

namespace SendMsg {

static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
}

int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) {
  int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI);
  return (Idx < 0) ? Idx : Msg[Idx].Encoding;
}

bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
  return (MsgId & ~(getMsgIdMask(STI))) == 0;
}

StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) {
  int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI);
  return (Idx < 0) ? "" : Msg[Idx].Name;
}

int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
  const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
  const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
  const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
  for (int i = F; i < L; ++i) {
    if (Name == S[i]) {
      return i;
    }
  }
  return OP_UNKNOWN_;
}

bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
                  bool Strict) {
  assert(isValidMsgId(MsgId, STI));

  if (!Strict)
    return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);

  if (MsgId == ID_SYSMSG)
    return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
  if (!isGFX11Plus(STI)) {
    switch (MsgId) {
    case ID_GS_PreGFX11:
      return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
    case ID_GS_DONE_PreGFX11:
      return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
    }
  }
  return OpId == OP_NONE_;
}

StringRef getMsgOpName(int64_t MsgId, int64_t OpId,
                       const MCSubtargetInfo &STI) {
  assert(msgRequiresOp(MsgId, STI));
  return (MsgId == ID_SYSMSG) ? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
}

bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
                      const MCSubtargetInfo &STI, bool Strict) {
  assert(isValidMsgOp(MsgId, OpId, STI, Strict));

  if (!Strict)
    return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);

  if (!isGFX11Plus(STI)) {
    switch (MsgId) {
    case ID_GS_PreGFX11:
      return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
    case ID_GS_DONE_PreGFX11:
      return (OpId == OP_GS_NOP) ?
          (StreamId == STREAM_ID_NONE_) :
          (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
    }
  }
  return StreamId == STREAM_ID_NONE_;
}

bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
  return MsgId == ID_SYSMSG ||
         (!isGFX11Plus(STI) &&
          (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
}

bool msgSupportsStream(int64_t MsgId, int64_t OpId,
                       const MCSubtargetInfo &STI) {
  return !isGFX11Plus(STI) &&
         (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
         OpId != OP_GS_NOP;
}

void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
               uint16_t &StreamId, const MCSubtargetInfo &STI) {
  MsgId = Val & getMsgIdMask(STI);
  if (isGFX11Plus(STI)) {
    OpId = 0;
    StreamId = 0;
  } else {
    OpId = (Val & OP_MASK_) >> OP_SHIFT_;
    StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
  }
}

uint64_t encodeMsg(uint64_t MsgId,
                   uint64_t OpId,
                   uint64_t StreamId) {
  return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
}

} // namespace SendMsg

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

unsigned getInitialPSInputAddr(const Function &F) {
  return F.getFnAttributeAsParsedInteger("InitialPSInputAddr", 0);
}

bool getHasColorExport(const Function &F) {
  // As a safe default always respond as if PS has color exports.
  return F.getFnAttributeAsParsedInteger(
             "amdgpu-color-export",
             F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
}

bool getHasDepthExport(const Function &F) {
  return F.getFnAttributeAsParsedInteger("amdgpu-depth-export", 0) != 0;
}
1926 bool isShader(CallingConv::ID cc) {
1928 case CallingConv::AMDGPU_VS:
1929 case CallingConv::AMDGPU_LS:
1930 case CallingConv::AMDGPU_HS:
1931 case CallingConv::AMDGPU_ES:
1932 case CallingConv::AMDGPU_GS:
1933 case CallingConv::AMDGPU_PS:
1934 case CallingConv::AMDGPU_CS:
1941 bool isGraphics(CallingConv::ID cc) {
1942 return isShader(cc) || cc == CallingConv::AMDGPU_Gfx;
1945 bool isCompute(CallingConv::ID cc) {
1946 return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS;
1949 bool isEntryFunctionCC(CallingConv::ID CC) {
1951 case CallingConv::AMDGPU_KERNEL:
1952 case CallingConv::SPIR_KERNEL:
1953 case CallingConv::AMDGPU_VS:
1954 case CallingConv::AMDGPU_GS:
1955 case CallingConv::AMDGPU_PS:
1956 case CallingConv::AMDGPU_CS:
1957 case CallingConv::AMDGPU_ES:
1958 case CallingConv::AMDGPU_HS:
1959 case CallingConv::AMDGPU_LS:
1966 bool isModuleEntryFunctionCC(CallingConv::ID CC) {
1968 case CallingConv::AMDGPU_Gfx:
1971 return isEntryFunctionCC(CC);
bool isKernelCC(const Function *Func) {
  return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureXNACK);
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureSRAMECC);
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
         !STI.hasFeature(AMDGPU::FeatureR128A16);
}

bool hasA16(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureA16);
}

bool hasG16(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureG16);
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) && !isCI(STI) &&
         !isSI(STI);
}

unsigned getNSAMaxSize(const MCSubtargetInfo &STI) {
  auto Version = getIsaVersion(STI.getCPU());
  if (Version.Major == 10)
    return Version.Minor >= 3 ? 13 : 5;
  if (Version.Major == 11)
    return 5;
  return 0;
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureSouthernIslands);
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureSeaIslands);
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX9);
}

bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
  return isGFX9(STI) || isGFX10(STI);
}

bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
  return isVI(STI) || isGFX9(STI) || isGFX10(STI);
}

bool isGFX8Plus(const MCSubtargetInfo &STI) {
  return isVI(STI) || isGFX9Plus(STI);
}

bool isGFX9Plus(const MCSubtargetInfo &STI) {
  return isGFX9(STI) || isGFX10Plus(STI);
}

bool isGFX10(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX10);
}

bool isGFX10Plus(const MCSubtargetInfo &STI) {
  return isGFX10(STI) || isGFX11Plus(STI);
}

bool isGFX11(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX11);
}

bool isGFX11Plus(const MCSubtargetInfo &STI) {
  return isGFX11(STI);
}

bool isNotGFX11Plus(const MCSubtargetInfo &STI) {
  return !isGFX11Plus(STI);
}

bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
  return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
}

bool isGFX10Before1030(const MCSubtargetInfo &STI) {
  return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGCN3Encoding);
}

bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX10_AEncoding);
}

bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding);
}

bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX10_3Insts);
}

bool isGFX90A(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
}

bool isGFX940(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureGFX940Insts);
}

bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
}

bool hasMAIInsts(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureMAIInsts);
}

bool hasVOPD(const MCSubtargetInfo &STI) {
  return STI.hasFeature(AMDGPU::FeatureVOPD);
}

int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
                         int32_t ArgNumVGPR) {
  if (has90AInsts && ArgNumAGPR)
    return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
  return std::max(ArgNumVGPR, ArgNumAGPR);
}
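
// Worked example (illustrative): with 90A-style unified register allocation,
// ArgNumVGPR = 10 and ArgNumAGPR = 8 give alignTo(10, 4) + 8 == 20, since the
// AGPR block must start at a 4-register boundary; with separate VGPR/AGPR
// files the larger count, max(10, 8) == 10, determines the allocation.
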
bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
    Reg == AMDGPU::SCC;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9PLUS(TTMP0) \
  CASE_VI_GFX9PLUS(TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2) \
  CASE_VI_GFX9PLUS(TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4) \
  CASE_VI_GFX9PLUS(TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6) \
  CASE_VI_GFX9PLUS(TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8) \
  CASE_VI_GFX9PLUS(TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10) \
  CASE_VI_GFX9PLUS(TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12) \
  CASE_VI_GFX9PLUS(TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14) \
  CASE_VI_GFX9PLUS(TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_GFXPRE11_GFX11PLUS(M0) \
  CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
  CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9PLUS(node) \
  case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;

#define CASE_GFXPRE11_GFX11PLUS(node) \
  case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;

#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
  case node: return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
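
/// \returns the hardware-specific MC register for pseudo register \p Reg on
/// subtarget \p STI; e.g. FLAT_SCR maps to its _ci variant on CI and to its
/// _vi variant on VI and later, as spelled out by MAP_REG2REG above.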
unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;
#define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node;
#define CASE_GFXPRE11_GFX11PLUS_TO(node, result)

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}
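
// mc2PseudoReg is the inverse mapping: with the CASE_* macros redefined
// above, both FLAT_SCR_ci and FLAT_SCR_vi, for example, map back to the
// generic FLAT_SCR pseudo register.
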
bool isInlineValue(unsigned Reg) {
  switch (Reg) {
  case AMDGPU::SRC_SHARED_BASE_LO:
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_SHARED_LIMIT_LO:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_BASE_LO:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_PRIVATE_LIMIT_LO:
  case AMDGPU::SRC_PRIVATE_LIMIT:
  case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
    return true;
  case AMDGPU::SRC_VCCZ:
  case AMDGPU::SRC_EXECZ:
  case AMDGPU::SRC_SCC:
    return true;
  case AMDGPU::SGPR_NULL:
    return true;
  default:
    return false;
  }
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.operands()[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.operands()[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
         OpType <= AMDGPU::OPERAND_KIMM_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.operands()[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.operands()[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::VGPR_LO16RegClassID:
  case AMDGPU::VGPR_HI16RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
  case AMDGPU::AGPR_LO16RegClassID:
    return 16;
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
  case AMDGPU::VReg_64_Align2RegClassID:
  case AMDGPU::AReg_64_Align2RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::AV_64_Align2RegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
  case AMDGPU::AReg_96RegClassID:
  case AMDGPU::VReg_96_Align2RegClassID:
  case AMDGPU::AReg_96_Align2RegClassID:
  case AMDGPU::AV_96RegClassID:
  case AMDGPU::AV_96_Align2RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
  case AMDGPU::VReg_128_Align2RegClassID:
  case AMDGPU::AReg_128_Align2RegClassID:
  case AMDGPU::AV_128RegClassID:
  case AMDGPU::AV_128_Align2RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
  case AMDGPU::AReg_160RegClassID:
  case AMDGPU::VReg_160_Align2RegClassID:
  case AMDGPU::AReg_160_Align2RegClassID:
  case AMDGPU::AV_160RegClassID:
  case AMDGPU::AV_160_Align2RegClassID:
    return 160;
  case AMDGPU::SGPR_192RegClassID:
  case AMDGPU::SReg_192RegClassID:
  case AMDGPU::VReg_192RegClassID:
  case AMDGPU::AReg_192RegClassID:
  case AMDGPU::VReg_192_Align2RegClassID:
  case AMDGPU::AReg_192_Align2RegClassID:
  case AMDGPU::AV_192RegClassID:
  case AMDGPU::AV_192_Align2RegClassID:
    return 192;
  case AMDGPU::SGPR_224RegClassID:
  case AMDGPU::SReg_224RegClassID:
  case AMDGPU::VReg_224RegClassID:
  case AMDGPU::AReg_224RegClassID:
  case AMDGPU::VReg_224_Align2RegClassID:
  case AMDGPU::AReg_224_Align2RegClassID:
  case AMDGPU::AV_224RegClassID:
  case AMDGPU::AV_224_Align2RegClassID:
    return 224;
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
  case AMDGPU::AReg_256RegClassID:
  case AMDGPU::VReg_256_Align2RegClassID:
  case AMDGPU::AReg_256_Align2RegClassID:
  case AMDGPU::AV_256RegClassID:
  case AMDGPU::AV_256_Align2RegClassID:
    return 256;
  case AMDGPU::SGPR_288RegClassID:
  case AMDGPU::SReg_288RegClassID:
  case AMDGPU::VReg_288RegClassID:
  case AMDGPU::AReg_288RegClassID:
  case AMDGPU::VReg_288_Align2RegClassID:
  case AMDGPU::AReg_288_Align2RegClassID:
  case AMDGPU::AV_288RegClassID:
  case AMDGPU::AV_288_Align2RegClassID:
    return 288;
  case AMDGPU::SGPR_320RegClassID:
  case AMDGPU::SReg_320RegClassID:
  case AMDGPU::VReg_320RegClassID:
  case AMDGPU::AReg_320RegClassID:
  case AMDGPU::VReg_320_Align2RegClassID:
  case AMDGPU::AReg_320_Align2RegClassID:
  case AMDGPU::AV_320RegClassID:
  case AMDGPU::AV_320_Align2RegClassID:
    return 320;
  case AMDGPU::SGPR_352RegClassID:
  case AMDGPU::SReg_352RegClassID:
  case AMDGPU::VReg_352RegClassID:
  case AMDGPU::AReg_352RegClassID:
  case AMDGPU::VReg_352_Align2RegClassID:
  case AMDGPU::AReg_352_Align2RegClassID:
  case AMDGPU::AV_352RegClassID:
  case AMDGPU::AV_352_Align2RegClassID:
    return 352;
  case AMDGPU::SGPR_384RegClassID:
  case AMDGPU::SReg_384RegClassID:
  case AMDGPU::VReg_384RegClassID:
  case AMDGPU::AReg_384RegClassID:
  case AMDGPU::VReg_384_Align2RegClassID:
  case AMDGPU::AReg_384_Align2RegClassID:
  case AMDGPU::AV_384RegClassID:
  case AMDGPU::AV_384_Align2RegClassID:
    return 384;
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
  case AMDGPU::VReg_512_Align2RegClassID:
  case AMDGPU::AReg_512_Align2RegClassID:
  case AMDGPU::AV_512RegClassID:
  case AMDGPU::AV_512_Align2RegClassID:
    return 512;
  case AMDGPU::SGPR_1024RegClassID:
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
  case AMDGPU::VReg_1024_Align2RegClassID:
  case AMDGPU::AReg_1024_Align2RegClassID:
  case AMDGPU::AV_1024RegClassID:
  case AMDGPU::AV_1024_Align2RegClassID:
    return 1024;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegBitWidth(const TargetRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.operands()[OpNo].RegClass;
  return getRegBitWidth(RCID) / 8;
}
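
// Example (illustrative): an operand whose register class is
// VReg_96RegClassID has getRegBitWidth() == 96, so getRegOperandSize()
// reports 96 / 8 == 12 bytes.
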
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == llvm::bit_cast<uint64_t>(0.0)) ||
         (Val == llvm::bit_cast<uint64_t>(1.0)) ||
         (Val == llvm::bit_cast<uint64_t>(-1.0)) ||
         (Val == llvm::bit_cast<uint64_t>(0.5)) ||
         (Val == llvm::bit_cast<uint64_t>(-0.5)) ||
         (Val == llvm::bit_cast<uint64_t>(2.0)) ||
         (Val == llvm::bit_cast<uint64_t>(-2.0)) ||
         (Val == llvm::bit_cast<uint64_t>(4.0)) ||
         (Val == llvm::bit_cast<uint64_t>(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}
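
// Example (illustrative): 1.0 has the bit pattern 0x3FF0000000000000 and so
// matches the bit_cast<uint64_t>(1.0) entry above; 0x3fc45f306dc9c882 is
// 1/(2*pi) and is only accepted when HasInv2Pi is set.
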
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == llvm::bit_cast<uint32_t>(0.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(1.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(-1.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(0.5f)) ||
         (Val == llvm::bit_cast<uint32_t>(-0.5f)) ||
         (Val == llvm::bit_cast<uint32_t>(2.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(-2.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(4.0f)) ||
         (Val == llvm::bit_cast<uint32_t>(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (isInlinableIntLiteral(Literal))
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}
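
// Example (illustrative): the packed literal 0x40004000 has Lo16 == Hi16 ==
// 0x4000 (2.0 in half precision), so it is inlinable as a v2f16 operand,
// whereas 0x40003C00 (2.0, 1.0) mixes two different halves and is not.
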
bool isInlinableIntLiteralV216(int32_t Literal) {
  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return isInlinableIntLiteral(Lo16);

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return isInlinableIntLiteral(Hi16);
  return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
}

bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return true;

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return true;
  return Lo16 == Hi16;
}
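
// Note (interpretation, not from the original source): foldability is weaker
// than inlinability; it only requires that the packed value be expressible
// with a single 32-bit literal (sign/zero-extended low half, zero low half,
// or the same 16 bits replicated), not that the halves be inline constants.
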
bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_Gfx:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return A->hasAttribute(Attribute::InReg) ||
           A->hasAttribute(Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo) {
  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = CB->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_Gfx:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return CB->paramHasAttr(ArgNo, Attribute::InReg) ||
           CB->paramHasAttr(ArgNo, Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
  return isGCN3Encoding(ST) || isGFX10Plus(ST);
}

static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
  return isGFX9Plus(ST);
}

bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
                                      int64_t EncodedOffset) {
  return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
                               : isUInt<8>(EncodedOffset);
}

bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
                                    int64_t EncodedOffset,
                                    bool IsBuffer) {
  return !IsBuffer &&
         hasSMRDSignedImmOffset(ST) &&
         isInt<21>(EncodedOffset);
}

static bool isDwordAligned(uint64_t ByteOffset) {
  return (ByteOffset & 3) == 0;
}

uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
                                uint64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;

  assert(isDwordAligned(ByteOffset));
  return ByteOffset >> 2;
}
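
// Example (illustrative): SI and CI encode SMRD offsets in dwords, so a byte
// offset of 16 becomes 4; subtargets with SMEM byte offsets (see
// hasSMEMByteOffset above) pass the byte offset through unchanged.
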
std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
                                            int64_t ByteOffset, bool IsBuffer) {
  // The signed version is always a byte offset.
  if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
    assert(hasSMEMByteOffset(ST));
    return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
                                 : std::nullopt;
  }

  if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
    return std::nullopt;

  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
  return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
             ? std::optional<int64_t>(EncodedOffset)
             : std::nullopt;
}
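
// Usage sketch (illustrative): getSMRDEncodedOffset(ST, 16, /*IsBuffer=*/false)
// yields 16 on GFX9+ (signed byte offset), 16 on VI (unsigned byte offset),
// and 4 on SI/CI (dword units), with std::nullopt when the offset cannot be
// encoded.
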
std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
                                                     int64_t ByteOffset) {
  if (!isCI(ST) || !isDwordAligned(ByteOffset))
    return std::nullopt;

  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
  return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
                                   : std::nullopt;
}

unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST) {
  // Address offset is 12-bit signed for GFX10, 13-bit for GFX9 and GFX11+.
  if (AMDGPU::isGFX10(ST))
    return 12;

  return 13;
}

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

struct AlwaysUniform {
  unsigned Intr;
};
const AlwaysUniform *lookupAlwaysUniform(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#define GET_UniformIntrinsics_IMPL
#define GET_Gfx9BufferFormat_IMPL
#define GET_Gfx10BufferFormat_IMPL
#define GET_Gfx11PlusBufferFormat_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

bool isIntrinsicAlwaysUniform(unsigned IntrID) {
  return lookupAlwaysUniform(IntrID);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
                                                  uint8_t NumComponents,
                                                  uint8_t NumFormat,
                                                  const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI)
             ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents,
                                            NumFormat)
             : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp,
                                                       NumComponents, NumFormat)
                            : getGfx9BufferFormatInfo(BitsPerComp,
                                                      NumComponents, NumFormat);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
                                                  const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
                          : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
                                         : getGfx9BufferFormatInfo(Format);
}

} // namespace AMDGPU

raw_ostream &operator<<(raw_ostream &OS,
                        const AMDGPU::IsaInfo::TargetIDSetting S) {
  switch (S) {
  case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
    OS << "Unsupported";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::Any):
    OS << "Any";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::Off):
    OS << "Off";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::On):
    OS << "On";
    break;
  }
  return OS;
}