//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPU.h"
#include "SIDefines.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRMAP_INFO
#undef GET_INSTRINFO_NAMED_OPS

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
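
// A quick worked example of the helpers above (illustrative values only):
//   getBitMask(4, 3)       == 0x70
//   packBits(5, 0, 4, 3)   == 0x50
//   unpackBits(0x50, 4, 3) == 5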

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth() { return 4; }

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }
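
// Taken together, the getters above describe the s_waitcnt immediate layout
// assumed by this file: vmcnt in bits 3:0, expcnt in bits 6:4, lgkmcnt in
// bits 11:8, and (gfx9+) the two high vmcnt bits in bits 15:14.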

} // end anonymous namespace

namespace llvm {

namespace AMDGPU {

struct MIMGInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t MIMGEncoding;
  uint8_t VDataDwords;
  uint8_t VAddrDwords;
};

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

IsaVersion getIsaVersion(const FeatureBitset &Features) {
  // GCN GFX6 (Southern Islands (SI)).
  if (Features.test(FeatureISAVersion6_0_0))
    return {6, 0, 0};
  if (Features.test(FeatureISAVersion6_0_1))
    return {6, 0, 1};

  // GCN GFX7 (Sea Islands (CI)).
  if (Features.test(FeatureISAVersion7_0_0))
    return {7, 0, 0};
  if (Features.test(FeatureISAVersion7_0_1))
    return {7, 0, 1};
  if (Features.test(FeatureISAVersion7_0_2))
    return {7, 0, 2};
  if (Features.test(FeatureISAVersion7_0_3))
    return {7, 0, 3};
  if (Features.test(FeatureISAVersion7_0_4))
    return {7, 0, 4};
  if (Features.test(FeatureSeaIslands))
    return {7, 0, 0};

  // GCN GFX8 (Volcanic Islands (VI)).
  if (Features.test(FeatureISAVersion8_0_1))
    return {8, 0, 1};
  if (Features.test(FeatureISAVersion8_0_2))
    return {8, 0, 2};
  if (Features.test(FeatureISAVersion8_0_3))
    return {8, 0, 3};
  if (Features.test(FeatureISAVersion8_1_0))
    return {8, 1, 0};
  if (Features.test(FeatureVolcanicIslands))
    return {8, 0, 0};

  // GCN GFX9.
  if (Features.test(FeatureISAVersion9_0_0))
    return {9, 0, 0};
  if (Features.test(FeatureISAVersion9_0_2))
    return {9, 0, 2};
  if (Features.test(FeatureISAVersion9_0_4))
    return {9, 0, 4};
  if (Features.test(FeatureISAVersion9_0_6))
    return {9, 0, 6};
  if (Features.test(FeatureGFX9))
    return {9, 0, 0};

  if (Features.test(FeatureSouthernIslands))
    return {0, 0, 0};
  return {0, 0, 0};
}

void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream) {
  auto TargetTriple = STI->getTargetTriple();
  auto ISAVersion = IsaInfo::getIsaVersion(STI->getFeatureBits());

  Stream << TargetTriple.getArchName() << '-'
         << TargetTriple.getVendorName() << '-'
         << TargetTriple.getOSName() << '-'
         << TargetTriple.getEnvironmentName() << '-'
         << "gfx"
         << ISAVersion.Major
         << ISAVersion.Minor
         << ISAVersion.Stepping;

  if (hasXNACK(*STI))
    Stream << "+xnack";

  Stream.flush();
}

bool hasCodeObjectV3(const MCSubtargetInfo *STI) {
  return STI->getFeatureBits().test(FeatureCodeObjectV3);
}

unsigned getWavefrontSize(const FeatureBitset &Features) {
  if (Features.test(FeatureWavefrontSize16))
    return 16;
  if (Features.test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const FeatureBitset &Features) {
  if (Features.test(FeatureLocalMemorySize32768))
    return 32768;
  if (Features.test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const FeatureBitset &Features) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const FeatureBitset &Features,
                               unsigned FlatWorkGroupSize) {
  if (!Features.test(FeatureGCN))
    return 8;
  unsigned N = getWavesPerWorkGroup(Features, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}

unsigned getMaxWavesPerCU(const FeatureBitset &Features) {
  return getMaxWavesPerEU() * getEUsPerCU(Features);
}

unsigned getMaxWavesPerCU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(Features, FlatWorkGroupSize);
}

unsigned getMinWavesPerEU(const FeatureBitset &Features) {
  return 1;
}

unsigned getMaxWavesPerEU() {
  // FIXME: Need to take scratch memory into account.
  return 10;
}

unsigned getMaxWavesPerEU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(Features, FlatWorkGroupSize),
                 getEUsPerCU(Features)) / getEUsPerCU(Features);
}
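
// For illustration: a work group that needs 6 waves on a CU with 4 EUs gets
// alignTo(6, 4) / 4 == 2 waves per EU.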

unsigned getMinFlatWorkGroupSize(const FeatureBitset &Features) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const FeatureBitset &Features) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const FeatureBitset &Features,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(Features)) /
         getWavefrontSize(Features);
}
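
// E.g. a flat work group size of 256 on a 64-wide wavefront occupies
// alignTo(256, 64) / 64 == 4 waves.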

unsigned getSGPRAllocGranule(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const FeatureBitset &Features) {
  return 8;
}

unsigned getTotalNumSGPRs(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const FeatureBitset &Features) {
  if (Features.test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(Features) / (WavesPerEU + 1);
  if (Features.test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(Features)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(Features));
}
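
// Illustrative gfx8 numbers (800 SGPRs total, alloc granule 16, no trap
// handler) for WavesPerEU == 8: 800 / 9 == 88, alignDown(88, 16) + 1 == 81.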

unsigned getMaxNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(Features);
  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(Features);
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(Features) / WavesPerEU;
  if (Features.test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(Features));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getNumExtraSGPRs(const FeatureBitset &Features, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed)
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const FeatureBitset &Features, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(Features, VCCUsed, FlatScrUsed,
                          Features[AMDGPU::FeatureXNACK]);
}

unsigned getNumSGPRBlocks(const FeatureBitset &Features, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(Features));
  // SGPRBlocks is actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(Features) - 1;
}
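
// E.g. 37 SGPRs with an encoding granule of 8 round up to 40 and encode as
// 40 / 8 - 1 == 4.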

unsigned getVGPRAllocGranule(const FeatureBitset &Features) {
  return 4;
}

unsigned getVGPREncodingGranule(const FeatureBitset &Features) {
  return getVGPRAllocGranule(Features);
}

unsigned getTotalNumVGPRs(const FeatureBitset &Features) {
  return 256;
}

unsigned getAddressableNumVGPRs(const FeatureBitset &Features) {
  return getTotalNumVGPRs(Features);
}

unsigned getMinNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(Features) / (WavesPerEU + 1),
                getVGPRAllocGranule(Features)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(Features));
}

unsigned getMaxNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(Features) / WavesPerEU,
                                   getVGPRAllocGranule(Features));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(Features);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const FeatureBitset &Features, unsigned NumVGPRs) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs), getVGPREncodingGranule(Features));
  // VGPRBlocks is actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(Features) - 1;
}

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const FeatureBitset &Features) {
  IsaInfo::IsaVersion ISA = IsaInfo::getIsaVersion(Features);

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = ISA.Major;
  Header.amd_machine_version_minor = ISA.Minor;
  Header.amd_machine_version_stepping = ISA.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;
}

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor() {
  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
         GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}

unsigned getVmcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth()) - 1;
}

unsigned getWaitcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(), getLgkmcntBitWidth());
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned decodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

void decodeWaitcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

unsigned encodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

unsigned encodeWaitcnt(const IsaInfo::IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}
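
// Round-trip example on a pre-gfx9 target: encodeWaitcnt(Version, 1, 2, 3)
// produces 0x321 (vmcnt in bits 3:0, expcnt in bits 6:4, lgkmcnt in bits
// 11:8), and decodeWaitcnt recovers Vmcnt == 1, Expcnt == 2, Lgkmcnt == 3.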

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch(cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_LS:
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_ES:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
    Reg == AMDGPU::SCC;
}

bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo* TRI) {
  for (MCRegAliasIterator R(Reg0, TRI, true); R.isValid(); ++R) {
    if (*R == Reg1) return true;
  }
  return false;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9(TTMP0) \
  CASE_VI_GFX9(TTMP1) \
  CASE_VI_GFX9(TTMP2) \
  CASE_VI_GFX9(TTMP3) \
  CASE_VI_GFX9(TTMP4) \
  CASE_VI_GFX9(TTMP5) \
  CASE_VI_GFX9(TTMP6) \
  CASE_VI_GFX9(TTMP7) \
  CASE_VI_GFX9(TTMP8) \
  CASE_VI_GFX9(TTMP9) \
  CASE_VI_GFX9(TTMP10) \
  CASE_VI_GFX9(TTMP11) \
  CASE_VI_GFX9(TTMP12) \
  CASE_VI_GFX9(TTMP13) \
  CASE_VI_GFX9(TTMP14) \
  CASE_VI_GFX9(TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1) \
  CASE_VI_GFX9(TTMP2_TTMP3) \
  CASE_VI_GFX9(TTMP4_TTMP5) \
  CASE_VI_GFX9(TTMP6_TTMP7) \
  CASE_VI_GFX9(TTMP8_TTMP9) \
  CASE_VI_GFX9(TTMP10_TTMP11) \
  CASE_VI_GFX9(TTMP12_TTMP13) \
  CASE_VI_GFX9(TTMP14_TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9(node) \
  case node: return isGFX9(STI) ? node##_gfx9 : node##_vi;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9(node) case node##_vi: case node##_gfx9: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9
#undef MAP_REG2REG

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
    return 64;
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}
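
// E.g. the integer 64 and DoubleToBits(0.5) are inlinable as 64-bit
// operands, while an arbitrary constant such as 0.1 must be emitted as a
// literal.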

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}
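
// E.g. 0x40004000 (<2.0, 2.0> as packed halves) is inlinable because both
// halves match, while 0x40003C00 (<1.0, 2.0>) is not.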

bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (isGCN3Encoding(ST))
    return ByteOffset;
  return ByteOffset >> 2;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return isGCN3Encoding(ST) ?
      isUInt<20>(EncodedOffset) : isUInt<8>(EncodedOffset);
}
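
// E.g. a byte offset of 512 encodes as 512 >> 2 == 128 on SI/CI (dword
// units, 8-bit field) and as 512 on GCN3 (byte units, 20-bit field).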

} // end namespace AMDGPU

} // end namespace llvm

namespace llvm {
namespace AMDGPU {

AMDGPUAS getAMDGPUAS(Triple T) {
  AMDGPUAS AS;
  AS.FLAT_ADDRESS = 0;
  AS.PRIVATE_ADDRESS = 5;
  AS.REGION_ADDRESS = 2;
  return AS;
}

AMDGPUAS getAMDGPUAS(const TargetMachine &M) {
  return getAMDGPUAS(M.getTargetTriple());
}

AMDGPUAS getAMDGPUAS(const Module &M) {
  return getAMDGPUAS(Triple(M.getTargetTriple()));
}

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

} // namespace AMDGPU
} // end namespace llvm