//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUBaseInfo.h"
#include "SIDefines.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRINFO_NAMED_OPS
namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}
/// \brief Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}
/// \brief Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
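// For example, getBitMask(4, 3) == 0x70, packBits(5, 0, 4, 3) == 0x50, and
// unpackBits(0x50, 4, 3) == 5.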
/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth() { return 4; }

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }
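// Together these helpers describe the s_waitcnt immediate layout: vmcnt in
// bits [3:0] (extended by bits [15:14] on gfx9), expcnt in bits [6:4], and
// lgkmcnt in bits [11:8].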
} // end namespace anonymous

namespace llvm {

static cl::opt<bool> EnablePackedInlinableLiterals(
    "enable-packed-inlinable-literals",
    cl::desc("Enable packed inlinable literals (v2f16, v2i16)"),
    cl::init(false));

namespace AMDGPU {

namespace IsaInfo {
IsaVersion getIsaVersion(const FeatureBitset &Features) {
  if (Features.test(FeatureISAVersion7_0_0))
    return {7, 0, 0};
  if (Features.test(FeatureISAVersion7_0_1))
    return {7, 0, 1};
  if (Features.test(FeatureISAVersion7_0_2))
    return {7, 0, 2};

  if (Features.test(FeatureISAVersion8_0_0))
    return {8, 0, 0};
  if (Features.test(FeatureISAVersion8_0_1))
    return {8, 0, 1};
  if (Features.test(FeatureISAVersion8_0_2))
    return {8, 0, 2};
  if (Features.test(FeatureISAVersion8_0_3))
    return {8, 0, 3};
  if (Features.test(FeatureISAVersion8_0_4))
    return {8, 0, 4};
  if (Features.test(FeatureISAVersion8_1_0))
    return {8, 1, 0};

  if (Features.test(FeatureISAVersion9_0_0))
    return {9, 0, 0};
  if (Features.test(FeatureISAVersion9_0_1))
    return {9, 0, 1};

  if (!Features.test(FeatureGCN) || Features.test(FeatureSouthernIslands))
    return {0, 0, 0};
  return {7, 0, 0};
}
unsigned getWavefrontSize(const FeatureBitset &Features) {
  if (Features.test(FeatureWavefrontSize16))
    return 16;
  if (Features.test(FeatureWavefrontSize32))
    return 32;

  return 64;
}
unsigned getLocalMemorySize(const FeatureBitset &Features) {
  if (Features.test(FeatureLocalMemorySize32768))
    return 32768;
  if (Features.test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}
unsigned getEUsPerCU(const FeatureBitset &Features) {
  return 4;
}
unsigned getMaxWorkGroupsPerCU(const FeatureBitset &Features,
                               unsigned FlatWorkGroupSize) {
  if (!Features.test(FeatureGCN))
    return 8;
  unsigned N = getWavesPerWorkGroup(Features, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}
unsigned getMaxWavesPerCU(const FeatureBitset &Features) {
  return getMaxWavesPerEU(Features) * getEUsPerCU(Features);
}
unsigned getMaxWavesPerCU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(Features, FlatWorkGroupSize);
}
unsigned getMinWavesPerEU(const FeatureBitset &Features) {
  return 1;
}
unsigned getMaxWavesPerEU(const FeatureBitset &Features) {
  if (!Features.test(FeatureGCN))
    return 8;
  // FIXME: Need to take scratch memory into account.
  return 10;
}
unsigned getMaxWavesPerEU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(Features, FlatWorkGroupSize),
                 getEUsPerCU(Features)) / getEUsPerCU(Features);
}
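// For example, a 256-item flat work group on a 64-lane wavefront is 4 waves,
// and alignTo(4, 4) / 4 == 1 wave per EU from that group.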
unsigned getMinFlatWorkGroupSize(const FeatureBitset &Features) {
  return 1;
}
unsigned getMaxFlatWorkGroupSize(const FeatureBitset &Features) {
  return 2048;
}
unsigned getWavesPerWorkGroup(const FeatureBitset &Features,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(Features)) /
         getWavefrontSize(Features);
}
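// For example, FlatWorkGroupSize == 200 with a wavefront size of 64 rounds up
// to 256 and yields 4 waves.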
unsigned getSGPRAllocGranule(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 16;
  return 8;
}
unsigned getSGPREncodingGranule(const FeatureBitset &Features) {
  return 8;
}
unsigned getTotalNumSGPRs(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 800;
  return 512;
}
unsigned getAddressableNumSGPRs(const FeatureBitset &Features) {
  if (Features.test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 102;
  return 104;
}
unsigned getMinNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(Features))
    return 0;
  unsigned MinNumSGPRs =
      alignDown(getTotalNumSGPRs(Features) / (WavesPerEU + 1),
                getSGPRAllocGranule(Features)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(Features));
}
unsigned getMaxNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(Features);
  unsigned MaxNumSGPRs = alignDown(getTotalNumSGPRs(Features) / WavesPerEU,
                                   getSGPRAllocGranule(Features));
  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(Features);
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}
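// For example, on VI at 10 waves per EU: alignDown(800 / 10, 16) == 80 SGPRs,
// below the addressable limit of 102.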
unsigned getVGPRAllocGranule(const FeatureBitset &Features) {
  return 4;
}
unsigned getVGPREncodingGranule(const FeatureBitset &Features) {
  return getVGPRAllocGranule(Features);
}
unsigned getTotalNumVGPRs(const FeatureBitset &Features) {
  return 256;
}
unsigned getAddressableNumVGPRs(const FeatureBitset &Features) {
  return getTotalNumVGPRs(Features);
}
unsigned getMinNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(Features))
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(Features) / (WavesPerEU + 1),
                getVGPRAllocGranule(Features)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(Features));
}
unsigned getMaxNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(Features) / WavesPerEU,
                                   getVGPRAllocGranule(Features));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(Features);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}
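// For example, at 10 waves per EU: alignDown(256 / 10, 4) == 24 VGPRs.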
} // end namespace IsaInfo
void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const FeatureBitset &Features) {
  IsaInfo::IsaVersion ISA = IsaInfo::getIsaVersion(Features);

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 1;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = ISA.Major;
  Header.amd_machine_version_minor = ISA.Minor;
  Header.amd_machine_version_stepping = ISA.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;
}
MCSection *getHSATextSection(MCContext &Ctx) {
  return Ctx.getELFSection(".hsatext", ELF::SHT_PROGBITS,
                           ELF::SHF_ALLOC | ELF::SHF_WRITE |
                           ELF::SHF_EXECINSTR |
                           ELF::SHF_AMDGPU_HSA_AGENT |
                           ELF::SHF_AMDGPU_HSA_CODE);
}
MCSection *getHSADataGlobalAgentSection(MCContext &Ctx) {
  return Ctx.getELFSection(".hsadata_global_agent", ELF::SHT_PROGBITS,
                           ELF::SHF_ALLOC | ELF::SHF_WRITE |
                           ELF::SHF_AMDGPU_HSA_GLOBAL |
                           ELF::SHF_AMDGPU_HSA_AGENT);
}
MCSection *getHSADataGlobalProgramSection(MCContext &Ctx) {
  return Ctx.getELFSection(".hsadata_global_program", ELF::SHT_PROGBITS,
                           ELF::SHF_ALLOC | ELF::SHF_WRITE |
                           ELF::SHF_AMDGPU_HSA_GLOBAL);
}
MCSection *getHSARodataReadonlyAgentSection(MCContext &Ctx) {
  return Ctx.getELFSection(".hsarodata_readonly_agent", ELF::SHT_PROGBITS,
                           ELF::SHF_ALLOC | ELF::SHF_AMDGPU_HSA_READONLY |
                           ELF::SHF_AMDGPU_HSA_AGENT);
}
bool isGroupSegment(const GlobalValue *GV, AMDGPUAS AS) {
  return GV->getType()->getAddressSpace() == AS.LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV, AMDGPUAS AS) {
  return GV->getType()->getAddressSpace() == AS.GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV, AMDGPUAS AS) {
  return GV->getType()->getAddressSpace() == AS.CONSTANT_ADDRESS;
}
bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}
int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}
std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}
unsigned getVmcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}
unsigned getExpcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth()) - 1;
}
unsigned getWaitcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(), getLgkmcntBitWidth());
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}
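// For ISA major versions below 9 the full mask is 0xf7f; gfx9 adds the high
// vmcnt bits at [15:14], giving 0xcf7f.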
unsigned decodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}
unsigned decodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

void decodeWaitcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}
unsigned encodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}
unsigned encodeWaitcnt(const IsaInfo::IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}
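// For example, for ISA major version 8, encodeWaitcnt(V, 15, 7, 15) == 0xf7f
// (no waits pending) and encodeWaitcnt(V, 0, 0, 0) == 0 (wait for everything).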
unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}
bool isShader(CallingConv::ID cc) {
  switch(cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}
bool isEntryFunctionCC(CallingConv::ID CC) {
  return true;
}
bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}
unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  switch(Reg) {
  default: break;
  case AMDGPU::FLAT_SCR:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_ci : AMDGPU::FLAT_SCR_vi;

  case AMDGPU::FLAT_SCR_LO:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_LO_ci : AMDGPU::FLAT_SCR_LO_vi;

  case AMDGPU::FLAT_SCR_HI:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_HI_ci : AMDGPU::FLAT_SCR_HI_vi;
  }
  return Reg;
}
unsigned mc2PseudoReg(unsigned Reg) {
  switch (Reg) {
  case AMDGPU::FLAT_SCR_ci:
  case AMDGPU::FLAT_SCR_vi:
    return AMDGPU::FLAT_SCR;

  case AMDGPU::FLAT_SCR_LO_ci:
  case AMDGPU::FLAT_SCR_LO_vi:
    return AMDGPU::FLAT_SCR_LO;

  case AMDGPU::FLAT_SCR_HI_ci:
  case AMDGPU::FLAT_SCR_HI_vi:
    return AMDGPU::FLAT_SCR_HI;

  default: return Reg;
  }
}
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return true;
  default:
    return false;
  }
}
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}
// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
    return 64;
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}
unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}
unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi); // 1/(2*pi)
}
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi); // 1/(2*pi)
}
bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}
bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (!EnablePackedInlinableLiterals)
    return false;

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}
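// A packed literal is inlinable only when both halves are the same inlinable
// f16 value, e.g. 0x40004000 (2.0, 2.0) qualifies but 0x40003C00 (2.0, 1.0)
// does not.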
bool isUniformMMO(const MachineMemOperand *MMO) {
  const Value *Ptr = MMO->getValue();
  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}
int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (isSI(ST) || isCI(ST))
    return ByteOffset >> 2;

  return ByteOffset;
}
bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return isSI(ST) || isCI(ST) ? isUInt<8>(EncodedOffset) :
                                isUInt<20>(EncodedOffset);
}
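// On SI/CI the SMRD immediate is a dword offset limited to 8 bits, so e.g.
// ByteOffset == 1020 encodes as 255 and is legal, while 1024 is not; VI
// encodes the offset in bytes and allows up to 20 bits.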
} // end namespace AMDGPU

} // end namespace llvm
// Out-of-line definitions of the AMDGPUAS static members, required for
// ODR uses prior to C++17.
const unsigned AMDGPUAS::MAX_COMMON_ADDRESS;
const unsigned AMDGPUAS::GLOBAL_ADDRESS;
const unsigned AMDGPUAS::LOCAL_ADDRESS;
const unsigned AMDGPUAS::PARAM_D_ADDRESS;
const unsigned AMDGPUAS::PARAM_I_ADDRESS;
const unsigned AMDGPUAS::CONSTANT_BUFFER_0;
const unsigned AMDGPUAS::CONSTANT_BUFFER_1;
const unsigned AMDGPUAS::CONSTANT_BUFFER_2;
const unsigned AMDGPUAS::CONSTANT_BUFFER_3;
const unsigned AMDGPUAS::CONSTANT_BUFFER_4;
const unsigned AMDGPUAS::CONSTANT_BUFFER_5;
const unsigned AMDGPUAS::CONSTANT_BUFFER_6;
const unsigned AMDGPUAS::CONSTANT_BUFFER_7;
const unsigned AMDGPUAS::CONSTANT_BUFFER_8;
const unsigned AMDGPUAS::CONSTANT_BUFFER_9;
const unsigned AMDGPUAS::CONSTANT_BUFFER_10;
const unsigned AMDGPUAS::CONSTANT_BUFFER_11;
const unsigned AMDGPUAS::CONSTANT_BUFFER_12;
const unsigned AMDGPUAS::CONSTANT_BUFFER_13;
const unsigned AMDGPUAS::CONSTANT_BUFFER_14;
const unsigned AMDGPUAS::CONSTANT_BUFFER_15;
const unsigned AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
namespace llvm {
namespace AMDGPU {

AMDGPUAS getAMDGPUAS(Triple T) {
  auto Env = T.getEnvironmentName();
  AMDGPUAS AS;
  if (Env == "amdgiz" || Env == "amdgizcl") {
    AS.FLAT_ADDRESS = 0;
    AS.PRIVATE_ADDRESS = 5;
    AS.REGION_ADDRESS = 4;
  } else {
    AS.FLAT_ADDRESS = 4;
    AS.PRIVATE_ADDRESS = 0;
    AS.REGION_ADDRESS = 5;
  }
  return AS;
}
AMDGPUAS getAMDGPUAS(const TargetMachine &M) {
  return getAMDGPUAS(M.getTargetTriple());
}

AMDGPUAS getAMDGPUAS(const Module &M) {
  return getAMDGPUAS(Triple(M.getTargetTriple()));
}

} // namespace AMDGPU
} // namespace llvm