//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The SI code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;
43 class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
44 const MCRegisterInfo &MRI;
46 /// \brief Encode an fp or int literal
47 uint32_t getLitEncoding(const MCOperand &MO, const MCOperandInfo &OpInfo,
48 const MCSubtargetInfo &STI) const;
51 SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
53 : AMDGPUMCCodeEmitter(mcii), MRI(mri) {}
54 SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
55 SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;
57 /// \brief Encode the instruction and write it to the OS.
58 void encodeInstruction(const MCInst &MI, raw_ostream &OS,
59 SmallVectorImpl<MCFixup> &Fixups,
60 const MCSubtargetInfo &STI) const override;
62 /// \returns the encoding for an MCOperand.
63 uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
64 SmallVectorImpl<MCFixup> &Fixups,
65 const MCSubtargetInfo &STI) const override;
67 /// \brief Use a fixup to encode the simm16 field for SOPP branch
69 unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
70 SmallVectorImpl<MCFixup> &Fixups,
71 const MCSubtargetInfo &STI) const override;
74 } // end anonymous namespace
76 MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
77 const MCRegisterInfo &MRI,
79 return new SIMCCodeEmitter(MCII, MRI, Ctx);
// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
//
// The SI hardware encodes the integers 0..64 as operand values 128..192 and
// -16..-1 as operand values 193..208; anything else needs a literal dword.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  // Not representable as an inline integer constant.
  return 0;
}
95 static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
96 uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
100 if (Val == 0x3800) // 0.5
103 if (Val == 0xB800) // -0.5
106 if (Val == 0x3C00) // 1.0
109 if (Val == 0xBC00) // -1.0
112 if (Val == 0x4000) // 2.0
115 if (Val == 0xC000) // -2.0
118 if (Val == 0x4400) // 4.0
121 if (Val == 0xC400) // -4.0
124 if (Val == 0x3118 && // 1.0 / (2.0 * pi)
125 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
131 static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
132 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
136 if (Val == FloatToBits(0.5f))
139 if (Val == FloatToBits(-0.5f))
142 if (Val == FloatToBits(1.0f))
145 if (Val == FloatToBits(-1.0f))
148 if (Val == FloatToBits(2.0f))
151 if (Val == FloatToBits(-2.0f))
154 if (Val == FloatToBits(4.0f))
157 if (Val == FloatToBits(-4.0f))
160 if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
161 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
167 static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
168 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
172 if (Val == DoubleToBits(0.5))
175 if (Val == DoubleToBits(-0.5))
178 if (Val == DoubleToBits(1.0))
181 if (Val == DoubleToBits(-1.0))
184 if (Val == DoubleToBits(2.0))
187 if (Val == DoubleToBits(-2.0))
190 if (Val == DoubleToBits(4.0))
193 if (Val == DoubleToBits(-4.0))
196 if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
197 STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
203 uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
204 const MCOperandInfo &OpInfo,
205 const MCSubtargetInfo &STI) const {
208 const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
215 assert(!MO.isFPImm());
223 switch (AMDGPU::getOperandSize(OpInfo)) {
225 return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
227 return getLit64Encoding(static_cast<uint64_t>(Imm), STI);
229 return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
231 llvm_unreachable("invalid operand size");
235 void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
236 SmallVectorImpl<MCFixup> &Fixups,
237 const MCSubtargetInfo &STI) const {
238 verifyInstructionPredicates(MI,
239 computeAvailableFeatures(STI.getFeatureBits()));
241 uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI);
242 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
243 unsigned bytes = Desc.getSize();
245 for (unsigned i = 0; i < bytes; i++) {
246 OS.write((uint8_t) ((Encoding >> (8 * i)) & 0xff));
252 // Check for additional literals in SRC0/1/2 (Op 1/2/3)
253 for (unsigned i = 0, e = MI.getNumOperands(); i < e; ++i) {
255 // Check if this operand should be encoded as [SV]Src
256 if (!AMDGPU::isSISrcOperand(Desc, i))
259 // Is this operand a literal immediate?
260 const MCOperand &Op = MI.getOperand(i);
261 if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255)
269 else if (Op.isExpr()) {
270 if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
273 } else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
274 llvm_unreachable("Must be immediate or expr");
276 for (unsigned j = 0; j < 4; j++) {
277 OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
280 // Only one literal value allowed
285 unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
286 SmallVectorImpl<MCFixup> &Fixups,
287 const MCSubtargetInfo &STI) const {
288 const MCOperand &MO = MI.getOperand(OpNo);
291 const MCExpr *Expr = MO.getExpr();
292 MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
293 Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
297 return getMachineOpValue(MI, MO, Fixups, STI);
300 uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
302 SmallVectorImpl<MCFixup> &Fixups,
303 const MCSubtargetInfo &STI) const {
305 return MRI.getEncodingValue(MO.getReg());
307 if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
308 const auto *Expr = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
310 if (Expr && Expr->getSymbol().isExternal())
314 Fixups.push_back(MCFixup::create(4, MO.getExpr(), Kind, MI.getLoc()));
317 // Figure out the operand number, needed for isSrcOperand check
319 for (unsigned e = MI.getNumOperands(); OpNo < e; ++OpNo) {
320 if (&MO == &MI.getOperand(OpNo))
324 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
325 if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
326 uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
327 if (Enc != ~0U && (Enc != 255 || Desc.getSize() == 4))
330 } else if (MO.isImm())
333 llvm_unreachable("Encoding of this operand type is not supported yet.");