//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend(support::little) {}

  unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    llvm_unreachable("Not implemented");
  }
  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override {
    return false;
  }

  unsigned getMinimumNopSize() const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // End anonymous namespace

static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}
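
// SOPP branches encode their target as a signed 16-bit immediate counted in
// dwords, relative to the instruction that follows the branch. The fixup value
// is the byte distance from the start of the branch, so the adjustment below
// subtracts the 4-byte branch and divides by 4; e.g. a target 260 bytes ahead
// becomes (260 - 4) / 4 = 64 dwords.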
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (static_cast<unsigned>(Fixup.getKind())) {
  case AMDGPU::fixup_si_sopp_br: {
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}
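
// applyFixup patches the already-encoded instruction bytes in place once the
// fixup's value is known. Fixups that cannot be fully resolved at assembly
// time are left for the object writer, which emits ELF relocations for them.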
void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved,
                                  const MCSubtargetInfo *STI) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}
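
// The simm16 branch operand lives in the low 16 bits of the SOPP encoding, so
// fixup_si_sopp_br patches 16 bits starting at bit offset 0 and is
// PC-relative, as the table below records.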
const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
                                                       MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}
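
// Every GCN instruction is a multiple of 4 bytes, so the shortest nop that can
// be emitted (s_nop) is a single 4-byte dword.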
unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend);
  }
};

} // end anonymous namespace
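
// Entry point used by the MC layer (registered through the TargetRegistry) to
// construct the assembler backend for an AMDGPU target triple.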
MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple());
}