//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;
27 class AMDGPUAsmBackend : public MCAsmBackend {
29 AMDGPUAsmBackend(const Target &T)
32 unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; };
34 void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
35 const MCValue &Target, MutableArrayRef<char> Data,
36 uint64_t Value, bool IsResolved) const override;
37 bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
38 const MCRelaxableFragment *DF,
39 const MCAsmLayout &Layout) const override {
42 void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
43 MCInst &Res) const override {
44 llvm_unreachable("Not implemented");
46 bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
48 unsigned getMinimumNopSize() const override;
49 bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
51 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
54 } //End anonymous namespace
56 static unsigned getFixupKindNumBytes(unsigned Kind) {
58 case AMDGPU::fixup_si_sopp_br:
74 llvm_unreachable("Unknown fixup kind!");
78 static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
80 int64_t SignedValue = static_cast<int64_t>(Value);
82 switch (static_cast<unsigned>(Fixup.getKind())) {
83 case AMDGPU::fixup_si_sopp_br: {
84 int64_t BrImm = (SignedValue - 4) / 4;
86 if (Ctx && !isInt<16>(BrImm))
87 Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");
99 llvm_unreachable("unhandled fixup kind");
103 void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
104 const MCValue &Target,
105 MutableArrayRef<char> Data, uint64_t Value,
106 bool IsResolved) const {
107 Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
109 return; // Doesn't change encoding.
111 MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
113 // Shift the value into position.
114 Value <<= Info.TargetOffset;
116 unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
117 uint32_t Offset = Fixup.getOffset();
118 assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
120 // For each byte of the fragment that the fixup touches, mask in the bits from
122 for (unsigned i = 0; i != NumBytes; ++i)
123 Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
126 const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
127 MCFixupKind Kind) const {
128 const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
129 // name offset bits flags
130 { "fixup_si_sopp_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
133 if (Kind < FirstTargetFixupKind)
134 return MCAsmBackend::getFixupKindInfo(Kind);
136 return Infos[Kind - FirstTargetFixupKind];
139 unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
143 bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
144 // If the count is not 4-byte aligned, we must be writing data into the text
145 // section (otherwise we have unaligned instructions, and thus have far
146 // bigger problems), so just write zeros instead.
147 OW->WriteZeros(Count % 4);
149 // We are properly aligned, so write NOPs as requested.
152 // FIXME: R600 support.
154 const uint32_t Encoded_S_NOP_0 = 0xbf800000;
156 for (uint64_t I = 0; I != Count; ++I)
157 OW->write32(Encoded_S_NOP_0);
//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//
168 class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
170 bool HasRelocationAddend;
171 uint8_t OSABI = ELF::ELFOSABI_NONE;
174 ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
175 AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
176 HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {
177 switch (TT.getOS()) {
179 OSABI = ELF::ELFOSABI_AMDGPU_HSA;
182 OSABI = ELF::ELFOSABI_AMDGPU_PAL;
185 OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
192 std::unique_ptr<MCObjectWriter>
193 createObjectWriter(raw_pwrite_stream &OS) const override {
194 return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend, OS);
198 } // end anonymous namespace
200 MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
201 const MCSubtargetInfo &STI,
202 const MCRegisterInfo &MRI,
203 const MCTargetOptions &Options) {
204 // Use 64-bit ELF for amdgcn
205 return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple());