//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
11 #include "MCTargetDesc/AMDGPUFixupKinds.h"
12 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
13 #include "llvm/ADT/StringRef.h"
14 #include "llvm/MC/MCAsmBackend.h"
15 #include "llvm/MC/MCAssembler.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCFixupKindInfo.h"
18 #include "llvm/MC/MCObjectWriter.h"
19 #include "llvm/MC/MCValue.h"
20 #include "llvm/Support/TargetRegistry.h"
26 class AMDGPUAsmBackend : public MCAsmBackend {
28 AMDGPUAsmBackend(const Target &T)
31 unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; };
33 void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
34 const MCValue &Target, MutableArrayRef<char> Data,
35 uint64_t Value, bool IsResolved) const override;
36 bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
37 const MCRelaxableFragment *DF,
38 const MCAsmLayout &Layout) const override {
41 void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
42 MCInst &Res) const override {
43 llvm_unreachable("Not implemented");
45 bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
46 bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
48 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
51 } //End anonymous namespace
53 static unsigned getFixupKindNumBytes(unsigned Kind) {
55 case AMDGPU::fixup_si_sopp_br:
71 llvm_unreachable("Unknown fixup kind!");
75 static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
77 int64_t SignedValue = static_cast<int64_t>(Value);
79 switch (Fixup.getKind()) {
80 case AMDGPU::fixup_si_sopp_br: {
81 int64_t BrImm = (SignedValue - 4) / 4;
83 if (Ctx && !isInt<16>(BrImm))
84 Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");
96 llvm_unreachable("unhandled fixup kind");
100 void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
101 const MCValue &Target,
102 MutableArrayRef<char> Data, uint64_t Value,
103 bool IsResolved) const {
104 Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
106 return; // Doesn't change encoding.
108 MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
110 // Shift the value into position.
111 Value <<= Info.TargetOffset;
113 unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
114 uint32_t Offset = Fixup.getOffset();
115 assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
117 // For each byte of the fragment that the fixup touches, mask in the bits from
119 for (unsigned i = 0; i != NumBytes; ++i)
120 Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
123 const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
124 MCFixupKind Kind) const {
125 const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
126 // name offset bits flags
127 { "fixup_si_sopp_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
130 if (Kind < FirstTargetFixupKind)
131 return MCAsmBackend::getFixupKindInfo(Kind);
133 return Infos[Kind - FirstTargetFixupKind];
136 bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
137 // If the count is not 4-byte aligned, we must be writing data into the text
138 // section (otherwise we have unaligned instructions, and thus have far
139 // bigger problems), so just write zeros instead.
140 OW->WriteZeros(Count % 4);
142 // We are properly aligned, so write NOPs as requested.
145 // FIXME: R600 support.
147 const uint32_t Encoded_S_NOP_0 = 0xbf800000;
149 for (uint64_t I = 0; I != Count; ++I)
150 OW->write32(Encoded_S_NOP_0);
//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//
161 class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
163 bool HasRelocationAddend;
166 ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
167 AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
168 HasRelocationAddend(TT.getOS() == Triple::AMDHSA) { }
170 MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
171 return createAMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend, OS);
175 } // end anonymous namespace
177 MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
178 const MCRegisterInfo &MRI,
179 const Triple &TT, StringRef CPU,
180 const MCTargetOptions &Options) {
181 // Use 64-bit ELF for amdgcn
182 return new ELFAMDGPUAsmBackend(T, TT);