//===-- RISCVAsmBackend.cpp - RISCV Assembler Backend ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/RISCVFixupKinds.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
29 class RISCVAsmBackend : public MCAsmBackend {
30 const MCSubtargetInfo &STI;
35 RISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI, bool Is64Bit)
36 : MCAsmBackend(support::little), STI(STI), OSABI(OSABI),
38 ~RISCVAsmBackend() override {}
40 // Generate diff expression relocations if the relax feature is enabled,
41 // otherwise it is safe for the assembler to calculate these internally.
42 bool requiresDiffExpressionRelocations() const override {
43 return STI.getFeatureBits()[RISCV::FeatureRelax];
45 void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
46 const MCValue &Target, MutableArrayRef<char> Data,
47 uint64_t Value, bool IsResolved,
48 const MCSubtargetInfo *STI) const override;
50 std::unique_ptr<MCObjectTargetWriter>
51 createObjectTargetWriter() const override;
53 // If linker relaxation is enabled, always emit relocations even if the fixup
54 // can be resolved. This is necessary for correctness as offsets may change
56 bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
57 const MCValue &Target) override {
58 return STI.getFeatureBits()[RISCV::FeatureRelax];
61 bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
62 const MCRelaxableFragment *DF,
63 const MCAsmLayout &Layout) const override {
64 llvm_unreachable("Handled by fixupNeedsRelaxationAdvanced");
67 bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, bool Resolved,
69 const MCRelaxableFragment *DF,
70 const MCAsmLayout &Layout,
71 const bool WasForced) const override;
73 unsigned getNumFixupKinds() const override {
74 return RISCV::NumTargetFixupKinds;
77 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
78 const static MCFixupKindInfo Infos[] = {
79 // This table *must* be in the order that the fixup_* kinds are defined in
82 // name offset bits flags
83 { "fixup_riscv_hi20", 12, 20, 0 },
84 { "fixup_riscv_lo12_i", 20, 12, 0 },
85 { "fixup_riscv_lo12_s", 0, 32, 0 },
86 { "fixup_riscv_pcrel_hi20", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
87 { "fixup_riscv_pcrel_lo12_i", 20, 12, MCFixupKindInfo::FKF_IsPCRel },
88 { "fixup_riscv_pcrel_lo12_s", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
89 { "fixup_riscv_jal", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
90 { "fixup_riscv_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
91 { "fixup_riscv_rvc_jump", 2, 11, MCFixupKindInfo::FKF_IsPCRel },
92 { "fixup_riscv_rvc_branch", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
93 { "fixup_riscv_call", 0, 64, MCFixupKindInfo::FKF_IsPCRel },
94 { "fixup_riscv_relax", 0, 0, 0 }
96 static_assert((array_lengthof(Infos)) == RISCV::NumTargetFixupKinds,
97 "Not all fixup kinds added to Infos array");
99 if (Kind < FirstTargetFixupKind)
100 return MCAsmBackend::getFixupKindInfo(Kind);
102 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
104 return Infos[Kind - FirstTargetFixupKind];
107 bool mayNeedRelaxation(const MCInst &Inst,
108 const MCSubtargetInfo &STI) const override;
109 unsigned getRelaxedOpcode(unsigned Op) const;
111 void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
112 MCInst &Res) const override;
115 bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
119 bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup,
122 const MCRelaxableFragment *DF,
123 const MCAsmLayout &Layout,
124 const bool WasForced) const {
125 // Return true if the symbol is actually unresolved.
126 // Resolved could be always false when shouldForceRelocation return true.
127 // We use !WasForced to indicate that the symbol is unresolved and not forced
128 // by shouldForceRelocation.
129 if (!Resolved && !WasForced)
132 int64_t Offset = int64_t(Value);
133 switch ((unsigned)Fixup.getKind()) {
136 case RISCV::fixup_riscv_rvc_branch:
137 // For compressed branch instructions the immediate must be
138 // in the range [-256, 254].
139 return Offset > 254 || Offset < -256;
140 case RISCV::fixup_riscv_rvc_jump:
141 // For compressed jump instructions the immediate must be
142 // in the range [-2048, 2046].
143 return Offset > 2046 || Offset < -2048;
147 void RISCVAsmBackend::relaxInstruction(const MCInst &Inst,
148 const MCSubtargetInfo &STI,
150 // TODO: replace this with call to auto generated uncompressinstr() function.
151 switch (Inst.getOpcode()) {
153 llvm_unreachable("Opcode not expected!");
155 // c.beqz $rs1, $imm -> beq $rs1, X0, $imm.
156 Res.setOpcode(RISCV::BEQ);
157 Res.addOperand(Inst.getOperand(0));
158 Res.addOperand(MCOperand::createReg(RISCV::X0));
159 Res.addOperand(Inst.getOperand(1));
162 // c.bnez $rs1, $imm -> bne $rs1, X0, $imm.
163 Res.setOpcode(RISCV::BNE);
164 Res.addOperand(Inst.getOperand(0));
165 Res.addOperand(MCOperand::createReg(RISCV::X0));
166 Res.addOperand(Inst.getOperand(1));
169 // c.j $imm -> jal X0, $imm.
170 Res.setOpcode(RISCV::JAL);
171 Res.addOperand(MCOperand::createReg(RISCV::X0));
172 Res.addOperand(Inst.getOperand(0));
175 // c.jal $imm -> jal X1, $imm.
176 Res.setOpcode(RISCV::JAL);
177 Res.addOperand(MCOperand::createReg(RISCV::X1));
178 Res.addOperand(Inst.getOperand(0));
183 // Given a compressed control flow instruction this function returns
184 // the expanded instruction.
185 unsigned RISCVAsmBackend::getRelaxedOpcode(unsigned Op) const {
194 case RISCV::C_JAL: // fall through.
199 bool RISCVAsmBackend::mayNeedRelaxation(const MCInst &Inst,
200 const MCSubtargetInfo &STI) const {
201 return getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode();
204 bool RISCVAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
205 bool HasStdExtC = STI.getFeatureBits()[RISCV::FeatureStdExtC];
206 unsigned MinNopLen = HasStdExtC ? 2 : 4;
208 if ((Count % MinNopLen) != 0)
211 // The canonical nop on RISC-V is addi x0, x0, 0.
212 uint64_t Nop32Count = Count / 4;
213 for (uint64_t i = Nop32Count; i != 0; --i)
214 OS.write("\x13\0\0\0", 4);
216 // The canonical nop on RVC is c.nop.
218 uint64_t Nop16Count = (Count - Nop32Count * 4) / 2;
219 for (uint64_t i = Nop16Count; i != 0; --i)
220 OS.write("\x01\0", 2);
226 static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
228 unsigned Kind = Fixup.getKind();
231 llvm_unreachable("Unknown fixup kind!");
237 case RISCV::fixup_riscv_lo12_i:
238 case RISCV::fixup_riscv_pcrel_lo12_i:
239 return Value & 0xfff;
240 case RISCV::fixup_riscv_lo12_s:
241 case RISCV::fixup_riscv_pcrel_lo12_s:
242 return (((Value >> 5) & 0x7f) << 25) | ((Value & 0x1f) << 7);
243 case RISCV::fixup_riscv_hi20:
244 case RISCV::fixup_riscv_pcrel_hi20:
245 // Add 1 if bit 11 is 1, to compensate for low 12 bits being negative.
246 return ((Value + 0x800) >> 12) & 0xfffff;
247 case RISCV::fixup_riscv_jal: {
248 if (!isInt<21>(Value))
249 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
251 Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
252 // Need to produce imm[19|10:1|11|19:12] from the 21-bit Value.
253 unsigned Sbit = (Value >> 20) & 0x1;
254 unsigned Hi8 = (Value >> 12) & 0xff;
255 unsigned Mid1 = (Value >> 11) & 0x1;
256 unsigned Lo10 = (Value >> 1) & 0x3ff;
258 // Inst{30-21} = Lo10;
260 // Inst{19-12} = Hi8;
261 Value = (Sbit << 19) | (Lo10 << 9) | (Mid1 << 8) | Hi8;
264 case RISCV::fixup_riscv_branch: {
265 if (!isInt<13>(Value))
266 Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
268 Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
269 // Need to extract imm[12], imm[10:5], imm[4:1], imm[11] from the 13-bit
271 unsigned Sbit = (Value >> 12) & 0x1;
272 unsigned Hi1 = (Value >> 11) & 0x1;
273 unsigned Mid6 = (Value >> 5) & 0x3f;
274 unsigned Lo4 = (Value >> 1) & 0xf;
276 // Inst{30-25} = Mid6;
279 Value = (Sbit << 31) | (Mid6 << 25) | (Lo4 << 8) | (Hi1 << 7);
282 case RISCV::fixup_riscv_call: {
283 // Jalr will add UpperImm with the sign-extended 12-bit LowerImm,
284 // we need to add 0x800ULL before extract upper bits to reflect the
285 // effect of the sign extension.
286 uint64_t UpperImm = (Value + 0x800ULL) & 0xfffff000ULL;
287 uint64_t LowerImm = Value & 0xfffULL;
288 return UpperImm | ((LowerImm << 20) << 32);
290 case RISCV::fixup_riscv_rvc_jump: {
291 // Need to produce offset[11|4|9:8|10|6|7|3:1|5] from the 11-bit Value.
292 unsigned Bit11 = (Value >> 11) & 0x1;
293 unsigned Bit4 = (Value >> 4) & 0x1;
294 unsigned Bit9_8 = (Value >> 8) & 0x3;
295 unsigned Bit10 = (Value >> 10) & 0x1;
296 unsigned Bit6 = (Value >> 6) & 0x1;
297 unsigned Bit7 = (Value >> 7) & 0x1;
298 unsigned Bit3_1 = (Value >> 1) & 0x7;
299 unsigned Bit5 = (Value >> 5) & 0x1;
300 Value = (Bit11 << 10) | (Bit4 << 9) | (Bit9_8 << 7) | (Bit10 << 6) |
301 (Bit6 << 5) | (Bit7 << 4) | (Bit3_1 << 1) | Bit5;
304 case RISCV::fixup_riscv_rvc_branch: {
305 // Need to produce offset[8|4:3], [reg 3 bit], offset[7:6|2:1|5]
306 unsigned Bit8 = (Value >> 8) & 0x1;
307 unsigned Bit7_6 = (Value >> 6) & 0x3;
308 unsigned Bit5 = (Value >> 5) & 0x1;
309 unsigned Bit4_3 = (Value >> 3) & 0x3;
310 unsigned Bit2_1 = (Value >> 1) & 0x3;
311 Value = (Bit8 << 12) | (Bit4_3 << 10) | (Bit7_6 << 5) | (Bit2_1 << 3) |
319 void RISCVAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
320 const MCValue &Target,
321 MutableArrayRef<char> Data, uint64_t Value,
323 const MCSubtargetInfo *STI) const {
324 MCContext &Ctx = Asm.getContext();
325 MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
327 return; // Doesn't change encoding.
328 // Apply any target-specific value adjustments.
329 Value = adjustFixupValue(Fixup, Value, Ctx);
331 // Shift the value into position.
332 Value <<= Info.TargetOffset;
334 unsigned Offset = Fixup.getOffset();
335 unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
337 assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
339 // For each byte of the fragment that the fixup touches, mask in the
340 // bits from the fixup value.
341 for (unsigned i = 0; i != NumBytes; ++i) {
342 Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
346 std::unique_ptr<MCObjectTargetWriter>
347 RISCVAsmBackend::createObjectTargetWriter() const {
348 return createRISCVELFObjectWriter(OSABI, Is64Bit);
351 } // end anonymous namespace
353 MCAsmBackend *llvm::createRISCVAsmBackend(const Target &T,
354 const MCSubtargetInfo &STI,
355 const MCRegisterInfo &MRI,
356 const MCTargetOptions &Options) {
357 const Triple &TT = STI.getTargetTriple();
358 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
359 return new RISCVAsmBackend(STI, OSABI, TT.isArch64Bit());