//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;

protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? support::little : support::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                              Offset (bits) Size (bits) Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_tlsdesc_call", 0, 0, 0}};

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  void HandleAssemblerFlag(MCAssemblerFlag Flag) {}

  unsigned getPointerSize() const { return 8; }

  unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_NONE:
  case AArch64::fixup_aarch64_tlsdesc_call:
    return 0;

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}
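
// ADR and ADRP split their 21-bit immediate across the instruction word: the
// low two bits (immlo) live in bits 30:29 and the high 19 bits (immhi) in
// bits 23:5, which is exactly the packing AdrImmBits() produces. For example,
// Value = 0x1005 gives lo2 = 1 and hi19 = 0x401, i.e. (0x401 << 5) | (1 << 29).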

static bool valueFitsIntoFixupKind(unsigned Kind, uint64_t Value) {
  unsigned NumBits;
  switch (Kind) {
  case FK_Data_1: NumBits = 8; break;
  case FK_Data_2: NumBits = 16; break;
  case FK_Data_4: NumBits = 32; break;
  case FK_Data_8: NumBits = 64; break;
  default: return true;
  }
  return isUIntN(NumBits, Value) ||
         isIntN(NumBits, static_cast<int64_t>(Value));
}
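
// A data fixup is in range if the value fits as either an unsigned or a
// signed N-bit quantity, so for FK_Data_1 both 0xff and -1 are accepted while
// 0x100 is rejected.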

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF())
      return AdrImmBits(Value & 0x1fffffULL);
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
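  // The scaled load/store cases below encode the byte offset divided by the
  // access size, so e.g. an offset of 0x10 with
  // fixup_aarch64_ldst_imm12_scale8 encodes imm12 = 2; misaligned offsets are
  // diagnosed rather than silently truncated.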
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value >= 0x2000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value >= 0x4000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value >= 0x8000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value >= 0x10000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
      // ever be resolved in the assembler.
      Ctx.reportError(Fixup.getLoc(),
                      "relocation for a thread-local variable points to an "
                      "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    }
    else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    }
    else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    if (!valueFitsIntoFixupKind(Fixup.getTargetKind(), Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value too large for data type!");
    LLVM_FALLTHROUGH;
  case FK_NONE:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

Optional<MCFixupKind> AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (TheTriple.isOSBinFormatELF() && Name == "R_AARCH64_NONE")
    return FK_NONE;
  return MCAsmBackend::getFixupKind(Name);
}
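
// FK_NONE backs the `.reloc` directive's R_AARCH64_NONE: the fixup occupies
// no bits in the output, but shouldForceRelocation() still emits the
// relocation for it.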

/// getFixupKindContainereSizeInBytes - The number of bytes of the
/// container involved in big endian, or 0 if the item is little endian.
unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
  if (Endian == support::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_tlsdesc_call:
  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FulleSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FulleSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    // In a little-endian instruction word, bit 30 is bit 6 of byte 3.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                          const MCSubtargetInfo &STI) const {
  return false;
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
                                         const MCSubtargetInfo &STI,
                                         MCInst &Res) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    support::endian::write<uint32_t>(OS, 0xd503201f, Endian);
  return true;
}
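
// 0xd503201f is the AArch64 NOP encoding; a request to pad 7 bytes, for
// example, emits three zero bytes followed by a single NOP.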

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target) {
  unsigned Kind = Fixup.getKind();
  if (Kind == FK_NONE)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
  // LDR GOT relocations need a relocation
  if (Kind == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
      SymLoc == AArch64MCExpr::VK_GOT)
    return true;
  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// unwind info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};
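
// For example, a frame-based function that saves x19/x20 and d8/d9 would be
// described by UNWIND_ARM64_MODE_FRAME | UNWIND_ARM64_FRAME_X19_X20_PAIR |
// UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x04000101.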

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;
  bool IsILP32;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }
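
  // For example, a frameless function with a 64-byte stack encodes
  // (64 / 16) << 12 = 0x4000, giving a final encoding of
  // UNWIND_ARM64_MODE_FRAMELESS | 0x4000 = 0x02004000.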

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI, bool IsILP32)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    if (IsILP32)
      return createAArch64MachObjectWriter(
          MachO::CPU_TYPE_ARM64_32, MachO::CPU_SUBTYPE_ARM64_32_V8, true);
    return createAArch64MachObjectWriter(MachO::CPU_TYPE_ARM64,
                                         MachO::CPU_SUBTYPE_ARM64_ALL, false);
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(
      ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fallback on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        assert(XReg == AArch64::FP && "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {

class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter();
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    const bool IsILP32 = TheTriple.isArch32Bit();
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI, IsILP32);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}