1 //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARMAsmBackend.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMAsmBackendDarwin.h"
13 #include "MCTargetDesc/ARMAsmBackendELF.h"
14 #include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
15 #include "MCTargetDesc/ARMFixupKinds.h"
16 #include "MCTargetDesc/ARMMCTargetDesc.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/BinaryFormat/ELF.h"
19 #include "llvm/BinaryFormat/MachO.h"
20 #include "llvm/MC/MCAsmBackend.h"
21 #include "llvm/MC/MCAssembler.h"
22 #include "llvm/MC/MCContext.h"
23 #include "llvm/MC/MCDirectives.h"
24 #include "llvm/MC/MCELFObjectWriter.h"
25 #include "llvm/MC/MCExpr.h"
26 #include "llvm/MC/MCFixupKindInfo.h"
27 #include "llvm/MC/MCObjectWriter.h"
28 #include "llvm/MC/MCRegisterInfo.h"
29 #include "llvm/MC/MCSectionELF.h"
30 #include "llvm/MC/MCSectionMachO.h"
31 #include "llvm/MC/MCSubtargetInfo.h"
32 #include "llvm/MC/MCValue.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/EndianStream.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/Format.h"
37 #include "llvm/Support/TargetParser.h"
38 #include "llvm/Support/raw_ostream.h"
// Minimal ELF object-target writer for 32-bit ARM: EM_ARM machine type,
// REL-style relocations (no explicit addends).  Interior lines (access
// specifier, class-closing brace) are elided from this view.
42 class ARMELFObjectWriter : public MCELFObjectTargetWriter {
44   ARMELFObjectWriter(uint8_t OSABI)
45       : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
46                                 /*HasRelocationAddend*/ false) {}
48 } // end anonymous namespace
// Returns the descriptor (name, bit offset, bit width, flags) for an ARM
// target fixup kind.  Two parallel tables are kept: InfosLE for
// little-endian and InfosBE for big-endian output; the BE table uses
// different bit offsets because the fixup bits sit at the opposite end of
// the instruction container.  Generic (non-target) kinds are delegated to
// the base MCAsmBackend.
50 const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
51   const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
52       // This table *must* be in the order that the fixup_* kinds are defined in
55       // Name                      Offset (bits) Size (bits)     Flags
56       {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
57       {"fixup_t2_ldst_pcrel_12", 0, 32,
58        MCFixupKindInfo::FKF_IsPCRel |
59            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
60       {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
61       {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
62       {"fixup_t2_pcrel_10", 0, 32,
63        MCFixupKindInfo::FKF_IsPCRel |
64            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
65       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
66       {"fixup_t2_pcrel_9", 0, 32,
67        MCFixupKindInfo::FKF_IsPCRel |
68            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
69       {"fixup_thumb_adr_pcrel_10", 0, 8,
70        MCFixupKindInfo::FKF_IsPCRel |
71            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
72       {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
73       {"fixup_t2_adr_pcrel_12", 0, 32,
74        MCFixupKindInfo::FKF_IsPCRel |
75            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
76       {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
77       {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
78       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
79       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
80       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
81       {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
82       {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
83       {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
84       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
85       {"fixup_arm_thumb_blx", 0, 32,
86        MCFixupKindInfo::FKF_IsPCRel |
87            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
88       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
89       {"fixup_arm_thumb_cp", 0, 8,
90        MCFixupKindInfo::FKF_IsPCRel |
91            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
92       {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
93       // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16
95       {"fixup_arm_movt_hi16", 0, 20, 0},
96       {"fixup_arm_movw_lo16", 0, 20, 0},
97       {"fixup_t2_movt_hi16", 0, 20, 0},
98       {"fixup_t2_movw_lo16", 0, 20, 0},
99       {"fixup_arm_mod_imm", 0, 12, 0},
100       {"fixup_t2_so_imm", 0, 26, 0},
// Big-endian variant of the table above: same kinds in the same order,
// but with non-zero bit offsets where the encoded field does not start at
// the low end of the big-endian container.
102   const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
103       // This table *must* be in the order that the fixup_* kinds are defined in
106       // Name                      Offset (bits) Size (bits)     Flags
107       {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
108       {"fixup_t2_ldst_pcrel_12", 0, 32,
109        MCFixupKindInfo::FKF_IsPCRel |
110            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
111       {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
112       {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
113       {"fixup_t2_pcrel_10", 0, 32,
114        MCFixupKindInfo::FKF_IsPCRel |
115            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
116       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
117       {"fixup_t2_pcrel_9", 0, 32,
118        MCFixupKindInfo::FKF_IsPCRel |
119            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
120       {"fixup_thumb_adr_pcrel_10", 8, 8,
121        MCFixupKindInfo::FKF_IsPCRel |
122            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
123       {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
124       {"fixup_t2_adr_pcrel_12", 0, 32,
125        MCFixupKindInfo::FKF_IsPCRel |
126            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
127       {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
128       {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
129       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
130       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
131       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
132       {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
133       {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
134       {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
135       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
136       {"fixup_arm_thumb_blx", 0, 32,
137        MCFixupKindInfo::FKF_IsPCRel |
138            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
139       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
140       {"fixup_arm_thumb_cp", 8, 8,
141        MCFixupKindInfo::FKF_IsPCRel |
142            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
143       {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
144       // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16
146       {"fixup_arm_movt_hi16", 12, 20, 0},
147       {"fixup_arm_movw_lo16", 12, 20, 0},
148       {"fixup_t2_movt_hi16", 12, 20, 0},
149       {"fixup_t2_movw_lo16", 12, 20, 0},
150       {"fixup_arm_mod_imm", 20, 12, 0},
151       {"fixup_t2_so_imm", 26, 6, 0},
// Non-target kinds fall back to the generic table in MCAsmBackend.
154   if (Kind < FirstTargetFixupKind)
155     return MCAsmBackend::getFixupKindInfo(Kind);
157   assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
// Index the endian-appropriate table by the target-relative kind number.
159   return (Endian == support::little ? InfosLE
160                                     : InfosBE)[Kind - FirstTargetFixupKind];
// Reacts to assembler mode-change directives (MCAssemblerFlag).  NOTE(review):
// the switch body is elided from this view — presumably it toggles the
// ARM/Thumb instruction-set mode; confirm against the full file.
163 void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
// Maps a narrow (16-bit Thumb) opcode to the wider Thumb2/v8-M equivalent
// used when the narrow encoding cannot reach its target.  Returns Op
// unchanged if no relaxed form is available on this subtarget.
// NOTE(review): the switch/case labels selecting which return applies are
// elided from this view — confirm against the full file.
176 unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
177                                          const MCSubtargetInfo &STI) const {
178   bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
179   bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];
185     return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;     // tBcc  -> t2Bcc
187     return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;  // tLDRpci -> t2LDRpci
189     return HasThumb2 ? (unsigned)ARM::t2ADR : Op;     // tADR  -> t2ADR
191     return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op; // tB  -> t2B
// An instruction may need relaxation exactly when a wider relaxed opcode
// exists for it on this subtarget (the return statements are elided from
// this view).
199 bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
200                                       const MCSubtargetInfo &STI) const {
201   if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
// Returns a human-readable reason why the given fixup value forces the
// instruction to be relaxed to a wider encoding, or a null/false result
// when it fits (the in-range return paths are elided from this view).
// The returned string doubles as the diagnostic text reported when
// relaxation is impossible.
206 const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
207                                                     uint64_t Value) const {
208   switch ((unsigned)Fixup.getKind()) {
209   case ARM::fixup_arm_thumb_br: {
210     // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
211     // low bit being an implied zero. There's an implied +4 offset for the
212     // branch, so we adjust the other way here to determine what's
215     // Relax if the value is too big for a (signed) i8.
216     int64_t Offset = int64_t(Value) - 4;
217     if (Offset > 2046 || Offset < -2048)
218       return "out of range pc-relative fixup value";
221   case ARM::fixup_arm_thumb_bcc: {
222     // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
223     // low bit being an implied zero. There's an implied +4 offset for the
224     // branch, so we adjust the other way here to determine what's
227     // Relax if the value is too big for a (signed) i8.
228     int64_t Offset = int64_t(Value) - 4;
229     if (Offset > 254 || Offset < -256)
230       return "out of range pc-relative fixup value";
233   case ARM::fixup_thumb_adr_pcrel_10:
234   case ARM::fixup_arm_thumb_cp: {
235     // If the immediate is negative, greater than 1020, or not a multiple
236     // of four, the wide version of the instruction must be used.
237     int64_t Offset = int64_t(Value) - 4;
239       return "misaligned pc-relative fixup value";
240     else if (Offset > 1020 || Offset < 0)
241       return "out of range pc-relative fixup value";
244   case ARM::fixup_arm_thumb_cb: {
245     // If we have a Thumb CBZ or CBNZ instruction and its target is the next
246     // instruction it is actually out of range for the instruction.
247     // It will be changed to a NOP.
248     int64_t Offset = (Value & ~1);
250       return "will be converted to nop";
254   llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
// A fixup needs relaxation iff reasonForFixupRelaxation produces a
// diagnostic: the non-null const char* converts to true.
259 bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
260                                          const MCRelaxableFragment *DF,
261                                          const MCAsmLayout &Layout) const {
262   return reasonForFixupRelaxation(Fixup, Value);
// Rewrites Inst into its relaxed (wider) form in Res.  Aborts with a
// fatal error if called on an instruction that has no relaxed opcode.
// CBZ/CBNZ relaxed to tHINT (NOP) is the one case where the operand list
// must be rebuilt; every other relaxation keeps the original operands.
265 void ARMAsmBackend::relaxInstruction(const MCInst &Inst,
266                                      const MCSubtargetInfo &STI,
268   unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);
270   // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
271   if (RelaxedOp == Inst.getOpcode()) {
272     SmallString<256> Tmp;
273     raw_svector_ostream OS(Tmp);
274     Inst.dump_pretty(OS);
276     report_fatal_error("unexpected instruction to relax: " + OS.str());
279   // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
280   // have to change the operands too.
281   if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
282       RelaxedOp == ARM::tHINT) {
283     Res.setOpcode(RelaxedOp);
284     Res.addOperand(MCOperand::createImm(0));   // hint value: NOP
285     Res.addOperand(MCOperand::createImm(14));  // predicate: AL (always)
286     Res.addOperand(MCOperand::createReg(0));   // no predicate register
290   // The rest of instructions we're relaxing have the same operands.
291   // We just need to update to the proper opcode.
293   Res.setOpcode(RelaxedOp);
// Fills Count bytes of the output stream with NOP encodings.  Chooses
// 2-byte Thumb or 4-byte ARM NOPs (architectural NOP when the subtarget
// has one, otherwise the classic MOV-to-self encoding).  NOTE(review):
// the branch selecting Thumb vs ARM mode and the leftover-byte handling
// are partially elided from this view.
296 bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
297   const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
298   const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
299   const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
300   const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
302     const uint16_t nopEncoding =
303         hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
304     uint64_t NumNops = Count / 2;
305     for (uint64_t i = 0; i != NumNops; ++i)
306       support::endian::write(OS, nopEncoding, Endian);
312   const uint32_t nopEncoding =
313       hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
314   uint64_t NumNops = Count / 4;
315   for (uint64_t i = 0; i != NumNops; ++i)
316     support::endian::write(OS, nopEncoding, Endian);
317   // FIXME: should this function return false when unable to write exactly
318   // 'Count' bytes with NOP encodings?
321     break; // No leftover bytes to write
// Partial trailing ARM NOP bytes (case labels elided in this view).
329     OS.write("\0\0\xa0", 3);
// Swaps the two 16-bit halfwords of a 32-bit Thumb2 encoding when writing
// little-endian output (Thumb stores the high halfword first in memory).
// The big-endian path / return is elided from this view.
336 static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
337   if (IsLittleEndian) {
338     // Note that the halfwords are stored high first and low second in thumb;
339     // so we need to swap the fixup value here to map properly.
340     uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
341     Swapped |= (Value & 0x0000FFFF) << 16;
// Combines the two 16-bit halves of a 32-bit Thumb2 instruction into a
// single 32-bit value in the memory order required by the target
// endianness: LE places the second half in the high bits, BE the first.
347 static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
348                               bool IsLittleEndian) {
351   if (IsLittleEndian) {
352     Value = (SecondHalf & 0xFFFF) << 16;
353     Value |= (FirstHalf & 0xFFFF);
355     Value = (SecondHalf & 0xFFFF);
356     Value |= (FirstHalf & 0xFFFF) << 16;
// Transforms a resolved fixup value into the bit pattern that must be
// OR'ed into the instruction for the given fixup kind: applies the ARM
// (+8) / Thumb (+4) PC bias, splits immediates into their scattered
// encoding fields, range-checks (reporting errors through Ctx), and for
// 32-bit Thumb2 encodings swaps/joins halfwords to match memory order.
// Returns 0 (via elided early returns) when an error was reported or the
// fixup must be left for the linker.  NOTE(review): many case labels,
// early returns and closing braces are elided from this listing.
362 unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
363                                          const MCFixup &Fixup,
364                                          const MCValue &Target, uint64_t Value,
365                                          bool IsResolved, MCContext &Ctx,
366                                          const MCSubtargetInfo* STI) const {
367   unsigned Kind = Fixup.getKind();
369   // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
370   // and .word relocations they put the Thumb bit into the addend if possible.
371   // Other relocation types don't want this bit though (branches couldn't encode
372   // it if it *was* present, and no other relocations exist) and it can
373   // interfere with checking valid expressions.
374   if (const MCSymbolRefExpr *A = Target.getSymA()) {
375     if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
376         A->getSymbol().isExternal() &&
377         (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
378          Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
379          Kind == ARM::fixup_t2_movt_hi16))
385     Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
// movt/movw: hi16 falls through into the lo16 field-splitting code once
// the value has been shifted (shift elided in this view).
395   case ARM::fixup_arm_movt_hi16:
396     assert(STI != nullptr);
397     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
400   case ARM::fixup_arm_movw_lo16: {
401     unsigned Hi4 = (Value & 0xF000) >> 12;
402     unsigned Lo12 = Value & 0x0FFF;
403     // inst{19-16} = Hi4;
404     // inst{11-0} = Lo12;
405     Value = (Hi4 << 16) | (Lo12);
408   case ARM::fixup_t2_movt_hi16:
409     assert(STI != nullptr);
410     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
413   case ARM::fixup_t2_movw_lo16: {
414     unsigned Hi4 = (Value & 0xF000) >> 12;
415     unsigned i = (Value & 0x800) >> 11;
416     unsigned Mid3 = (Value & 0x700) >> 8;
417     unsigned Lo8 = Value & 0x0FF;
418     // inst{19-16} = Hi4;
420     // inst{14-12} = Mid3;
422     Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
423     return swapHalfWords(Value, Endian == support::little);
425   case ARM::fixup_arm_ldst_pcrel_12:
426     // ARM PC-relative values are offset by 8.
429   case ARM::fixup_t2_ldst_pcrel_12: {
430     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
433     if ((int64_t)Value < 0) {
// Negative offsets use the subtract (U=0) form; magnitude check elided.
438       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
441     Value |= isAdd << 23;
443     // Same addressing mode as fixup_arm_pcrel_10,
444     // but with 16-bit halfwords swapped.
445     if (Kind == ARM::fixup_t2_ldst_pcrel_12)
446       return swapHalfWords(Value, Endian == support::little);
450   case ARM::fixup_arm_adr_pcrel_12: {
451     // ARM PC-relative values are offset by 8.
453     unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
454     if ((int64_t)Value < 0) {
458     if (ARM_AM::getSOImmVal(Value) == -1) {
459       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
462     // Encode the immediate and shift the opcode into place.
463     return ARM_AM::getSOImmVal(Value) | (opc << 21);
466   case ARM::fixup_t2_adr_pcrel_12: {
469     if ((int64_t)Value < 0) {
// i:imm3:imm8 split of the 12-bit Thumb2 ADR immediate.
474     uint32_t out = (opc << 21);
475     out |= (Value & 0x800) << 15;
476     out |= (Value & 0x700) << 4;
477     out |= (Value & 0x0FF);
479     return swapHalfWords(out, Endian == support::little);
482   case ARM::fixup_arm_condbranch:
483   case ARM::fixup_arm_uncondbranch:
484   case ARM::fixup_arm_uncondbl:
485   case ARM::fixup_arm_condbl:
486   case ARM::fixup_arm_blx:
487     // These values don't encode the low two bits since they're always zero.
488     // Offset by 8 just as above.
489     if (const MCSymbolRefExpr *SRE =
490             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
491       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
493     return 0xffffff & ((Value - 8) >> 2);
494   case ARM::fixup_t2_uncondbranch: {
496     if (!isInt<25>(Value)) {
497       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
501     Value >>= 1; // Low bit is not encoded.
// T4 encoding: S:I1:I2:imm10:imm11, with J1/J2 stored inverted vs I1/I2.
504     bool I = Value & 0x800000;
505     bool J1 = Value & 0x400000;
506     bool J2 = Value & 0x200000;
510     out |= I << 26;                 // S bit
511     out |= !J1 << 13;               // J1 bit
512     out |= !J2 << 11;               // J2 bit
513     out |= (Value & 0x1FF800) << 5; // imm6 field
514     out |= (Value & 0x0007FF);      // imm11 field
516     return swapHalfWords(out, Endian == support::little);
518   case ARM::fixup_t2_condbranch: {
520     if (!isInt<21>(Value)) {
521       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
525     Value >>= 1; // Low bit is not encoded.
528     out |= (Value & 0x80000) << 7; // S bit
529     out |= (Value & 0x40000) >> 7; // J2 bit
530     out |= (Value & 0x20000) >> 4; // J1 bit
531     out |= (Value & 0x1F800) << 5; // imm6 field
532     out |= (Value & 0x007FF);      // imm11 field
534     return swapHalfWords(out, Endian == support::little);
536   case ARM::fixup_arm_thumb_bl: {
537     if (!isInt<25>(Value - 4) ||
538         (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
539          !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
540          !STI->getFeatureBits()[ARM::HasV6MOps] &&
541          !isInt<23>(Value - 4))) {
542       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
546     // The value doesn't encode the low bit (always zero) and is offset by
547     // four. The 32-bit immediate value is encoded as
548     //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
549     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
550     // The value is encoded into disjoint bit positions in the destination
551     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
552     // J = either J1 or J2 bit
554     //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
556     // Note that the halfwords are stored high first, low second; so we need
557     // to transpose the fixup value here to map properly.
558     uint32_t offset = (Value - 4) >> 1;
559     uint32_t signBit = (offset & 0x800000) >> 23;
560     uint32_t I1Bit = (offset & 0x400000) >> 22;
561     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
562     uint32_t I2Bit = (offset & 0x200000) >> 21;
563     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
564     uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
565     uint32_t imm11Bits = (offset & 0x000007FF);
567     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
568     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
569                            (uint16_t)imm11Bits);
570     return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
572   case ARM::fixup_arm_thumb_blx: {
573     // The value doesn't encode the low two bits (always zero) and is offset by
574     // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
575     //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
576     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
577     // The value is encoded into disjoint bit positions in the destination
578     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
579     // J = either J1 or J2 bit, 0 = zero.
581     //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
583     // Note that the halfwords are stored high first, low second; so we need
584     // to transpose the fixup value here to map properly.
585     if (Value % 4 != 0) {
586       Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
590     uint32_t offset = (Value - 4) >> 2;
591     if (const MCSymbolRefExpr *SRE =
592             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
593       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
595     uint32_t signBit = (offset & 0x400000) >> 22;
596     uint32_t I1Bit = (offset & 0x200000) >> 21;
597     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
598     uint32_t I2Bit = (offset & 0x100000) >> 20;
599     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
600     uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
601     uint32_t imm10LBits = (offset & 0x3FF);
603     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
604     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
605                            ((uint16_t)imm10LBits) << 1);
606     return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
608   case ARM::fixup_thumb_adr_pcrel_10:
609   case ARM::fixup_arm_thumb_cp:
610     // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
611     // could have an error on our hands.
612     assert(STI != nullptr);
613     if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
614       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
615       if (FixupDiagnostic) {
616         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
620     // Offset by 4, and don't encode the low two bits.
621     return ((Value - 4) >> 2) & 0xff;
622   case ARM::fixup_arm_thumb_cb: {
623     // CB instructions can only branch to offsets in [4, 126] in multiples of 2
624     // so ensure that the raw value LSB is zero and it lies in [2, 130].
625     // An offset of 2 will be relaxed to a NOP.
626     if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
627       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
630     // Offset by 4 and don't encode the lower bit, which is always 0.
631     // FIXME: diagnose if no Thumb2
632     uint32_t Binary = (Value - 4) >> 1;
633     return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
635   case ARM::fixup_arm_thumb_br:
636     // Offset by 4 and don't encode the lower bit, which is always 0.
637     assert(STI != nullptr);
638     if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
639         !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
640       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
641       if (FixupDiagnostic) {
642         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
646     return ((Value - 4) >> 1) & 0x7ff;
647   case ARM::fixup_arm_thumb_bcc:
648     // Offset by 4 and don't encode the lower bit, which is always 0.
649     assert(STI != nullptr);
650     if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
651       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
652       if (FixupDiagnostic) {
653         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
657     return ((Value - 4) >> 1) & 0xff;
658   case ARM::fixup_arm_pcrel_10_unscaled: {
659     Value = Value - 8; // ARM fixups offset by an additional word and don't
660                        // need to adjust for the half-word ordering.
662     if ((int64_t)Value < 0) {
666     // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
668       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
671     Value = (Value & 0xf) | ((Value & 0xf0) << 4);
672     return Value | (isAdd << 23);
674   case ARM::fixup_arm_pcrel_10:
675     Value = Value - 4; // ARM fixups offset by an additional word and don't
676                        // need to adjust for the half-word ordering.
678   case ARM::fixup_t2_pcrel_10: {
679     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
682     if ((int64_t)Value < 0) {
686     // These values don't encode the low two bits since they're always zero.
689       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
692     Value |= isAdd << 23;
694     // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
696     if (Kind == ARM::fixup_t2_pcrel_10)
697       return swapHalfWords(Value, Endian == support::little);
701   case ARM::fixup_arm_pcrel_9:
702     Value = Value - 4; // ARM fixups offset by an additional word and don't
703                        // need to adjust for the half-word ordering.
705   case ARM::fixup_t2_pcrel_9: {
706     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
709     if ((int64_t)Value < 0) {
713     // These values don't encode the low bit since it's always zero.
715       Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
720       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
723     Value |= isAdd << 23;
725     // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
727     if (Kind == ARM::fixup_t2_pcrel_9)
728       return swapHalfWords(Value, Endian == support::little);
// Modified-immediate forms: getSOImmVal/getT2SOImmVal return -1 (which
// reads as negative here) when the value is not representable.
732   case ARM::fixup_arm_mod_imm:
733     Value = ARM_AM::getSOImmVal(Value);
735       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
739   case ARM::fixup_t2_so_imm: {
740     Value = ARM_AM::getT2SOImmVal(Value);
741     if ((int64_t)Value < 0) {
742       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
745     // Value will contain a 12-bit value broken up into a 4-bit shift in bits
746     // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
747     // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
748     // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
750     uint64_t EncValue = 0;
751     EncValue |= (Value & 0x800) << 15;
752     EncValue |= (Value & 0x700) << 4;
753     EncValue |= (Value & 0xff);
754     return swapHalfWords(EncValue, Endian == support::little);
// Decides whether a fixup must be emitted as a relocation even if it could
// be resolved at assembly time: Thumb BL to an external symbol, ARM/Thumb
// branches to an ELF function symbol of the opposite instruction-set mode
// (interworking), and all BL/BLX against a symbol (the linker needs the
// destination's thumb-ness).  The `return true` lines are elided from
// this view.
759 bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
760                                           const MCFixup &Fixup,
761                                           const MCValue &Target) {
762   const MCSymbolRefExpr *A = Target.getSymA();
763   const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
764   const unsigned FixupKind = Fixup.getKind() ;
765   if ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl) {
766     assert(Sym && "How did we resolve this?");
768     // If the symbol is external the linker will handle it.
769     // FIXME: Should we handle it as an optimization?
771     // If the symbol is out of range, produce a relocation and hope the
772     // linker can handle it. GNU AS produces an error in this case.
773     if (Sym->isExternal())
776   // Create relocations for unconditional branches to function symbols with
777   // different execution mode in ELF binaries.
778   if (Sym && Sym->isELF()) {
779     unsigned Type = cast<MCSymbolELF>(Sym)->getType();
780     if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
781       if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
783       if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
784                                     FixupKind == ARM::fixup_arm_thumb_bl ||
785                                     FixupKind == ARM::fixup_t2_condbranch ||
786                                     FixupKind == ARM::fixup_t2_uncondbranch))
790   // We must always generate a relocation for BL/BLX instructions if we have
791   // a symbol to reference, as the linker relies on knowing the destination
792   // symbol's thumb-ness to get interworking right.
793   if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
794             FixupKind == ARM::fixup_arm_blx ||
795             FixupKind == ARM::fixup_arm_uncondbl ||
796             FixupKind == ARM::fixup_arm_condbl))
801 /// getFixupKindNumBytes - The number of bytes the fixup may change.
// Groups of case labels share a single return (the return statements are
// elided from this view); each group is annotated with its apparent size.
802 static unsigned getFixupKindNumBytes(unsigned Kind) {
805     llvm_unreachable("Unknown fixup kind!");
// 1-byte fixups.
808   case ARM::fixup_arm_thumb_bcc:
809   case ARM::fixup_arm_thumb_cp:
810   case ARM::fixup_thumb_adr_pcrel_10:
// 2-byte fixups.
814   case ARM::fixup_arm_thumb_br:
815   case ARM::fixup_arm_thumb_cb:
816   case ARM::fixup_arm_mod_imm:
// 3-byte fixups (24-bit branch/call immediates).
819   case ARM::fixup_arm_pcrel_10_unscaled:
820   case ARM::fixup_arm_ldst_pcrel_12:
821   case ARM::fixup_arm_pcrel_10:
822   case ARM::fixup_arm_pcrel_9:
823   case ARM::fixup_arm_adr_pcrel_12:
824   case ARM::fixup_arm_uncondbl:
825   case ARM::fixup_arm_condbl:
826   case ARM::fixup_arm_blx:
827   case ARM::fixup_arm_condbranch:
828   case ARM::fixup_arm_uncondbranch:
// 4-byte fixups.
832   case ARM::fixup_t2_ldst_pcrel_12:
833   case ARM::fixup_t2_condbranch:
834   case ARM::fixup_t2_uncondbranch:
835   case ARM::fixup_t2_pcrel_10:
836   case ARM::fixup_t2_pcrel_9:
837   case ARM::fixup_t2_adr_pcrel_12:
838   case ARM::fixup_arm_thumb_bl:
839   case ARM::fixup_arm_thumb_blx:
840   case ARM::fixup_arm_movt_hi16:
841   case ARM::fixup_arm_movw_lo16:
842   case ARM::fixup_t2_movt_hi16:
843   case ARM::fixup_t2_movw_lo16:
844   case ARM::fixup_t2_so_imm:
854 /// getFixupKindContainerSizeBytes - The number of bytes of the
855 /// container involved in big endian.
// Distinct from getFixupKindNumBytes: this is the full instruction size,
// used to index bytes from the opposite end for big-endian targets.
// Return statements are elided from this view.
856 static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
859     llvm_unreachable("Unknown fixup kind!");
868   case ARM::fixup_arm_thumb_bcc:
869   case ARM::fixup_arm_thumb_cp:
870   case ARM::fixup_thumb_adr_pcrel_10:
871   case ARM::fixup_arm_thumb_br:
872   case ARM::fixup_arm_thumb_cb:
873     // Instruction size is 2 bytes.
876   case ARM::fixup_arm_pcrel_10_unscaled:
877   case ARM::fixup_arm_ldst_pcrel_12:
878   case ARM::fixup_arm_pcrel_10:
879   case ARM::fixup_arm_adr_pcrel_12:
880   case ARM::fixup_arm_uncondbl:
881   case ARM::fixup_arm_condbl:
882   case ARM::fixup_arm_blx:
883   case ARM::fixup_arm_condbranch:
884   case ARM::fixup_arm_uncondbranch:
885   case ARM::fixup_t2_ldst_pcrel_12:
886   case ARM::fixup_t2_condbranch:
887   case ARM::fixup_t2_uncondbranch:
888   case ARM::fixup_t2_pcrel_10:
889   case ARM::fixup_t2_adr_pcrel_12:
890   case ARM::fixup_arm_thumb_bl:
891   case ARM::fixup_arm_thumb_blx:
892   case ARM::fixup_arm_movt_hi16:
893   case ARM::fixup_arm_movw_lo16:
894   case ARM::fixup_t2_movt_hi16:
895   case ARM::fixup_t2_movw_lo16:
896   case ARM::fixup_arm_mod_imm:
897   case ARM::fixup_t2_so_imm:
898     // Instruction size is 4 bytes.
// Patches the fixup value into the encoded instruction bytes in Data.
// The value is first transformed into instruction-field layout by
// adjustFixupValue, then OR'ed byte-by-byte into the fragment; on
// big-endian targets bytes are indexed from the far end of the full
// instruction container.
903 void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
904                                const MCValue &Target,
905                                MutableArrayRef<char> Data, uint64_t Value,
907                                const MCSubtargetInfo* STI) const {
908   unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
909   MCContext &Ctx = Asm.getContext();
910   Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
912     return; // Doesn't change encoding.
914   unsigned Offset = Fixup.getOffset();
915   assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
917   // Used to point to big endian bytes.
918   unsigned FullSizeBytes;
919   if (Endian == support::big) {
920     FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
921     assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
922     assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
925   // For each byte of the fragment that the fixup touches, mask in the bits from
926   // the fixup value. The Value has been "split up" into the appropriate
928   for (unsigned i = 0; i != NumBytes; ++i) {
929     unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
930     Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
936 /// Compact unwind encoding values.
// Bit layout of the Darwin/armv7k compact unwind word: mode in the top
// nibble of the low 28 bits, stack adjust in bits 23:22, pushed GPRs and
// D-register count in the low bits, or a DWARF FDE offset when falling
// back to DWARF unwinding.
937 enum CompactUnwindEncodings {
938   UNWIND_ARM_MODE_MASK = 0x0F000000,
939   UNWIND_ARM_MODE_FRAME = 0x01000000,
940   UNWIND_ARM_MODE_FRAME_D = 0x02000000,
941   UNWIND_ARM_MODE_DWARF = 0x04000000,
943   UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,
945   UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
946   UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
947   UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,
949   UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
950   UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
951   UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
952   UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
953   UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,
955   UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,
957   UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
960 } // end CU namespace
962 /// Generate compact unwind encoding for the function based on the CFI
963 /// instructions. If the CFI instructions describe a frame that cannot be
964 /// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
965 /// tells the runtime to fallback and unwind using dwarf.
966 uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
967 ArrayRef<MCCFIInstruction> Instrs) const {
968 DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
969 // Only armv7k uses CFI based unwinding.
970 if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
972 // No .cfi directives means no frame.
975 // Start off assuming CFA is at SP+0.
976 int CFARegister = ARM::SP;
977 int CFARegisterOffset = 0;
978 // Mark savable registers as initially unsaved
979 DenseMap<unsigned, int> RegOffsets;
980 int FloatRegCount = 0;
981 // Process each .cfi directive and build up compact unwind info.
982 for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
984 const MCCFIInstruction &Inst = Instrs[i];
985 switch (Inst.getOperation()) {
986 case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
987 CFARegisterOffset = -Inst.getOffset();
988 CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
990 case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
991 CFARegisterOffset = -Inst.getOffset();
993 case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
994 CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
996 case MCCFIInstruction::OpOffset: // DW_CFA_offset
997 Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
998 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
999 RegOffsets[Reg] = Inst.getOffset();
1000 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
1001 RegOffsets[Reg] = Inst.getOffset();
1004 DEBUG_WITH_TYPE("compact-unwind",
1005 llvm::dbgs() << ".cfi_offset on unknown register="
1006 << Inst.getRegister() << "\n");
1007 return CU::UNWIND_ARM_MODE_DWARF;
1010 case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
1014 // Directive not convertable to compact unwind, bail out.
1015 DEBUG_WITH_TYPE("compact-unwind",
1017 << "CFI directive not compatiable with comact "
1018 "unwind encoding, opcode=" << Inst.getOperation()
1020 return CU::UNWIND_ARM_MODE_DWARF;
1025 // If no frame set up, return no unwind info.
1026 if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
1029 // Verify standard frame (lr/r7) was used.
1030 if (CFARegister != ARM::R7) {
1031 DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
1033 << " instead of r7\n");
1034 return CU::UNWIND_ARM_MODE_DWARF;
1036 int StackAdjust = CFARegisterOffset - 8;
1037 if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
1038 DEBUG_WITH_TYPE("compact-unwind",
1040 << "LR not saved as standard frame, StackAdjust="
1042 << ", CFARegisterOffset=" << CFARegisterOffset
1043 << ", lr save at offset=" << RegOffsets[14] << "\n");
1044 return CU::UNWIND_ARM_MODE_DWARF;
1046 if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
1047 DEBUG_WITH_TYPE("compact-unwind",
1048 llvm::dbgs() << "r7 not saved as standard frame\n");
1049 return CU::UNWIND_ARM_MODE_DWARF;
1051 uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;
1053 // If var-args are used, there may be a stack adjust required.
1054 switch (StackAdjust) {
1058 CompactUnwindEncoding |= 0x00400000;
1061 CompactUnwindEncoding |= 0x00800000;
1064 CompactUnwindEncoding |= 0x00C00000;
1067 DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
1068 << ".cfi_def_cfa stack adjust ("
1069 << StackAdjust << ") out of range\n");
1070 return CU::UNWIND_ARM_MODE_DWARF;
1073 // If r6 is saved, it must be right below r7.
1077 } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
1078 {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
1079 {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
1080 {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
1081 {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
1082 {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
1083 {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
1084 {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};
1086 int CurOffset = -8 - StackAdjust;
1087 for (auto CSReg : GPRCSRegs) {
1088 auto Offset = RegOffsets.find(CSReg.Reg);
1089 if (Offset == RegOffsets.end())
1092 int RegOffset = Offset->second;
1093 if (RegOffset != CurOffset - 4) {
1094 DEBUG_WITH_TYPE("compact-unwind",
1095 llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
1096 << RegOffset << " but only supported at "
1097 << CurOffset << "\n");
1098 return CU::UNWIND_ARM_MODE_DWARF;
1100 CompactUnwindEncoding |= CSReg.Encoding;
1104 // If no floats saved, we are done.
1105 if (FloatRegCount == 0)
1106 return CompactUnwindEncoding;
1108 // Switch mode to include D register saving.
1109 CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
1110 CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;
1112 // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
1113 // but needs coordination with the linker and libunwind.
1114 if (FloatRegCount > 4) {
1115 DEBUG_WITH_TYPE("compact-unwind",
1116 llvm::dbgs() << "unsupported number of D registers saved ("
1117 << FloatRegCount << ")\n");
1118 return CU::UNWIND_ARM_MODE_DWARF;
1121 // Floating point registers must either be saved sequentially, or we defer to
1122 // DWARF. No gaps allowed here so check that each saved d-register is
1123 // precisely where it should be.
1124 static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
1125 for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
1126 auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
1127 if (Offset == RegOffsets.end()) {
1128 DEBUG_WITH_TYPE("compact-unwind",
1129 llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1130 << MRI.getName(FPRCSRegs[Idx])
1132 return CU::UNWIND_ARM_MODE_DWARF;
1133 } else if (Offset->second != CurOffset - 8) {
1134 DEBUG_WITH_TYPE("compact-unwind",
1135 llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1136 << MRI.getName(FPRCSRegs[Idx])
1137 << " saved at " << Offset->second
1138 << ", expected at " << CurOffset - 8
1140 return CU::UNWIND_ARM_MODE_DWARF;
1145 return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
1148 static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
1149 ARM::ArchKind AK = ARM::parseArch(Arch);
1152 return MachO::CPU_SUBTYPE_ARM_V7;
1153 case ARM::ArchKind::ARMV4T:
1154 return MachO::CPU_SUBTYPE_ARM_V4T;
1155 case ARM::ArchKind::ARMV5T:
1156 case ARM::ArchKind::ARMV5TE:
1157 case ARM::ArchKind::ARMV5TEJ:
1158 return MachO::CPU_SUBTYPE_ARM_V5;
1159 case ARM::ArchKind::ARMV6:
1160 case ARM::ArchKind::ARMV6K:
1161 return MachO::CPU_SUBTYPE_ARM_V6;
1162 case ARM::ArchKind::ARMV7A:
1163 return MachO::CPU_SUBTYPE_ARM_V7;
1164 case ARM::ArchKind::ARMV7S:
1165 return MachO::CPU_SUBTYPE_ARM_V7S;
1166 case ARM::ArchKind::ARMV7K:
1167 return MachO::CPU_SUBTYPE_ARM_V7K;
1168 case ARM::ArchKind::ARMV6M:
1169 return MachO::CPU_SUBTYPE_ARM_V6M;
1170 case ARM::ArchKind::ARMV7M:
1171 return MachO::CPU_SUBTYPE_ARM_V7M;
1172 case ARM::ArchKind::ARMV7EM:
1173 return MachO::CPU_SUBTYPE_ARM_V7EM;
1177 static MCAsmBackend *createARMAsmBackend(const Target &T,
1178 const MCSubtargetInfo &STI,
1179 const MCRegisterInfo &MRI,
1180 const MCTargetOptions &Options,
1181 support::endianness Endian) {
1182 const Triple &TheTriple = STI.getTargetTriple();
1183 switch (TheTriple.getObjectFormat()) {
1185 llvm_unreachable("unsupported object format");
1186 case Triple::MachO: {
1187 MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
1188 return new ARMAsmBackendDarwin(T, STI, MRI, CS);
1191 assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
1192 return new ARMAsmBackendWinCOFF(T, STI);
1194 assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
1195 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1196 return new ARMAsmBackendELF(T, STI, OSABI, Endian);
1200 MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
1201 const MCSubtargetInfo &STI,
1202 const MCRegisterInfo &MRI,
1203 const MCTargetOptions &Options) {
1204 return createARMAsmBackend(T, STI, MRI, Options, support::little);
1207 MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
1208 const MCSubtargetInfo &STI,
1209 const MCRegisterInfo &MRI,
1210 const MCTargetOptions &Options) {
1211 return createARMAsmBackend(T, STI, MRI, Options, support::big);