//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.

MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)),
      BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false),
      IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) {
  VersionInfo.Major = 0; // Major version == 0 for "none specified"
  DarwinTargetVariantVersionInfo.Major = 0;
}

MCAssembler::~MCAssembler() = default;

void MCAssembler::reset() {
  Sections.clear();
  Symbols.clear();
  IndirectSymbols.clear();
  DataRegions.clear();
  LinkerOptions.clear();
  FileNames.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;
  RelaxAll = false;
  SubsectionsViaSymbols = false;
  IncrementalLinkerCompatible = false;
  ELFHeaderEFlags = 0;
  LOHContainer.reset();
  VersionInfo.Major = 0;
  VersionInfo.SDKVersion = VersionTuple();
  DarwinTargetVariantVersionInfo.Major = 0;
  DarwinTargetVariantVersionInfo.SDKVersion = VersionTuple();

  // reset objects owned by us
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (getWriterPtr())
    getWriterPtr()->reset();
  getLOHContainer().reset();
}

bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}

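// Note: a symbol is treated as a Thumb function either because it was marked
// explicitly (e.g. via .thumb_func) or because it is a variable whose value
// resolves, through a chain of plain symbol references, to a Thumb function.
// Positive results are cached in the ThumbFuncs set.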
bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
  // Non-temporary labels should always be visible to the linker.
  if (!Symbol.isTemporary())
    return true;

  if (Symbol.isUsedInReloc())
    return true;

  return false;
}

const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
  // Linker visible symbols define atoms.
  if (isSymbolLinkerVisible(S))
    return &S;

  // Absolute and undefined symbols have no defining atom.
  if (!S.isInSection())
    return nullptr;

  // Non-linker visible symbols in sections which can't be atomized have no
  // defining atom.
  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
          *S.getFragment()->getParent()))
    return nullptr;

  // Otherwise, return the atom for the containing fragment.
  return S.getFragment()->getAtom();
}

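// Evaluate a fixup against the given layout. The return value indicates
// whether the fixup is fully resolved (no relocation required); Value
// receives the computed fixup value and WasForced reports that the backend
// forced a relocation for an otherwise resolvable fixup.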
bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
                                const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value,
                                bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }

  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsTarget;

  if (IsTarget)
    return getBackend().evaluateTargetFixup(*this, Layout, Fixup, DF, Target,
                                            Value, WasForced);

  unsigned FixupFlags = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
  bool IsPCRel = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                 MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else if (auto *Writer = getWriterPtr()) {
        IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) ||
                     Writer->isSymbolRefDifferenceFullyResolvedImpl(
                         *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += Layout.getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= Layout.getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual offset.
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= uint64_t(Offset);
  }

  // Let the backend force a relocation if needed.
  if (IsResolved && getBackend().shouldForceRelocation(*this, Fixup, Target)) {
    IsResolved = false;
    WasForced = true;
  }

  return IsResolved;
}

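// Compute a fragment's size under the current layout. Fixed-content fragments
// simply report their content size; fill, org, align and boundary-align
// fragments depend on their offset and on expressions evaluated against the
// layout, so their size can change between relaxation iterations.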
uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateAsAbsolute(NumValues, Layout)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Nops:
    return cast<MCNopsFragment>(F).getNumBytes();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment()));

    // Insert extra Nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign target hook.
    if (AF.getParent()->UseCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_PseudoProbe:
    return cast<MCPseudoProbeAddrFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}

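// Lay out one fragment: its offset is the end of its (already laid out)
// predecessor, plus any bundle padding that has to precede it when bundling
// is enabled.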
void MCAsmLayout::layoutFragment(MCFragment *F) {
  MCFragment *Prev = F->getPrevNode();

  // We should never try to recompute something which is valid.
  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
  // We should never try to compute the fragment layout if its predecessor
  // isn't valid.
  assert((!Prev || isFragmentValid(Prev)) &&
         "Attempt to compute fragment before its predecessor!");

  assert(!F->IsBeingLaidOut && "Already being laid out!");
  F->IsBeingLaidOut = true;

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  if (Prev)
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
  else
    F->Offset = 0;
  F->IsBeingLaidOut = false;
  LastValidFragment[F->getParent()] = F;

  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                    F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
  // padding directly into fragments when the instructions are emitted inside
  // the streamer. When the fragment is larger than the bundle size, we need to
  // ensure that it's bundle aligned. This means that if we end up with
  // multiple fragments, we must emit bundle padding between fragments.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
    assert(isa<MCEncodedFragment>(F) &&
           "Only MCEncodedFragment implementations have instructions");
    MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
    uint64_t FSize = Assembler.computeFragmentSize(*this, *EF);

    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
      report_fatal_error("Fragment can't be larger than a bundle size");

    uint64_t RequiredBundlePadding =
        computeBundlePadding(Assembler, EF, EF->Offset, FSize);
    if (RequiredBundlePadding > UINT8_MAX)
      report_fatal_error("Padding cannot exceed 255 bytes");
    EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
    EF->Offset += RequiredBundlePadding;
  }
}

void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
  bool New = !Symbol.isRegistered();
  if (Created)
    *Created = New;
  if (New) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
}

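// Emit the NOP padding that must precede an encoded fragment so that the
// bundling constraints hold. If the padding itself would straddle a bundle
// boundary it is emitted as two NOP sequences (see the diagram below).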
void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    const MCSubtargetInfo *STI = EF.getSubtargetInfo();
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|      F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary, STI))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding, STI))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}

/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCAsmLayout &Layout, const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  support::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OS.tell();
  (void) Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to fill
    // the Count bytes. Then if that did not fill any bytes or there are any
    // bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask that target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OS << cast<MCCompactEncodedInstFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as byte vector to reduce number of
    // writes done. As such, do endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == support::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];

    // Set to largest multiple of VSize in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    // Set ChunkSize to largest multiple of VSize in Data.
    const unsigned ChunkSize = VSize * NumPerChunk;

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Do the remainder if needed.
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_Nops: {
    ++stats::EmittedNopsFragments;
    const MCNopsFragment &NF = cast<MCNopsFragment>(F);

    int64_t NumBytes = NF.getNumBytes();
    int64_t ControlledNopLength = NF.getControlledNopLength();
    int64_t MaximumNopLength =
        Asm.getBackend().getMaximumNopSize(*NF.getSubtargetInfo());

    assert(NumBytes > 0 && "Expected positive NOPs fragment size");
    assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");

    if (ControlledNopLength > MaximumNopLength) {
      Asm.getContext().reportError(NF.getLoc(),
                                   "illegal NOP size " +
                                       std::to_string(ControlledNopLength) +
                                       ". (expected within [0, " +
                                       std::to_string(MaximumNopLength) + "])");
      // Clamp the NOP length as reportError does not stop the execution
      // immediately.
      ControlledNopLength = MaximumNopLength;
    }

    // Use the maximum value if the size of each NOP is not specified.
    if (!ControlledNopLength)
      ControlledNopLength = MaximumNopLength;

    while (NumBytes) {
      uint64_t NumBytesToEmit =
          (uint64_t)std::min(NumBytes, ControlledNopLength);
      assert(NumBytesToEmit && "try to emit empty NOP instruction");
      if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit,
                                         NF.getSubtargetInfo())) {
        report_fatal_error("unable to write nop sequence of the remaining " +
                           Twine(NumBytesToEmit) + " bytes");
        break;
      }
      NumBytes -= NumBytesToEmit;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(F);
    if (!Asm.getBackend().writeNopData(OS, FragmentSize, BF.getSubtargetInfo()))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_PseudoProbe: {
    const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(F);
    OS << PF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}

void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec,
                                   const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write a non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use standard
        // directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          getContext().reportError(SMLoc(), Sec->getVirtualSectionKind() +
                                                " section '" + Sec->getName() +
                                                "' cannot have fixups");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            getContext().reportError(SMLoc(),
                                     Sec->getVirtualSectionKind() +
                                         " section '" + Sec->getName() +
                                         "' cannot have non-zero initializers");
            break;
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      case MCFragment::FT_Org:
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, Layout, F);

  assert(getContext().hadError() ||
         OS.tell() - Start == Layout.getSectionAddressSize(Sec));
}

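// Evaluate a single fixup and, when it cannot be fully resolved, hand it to
// the object writer so that a relocation is recorded. Returns the evaluated
// target, the value to patch into the fragment, and the resolution status.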
std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
                         const MCFixup &Fixup) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue,
                                  WasForced);
  if (!IsResolved) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}

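// Main layout driver: assign section and fragment ordering, relax repeatedly
// until no fragment changes size, finalize the layout, give the object writer
// a chance to do post-layout binding, then evaluate and apply all fixups.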
void MCAssembler::layout(MCAsmLayout &Layout) {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    // Create dummy fragments to eliminate any empty sections; this simplifies
    // layout.
    if (Sec.getFragmentList().empty())
      new MCDataFragment(&Sec);

    Sec.setOrdinal(SectionIndex++);
  }

  // Assign layout order indices to sections and fragments.
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSection *Sec = Layout.getSectionOrder()[i];
    Sec->setLayoutOrder(i);

    unsigned FragmentIndex = 0;
    for (MCFragment &Frag : *Sec)
      Frag.setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits.
  while (layoutOnce(Layout)) {
    if (getContext().hadError())
      return;
    // Size of fragments in one section can depend on the size of fragments in
    // another. If any fragment has changed size, we have to re-layout (and
    // as a result possibly further relax) all.
    for (MCSection &Sec : *this)
      Layout.invalidateFragmentsFrom(&*Sec.begin());
  }

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  finishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this, Layout);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;

      // Process MCAlignFragment and MCEncodedFragmentWithFixups here.
      switch (Frag.getKind()) {
      default:
        continue;
      case MCFragment::FT_Align: {
        MCAlignFragment &AF = cast<MCAlignFragment>(Frag);
        // Insert a fixup for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign target hook.
        if (Sec.UseCodeAlign() && AF.hasEmitNops())
          getBackend().shouldInsertFixupForCodeAlign(*this, Layout, AF);
        continue;
      }
      case MCFragment::FT_Data: {
        MCDataFragment &DF = cast<MCDataFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        STI = DF.getSubtargetInfo();
        assert(!DF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_Relaxable: {
        MCRelaxableFragment &RF = cast<MCRelaxableFragment>(Frag);
        Fixups = RF.getFixups();
        Contents = RF.getContents();
        STI = RF.getSubtargetInfo();
        assert(!RF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_CVDefRange: {
        MCCVDefRangeFragment &CF = cast<MCCVDefRangeFragment>(Frag);
        Fixups = CF.getFixups();
        Contents = CF.getContents();
        break;
      }
      case MCFragment::FT_Dwarf: {
        MCDwarfLineAddrFragment &DF = cast<MCDwarfLineAddrFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_DwarfFrame: {
        MCDwarfCallFrameFragment &DF = cast<MCDwarfCallFrameFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_PseudoProbe: {
        MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Frag);
        Fixups = PF.getFixups();
        Contents = PF.getContents();
        break;
      }
      }
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Layout, Frag, Fixup);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}

void MCAssembler::Finish() {
  // Create the layout object.
  MCAsmLayout Layout(*this);
  layout(Layout);

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this, Layout);
}

bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF,
                                       const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
                                                   Layout, WasForced);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
                                          const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F, Layout))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
                                   MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F, Layout))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.

  MCInst Relaxed = F.getInst();
  getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());

  // Encode the new instruction.
  //
  // FIXME-PERF: If it matters, we could let the target do this. It can
  // probably do so more efficiently in many cases.
  SmallVector<MCFixup, 4> Fixups;
  SmallString<256> Code;
  raw_svector_ostream VecOS(Code);
  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, *F.getSubtargetInfo());

  // Update the fragment.
  F.setInst(Relaxed);
  F.getContents() = Code;
  F.getFixups() = Fixups;

  return true;
}

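// Re-encode the LEB fragment's value against the current layout. The encoding
// is padded so it never shrinks below its previous size (see the PR35809 note
// below); for example, a ULEB128 value of 5 padded to two bytes is emitted as
// 0x85 0x00 rather than the minimal 0x05.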
bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
  uint64_t OldSize = LF.getContents().size();
  int64_t Value;
  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout);
  if (!Abs)
    report_fatal_error("sleb128 and uleb128 expressions must be absolute");
  SmallString<8> &Data = LF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to assemble
  // without either adding padding to an LEB fragment or adding extra padding
  // to a later alignment fragment. To accommodate such tables, relaxation can
  // only increase an LEB fragment size here, not decrease it. See PR35809.
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, OldSize);
  else
    encodeULEB128(Value, OSE, OldSize);
  return OldSize != LF.getContents().size();
}

/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size      size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size      size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size      size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}

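// Worked example for the helpers above, assuming a 32-byte boundary: a 4-byte
// branch starting at offset 30 crosses the boundary (it occupies bytes
// 30..33), and one starting at offset 28 ends exactly against it (bytes
// 28..31); needPadding() returns true in both cases.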
bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout,
                                     MCBoundaryAlignFragment &BF) {
  // BoundaryAlignFragment that doesn't need to align any fragment should not be
  // relaxed.
  if (!BF.getLastFragment())
    return false;

  uint64_t AlignedOffset = Layout.getFragmentOffset(&BF);
  uint64_t AlignedSize = 0;
  for (const MCFragment *F = BF.getLastFragment(); F != &BF;
       F = F->getPrevNode())
    AlignedSize += computeFragmentSize(Layout, *F);

  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == BF.getSize())
    return false;
  BF.setSize(NewSize);
  Layout.invalidateFragmentsFrom(&BF);
  return true;
}

bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
                                     MCDwarfLineAddrFragment &DF) {

  bool WasRelaxed;
  if (getBackend().relaxDwarfLineAddr(DF, Layout, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta;
  LineDelta = DF.getLineDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  DF.getFixups().clear();

  MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta,
                          AddrDelta, OSE);
  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
                                              MCDwarfCallFrameFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfCFA(DF, Layout, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created call frame with an invalid expression");
  (void) Abs;
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  DF.getFixups().clear();

  MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE);
  return OldSize != Data.size();
}

bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout,
                                         MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout,
                                  MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxPseudoProbeAddr(MCAsmLayout &Layout,
                                       MCPseudoProbeAddrFragment &PF) {
  uint64_t OldSize = PF.getContents().size();
  int64_t AddrDelta;
  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created a pseudo probe with an invalid expression");
  (void)Abs;
  SmallVectorImpl<char> &Data = PF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  PF.getFixups().clear();

  // AddrDelta is a signed integer.
  encodeSLEB128(AddrDelta, OSE, OldSize);
  return OldSize != Data.size();
}

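// Dispatch relaxation by fragment kind. Each relax* helper returns true when
// the fragment's size changed, which triggers another layout iteration.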
bool MCAssembler::relaxFragment(MCAsmLayout &Layout, MCFragment &F) {
  switch(F.getKind()) {
  default:
    return false;
  case MCFragment::FT_Relaxable:
    assert(!getRelaxAll() &&
           "Did not expect a MCRelaxableFragment in RelaxAll mode");
    return relaxInstruction(Layout, cast<MCRelaxableFragment>(F));
  case MCFragment::FT_Dwarf:
    return relaxDwarfLineAddr(Layout, cast<MCDwarfLineAddrFragment>(F));
  case MCFragment::FT_DwarfFrame:
    return relaxDwarfCallFrameFragment(Layout,
                                       cast<MCDwarfCallFrameFragment>(F));
  case MCFragment::FT_LEB:
    return relaxLEB(Layout, cast<MCLEBFragment>(F));
  case MCFragment::FT_BoundaryAlign:
    return relaxBoundaryAlign(Layout, cast<MCBoundaryAlignFragment>(F));
  case MCFragment::FT_CVInlineLines:
    return relaxCVInlineLineTable(Layout, cast<MCCVInlineLineTableFragment>(F));
  case MCFragment::FT_CVDefRange:
    return relaxCVDefRange(Layout, cast<MCCVDefRangeFragment>(F));
  case MCFragment::FT_PseudoProbe:
    return relaxPseudoProbeAddr(Layout, cast<MCPseudoProbeAddrFragment>(F));
  }
}

bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) {
  // Holds the first fragment which needed relaxing during this layout. It will
  // remain NULL if none were relaxed.
  // When a fragment is relaxed, all the fragments following it should get
  // invalidated because their offset is going to change.
  MCFragment *FirstRelaxedFragment = nullptr;

  // Attempt to relax all the fragments in the section.
  for (MCFragment &Frag : Sec) {
    // Check if this is a fragment that needs relaxation.
    bool RelaxedFrag = relaxFragment(Layout, Frag);
    if (RelaxedFrag && !FirstRelaxedFragment)
      FirstRelaxedFragment = &Frag;
  }
  if (FirstRelaxedFragment) {
    Layout.invalidateFragmentsFrom(FirstRelaxedFragment);
    return true;
  }
  return false;
}

bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
  ++stats::RelaxationSteps;

  bool WasRelaxed = false;
  for (MCSection &Sec : *this) {
    while (layoutSectionOnce(Layout, Sec))
      WasRelaxed = true;
  }

  return WasRelaxed;
}

void MCAssembler::finishLayout(MCAsmLayout &Layout) {
  assert(getBackendPtr() && "Expected assembler backend");
  // The layout is done. Mark every fragment as valid.
  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
    MCSection &Section = *Layout.getSectionOrder()[i];
    Layout.getFragmentOffset(&*Section.getFragmentList().rbegin());
    computeFragmentSize(Layout, *Section.getFragmentList().rbegin());
  }
  getBackend().finishLayout(*this, Layout);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const{
  raw_ostream &OS = errs();

  OS << "<MCAssembler\n";
  OS << "  Sections:[\n    ";
  for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
    if (it != begin()) OS << ",\n    ";
    it->dump();
  }
  OS << "],\n";
  OS << "  Symbols:[";

  for (const_symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) {
    if (it != symbol_begin()) OS << ",\n           ";
    OS << "(";
    it->dump();
    OS << ", Index:" << it->getIndex() << ", ";