1 //===- lib/ReaderWriter/Native/WriterNative.cpp ---------------------------===//
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
#include "NativeFileFormat.h"
#include "lld/Core/File.h"
#include "lld/Core/LinkingContext.h"
#include "lld/Core/Writer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <set>
#include <system_error>
#include <vector>
27 /// Class for writing native object files.
29 class Writer : public lld::Writer {
31 std::error_code writeFile(const lld::File &file, StringRef outPath) override {
32 // reserve first byte for unnamed atoms
33 _stringPool.push_back('\0');
35 for ( const DefinedAtom *defAtom : file.defined() ) {
36 this->addIVarsForDefinedAtom(*defAtom);
37 // We are trying to process all atoms, but the defined() iterator does not
38 // return group children. So, when a group parent is found, we need to
39 // handle each child atom.
40 if (defAtom->isGroupParent()) {
41 for (const Reference *r : *defAtom) {
42 if (r->kindNamespace() != lld::Reference::KindNamespace::all)
44 if (r->kindValue() == lld::Reference::kindGroupChild) {
45 const DefinedAtom *target = dyn_cast<DefinedAtom>(r->target());
46 assert(target && "Internal Error: kindGroupChild references need "
47 "to be associated with Defined Atoms only");
48 this->addIVarsForDefinedAtom(*target);
53 for ( const UndefinedAtom *undefAtom : file.undefined() ) {
54 this->addIVarsForUndefinedAtom(*undefAtom);
56 for ( const SharedLibraryAtom *shlibAtom : file.sharedLibrary() ) {
57 this->addIVarsForSharedLibraryAtom(*shlibAtom);
59 for ( const AbsoluteAtom *absAtom : file.absolute() ) {
60 this->addIVarsForAbsoluteAtom(*absAtom);
63 maybeConvertReferencesToV1();
65 // construct file header based on atom information accumulated
69 llvm::raw_fd_ostream out(outPath, ec, llvm::sys::fs::F_None);
75 return std::error_code();
83 // write the lld::File in native format to the specified stream
84 void write(raw_ostream &out) {
85 assert(out.tell() == 0);
86 out.write((char*)_headerBuffer, _headerBufferSize);
88 writeChunk(out, _definedAtomIvars, NCS_DefinedAtomsV1);
89 writeChunk(out, _attributes, NCS_AttributesArrayV1);
90 writeChunk(out, _undefinedAtomIvars, NCS_UndefinedAtomsV1);
91 writeChunk(out, _sharedLibraryAtomIvars, NCS_SharedLibraryAtomsV1);
92 writeChunk(out, _absoluteAtomIvars, NCS_AbsoluteAtomsV1);
93 writeChunk(out, _absAttributes, NCS_AbsoluteAttributesV1);
94 writeChunk(out, _stringPool, NCS_Strings);
95 writeChunk(out, _referencesV1, NCS_ReferencesArrayV1);
96 writeChunk(out, _referencesV2, NCS_ReferencesArrayV2);
98 if (!_targetsTableIndex.empty()) {
99 assert(out.tell() == findChunk(NCS_TargetsTable).fileOffset);
100 writeTargetTable(out);
103 if (!_addendsTableIndex.empty()) {
104 assert(out.tell() == findChunk(NCS_AddendsTable).fileOffset);
105 writeAddendTable(out);
108 writeChunk(out, _contentPool, NCS_Content);
112 void writeChunk(raw_ostream &out, std::vector<T> &vector, uint32_t signature) {
115 assert(out.tell() == findChunk(signature).fileOffset);
116 out.write((char*)&vector[0], vector.size() * sizeof(T));
119 void addIVarsForDefinedAtom(const DefinedAtom& atom) {
120 _definedAtomIndex[&atom] = _definedAtomIvars.size();
121 NativeDefinedAtomIvarsV1 ivar;
123 ivar.nameOffset = getNameOffset(atom);
124 ivar.attributesOffset = getAttributeOffset(atom);
125 ivar.referencesStartIndex = getReferencesIndex(atom, refsCount);
126 ivar.referencesCount = refsCount;
127 ivar.contentOffset = getContentOffset(atom);
128 ivar.contentSize = atom.size();
129 ivar.sectionSize = atom.sectionSize();
130 _definedAtomIvars.push_back(ivar);
133 void addIVarsForUndefinedAtom(const UndefinedAtom& atom) {
134 _undefinedAtomIndex[&atom] = _undefinedAtomIvars.size();
135 NativeUndefinedAtomIvarsV1 ivar;
136 ivar.nameOffset = getNameOffset(atom);
137 ivar.flags = (atom.canBeNull() & 0x03);
138 ivar.fallbackNameOffset = 0;
140 ivar.fallbackNameOffset = getNameOffset(*atom.fallback());
141 _undefinedAtomIvars.push_back(ivar);
144 void addIVarsForSharedLibraryAtom(const SharedLibraryAtom& atom) {
145 _sharedLibraryAtomIndex[&atom] = _sharedLibraryAtomIvars.size();
146 NativeSharedLibraryAtomIvarsV1 ivar;
147 ivar.size = atom.size();
148 ivar.nameOffset = getNameOffset(atom);
149 ivar.loadNameOffset = getSharedLibraryNameOffset(atom.loadName());
150 ivar.type = (uint32_t)atom.type();
151 ivar.flags = atom.canBeNullAtRuntime();
152 _sharedLibraryAtomIvars.push_back(ivar);
155 void addIVarsForAbsoluteAtom(const AbsoluteAtom& atom) {
156 _absoluteAtomIndex[&atom] = _absoluteAtomIvars.size();
157 NativeAbsoluteAtomIvarsV1 ivar;
158 ivar.nameOffset = getNameOffset(atom);
159 ivar.attributesOffset = getAttributeOffset(atom);
161 ivar.value = atom.value();
162 _absoluteAtomIvars.push_back(ivar);
165 void convertReferencesToV1() {
166 for (const NativeReferenceIvarsV2 &v2 : _referencesV2) {
167 NativeReferenceIvarsV1 v1;
168 v1.offsetInAtom = v2.offsetInAtom;
169 v1.kindNamespace = v2.kindNamespace;
170 v1.kindArch = v2.kindArch;
171 v1.kindValue = v2.kindValue;
172 v1.targetIndex = (v2.targetIndex == NativeReferenceIvarsV2::noTarget) ?
173 (uint16_t)NativeReferenceIvarsV1::noTarget : v2.targetIndex;
174 v1.addendIndex = this->getAddendIndex(v2.addend);
175 _referencesV1.push_back(v1);
177 _referencesV2.clear();
180 bool canConvertReferenceToV1(const NativeReferenceIvarsV2 &ref) {
181 bool validOffset = (ref.offsetInAtom == NativeReferenceIvarsV2::noTarget) ||
182 ref.offsetInAtom < NativeReferenceIvarsV1::noTarget;
183 return validOffset && ref.targetIndex < UINT16_MAX;
186 // Convert vector of NativeReferenceIvarsV2 to NativeReferenceIvarsV1 if
188 void maybeConvertReferencesToV1() {
189 std::set<int64_t> addends;
190 for (const NativeReferenceIvarsV2 &ref : _referencesV2) {
191 if (!canConvertReferenceToV1(ref))
193 addends.insert(ref.addend);
194 if (addends.size() >= UINT16_MAX)
197 convertReferencesToV1();
200 // fill out native file header and chunk directory
202 const bool hasDefines = !_definedAtomIvars.empty();
203 const bool hasUndefines = !_undefinedAtomIvars.empty();
204 const bool hasSharedLibraries = !_sharedLibraryAtomIvars.empty();
205 const bool hasAbsolutes = !_absoluteAtomIvars.empty();
206 const bool hasReferencesV1 = !_referencesV1.empty();
207 const bool hasReferencesV2 = !_referencesV2.empty();
208 const bool hasTargetsTable = !_targetsTableIndex.empty();
209 const bool hasAddendTable = !_addendsTableIndex.empty();
210 const bool hasContent = !_contentPool.empty();
212 int chunkCount = 1; // always have string pool chunk
213 if ( hasDefines ) chunkCount += 2;
214 if ( hasUndefines ) ++chunkCount;
215 if ( hasSharedLibraries ) ++chunkCount;
216 if ( hasAbsolutes ) chunkCount += 2;
217 if ( hasReferencesV1 ) ++chunkCount;
218 if ( hasReferencesV2 ) ++chunkCount;
219 if ( hasTargetsTable ) ++chunkCount;
220 if ( hasAddendTable ) ++chunkCount;
221 if ( hasContent ) ++chunkCount;
223 _headerBufferSize = sizeof(NativeFileHeader)
224 + chunkCount*sizeof(NativeChunk);
225 _headerBuffer = reinterpret_cast<NativeFileHeader*>
226 (operator new(_headerBufferSize, std::nothrow));
227 NativeChunk *chunks =
228 reinterpret_cast<NativeChunk*>(reinterpret_cast<char*>(_headerBuffer)
229 + sizeof(NativeFileHeader));
230 memcpy(_headerBuffer->magic, NATIVE_FILE_HEADER_MAGIC,
231 sizeof(_headerBuffer->magic));
232 _headerBuffer->endian = NFH_LittleEndian;
233 _headerBuffer->architecture = 0;
234 _headerBuffer->fileSize = 0;
235 _headerBuffer->chunkCount = chunkCount;
237 // create chunk for defined atom ivar array
239 uint32_t nextFileOffset = _headerBufferSize;
241 fillChunkHeader(chunks[nextIndex++], nextFileOffset, _definedAtomIvars,
244 // create chunk for attributes
245 fillChunkHeader(chunks[nextIndex++], nextFileOffset, _attributes,
246 NCS_AttributesArrayV1);
249 // create chunk for undefined atom array
251 fillChunkHeader(chunks[nextIndex++], nextFileOffset, _undefinedAtomIvars,
252 NCS_UndefinedAtomsV1);
254 // create chunk for shared library atom array
255 if (hasSharedLibraries)
256 fillChunkHeader(chunks[nextIndex++], nextFileOffset,
257 _sharedLibraryAtomIvars, NCS_SharedLibraryAtomsV1);
259 // create chunk for shared library atom array
261 fillChunkHeader(chunks[nextIndex++], nextFileOffset, _absoluteAtomIvars,
262 NCS_AbsoluteAtomsV1);
264 // create chunk for attributes
265 fillChunkHeader(chunks[nextIndex++], nextFileOffset, _absAttributes,
266 NCS_AbsoluteAttributesV1);
269 // create chunk for symbol strings
270 // pad end of string pool to 4-bytes
271 while ((_stringPool.size() % 4) != 0)
272 _stringPool.push_back('\0');
273 fillChunkHeader(chunks[nextIndex++], nextFileOffset, _stringPool,
276 // create chunk for referencesV2
278 fillChunkHeader(chunks[nextIndex++], nextFileOffset, _referencesV1,
279 NCS_ReferencesArrayV1);
281 // create chunk for referencesV2
283 fillChunkHeader(chunks[nextIndex++], nextFileOffset, _referencesV2,
284 NCS_ReferencesArrayV2);
286 // create chunk for target table
287 if (hasTargetsTable) {
288 NativeChunk& cht = chunks[nextIndex++];
289 cht.signature = NCS_TargetsTable;
290 cht.fileOffset = nextFileOffset;
291 cht.fileSize = _targetsTableIndex.size() * sizeof(uint32_t);
292 cht.elementCount = _targetsTableIndex.size();
293 nextFileOffset = cht.fileOffset + cht.fileSize;
296 // create chunk for addend table
297 if (hasAddendTable) {
298 NativeChunk& chad = chunks[nextIndex++];
299 chad.signature = NCS_AddendsTable;
300 chad.fileOffset = nextFileOffset;
301 chad.fileSize = _addendsTableIndex.size() * sizeof(Reference::Addend);
302 chad.elementCount = _addendsTableIndex.size();
303 nextFileOffset = chad.fileOffset + chad.fileSize;
306 // create chunk for content
308 fillChunkHeader(chunks[nextIndex++], nextFileOffset, _contentPool,
311 _headerBuffer->fileSize = nextFileOffset;
315 void fillChunkHeader(NativeChunk &chunk, uint32_t &nextFileOffset,
316 const std::vector<T> &data, uint32_t signature) {
317 chunk.signature = signature;
318 chunk.fileOffset = nextFileOffset;
319 chunk.fileSize = data.size() * sizeof(T);
320 chunk.elementCount = data.size();
321 nextFileOffset = chunk.fileOffset + chunk.fileSize;
324 // scan header to find particular chunk
325 NativeChunk& findChunk(uint32_t signature) {
326 const uint32_t chunkCount = _headerBuffer->chunkCount;
327 NativeChunk* chunks =
328 reinterpret_cast<NativeChunk*>(reinterpret_cast<char*>(_headerBuffer)
329 + sizeof(NativeFileHeader));
330 for (uint32_t i=0; i < chunkCount; ++i) {
331 if ( chunks[i].signature == signature )
334 llvm_unreachable("findChunk() signature not found");
337 // append atom name to string pool and return offset
338 uint32_t getNameOffset(const Atom& atom) {
339 return this->getNameOffset(atom.name());
342 // check if name is already in pool or append and return offset
343 uint32_t getSharedLibraryNameOffset(StringRef name) {
344 assert(!name.empty());
345 // look to see if this library name was used by another atom
346 for (auto &it : _sharedLibraryNames)
347 if (name.equals(it.first))
349 // first use of this library name
350 uint32_t result = this->getNameOffset(name);
351 _sharedLibraryNames.push_back(std::make_pair(name, result));
355 // append atom name to string pool and return offset
356 uint32_t getNameOffset(StringRef name) {
359 uint32_t result = _stringPool.size();
360 _stringPool.insert(_stringPool.end(), name.begin(), name.end());
361 _stringPool.push_back(0);
365 // append atom cotent to content pool and return offset
366 uint32_t getContentOffset(const DefinedAtom& atom) {
367 if (!atom.occupiesDiskSpace())
369 uint32_t result = _contentPool.size();
370 ArrayRef<uint8_t> cont = atom.rawContent();
371 _contentPool.insert(_contentPool.end(), cont.begin(), cont.end());
375 // reuse existing attributes entry or create a new one and return offet
376 uint32_t getAttributeOffset(const DefinedAtom& atom) {
377 NativeAtomAttributesV1 attrs = computeAttributesV1(atom);
378 return getOrPushAttribute(_attributes, attrs);
381 uint32_t getAttributeOffset(const AbsoluteAtom& atom) {
382 NativeAtomAttributesV1 attrs = computeAbsoluteAttributes(atom);
383 return getOrPushAttribute(_absAttributes, attrs);
386 uint32_t getOrPushAttribute(std::vector<NativeAtomAttributesV1> &dest,
387 const NativeAtomAttributesV1 &attrs) {
388 for (size_t i = 0, e = dest.size(); i < e; ++i) {
389 if (!memcmp(&dest[i], &attrs, sizeof(attrs))) {
390 // found that this set of attributes already used, so re-use
391 return i * sizeof(attrs);
394 // append new attribute set to end
395 uint32_t result = dest.size() * sizeof(attrs);
396 dest.push_back(attrs);
400 uint32_t sectionNameOffset(const DefinedAtom& atom) {
401 // if section based on content, then no custom section name available
402 if (atom.sectionChoice() == DefinedAtom::sectionBasedOnContent)
404 StringRef name = atom.customSectionName();
405 assert(!name.empty());
406 // look to see if this section name was used by another atom
407 for (auto &it : _sectionNames)
408 if (name.equals(it.first))
410 // first use of this section name
411 uint32_t result = this->getNameOffset(name);
412 _sectionNames.push_back(std::make_pair(name, result));
416 NativeAtomAttributesV1 computeAttributesV1(const DefinedAtom& atom) {
417 NativeAtomAttributesV1 attrs;
418 attrs.sectionNameOffset = sectionNameOffset(atom);
419 attrs.align2 = atom.alignment().powerOf2;
420 attrs.alignModulus = atom.alignment().modulus;
421 attrs.scope = atom.scope();
422 attrs.interposable = atom.interposable();
423 attrs.merge = atom.merge();
424 attrs.contentType = atom.contentType();
425 attrs.sectionChoice = atom.sectionChoice();
426 attrs.deadStrip = atom.deadStrip();
427 attrs.dynamicExport = atom.dynamicExport();
428 attrs.codeModel = atom.codeModel();
429 attrs.permissions = atom.permissions();
433 NativeAtomAttributesV1 computeAbsoluteAttributes(const AbsoluteAtom& atom) {
434 NativeAtomAttributesV1 attrs;
435 attrs.scope = atom.scope();
439 // add references for this atom in a contiguous block in NCS_ReferencesArrayV2
440 uint32_t getReferencesIndex(const DefinedAtom& atom, unsigned& refsCount) {
441 size_t startRefSize = _referencesV2.size();
442 uint32_t result = startRefSize;
443 for (const Reference *ref : atom) {
444 NativeReferenceIvarsV2 nref;
445 nref.offsetInAtom = ref->offsetInAtom();
446 nref.kindNamespace = (uint8_t)ref->kindNamespace();
447 nref.kindArch = (uint8_t)ref->kindArch();
448 nref.kindValue = ref->kindValue();
449 nref.targetIndex = this->getTargetIndex(ref->target());
450 nref.addend = ref->addend();
451 nref.tag = ref->tag();
452 _referencesV2.push_back(nref);
454 refsCount = _referencesV2.size() - startRefSize;
455 return (refsCount == 0) ? 0 : result;
458 uint32_t getTargetIndex(const Atom* target) {
459 if ( target == nullptr )
460 return NativeReferenceIvarsV2::noTarget;
461 TargetToIndex::const_iterator pos = _targetsTableIndex.find(target);
462 if ( pos != _targetsTableIndex.end() ) {
465 uint32_t result = _targetsTableIndex.size();
466 _targetsTableIndex[target] = result;
470 void writeTargetTable(raw_ostream &out) {
471 // Build table of target indexes
472 uint32_t maxTargetIndex = _targetsTableIndex.size();
473 assert(maxTargetIndex > 0);
474 std::vector<uint32_t> targetIndexes(maxTargetIndex);
475 for (auto &it : _targetsTableIndex) {
476 const Atom* atom = it.first;
477 uint32_t targetIndex = it.second;
478 assert(targetIndex < maxTargetIndex);
480 TargetToIndex::iterator pos = _definedAtomIndex.find(atom);
481 if (pos != _definedAtomIndex.end()) {
482 targetIndexes[targetIndex] = pos->second;
485 uint32_t base = _definedAtomIvars.size();
487 pos = _undefinedAtomIndex.find(atom);
488 if (pos != _undefinedAtomIndex.end()) {
489 targetIndexes[targetIndex] = pos->second + base;
492 base += _undefinedAtomIndex.size();
494 pos = _sharedLibraryAtomIndex.find(atom);
495 if (pos != _sharedLibraryAtomIndex.end()) {
496 targetIndexes[targetIndex] = pos->second + base;
499 base += _sharedLibraryAtomIndex.size();
501 pos = _absoluteAtomIndex.find(atom);
502 assert(pos != _absoluteAtomIndex.end());
503 targetIndexes[targetIndex] = pos->second + base;
506 out.write((char*)&targetIndexes[0], maxTargetIndex * sizeof(uint32_t));
509 uint32_t getAddendIndex(Reference::Addend addend) {
511 return 0; // addend index zero is used to mean "no addend"
512 AddendToIndex::const_iterator pos = _addendsTableIndex.find(addend);
513 if ( pos != _addendsTableIndex.end() ) {
516 uint32_t result = _addendsTableIndex.size() + 1; // one-based index
517 _addendsTableIndex[addend] = result;
521 void writeAddendTable(raw_ostream &out) {
522 // Build table of addends
523 uint32_t maxAddendIndex = _addendsTableIndex.size();
524 std::vector<Reference::Addend> addends(maxAddendIndex);
525 for (auto &it : _addendsTableIndex) {
526 Reference::Addend addend = it.first;
527 uint32_t index = it.second;
528 assert(index <= maxAddendIndex);
529 addends[index-1] = addend;
532 out.write((char*)&addends[0], maxAddendIndex*sizeof(Reference::Addend));
535 typedef std::vector<std::pair<StringRef, uint32_t>> NameToOffsetVector;
537 typedef llvm::DenseMap<const Atom*, uint32_t> TargetToIndex;
538 typedef llvm::DenseMap<Reference::Addend, uint32_t> AddendToIndex;
540 NativeFileHeader* _headerBuffer;
541 size_t _headerBufferSize;
542 std::vector<char> _stringPool;
543 std::vector<uint8_t> _contentPool;
544 std::vector<NativeDefinedAtomIvarsV1> _definedAtomIvars;
545 std::vector<NativeAtomAttributesV1> _attributes;
546 std::vector<NativeAtomAttributesV1> _absAttributes;
547 std::vector<NativeUndefinedAtomIvarsV1> _undefinedAtomIvars;
548 std::vector<NativeSharedLibraryAtomIvarsV1> _sharedLibraryAtomIvars;
549 std::vector<NativeAbsoluteAtomIvarsV1> _absoluteAtomIvars;
550 std::vector<NativeReferenceIvarsV1> _referencesV1;
551 std::vector<NativeReferenceIvarsV2> _referencesV2;
552 TargetToIndex _targetsTableIndex;
553 TargetToIndex _definedAtomIndex;
554 TargetToIndex _undefinedAtomIndex;
555 TargetToIndex _sharedLibraryAtomIndex;
556 TargetToIndex _absoluteAtomIndex;
557 AddendToIndex _addendsTableIndex;
558 NameToOffsetVector _sectionNames;
559 NameToOffsetVector _sharedLibraryNames;
561 } // end namespace native
563 std::unique_ptr<Writer> createWriterNative() {
564 return std::unique_ptr<Writer>(new native::Writer());
566 } // end namespace lld