//===-- RuntimeDyld.cpp - Run-time dynamic linker for MC-JIT ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of the MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "RuntimeDyldCheckerImpl.h"
#include "RuntimeDyldCOFF.h"
#include "RuntimeDyldELF.h"
#include "RuntimeDyldImpl.h"
#include "RuntimeDyldMachO.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/COFF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MutexGuard.h"

using namespace llvm;
using namespace llvm::object;

#define DEBUG_TYPE "dyld"

// Empty out-of-line virtual destructor as the key function.
RuntimeDyldImpl::~RuntimeDyldImpl() {}

// Pin LoadedObjectInfo's vtables to this file.
void RuntimeDyld::LoadedObjectInfo::anchor() {}

namespace llvm {

void RuntimeDyldImpl::registerEHFrames() {}

void RuntimeDyldImpl::deregisterEHFrames() {}

#ifndef NDEBUG
static void dumpSectionMemory(const SectionEntry &S, StringRef State) {
  dbgs() << "----- Contents of section " << S.getName() << " " << State
         << " -----";

  if (S.getAddress() == nullptr) {
    dbgs() << "\n          <section not emitted>\n";
    return;
  }
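
  // Illustrative dump format (addresses and byte values assumed purely for
  // exposition): each output row starts at a 16-byte-aligned load address,
  // e.g.
  //   0x00007f0000001000: 55 48 89 e5 ...
  // with blank columns on the first row when the section does not begin on a
  // 16-byte boundary.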
\n"; return; } const unsigned ColsPerRow = 16; uint8_t *DataAddr = S.getAddress(); uint64_t LoadAddr = S.getLoadAddress(); unsigned StartPadding = LoadAddr & (ColsPerRow - 1); unsigned BytesRemaining = S.getSize(); if (StartPadding) { dbgs() << "\n" << format("0x%016" PRIx64, LoadAddr & ~(uint64_t)(ColsPerRow - 1)) << ":"; while (StartPadding--) dbgs() << " "; } while (BytesRemaining > 0) { if ((LoadAddr & (ColsPerRow - 1)) == 0) dbgs() << "\n" << format("0x%016" PRIx64, LoadAddr) << ":"; dbgs() << " " << format("%02x", *DataAddr); ++DataAddr; ++LoadAddr; --BytesRemaining; } dbgs() << "\n"; } #endif // Resolve the relocations for all symbols we currently know about. void RuntimeDyldImpl::resolveRelocations() { MutexGuard locked(lock); // Print out the sections prior to relocation. DEBUG( for (int i = 0, e = Sections.size(); i != e; ++i) dumpSectionMemory(Sections[i], "before relocations"); ); // First, resolve relocations associated with external symbols. resolveExternalSymbols(); // Iterate over all outstanding relocations for (auto it = Relocations.begin(), e = Relocations.end(); it != e; ++it) { // The Section here (Sections[i]) refers to the section in which the // symbol for the relocation is located. The SectionID in the relocation // entry provides the section to which the relocation will be applied. int Idx = it->first; uint64_t Addr = Sections[Idx].getLoadAddress(); DEBUG(dbgs() << "Resolving relocations Section #" << Idx << "\t" << format("%p", (uintptr_t)Addr) << "\n"); resolveRelocationList(it->second, Addr); } Relocations.clear(); // Print out sections after relocation. DEBUG( for (int i = 0, e = Sections.size(); i != e; ++i) dumpSectionMemory(Sections[i], "after relocations"); ); } void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress) { MutexGuard locked(lock); for (unsigned i = 0, e = Sections.size(); i != e; ++i) { if (Sections[i].getAddress() == LocalAddress) { reassignSectionAddress(i, TargetAddress); return; } } llvm_unreachable("Attempting to remap address of unknown section!"); } static std::error_code getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result) { ErrorOr AddressOrErr = Sym.getAddress(); if (std::error_code EC = AddressOrErr.getError()) return EC; Result = *AddressOrErr - Sec.getAddress(); return std::error_code(); } RuntimeDyldImpl::ObjSectionToIDMap RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) { MutexGuard locked(lock); // Save information about our target Arch = (Triple::ArchType)Obj.getArch(); IsTargetLittleEndian = Obj.isLittleEndian(); setMipsABI(Obj); // Compute the memory size required to load all sections to be loaded // and pass this information to the memory manager if (MemMgr.needsToReserveAllocationSpace()) { uint64_t CodeSize = 0, RODataSize = 0, RWDataSize = 0; uint32_t CodeAlign = 1, RODataAlign = 1, RWDataAlign = 1; computeTotalAllocSize(Obj, CodeSize, CodeAlign, RODataSize, RODataAlign, RWDataSize, RWDataAlign); MemMgr.reserveAllocationSpace(CodeSize, CodeAlign, RODataSize, RODataAlign, RWDataSize, RWDataAlign); } // Used sections from the object file ObjSectionToIDMap LocalSections; // Common symbols requiring allocation, with their sizes and alignments CommonSymbolList CommonSymbols; // Parse symbols DEBUG(dbgs() << "Parse symbols:\n"); for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E; ++I) { uint32_t Flags = I->getFlags(); if (Flags & SymbolRef::SF_Common) CommonSymbols.push_back(*I); else { object::SymbolRef::Type SymType = I->getType(); // Get 

      // Get symbol name.
      ErrorOr<StringRef> NameOrErr = I->getName();
      Check(NameOrErr.getError());
      StringRef Name = *NameOrErr;

      // Compute JIT symbol flags.
      JITSymbolFlags RTDyldSymFlags = JITSymbolFlags::None;
      if (Flags & SymbolRef::SF_Weak)
        RTDyldSymFlags |= JITSymbolFlags::Weak;
      if (Flags & SymbolRef::SF_Exported)
        RTDyldSymFlags |= JITSymbolFlags::Exported;

      if (Flags & SymbolRef::SF_Absolute &&
          SymType != object::SymbolRef::ST_File) {
        auto Addr = I->getAddress();
        Check(Addr.getError());
        uint64_t SectOffset = *Addr;
        unsigned SectionID = AbsoluteSymbolSection;

        DEBUG(dbgs() << "\tType: " << SymType << " (absolute) Name: " << Name
                     << " SID: " << SectionID << " Offset: "
                     << format("%p", (uintptr_t)SectOffset)
                     << " flags: " << Flags << "\n");
        GlobalSymbolTable[Name] =
          SymbolTableEntry(SectionID, SectOffset, RTDyldSymFlags);
      } else if (SymType == object::SymbolRef::ST_Function ||
                 SymType == object::SymbolRef::ST_Data ||
                 SymType == object::SymbolRef::ST_Unknown ||
                 SymType == object::SymbolRef::ST_Other) {
        ErrorOr<section_iterator> SIOrErr = I->getSection();
        Check(SIOrErr.getError());
        section_iterator SI = *SIOrErr;
        if (SI == Obj.section_end())
          continue;

        // Get symbol offset.
        uint64_t SectOffset;
        Check(getOffset(*I, *SI, SectOffset));
        bool IsCode = SI->isText();
        unsigned SectionID = findOrEmitSection(Obj, *SI, IsCode, LocalSections);

        DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name
                     << " SID: " << SectionID << " Offset: "
                     << format("%p", (uintptr_t)SectOffset)
                     << " flags: " << Flags << "\n");
        GlobalSymbolTable[Name] =
          SymbolTableEntry(SectionID, SectOffset, RTDyldSymFlags);
      }
    }
  }

  // Allocate common symbols
  emitCommonSymbols(Obj, CommonSymbols);

  // Parse and process relocations
  DEBUG(dbgs() << "Parse relocations:\n");
  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
       SI != SE; ++SI) {
    unsigned SectionID = 0;
    StubMap Stubs;
    section_iterator RelocatedSection = SI->getRelocatedSection();
    if (RelocatedSection == SE)
      continue;

    relocation_iterator I = SI->relocation_begin();
    relocation_iterator E = SI->relocation_end();

    if (I == E && !ProcessAllSections)
      continue;

    bool IsCode = RelocatedSection->isText();
    SectionID =
        findOrEmitSection(Obj, *RelocatedSection, IsCode, LocalSections);
    DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");

    for (; I != E;)
      I = processRelocationRef(SectionID, I, Obj, LocalSections, Stubs);

    // If there is an attached checker, notify it about the stubs for this
    // section so that they can be verified.
    if (Checker)
      Checker->registerStubMap(Obj.getFileName(), SectionID, Stubs);
  }

  // Give the subclasses a chance to tie-up any loose ends.
  finalizeLoad(Obj, LocalSections);

//  for (auto E : LocalSections)
//    llvm::dbgs() << "Added: " << E.first.getRawDataRefImpl() << " -> " << E.second << "\n";

  return LocalSections;
}

// A helper method for computeTotalAllocSize.
// Computes the memory size required to allocate sections with the given sizes,
// assuming that all sections are allocated with the given alignment
static uint64_t
computeAllocationSizeForSections(std::vector<uint64_t> &SectionSizes,
                                 uint64_t Alignment) {
  uint64_t TotalSize = 0;
  for (size_t Idx = 0, Cnt = SectionSizes.size(); Idx < Cnt; Idx++) {
    uint64_t AlignedSize =
        (SectionSizes[Idx] + Alignment - 1) / Alignment * Alignment;
    TotalSize += AlignedSize;
  }
  return TotalSize;
}
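
// Illustrative example (sizes assumed purely for exposition): with
// Alignment = 16 and SectionSizes = {10, 64, 20}, the sizes round up to
// 16 + 64 + 32 = 112 bytes.  Rounding every size up to the common alignment
// keeps the estimate independent of the order in which the sections are
// eventually placed.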

static bool isRequiredForExecution(const SectionRef Section) {
  const ObjectFile *Obj = Section.getObject();
  if (isa<ELFObjectFileBase>(Obj))
    return ELFSectionRef(Section).getFlags() & ELF::SHF_ALLOC;
  if (auto *COFFObj = dyn_cast<COFFObjectFile>(Obj)) {
    const coff_section *CoffSection = COFFObj->getCOFFSection(Section);
    // Avoid loading zero-sized COFF sections.
    // In PE files, VirtualSize gives the section size, and SizeOfRawData
    // may be zero for sections with content. In Obj files, SizeOfRawData
    // gives the section size, and VirtualSize is always zero. Hence
    // the need to check for both cases below.
    bool HasContent =
        (CoffSection->VirtualSize > 0) || (CoffSection->SizeOfRawData > 0);
    bool IsDiscardable =
        CoffSection->Characteristics &
        (COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_LNK_INFO);
    return HasContent && !IsDiscardable;
  }

  assert(isa<MachOObjectFile>(Obj));
  return true;
}

static bool isReadOnlyData(const SectionRef Section) {
  const ObjectFile *Obj = Section.getObject();
  if (isa<ELFObjectFileBase>(Obj))
    return !(ELFSectionRef(Section).getFlags() &
             (ELF::SHF_WRITE | ELF::SHF_EXECINSTR));
  if (auto *COFFObj = dyn_cast<COFFObjectFile>(Obj))
    return ((COFFObj->getCOFFSection(Section)->Characteristics &
             (COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
              COFF::IMAGE_SCN_MEM_READ |
              COFF::IMAGE_SCN_MEM_WRITE)) ==
            (COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
             COFF::IMAGE_SCN_MEM_READ));

  assert(isa<MachOObjectFile>(Obj));
  return false;
}

static bool isZeroInit(const SectionRef Section) {
  const ObjectFile *Obj = Section.getObject();
  if (isa<ELFObjectFileBase>(Obj))
    return ELFSectionRef(Section).getType() == ELF::SHT_NOBITS;
  if (auto *COFFObj = dyn_cast<COFFObjectFile>(Obj))
    return COFFObj->getCOFFSection(Section)->Characteristics &
           COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;

  auto *MachO = cast<MachOObjectFile>(Obj);
  unsigned SectionType = MachO->getSectionType(Section);
  return SectionType == MachO::S_ZEROFILL ||
         SectionType == MachO::S_GB_ZEROFILL;
}

// Compute an upper bound of the memory size that is required to load all
// sections
void RuntimeDyldImpl::computeTotalAllocSize(const ObjectFile &Obj,
                                            uint64_t &CodeSize,
                                            uint32_t &CodeAlign,
                                            uint64_t &RODataSize,
                                            uint32_t &RODataAlign,
                                            uint64_t &RWDataSize,
                                            uint32_t &RWDataAlign) {
  // Compute the size of all sections required for execution
  std::vector<uint64_t> CodeSectionSizes;
  std::vector<uint64_t> ROSectionSizes;
  std::vector<uint64_t> RWSectionSizes;

  // Collect sizes of all sections to be loaded;
  // also determine the max alignment of all sections
  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
       SI != SE; ++SI) {
    const SectionRef &Section = *SI;

    bool IsRequired = isRequiredForExecution(Section);

    // Consider only the sections that are required to be loaded for execution
    if (IsRequired) {
      StringRef Name;
      uint64_t DataSize = Section.getSize();
      uint64_t Alignment64 = Section.getAlignment();
      bool IsCode = Section.isText();
      bool IsReadOnly = isReadOnlyData(Section);
      Check(Section.getName(Name));
      unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;

      uint64_t StubBufSize = computeSectionStubBufSize(Obj, Section);
      uint64_t SectionSize = DataSize + StubBufSize;

      // The .eh_frame section (at least on Linux) needs an extra four bytes
      // padded with zeroes added at the end.  For MachO objects, this section
      // has a slightly different name, so this won't have any effect for
      // MachO objects.
      if (Name == ".eh_frame")
        SectionSize += 4;

      if (!SectionSize)
        SectionSize = 1;

      if (IsCode) {
        CodeAlign = std::max(CodeAlign, Alignment);
        CodeSectionSizes.push_back(SectionSize);
      } else if (IsReadOnly) {
        RODataAlign = std::max(RODataAlign, Alignment);
        ROSectionSizes.push_back(SectionSize);
      } else {
        RWDataAlign = std::max(RWDataAlign, Alignment);
        RWSectionSizes.push_back(SectionSize);
      }
    }
  }

  // Compute the size of all common symbols
  uint64_t CommonSize = 0;
  for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
       ++I) {
    uint32_t Flags = I->getFlags();
    if (Flags & SymbolRef::SF_Common) {
      // Add the common symbols to a list.  We'll allocate them all below.
      uint64_t Size = I->getCommonSize();
      CommonSize += Size;
    }
  }
  if (CommonSize != 0) {
    RWSectionSizes.push_back(CommonSize);
  }

  // Compute the required allocation space for each different type of sections
  // (code, read-only data, read-write data) assuming that all sections are
  // allocated with the max alignment.  Note that we cannot compute with the
  // individual alignments of the sections, because then the required size
  // depends on the order in which the sections are allocated.
  CodeSize = computeAllocationSizeForSections(CodeSectionSizes, CodeAlign);
  RODataSize = computeAllocationSizeForSections(ROSectionSizes, RODataAlign);
  RWDataSize = computeAllocationSizeForSections(RWSectionSizes, RWDataAlign);
}

// compute stub buffer size for the given section
unsigned RuntimeDyldImpl::computeSectionStubBufSize(const ObjectFile &Obj,
                                                    const SectionRef &Section) {
  unsigned StubSize = getMaxStubSize();
  if (StubSize == 0) {
    return 0;
  }
  // FIXME: this is an inefficient way to handle this. We should compute the
  // necessary section allocation size in loadObject by walking all the
  // sections once.
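  // One stub slot of StubSize bytes is reserved for every relocation, in any
  // relocation section targeting this section, for which the target-specific
  // subclass reports that a stub is needed; the slots are carved out of the
  // same allocation as the section and placed after its own data (see
  // emitSection below).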
  unsigned StubBufSize = 0;
  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
       SI != SE; ++SI) {
    section_iterator RelSecI = SI->getRelocatedSection();
    if (!(RelSecI == Section))
      continue;

    for (const RelocationRef &Reloc : SI->relocations())
      if (relocationNeedsStub(Reloc))
        StubBufSize += StubSize;
  }

  // Get section data size and alignment
  uint64_t DataSize = Section.getSize();
  uint64_t Alignment64 = Section.getAlignment();

  // Add stubbuf size alignment
  unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
  unsigned StubAlignment = getStubAlignment();
  // (DataSize | Alignment) & -(DataSize | Alignment) isolates the lowest set
  // bit, i.e. the largest power of two that divides both DataSize and
  // Alignment, which is the guaranteed alignment of the end of the section
  // data.
  unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
  if (StubAlignment > EndAlignment)
    StubBufSize += StubAlignment - EndAlignment;
  return StubBufSize;
}

uint64_t RuntimeDyldImpl::readBytesUnaligned(uint8_t *Src,
                                             unsigned Size) const {
  uint64_t Result = 0;
  if (IsTargetLittleEndian) {
    Src += Size - 1;
    while (Size--)
      Result = (Result << 8) | *Src--;
  } else
    while (Size--)
      Result = (Result << 8) | *Src++;

  return Result;
}

void RuntimeDyldImpl::writeBytesUnaligned(uint64_t Value, uint8_t *Dst,
                                          unsigned Size) const {
  if (IsTargetLittleEndian) {
    while (Size--) {
      *Dst++ = Value & 0xFF;
      Value >>= 8;
    }
  } else {
    Dst += Size - 1;
    while (Size--) {
      *Dst-- = Value & 0xFF;
      Value >>= 8;
    }
  }
}

void RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
                                        CommonSymbolList &CommonSymbols) {
  if (CommonSymbols.empty())
    return;

  uint64_t CommonSize = 0;
  CommonSymbolList SymbolsToAllocate;

  DEBUG(dbgs() << "Processing common symbols...\n");

  for (const auto &Sym : CommonSymbols) {
    ErrorOr<StringRef> NameOrErr = Sym.getName();
    Check(NameOrErr.getError());
    StringRef Name = *NameOrErr;

    // Skip common symbols already emitted elsewhere.
    if (GlobalSymbolTable.count(Name) ||
        Resolver.findSymbolInLogicalDylib(Name)) {
      DEBUG(dbgs() << "\tSkipping already emitted common symbol '" << Name
                   << "'\n");
      continue;
    }

    uint32_t Align = Sym.getAlignment();
    uint64_t Size = Sym.getCommonSize();

    CommonSize += Align + Size;
    SymbolsToAllocate.push_back(Sym);
  }

  // Allocate memory for the section
  unsigned SectionID = Sections.size();
  uint8_t *Addr = MemMgr.allocateDataSection(CommonSize, sizeof(void *),
                                             SectionID, StringRef(), false);
  if (!Addr)
    report_fatal_error("Unable to allocate memory for common symbols!");
  uint64_t Offset = 0;
  Sections.push_back(
      SectionEntry("<common symbols>", Addr, CommonSize, CommonSize, 0));
  memset(Addr, 0, CommonSize);

  DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID << " new addr: "
               << format("%p", Addr) << " DataSize: " << CommonSize << "\n");

  // Assign the address of each symbol
  for (auto &Sym : SymbolsToAllocate) {
    uint32_t Align = Sym.getAlignment();
    uint64_t Size = Sym.getCommonSize();
    ErrorOr<StringRef> NameOrErr = Sym.getName();
    Check(NameOrErr.getError());
    StringRef Name = *NameOrErr;
    if (Align) {
      // This symbol has an alignment requirement.
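      // OffsetToAlignment (llvm/Support/MathExtras.h) returns the number of
      // bytes needed to round its first argument up to the next multiple of
      // Align.  Illustrative example (values assumed): an Addr ending in 0x5
      // with Align == 8 yields an offset of 3.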
      uint64_t AlignOffset = OffsetToAlignment((uint64_t)Addr, Align);
      Addr += AlignOffset;
      Offset += AlignOffset;
    }
    uint32_t Flags = Sym.getFlags();
    JITSymbolFlags RTDyldSymFlags = JITSymbolFlags::None;
    if (Flags & SymbolRef::SF_Weak)
      RTDyldSymFlags |= JITSymbolFlags::Weak;
    if (Flags & SymbolRef::SF_Exported)
      RTDyldSymFlags |= JITSymbolFlags::Exported;

    DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
                 << format("%p", Addr) << "\n");

    GlobalSymbolTable[Name] =
      SymbolTableEntry(SectionID, Offset, RTDyldSymFlags);
    Offset += Size;
    Addr += Size;
  }

  if (Checker)
    Checker->registerSection(Obj.getFileName(), SectionID);
}

unsigned RuntimeDyldImpl::emitSection(const ObjectFile &Obj,
                                      const SectionRef &Section,
                                      bool IsCode) {
  StringRef data;
  uint64_t Alignment64 = Section.getAlignment();

  unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
  unsigned PaddingSize = 0;
  unsigned StubBufSize = 0;
  StringRef Name;
  bool IsRequired = isRequiredForExecution(Section);
  bool IsVirtual = Section.isVirtual();
  bool IsZeroInit = isZeroInit(Section);
  bool IsReadOnly = isReadOnlyData(Section);
  uint64_t DataSize = Section.getSize();
  Check(Section.getName(Name));

  StubBufSize = computeSectionStubBufSize(Obj, Section);

  // The .eh_frame section (at least on Linux) needs an extra four bytes padded
  // with zeroes added at the end.  For MachO objects, this section has a
  // slightly different name, so this won't have any effect for MachO objects.
  if (Name == ".eh_frame")
    PaddingSize = 4;

  uintptr_t Allocate;
  unsigned SectionID = Sections.size();
  uint8_t *Addr;
  const char *pData = nullptr;

  // If this section contains any bits (i.e. isn't a virtual or bss section),
  // grab a reference to them.
  if (!IsVirtual && !IsZeroInit) {
    // In either case, set the location of the unrelocated section in memory,
    // since we still process relocations for it even if we're not applying
    // them.
    Check(Section.getContents(data));
    pData = data.data();
  }

  // Code section alignment needs to be at least as high as stub alignment or
  // padding calculations may be incorrect when the section is remapped to a
  // higher alignment.
  if (IsCode)
    Alignment = std::max(Alignment, getStubAlignment());

  // Some sections, such as debug info, don't need to be loaded for execution.
  // Leave those where they are.
  if (IsRequired) {
    Allocate = DataSize + PaddingSize + StubBufSize;
    if (!Allocate)
      Allocate = 1;
    Addr = IsCode ? MemMgr.allocateCodeSection(Allocate, Alignment, SectionID,
                                               Name)
                  : MemMgr.allocateDataSection(Allocate, Alignment, SectionID,
                                               Name, IsReadOnly);
    if (!Addr)
      report_fatal_error("Unable to allocate section memory!");

    // Zero-initialize or copy the data from the image
    if (IsZeroInit || IsVirtual)
      memset(Addr, 0, DataSize);
    else
      memcpy(Addr, pData, DataSize);

    // Fill in any extra bytes we allocated for padding
    if (PaddingSize != 0) {
      memset(Addr + DataSize, 0, PaddingSize);
      // Update the DataSize variable so that the stub offset is set correctly.
      DataSize += PaddingSize;
    }

    DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
                 << " obj addr: " << format("%p", pData)
                 << " new addr: " << format("%p", Addr)
                 << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
                 << " Allocate: " << Allocate << "\n");
  } else {
    // Even if we didn't load the section, we need to record an entry for it
    // to handle later processing (and by 'handle' I mean don't do anything
    // with these sections).
    Allocate = 0;
    Addr = nullptr;
    DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
                 << " obj addr: " << format("%p", data.data())
                 << " new addr: 0"
                 << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
                 << " Allocate: " << Allocate << "\n");
  }

  Sections.push_back(
      SectionEntry(Name, Addr, DataSize, Allocate, (uintptr_t)pData));

  if (Checker)
    Checker->registerSection(Obj.getFileName(), SectionID);

  return SectionID;
}

unsigned RuntimeDyldImpl::findOrEmitSection(const ObjectFile &Obj,
                                            const SectionRef &Section,
                                            bool IsCode,
                                            ObjSectionToIDMap &LocalSections) {

  unsigned SectionID = 0;
  ObjSectionToIDMap::iterator i = LocalSections.find(Section);
  if (i != LocalSections.end())
    SectionID = i->second;
  else {
    SectionID = emitSection(Obj, Section, IsCode);
    LocalSections[Section] = SectionID;
  }
  return SectionID;
}

void RuntimeDyldImpl::addRelocationForSection(const RelocationEntry &RE,
                                              unsigned SectionID) {
  Relocations[SectionID].push_back(RE);
}

void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
                                             StringRef SymbolName) {
  // Relocation by symbol.  If the symbol is found in the global symbol table,
  // create an appropriate section relocation.  Otherwise, add it to
  // ExternalSymbolRelocations.
  RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(SymbolName);
  if (Loc == GlobalSymbolTable.end()) {
    ExternalSymbolRelocations[SymbolName].push_back(RE);
  } else {
    // Copy the RE since we want to modify its addend.
    RelocationEntry RECopy = RE;
    const auto &SymInfo = Loc->second;
    RECopy.Addend += SymInfo.getOffset();
    Relocations[SymInfo.getSectionID()].push_back(RECopy);
  }
}

uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr,
                                             unsigned AbiVariant) {
  if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be) {
    // This stub has to be able to access the full address space,
    // since symbol lookup won't necessarily find a handy, in-range,
    // PLT stub for functions which could be anywhere.
    // Stub can use ip0 (== x16) to calculate address
    writeBytesUnaligned(0xd2e00010, Addr,    4); // movz ip0, #:abs_g3:<addr>
    writeBytesUnaligned(0xf2c00010, Addr+4,  4); // movk ip0, #:abs_g2_nc:<addr>
    writeBytesUnaligned(0xf2a00010, Addr+8,  4); // movk ip0, #:abs_g1_nc:<addr>
    writeBytesUnaligned(0xf2800010, Addr+12, 4); // movk ip0, #:abs_g0_nc:<addr>
    writeBytesUnaligned(0xd61f0200, Addr+16, 4); // br ip0

    return Addr;
  } else if (Arch == Triple::arm || Arch == Triple::armeb) {
    // TODO: There is only ARM far stub now. We should add the Thumb stub,
    // and stubs for branches Thumb - ARM and ARM - Thumb.
    writeBytesUnaligned(0xe51ff004, Addr, 4); // ldr pc,<label>