//===-- sanitizer_procmaps_mac.cc -----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Information about the process mappings (Mac-specific parts).
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_MAC
#include "sanitizer_common.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"

#include <mach-o/dyld.h>
#include <mach-o/loader.h>
#include <mach/mach.h>

// These are not available in older macOS SDKs.
#ifndef CPU_SUBTYPE_X86_64_H
#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8)   /* Haswell */
#endif
#ifndef CPU_SUBTYPE_ARM_V7S
#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11)   /* Swift */
#endif
#ifndef CPU_SUBTYPE_ARM_V7K
#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12)
#endif
#ifndef CPU_TYPE_ARM64
#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
#endif

namespace __sanitizer {

MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
  Reset();
}

MemoryMappingLayout::~MemoryMappingLayout() {
}

// More information about Mach-O headers can be found in mach-o/loader.h
// Each Mach-O image has a header (mach_header or mach_header_64) starting with
// a magic number, and a list of linker load commands directly following the
// header.
// A load command is at least two 32-bit words: the command type and the
// command size in bytes. We're interested only in segment load commands
// (LC_SEGMENT and LC_SEGMENT_64), which indicate that a part of the file is
// mapped into the task's address space.
// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
// segment_command_64 correspond to the memory address, memory size and the
// file offset of the current memory segment.
// Because these fields are taken from the images as is, one needs to add
// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
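//
// A minimal sketch of such a walk over a 64-bit image (illustrative only;
// the real iteration below is stateful so it can be resumed across calls
// to Next(), and also handles 32-bit images and dyld itself):
//
//   const mach_header_64 *hdr = ...;  // image base address
//   const char *lc = (const char *)hdr + sizeof(mach_header_64);
//   for (u32 i = 0; i < hdr->ncmds; i++) {
//     const load_command *cmd = (const load_command *)lc;
//     if (cmd->cmd == LC_SEGMENT_64) {
//       const segment_command_64 *sc = (const segment_command_64 *)cmd;
//       // sc->vmaddr plus the image's slide is the segment's runtime start.
//     }
//     lc += cmd->cmdsize;
//   }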

void MemoryMappingLayout::Reset() {
  // Count down from the top.
  // TODO(glider): as per man 3 dyld, iterating over the headers with
  // _dyld_image_count is thread-unsafe. We need to register callbacks for
  // adding and removing images which will invalidate the MemoryMappingLayout
  // state.
  current_image_ = _dyld_image_count();
  current_load_cmd_count_ = -1;
  current_load_cmd_addr_ = 0;
  current_magic_ = 0;
  current_filetype_ = 0;
  current_arch_ = kModuleArchUnknown;
  internal_memset(current_uuid_, 0, kModuleUUIDSize);
}

// The dyld load address should be unchanged throughout process execution,
// and it is expensive to compute once many libraries have been loaded,
// so cache it here and do not reset.
static mach_header *dyld_hdr = 0;
static const char kDyldPath[] = "/usr/lib/dyld";
static const int kDyldImageIdx = -1;

void MemoryMappingLayout::CacheMemoryMappings() {
  // No-op on Mac for now.
}

void MemoryMappingLayout::LoadFromCache() {
  // No-op on Mac for now.
}

// _dyld_get_image_header() and related APIs don't report dyld itself.
// We work around this by manually recursing through the memory map
// until we hit a Mach header matching dyld instead. These recursive
// calls are expensive, but the first memory map generation occurs
// early in the process, when dyld is one of the only images loaded,
// so it will be hit after only a few iterations.
static mach_header *get_dyld_image_header() {
  mach_port_name_t port;
  if (task_for_pid(mach_task_self(), internal_getpid(), &port) !=
      KERN_SUCCESS) {
    return nullptr;
  }

  unsigned depth = 1;
  vm_size_t size = 0;
  vm_address_t address = 0;
  kern_return_t err = KERN_SUCCESS;
  mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;

  while (true) {
    struct vm_region_submap_info_64 info;
    err = vm_region_recurse_64(port, &address, &size, &depth,
                               (vm_region_info_t)&info, &count);
    if (err != KERN_SUCCESS) return nullptr;

    if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
      mach_header *hdr = (mach_header *)address;
      if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
          hdr->filetype == MH_DYLINKER) {
        return hdr;
      }
    }
    address += size;
  }
}

const mach_header *get_dyld_hdr() {
  if (!dyld_hdr) dyld_hdr = get_dyld_image_header();

  return dyld_hdr;
}

// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
// Google Perftools, https://github.com/gperftools/gperftools.

// NextSegmentLoad scans the current image for the next segment load command
// and returns the start and end addresses and file offset of the corresponding
// segment.
// Note that the segment addresses are not necessarily sorted.
template <u32 kLCSegment, typename SegmentCommand>
bool MemoryMappingLayout::NextSegmentLoad(MemoryMappedSegment *segment) {
  const char *lc = current_load_cmd_addr_;
  current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
  if (((const load_command *)lc)->cmd == kLCSegment) {
    const SegmentCommand *sc = (const SegmentCommand *)lc;

    if (current_image_ == kDyldImageIdx) {
      // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
      // it contains an absolute address rather than an offset for dyld.
      // To make matters even more complicated, this absolute address
      // isn't actually the absolute segment address, but the offset portion
      // of the address is accurate when combined with the dyld base address,
      // and the mask will give just this offset.
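      // Hypothetical numbers for illustration: with dyld loaded at
      // 0x10a2b4000 and sc->vmaddr == 0x7fff5fc21000, the mask keeps
      // 0x21000, so the segment starts at 0x10a2b4000 + 0x21000 ==
      // 0x10a2d5000.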
      segment->start = (sc->vmaddr & 0xfffff) + (uptr)get_dyld_hdr();
      segment->end = (sc->vmaddr & 0xfffff) + sc->vmsize + (uptr)get_dyld_hdr();
    } else {
      const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
      segment->start = sc->vmaddr + dlloff;
      segment->end = sc->vmaddr + sc->vmsize + dlloff;
    }

    // Return the initial protection.
    segment->protection = sc->initprot;
    segment->offset =
        (current_filetype_ == /*MH_EXECUTE*/ 0x2) ? sc->vmaddr : sc->fileoff;
    if (segment->filename) {
      const char *src = (current_image_ == kDyldImageIdx)
                            ? kDyldPath
                            : _dyld_get_image_name(current_image_);
      internal_strncpy(segment->filename, src, segment->filename_size);
    }
    segment->arch = current_arch_;
    internal_memcpy(segment->uuid, current_uuid_, kModuleUUIDSize);
    return true;
  }
  return false;
}
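
// Maps a Mach-O cputype/cpusubtype pair to the sanitizer's ModuleArch enum.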
ModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) {
  // Strip the capability bits (CPU_SUBTYPE_MASK) so that only the subtype
  // proper is compared below.
  cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK;
  switch (cputype) {
    case CPU_TYPE_I386:
      return kModuleArchI386;
    case CPU_TYPE_X86_64:
      if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64;
      if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H;
      CHECK(0 && "Invalid subtype of x86_64");
      return kModuleArchUnknown;
    case CPU_TYPE_ARM:
      if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6;
      if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7;
      if (cpusubtype == CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S;
      if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K;
      CHECK(0 && "Invalid subtype of ARM");
      return kModuleArchUnknown;
    case CPU_TYPE_ARM64:
      return kModuleArchARM64;
    default:
      CHECK(0 && "Invalid CPU type");
      return kModuleArchUnknown;
  }
}

static const load_command *NextCommand(const load_command *lc) {
  return (const load_command *)((char *)lc + lc->cmdsize);
}
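
// Copies the image's UUID (from its LC_UUID load command, if present) into
// |uuid_output|.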
static void FindUUID(const load_command *first_lc, u8 *uuid_output) {
  for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
    if (lc->cmd != LC_UUID) continue;

    const uuid_command *uuid_lc = (const uuid_command *)lc;
    const uint8_t *uuid = &uuid_lc->uuid[0];
    internal_memcpy(uuid_output, uuid, kModuleUUIDSize);
    return;
  }
}
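
// Returns true if the image links against a sanitizer runtime dylib (one
// whose name contains "libclang_rt."), i.e. the module is instrumented.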
static bool IsModuleInstrumented(const load_command *first_lc) {
  for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
    if (lc->cmd != LC_LOAD_DYLIB) continue;

    const dylib_command *dylib_lc = (const dylib_command *)lc;
    uint32_t dylib_name_offset = dylib_lc->dylib.name.offset;
    const char *dylib_name = ((const char *)dylib_lc) + dylib_name_offset;
    dylib_name = StripModuleName(dylib_name);
    if (dylib_name != 0 && (internal_strstr(dylib_name, "libclang_rt."))) {
      return true;
    }
  }

  return false;
}

bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
  for (; current_image_ >= kDyldImageIdx; current_image_--) {
    const mach_header *hdr = (current_image_ == kDyldImageIdx)
                                 ? get_dyld_hdr()
                                 : _dyld_get_image_header(current_image_);
    if (!hdr) continue;
    if (current_load_cmd_count_ < 0) {
      // Set up for this image.
      current_load_cmd_count_ = hdr->ncmds;
      current_magic_ = hdr->magic;
      current_filetype_ = hdr->filetype;
      current_arch_ = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
      switch (current_magic_) {
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          current_load_cmd_addr_ = (char *)hdr + sizeof(mach_header_64);
          break;
        }
#endif
        case MH_MAGIC: {
          current_load_cmd_addr_ = (char *)hdr + sizeof(mach_header);
          break;
        }
        default: {
          continue;
        }
      }
      FindUUID((const load_command *)current_load_cmd_addr_, &current_uuid_[0]);
      current_instrumented_ =
          IsModuleInstrumented((const load_command *)current_load_cmd_addr_);
    }

    for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
      switch (current_magic_) {
        // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
                  segment))
            return true;
          break;
        }
#endif
        case MH_MAGIC: {
          if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(segment))
            return true;
          break;
        }
      }
    }
    // If we get here, no more load_cmd's in this image talk about
    // segments. Go on to the next image.
  }

  return false;
}
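
// Walks all mapped segments, coalescing consecutive segments that share a
// filename into a single LoadedModule entry with multiple address ranges.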
void MemoryMappingLayout::DumpListOfModules(
    InternalMmapVector<LoadedModule> *modules) {
  Reset();
  InternalScopedString module_name(kMaxPathLength);
  MemoryMappedSegment segment(module_name.data(), kMaxPathLength);
  for (uptr i = 0; Next(&segment); i++) {
    if (segment.filename[0] == '\0') continue;
    LoadedModule *cur_module = nullptr;
    if (!modules->empty() &&
        0 == internal_strcmp(segment.filename, modules->back().full_name())) {
      cur_module = &modules->back();
    } else {
      modules->push_back(LoadedModule());
      cur_module = &modules->back();
      cur_module->set(segment.filename, segment.start, segment.arch,
                      segment.uuid, current_instrumented_);
    }
    cur_module->addAddressRange(segment.start, segment.end,
                                segment.IsExecutable(), segment.IsWritable());
  }
}

}  // namespace __sanitizer

#endif  // SANITIZER_MAC