//===-- sanitizer_fuchsia.cc ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() { return _zx_clock_get(ZX_CLOCK_UTC); }

u64 MonotonicNanoTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }

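// Fuchsia has no getpid(); the kernel object ID (koid) of the process is
// used instead, since it uniquely identifies the process on the system.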
uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}

void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE!  It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

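// A sketch of the lock protocol implemented below (three futex states):
//   MtxUnlocked -> MtxLocked:   uncontended fast path in Lock(), no syscall.
//   any state   -> MtxSleeping: a contender records that waiters may exist,
//                               then sleeps on the futex word.
// Unlock() swaps in MtxUnlocked and issues a futex wake only if the previous
// state was MtxSleeping, keeping the uncontended path entirely syscall-free.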
void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status =
        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

sanitizer_shadow_bounds_t ShadowBounds;

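// memory_limit is an exclusive upper bound on userspace addresses, so the
// highest usable address is one below it.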
uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
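  // The mapping (when created) holds its own reference to the VMO, so the
  // handle can be closed here regardless of whether zx_vmar_map succeeded.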
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate(
          _zx_vmar_root_self(),
          ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
          0, init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}

static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);

  return reinterpret_cast<uptr>(addr);
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
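  // For example (illustrative numbers only): with size = 0x10000 and
  // alignment = 0x10000, map_size is 0x20000, so whatever map_addr the
  // kernel picks, RoundUpTo(map_addr, 0x10000) + 0x10000 still fits inside
  // [map_addr, map_addr + map_size).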
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
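  // Writing from [beg, beg + size) into a scratch VMO forces the kernel to
  // probe the source range; an unmapped or unreadable page makes the syscall
  // fail instead of faulting this process.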
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
                            map_size, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;
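
  // Output is buffered per thread and flushed a line at a time, so that
  // concurrent writers do not interleave partial lines in the log.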
  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = cur;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

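// For example, RawWrite("ab\ncd") logs "ab\n" immediately and leaves "cd" in
// this thread's buffer until a later newline (or a full buffer) flushes it.
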
void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
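  // Per the Zircon API, zx_cprng_draw always succeeds for requests up to
  // ZX_CPRNG_DRAW_MAX_LEN (checked above), so there is no failure path.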
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() {
  return zx_system_get_num_cpus();
}

uptr GetRSS() { UNIMPLEMENTED(); }

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA