//===-- sanitizer_fuchsia.cc ---------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//
15 #include "sanitizer_fuchsia.h"
18 #include "sanitizer_common.h"
19 #include "sanitizer_libc.h"
20 #include "sanitizer_mutex.h"
21 #include "sanitizer_stacktrace.h"
28 #include <zircon/errors.h>
29 #include <zircon/process.h>
30 #include <zircon/syscalls.h>
namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}
static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}
u64 NanoTime() { return _zx_time_get(ZX_CLOCK_UTC); }

u64 MonotonicNanoTime() { return _zx_time_get(ZX_CLOCK_MONOTONIC); }
uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}
uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

uptr GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }
void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void StartReportDeadlySignal() {}
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
struct UnwindTraceArg {
  BufferedStackTrace *stack;
  u32 max_depth;
};
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
  UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
  CHECK_LT(arg->stack->size, arg->max_depth);
  uptr pc = _Unwind_GetIP(ctx);
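  // A PC inside the first page cannot be a valid code address (the zero
  // page is never mapped), so treat it as the end of the usable trace.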
118 if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
119 arg->stack->trace_buffer[arg->stack->size++] = pc;
120 return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  size = 0;
  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
  _Unwind_Backtrace(Unwind_Trace, &arg);
  // We need to pop a few frames so that pc is on top.
  uptr to_pop = LocatePcInTrace(pc);
  // trace_buffer[0] belongs to the current function so we always pop it,
  // unless there is only 1 frame in the stack trace (1 frame is always better
  // than 0!).
  PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
  trace_buffer[0] = pc;
}
void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
                                                    u32 max_depth) {
  CHECK_NE(context, nullptr);
  UNREACHABLE("signal context doesn't exist");
}
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
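
// The lock word moves through three states: MtxUnlocked, MtxLocked (held
// with no waiters), and MtxSleeping (held with at least one waiter parked
// on the futex). Unlock only needs to issue a futex wake when it observes
// MtxSleeping.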
BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}
void BlockingMutex::Lock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
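    // Park on the futex until the lock word changes from MtxSleeping.
    // ZX_ERR_BAD_STATE means the value was no longer MtxSleeping by the time
    // the kernel checked -- the lock was released in the window before we
    // slept -- so just retry the exchange.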
    zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
                                        MtxSleeping, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}
void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}
void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }
sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
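  // memory_limit is the first address past the end of userspace, so the
  // highest usable address is one below it.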
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate(_zx_vmar_root_self(), 0, init_size,
                        ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
                            ZX_VM_FLAG_CAN_MAP_SPECIFIC,
                        &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  // Note internal_strlen, not sizeof: name is a pointer, so sizeof(name) - 1
  // would truncate the VMO name to pointer-size - 1 bytes.
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  // Callers check that the requested mapping fits inside the reserved range.
  uintptr_t addr;

  status = _zx_vmar_map(
      vmar, offset, vmo, 0, map_size,
      ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
      &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size) {
  // The mapping must lie within the reserved range.
  DCHECK_GE(reinterpret_cast<uptr>(base_) + size_, fixed_addr + map_size);
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
                          false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size) {
  DCHECK_GE(reinterpret_cast<uptr>(base_) + size_, fixed_addr + map_size);
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
                          true);
}
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}
void ReservedAddressRange::Unmap(uptr fixed_addr, uptr size) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base_);
  uptr addr = reinterpret_cast<uptr>(base_) + offset;
  void *addr_as_void = reinterpret_cast<void *>(addr);
  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
  // Only unmap at the beginning or end of the range.
  CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_));
  CHECK_LE(size, size_);
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size,
                 static_cast<zx_handle_t>(os_handle_));
  if (addr_as_void == base_) {
    base_ = reinterpret_cast<void *>(addr + size);
  }
  size_ = size_ - size;
}
// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
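  // For example, with size = 64 KiB and alignment = 64 KiB, mapping 128 KiB
  // guarantees that some 64 KiB-aligned address falls inside the mapping
  // with a full 64 KiB available after it.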
  size_t map_size = size + alignment;
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  if (status == ZX_OK) {
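    // The mapping splits into three pieces: leading excess [map_addr, addr),
    // the aligned region [addr, end), and trailing excess [end, map_end).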
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status =
            _zx_vmar_map(_zx_vmar_root_self(), addr - info.base, vmo, 0, size,
                         ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
                             ZX_VM_FLAG_SPECIFIC_OVERWRITE,
                         &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}
void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
void DumpProcessMap() {
  // TODO(mcgrathr): write it
}
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
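  // Probe the range by writing it into a scratch VMO: if any page in
  // [beg, beg + size) is unmapped or unreadable, zx_vmo_write fails in the
  // kernel instead of faulting this process.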
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    while (size > 0) {
      size_t wrote;
      status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size,
                             &wrote);
      if (status != ZX_OK) break;
      CHECK_GT(wrote, 0);
      CHECK_LE(wrote, size);
      beg += wrote;
      size -= wrote;
    }
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}
// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                            ZX_VM_FLAG_PERM_READ, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}
void RawWrite(const char *buffer) {
  __sanitizer_log_write(buffer, internal_strlen(buffer));
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}
char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  // Guard against StoredArgv itself being null, not just argv[0].
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) argv0 = StoredArgv[0];
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}
uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
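  // zx_cprng_draw fills at most ZX_CPRNG_DRAW_MAX_LEN bytes per call;
  // callers wanting more than that must loop.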
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  size_t size;
  CHECK_EQ(_zx_cprng_draw(buffer, length, &size), ZX_OK);
  CHECK_EQ(size, length);
  return true;
}

u32 GetNumberOfCPUs() {
  return zx_system_get_num_cpus();
}
uptr GetRSS() { UNIMPLEMENTED(); }

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}
void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}
void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA