//===-- sanitizer_fuchsia.cc ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

// TODO(phosek): remove this and replace it with ZX_TIME_INFINITE
#define ZX_TIME_INFINITE_OLD INT64_MAX

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() { return _zx_clock_get(ZX_CLOCK_UTC); }

u64 MonotonicNanoTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }
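
// There is no getpid() on Fuchsia; the process's koid (kernel object id)
// serves as a globally unique process identifier instead.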
uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
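
// The mutex word is a three-state futex: MtxUnlocked means free, MtxLocked
// means held with no waiters, and MtxSleeping means held with at least one
// waiter possibly parked in _zx_futex_wait, so Unlock must issue a wake.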
BlockingMutex::BlockingMutex() {
  // NOTE!  It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
                                        MtxSleeping, ZX_TIME_INFINITE_OLD);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
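
// Usage sketch (hypothetical caller, not part of this file):
//   static BlockingMutex mu(LINKER_INITIALIZED);
//   void Example() {
//     BlockingMutexLock l(&mu);  // scoped lock/unlock from sanitizer_mutex.h
//     // ...critical section...
//   }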

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
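
// Zircon has no mmap: anonymous memory is created as a VMO (virtual memory
// object) and then mapped into a VMAR (virtual memory address region), here
// the process's root VMAR.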
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, size,
                       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate_old(_zx_vmar_root_self(), 0, init_size,
                            ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
                                ZX_VM_FLAG_CAN_MAP_SPECIFIC,
                            &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}
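
// Mappings inside a reserved range go through the child VMAR created by
// Init above; zx_vmar_map takes an offset relative to the VMAR's base, so
// the fixed address is translated into such an offset below.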
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status = _zx_vmar_map_old(
      vmar, offset, vmo, 0, map_size,
      ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
      &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  if (addr == reinterpret_cast<uptr>(base_))
    // If we unmap the whole range, just null out the base.
    base_ = (size == size_) ? nullptr : reinterpret_cast<void *>(addr + size);
  else
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  size_ -= size;
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size,
                 static_cast<zx_handle_t>(os_handle_));
}
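
// Usage sketch (hypothetical caller, not part of this file): reserve the
// address space first, then commit and release pieces of it:
//   ReservedAddressRange range;
//   uptr base = range.Init(range_size, "example");
//   range.MapOrDie(base, chunk_size);  // commit a chunk at the start
//   range.Unmap(base, chunk_size);     // release it again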

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
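  // For example, with size = 0x10000 and alignment = 0x10000 this maps
  // 0x20000 bytes: any such window contains a 0x10000-aligned address with
  // 0x10000 bytes still in range, so the middle can be remapped aligned and
  // the excess head and tail unmapped.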
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map_old(_zx_vmar_root_self(), addr - info.base, vmo,
                                  0, size,
                                  ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
                                      ZX_VM_FLAG_SPECIFIC_OVERWRITE,
                                  &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
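  // Writing from the range into a scratch VMO makes the kernel read the
  // whole range, so an unreadable address turns into an error status from
  // _zx_vmo_write rather than a fault in this process.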
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                                ZX_VM_FLAG_PERM_READ, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;
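
  // Buffer output per-thread and hand only whole lines to
  // __sanitizer_log_write: lastLineEnd marks the end of the last complete
  // line buffered so far, cur the total number of buffered bytes.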
  while (*buffer) {
    if (cur == size) {
      if (lastLineEnd == 0)
        lastLineEnd = cur;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
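  // With the size checked above, _zx_cprng_draw cannot fail, so the
  // blocking flag can be ignored.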
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() {
  return zx_system_get_num_cpus();
}

uptr GetRSS() { UNIMPLEMENTED(); }

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA