//===-- hwasan_linux.cc -----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
//
//===----------------------------------------------------------------------===//
16 #include "sanitizer_common/sanitizer_platform.h"
17 #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
20 #include "hwasan_dynamic_shadow.h"
21 #include "hwasan_interface_internal.h"
22 #include "hwasan_mapping.h"
23 #include "hwasan_report.h"
24 #include "hwasan_thread.h"
32 #include <sys/resource.h>
37 #include "sanitizer_common/sanitizer_common.h"
38 #include "sanitizer_common/sanitizer_procmaps.h"
42 static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
43 CHECK_EQ((beg % GetMmapGranularity()), 0);
44 CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
45 uptr size = end - beg + 1;
46 DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
47 if (!MmapFixedNoReserve(beg, size, name)) {
49 "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
50 "Perhaps you're using ulimit -v\n",
54 if (common_flags()->no_huge_pages_for_shadow) NoHugePagesInRegion(beg, size);
55 if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
58 static void ProtectGap(uptr addr, uptr size) {
61 void *res = MmapFixedNoAccess(addr, size, "shadow gap");
62 if (addr == (uptr)res)
64 // A few pages at the start of the address space can not be protected.
65 // But we really want to protect as much as possible, to prevent this memory
66 // being returned as a result of a non-FIXED mmap().
68 uptr step = GetMmapGranularity();
72 void *res = MmapFixedNoAccess(addr, size, "shadow gap");
73 if (addr == (uptr)res)
79 "ERROR: Failed to protect shadow gap [%p, %p]. "
80 "HWASan cannot proceed correctly. ABORTING.\n", (void *)addr,
81 (void *)(addr + size));
86 static uptr kLowMemStart;
87 static uptr kLowMemEnd;
88 static uptr kLowShadowEnd;
89 static uptr kLowShadowStart;
90 static uptr kHighShadowStart;
91 static uptr kHighShadowEnd;
92 static uptr kHighMemStart;
93 static uptr kHighMemEnd;
95 static void PrintRange(uptr start, uptr end, const char *name) {
96 Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
99 static void PrintAddressSpaceLayout() {
100 PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
101 if (kHighShadowEnd + 1 < kHighMemStart)
102 PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
104 CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
105 PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
107 if (kLowShadowEnd + 1 < kHighShadowStart)
108 PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
110 CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
111 PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
112 if (kLowMemEnd + 1 < kLowShadowStart)
113 PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
115 CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
116 PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
117 CHECK_EQ(0, kLowMemStart);
119 if (kLowMemEnd + 1 < kHighShadowStart)
120 PrintRange(kLowMemEnd + 1, kHighShadowStart - 1, "ShadowGap");
122 CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
123 PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
124 CHECK_EQ(kLowShadowEnd + 1, kLowMemStart);
125 PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
126 PrintRange(0, kLowShadowStart - 1, "ShadowGap");
130 static uptr GetHighMemEnd() {
131 // HighMem covers the upper part of the address space.
132 uptr max_address = GetMaxUserVirtualAddress();
134 // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
136 max_address |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
140 static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
141 // Set the shadow memory address to uninitialized.
142 __hwasan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
143 uptr shadow_start = SHADOW_OFFSET;
144 // Detect if a dynamic shadow address must be used and find the available
145 // location when necessary. When dynamic address is used, the macro
146 // kLowShadowBeg expands to __hwasan_shadow_memory_dynamic_address which
147 // was just set to kDefaultShadowSentinel.
148 if (shadow_start == kDefaultShadowSentinel) {
149 __hwasan_shadow_memory_dynamic_address = 0;
150 CHECK_EQ(0, SHADOW_OFFSET);
151 shadow_start = FindDynamicShadowStart(shadow_size_bytes);
153 // Update the shadow memory address (potentially) used by instrumentation.
154 __hwasan_shadow_memory_dynamic_address = shadow_start;
158 // Define the entire memory range.
159 kHighMemEnd = GetHighMemEnd();
161 // Determine shadow memory base offset.
162 InitializeShadowBaseAddress(MEM_TO_SHADOW_SIZE(kHighMemEnd));
164 // Place the low memory first.
166 kLowMemEnd = SHADOW_OFFSET - 1;
169 // LowMem covers as much of the first 4GB as possible.
170 kLowMemEnd = (1UL << 32) - 1;
171 kLowMemStart = MEM_TO_SHADOW(kLowMemEnd) + 1;
174 // Define the low shadow based on the already placed low memory.
175 kLowShadowEnd = MEM_TO_SHADOW(kLowMemEnd);
176 kLowShadowStart = SHADOW_OFFSET ? SHADOW_OFFSET : MEM_TO_SHADOW(kLowMemStart);
178 // High shadow takes whatever memory is left up there (making sure it is not
179 // interfering with low memory in the fixed case).
180 kHighShadowEnd = MEM_TO_SHADOW(kHighMemEnd);
181 kHighShadowStart = Max(kLowMemEnd, MEM_TO_SHADOW(kHighShadowEnd)) + 1;
183 // High memory starts where allocated shadow allows.
184 kHighMemStart = SHADOW_TO_MEM(kHighShadowStart);
186 // Check the sanity of the defined memory ranges (there might be gaps).
187 CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
188 CHECK_GT(kHighMemStart, kHighShadowEnd);
189 CHECK_GT(kHighShadowEnd, kHighShadowStart);
190 CHECK_GT(kHighShadowStart, kLowMemEnd);
191 CHECK_GT(kLowMemEnd, kLowMemStart);
192 CHECK_GT(kLowShadowEnd, kLowShadowStart);
194 CHECK_GT(kLowShadowStart, kLowMemEnd);
196 CHECK_GT(kLowMemEnd, kLowShadowStart);
199 PrintAddressSpaceLayout();
201 // Reserve shadow memory.
202 ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
203 ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");
205 // Protect all the gaps.
206 ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
208 if (kLowMemEnd + 1 < kLowShadowStart)
209 ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
210 if (kLowShadowEnd + 1 < kHighShadowStart)
211 ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
213 if (kLowMemEnd + 1 < kHighShadowStart)
214 ProtectGap(kLowMemEnd + 1, kHighShadowStart - kLowMemEnd - 1);
216 if (kHighShadowEnd + 1 < kHighMemStart)
217 ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);
222 bool MemIsApp(uptr p) {
223 CHECK(GetTagFromPointer(p) == 0);
224 return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
227 static void HwasanAtExit(void) {
228 if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
230 if (hwasan_report_count > 0) {
231 // ReportAtExitStatistics();
232 if (common_flags()->exitcode)
233 internal__exit(common_flags()->exitcode);
237 void InstallAtExitHandler() {
238 atexit(HwasanAtExit);
241 // ---------------------- TSD ---------------- {{{1
// Thread-specific-data key holding the current HwasanThread*, created once in
// HwasanTSDInit; tsd_key_inited guards against double initialization.
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;
246 void HwasanTSDInit(void (*destructor)(void *tsd)) {
247 CHECK(!tsd_key_inited);
248 tsd_key_inited = true;
249 CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
252 HwasanThread *GetCurrentThread() {
253 return (HwasanThread*)pthread_getspecific(tsd_key);
256 void SetCurrentThread(HwasanThread *t) {
257 // Make sure that HwasanTSDDtor gets called at the end.
258 CHECK(tsd_key_inited);
259 // Make sure we do not reset the current HwasanThread.
260 CHECK_EQ(0, pthread_getspecific(tsd_key));
261 pthread_setspecific(tsd_key, (void *)t);
264 void HwasanTSDDtor(void *tsd) {
265 HwasanThread *t = (HwasanThread*)tsd;
266 if (t->destructor_iterations_ > 1) {
267 t->destructor_iterations_--;
268 CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
271 // Make sure that signal handler can not see a stale current thread pointer.
272 atomic_signal_fence(memory_order_seq_cst);
273 HwasanThread::TSDDtor(tsd);
284 static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
285 // Access type is passed in a platform dependent way (see below) and encoded
286 // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
287 // recoverable. Valid values of Y are 0 to 4, which are interpreted as
288 // log2(access_size), and 0xF, which means that access size is passed via
289 // platform dependent register (see below).
290 #if defined(__aarch64__)
291 // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
292 // access size is stored in X1 register. Access address is always in X0
294 uptr pc = (uptr)info->si_addr;
295 const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
296 if ((code & 0xff00) != 0x900)
297 return AccessInfo{}; // Not ours.
299 const bool is_store = code & 0x10;
300 const bool recover = code & 0x20;
301 const uptr addr = uc->uc_mcontext.regs[0];
302 const unsigned size_log = code & 0xf;
303 if (size_log > 4 && size_log != 0xf)
304 return AccessInfo{}; // Not ours.
305 const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
307 #elif defined(__x86_64__)
308 // Access type is encoded in the instruction following INT3 as
309 // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
310 // RSI register. Access address is always in RDI register.
311 uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
312 uint8_t *nop = (uint8_t*)pc;
313 if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
315 return AccessInfo{}; // Not ours.
316 const unsigned code = *(nop + 3);
318 const bool is_store = code & 0x10;
319 const bool recover = code & 0x20;
320 const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
321 const unsigned size_log = code & 0xf;
322 if (size_log > 4 && size_log != 0xf)
323 return AccessInfo{}; // Not ours.
325 size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
328 # error Unsupported architecture
331 return AccessInfo{addr, size, is_store, !is_store, recover};
334 static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
335 AccessInfo ai = GetAccessInfo(info, uc);
336 if (!ai.is_store && !ai.is_load)
339 InternalMmapVector<BufferedStackTrace> stack_buffer(1);
340 BufferedStackTrace *stack = stack_buffer.data();
342 SignalContext sig{info, uc};
343 GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, uc,
344 common_flags()->fast_unwind_on_fatal);
346 ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store);
348 ++hwasan_report_count;
349 if (flags()->halt_on_error || !ai.recover)
352 #if defined(__aarch64__)
353 uc->uc_mcontext.pc += 4;
354 #elif defined(__x86_64__)
356 # error Unsupported architecture
361 static void OnStackUnwind(const SignalContext &sig, const void *,
362 BufferedStackTrace *stack) {
363 GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context,
364 common_flags()->fast_unwind_on_fatal);
367 void HwasanOnDeadlySignal(int signo, void *info, void *context) {
368 // Probably a tag mismatch.
369 if (signo == SIGTRAP)
370 if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context))
373 HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
377 } // namespace __hwasan
379 #endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD