1 //===-- asan_fake_stack.cc ------------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of AddressSanitizer, an address sanity checker.
12 // FakeStack is used to detect use-after-return bugs.
13 //===----------------------------------------------------------------------===//
14 #include "asan_allocator.h"
15 #include "asan_poisoning.h"
16 #include "asan_thread.h"
// Replicate the single-byte stack-after-return magic into 2-, 4- and 8-byte
// patterns so the shadow can be filled one u64 word at a time (see SetShadow,
// which writes through a u64* shadow pointer).
20 static const u64 kMagic1 = kAsanStackAfterReturnMagic;
21 static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
22 static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
23 static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
// Redzones placed around dynamic allocas: 32 bytes wide; the mask is used to
// round addresses up to a 32-byte boundary (see __asan_alloca_poison).
25 static const u64 kAllocaRedzoneSize = 32UL;
26 static const u64 kAllocaRedzoneMask = 31UL;
28 // For small size classes inline PoisonShadow for better performance.
// Writes `magic` into the shadow of the fake frame at `ptr` belonging to
// `class_id`. Small classes loop over (1 << class_id) shadow words directly;
// otherwise the generic PoisonShadow covers only `size` bytes.
29 ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
30 CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
31 u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
// One u64 store per iteration covers 64 bytes of app memory at scale 3.
33 for (uptr i = 0; i < (1U << class_id); i++) {
35 SanitizerBreakOptimization(0); // Make sure this does not become memset.
38 // The size class is too big, it's cheaper to poison only size bytes.
39 PoisonShadow(ptr, size, static_cast<u8>(magic));
// Allocates and initializes a FakeStack whose total size is derived from
// stack_size_log (clamped to [2^16, 2^24 on 32-bit / 2^28 on 64-bit]).
// The mapping honors flags()->uar_noreserve to avoid committing memory
// up front.
43 FakeStack *FakeStack::Create(uptr stack_size_log) {
44 static uptr kMinStackSizeLog = 16;
45 static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
// Clamp the requested size into the supported range.
46 if (stack_size_log < kMinStackSizeLog)
47 stack_size_log = kMinStackSizeLog;
48 if (stack_size_log > kMaxStackSizeLog)
49 stack_size_log = kMaxStackSizeLog;
50 uptr size = RequiredSize(stack_size_log);
51 FakeStack *res = reinterpret_cast<FakeStack *>(
52 flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
53 : MmapOrDie(size, "FakeStack"));
54 res->stack_size_log_ = stack_size_log;
55 u8 *p = reinterpret_cast<u8 *>(res);
56 VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
57 "mmapped %zdK, noreserve=%d \n",
58 GetCurrentTidOrInvalid(), p,
59 p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
60 size >> 10, flags()->uar_noreserve);
// Tears down this FakeStack for thread `tid`: optionally reports per-class
// allocation hints, releases the shadow, and unmaps the region.
64 void FakeStack::Destroy(int tid) {
// At verbosity >= 2, dump "class: hint/frames" for every size class.
66 if (Verbosity() >= 2) {
67 InternalScopedString str(kNumberOfSizeClasses * 50);
68 for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
69 str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
70 NumberOfFrames(stack_size_log(), class_id));
71 Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
73 uptr size = RequiredSize(stack_size_log_);
// Release the shadow of this region before returning the memory itself.
74 FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
75 UnmapOrDie(this, size);
// Poisons the shadow of the whole FakeStack region with `magic`.
78 void FakeStack::PoisonAll(u8 magic) {
79 PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
// Grabs a free FakeFrame from size class `class_id`, scanning at most one
// full ring of frames starting at the per-class hint position. Records the
// caller's real stack pointer in the frame. Returns 0 when every frame in
// the class is taken.
84 FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
86 CHECK_LT(class_id, kNumberOfSizeClasses);
89 uptr &hint_position = hint_position_[class_id];
90 const int num_iter = NumberOfFrames(stack_size_log, class_id);
91 u8 *flags = GetFlags(stack_size_log, class_id);
92 for (int i = 0; i < num_iter; i++) {
93 uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
94 // This part is tricky. On one hand, checking and setting flags[pos]
95 // should be atomic to ensure async-signal safety. But on the other hand,
96 // if the signal arrives between checking and setting flags[pos], the
97 // signal handler's fake stack will start from a different hint_position
98 // and so will not touch this particular byte. So, it is safe to do this
99 // with regular non-atomic load and store (at least I was not able to make
101 if (flags[pos]) continue;
103 FakeFrame *res = reinterpret_cast<FakeFrame *>(
104 GetFrame(stack_size_log, class_id, pos));
105 res->real_stack = real_stack;
// Remember where this frame's in-use flag lives so Deallocate can clear it.
106 *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
109 return 0; // We are out of fake stack.
// If `ptr` lies inside this FakeStack, computes the enclosing frame's
// boundaries: *frame_beg points just past the FakeFrame header, *frame_end
// at the end of the frame's size class. Returns 0 when ptr is outside the
// fake stack region.
112 uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
113 uptr stack_size_log = this->stack_size_log();
114 uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
115 uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
116 if (ptr < beg || ptr >= end) return 0;
// Each size class occupies a contiguous 2^stack_size_log slice after beg.
117 uptr class_id = (ptr - beg) >> stack_size_log;
118 uptr base = beg + (class_id << stack_size_log);
120 CHECK_LT(ptr, base + (1UL << stack_size_log));
// Frame index within the class; frame size doubles with each class_id.
121 uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
122 uptr res = base + pos * BytesInSizeClass(class_id);
123 *frame_end = res + BytesInSizeClass(class_id);
124 *frame_beg = res + sizeof(FakeFrame);
// Notification hook for noreturn events (longjmp/throw); see the comment on
// GC() below for how the leaked frames are reclaimed afterwards.
128 void FakeStack::HandleNoReturn() {
132 // When throw, longjmp or some such happens we don't call OnFree() and
133 // as the result may leak one or more fake frames, but the good news is that
134 // we are notified about all such events by HandleNoReturn().
135 // If we recently had such no-return event we need to collect garbage frames.
136 // We do it based on their 'real_stack' values -- everything that is lower
137 // than the current real_stack is garbage.
138 NOINLINE void FakeStack::GC(uptr real_stack) {
// Walk every allocated frame in every size class and reclaim those whose
// recorded real_stack is below the current one (i.e. frames belonging to
// stack levels that have already been abandoned).
140 for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
141 u8 *flags = GetFlags(stack_size_log(), class_id);
142 for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
144 if (flags[i] == 0) continue; // not allocated.
145 FakeFrame *ff = reinterpret_cast<FakeFrame *>(
146 GetFrame(stack_size_log(), class_id, i));
147 if (ff->real_stack < real_stack) {
// Invokes `callback(begin, end, arg)` for every currently-allocated fake
// frame, passing the frame's full byte range for its size class.
156 void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
157 for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
158 u8 *flags = GetFlags(stack_size_log(), class_id);
159 for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
161 if (flags[i] == 0) continue; // not allocated.
162 FakeFrame *ff = reinterpret_cast<FakeFrame *>(
163 GetFrame(stack_size_log(), class_id, i));
164 uptr begin = reinterpret_cast<uptr>(ff);
165 callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
// Fast thread-local cache of the current thread's FakeStack. On Linux
// (non-Android) a real THREADLOCAL variable is used; on other targets the
// accessors are stubs that always return 0 / do nothing.
170 #if SANITIZER_LINUX && !SANITIZER_ANDROID
171 static THREADLOCAL FakeStack *fake_stack_tls;
173 FakeStack *GetTLSFakeStack() {
174 return fake_stack_tls;
176 void SetTLSFakeStack(FakeStack *fs) {
180 FakeStack *GetTLSFakeStack() { return 0; }
181 void SetTLSFakeStack(FakeStack *fs) { }
182 #endif  // SANITIZER_LINUX && !SANITIZER_ANDROID
// Slow path: look up the current AsanThread and return its fake stack.
184 static FakeStack *GetFakeStack() {
185 AsanThread *t = GetCurrentThread();
187 return t->fake_stack();
// Fast path: prefer the TLS-cached FakeStack; fall through to the slow
// thread lookup only when use-after-return detection is enabled.
190 static FakeStack *GetFakeStackFast() {
191 if (FakeStack *fs = GetTLSFakeStack())
193 if (!__asan_option_detect_stack_use_after_return)
195 return GetFakeStack();
// Allocates a fake frame of `class_id` for an instrumented function's locals
// of `size` bytes. Returns 0 (caller uses the real stack) when no fake stack
// or no free frame is available. The address of a local is recorded as the
// frame's real_stack so GC() can later tell which frames are stale.
198 ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
199 FakeStack *fs = GetFakeStackFast();
202 uptr real_stack = reinterpret_cast<uptr>(&local_stack);
203 FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
204 if (!ff) return 0; // Out of fake stack.
205 uptr ptr = reinterpret_cast<uptr>(ff);
// Unpoison the frame's shadow (magic 0) so the locals are addressable.
206 SetShadow(ptr, size, class_id, 0);
// Releases the fake frame at `ptr` and poisons its shadow with the
// stack-after-return magic so later accesses are reported as UAR.
210 ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
211 FakeStack::Deallocate(ptr, class_id);
212 SetShadow(ptr, size, class_id, kMagic8);
215 } // namespace __asan
217 // ---------------------- Interface ---------------- {{{1
218 using namespace __asan;
// Stamps out the exported __asan_stack_malloc_N / __asan_stack_free_N pair
// for one size class; the compiler emits calls to these around instrumented
// stack frames. (Comment kept outside the macro: inserting lines between the
// backslash continuations would break it.)
219 #define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
220 extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
221 __asan_stack_malloc_##class_id(uptr size) { \
222 return OnMalloc(class_id, size); \
224 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
225 uptr ptr, uptr size) { \
226 OnFree(ptr, class_id, size); \
// Instantiate the interface pair for every supported size class (0..10).
229 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
230 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
231 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
232 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
233 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
234 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
235 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
236 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
237 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
238 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
239 DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
241 SANITIZER_INTERFACE_ATTRIBUTE
242 void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
// Public interface: if `addr` lies within `fake_stack` and belongs to a live
// frame, returns the frame's saved real-stack pointer and optionally fills
// *beg/*end with the frame's usable byte range; returns 0 otherwise.
244 SANITIZER_INTERFACE_ATTRIBUTE
245 void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
247 FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
249 uptr frame_beg, frame_end;
250 FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
251 reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
252 if (!frame) return 0;
// Only frames still marked live are reported.
253 if (frame->magic != kCurrentStackFrameMagic)
255 if (beg) *beg = reinterpret_cast<void*>(frame_beg);
256 if (end) *end = reinterpret_cast<void*>(frame_end);
257 return reinterpret_cast<void*>(frame->real_stack);
// Public interface: poison the redzones around a dynamic alloca of `size`
// bytes at `addr`: a full left redzone below it, a partial redzone covering
// the tail of the allocation up to 32-byte alignment, and a full right
// redzone above.
260 SANITIZER_INTERFACE_ATTRIBUTE
261 void __asan_alloca_poison(uptr addr, uptr size) {
262 uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
263 uptr PartialRzAddr = addr + size;
// Round the right redzone start up to the 32-byte alloca alignment.
264 uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
265 uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
266 FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
267 FastPoisonShadowPartialRightRedzone(
268 PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
269 RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
270 FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
// Public interface: unpoison the shadow of the alloca region [top, bottom)
// (zeroing it) when the allocas go out of scope. No-op for a null or
// inverted range.
273 SANITIZER_INTERFACE_ATTRIBUTE
274 void __asan_allocas_unpoison(uptr top, uptr bottom) {
275 if ((!top) || (top > bottom)) return;
276 REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
277 (bottom - top) / SHADOW_GRANULARITY);