1 //===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of HWAddressSanitizer.
12 //===----------------------------------------------------------------------===//
14 // HwasanThreadList is a registry for live threads, as well as an allocator for
15 // HwasanThread objects and their stack history ring buffers. There are
16 // constraints on memory layout of the shadow region and CompactRingBuffer that
17 // are part of the ABI contract between compiler-rt and llvm.
19 // * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
20 // * All stack ring buffers are located within (2**kShadowBaseAlignment)
21 // sized region below and adjacent to the shadow region.
22 // * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
23 // aligned to twice its size. The value of N can be different for each buffer.
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
27 // A_next = (A + sizeof(uptr)) & ~((1 << (N + 13)) - 1)
28 // is the address of the next element of that ring buffer (with wrap-around).
29 // And, with K = kShadowBaseAlignment,
30 // S = (A | ((1 << K) - 1)) + 1
31 // (align up to kShadowBaseAlignment) is the start of the shadow region.
33 // These calculations are used in compiler instrumentation to update the ring
34 // buffer and obtain the base address of shadow using only two inputs: address
35 // of the current element of the ring buffer, and N (i.e. size of the ring
36 // buffer). Since the value of N is very limited, we pack both inputs into a
37 // single thread-local word as
38 // (1 << (N + 56)) | A
39 // See the implementation of class CompactRingBuffer, which is what is stored in
40 // said thread-local word.
42 // Note the unusual way of aligning up the address of the shadow:
43 // (A | ((1 << K) - 1)) + 1
44 // It is only correct if A is not already equal to the shadow base address, but
45 // it saves 2 instructions on AArch64.
48 #include "hwasan_allocator.h"
49 #include "hwasan_flags.h"
50 #include "hwasan_thread.h"
52 #include "sanitizer_common/sanitizer_placement_new.h"
56 static uptr RingBufferSize() {
57 uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
58 // FIXME: increase the limit to 8 once this bug is fixed:
59 // https://bugs.llvm.org/show_bug.cgi?id=39030
60 for (int shift = 1; shift < 7; ++shift) {
61 uptr size = 4096 * (1ULL << shift);
62 if (size >= desired_bytes)
65 Printf("stack history size too large: %d\n", flags()->stack_history_size);
70 struct ThreadListHead {
73 ThreadListHead() : list_(nullptr) {}
75 void Push(Thread *t) {
87 void Remove(Thread *t) {
88 Thread **cur = &list_;
89 while (*cur != t) cur = &(*cur)->next_;
90 CHECK(*cur && "thread not found");
106 uptr total_stack_size;
109 class HwasanThreadList {
111 HwasanThreadList(uptr storage, uptr size)
112 : free_space_(storage),
113 free_space_end_(storage + size),
114 ring_buffer_size_(RingBufferSize()) {}
116 Thread *CreateCurrentThread() {
119 SpinMutexLock l(&list_mutex_);
120 t = free_list_.Pop();
122 internal_memset((void *)t, 0, sizeof(Thread) + ring_buffer_size_);
127 t->Init((uptr)(t + 1), ring_buffer_size_);
132 void ReleaseThread(Thread *t) {
133 // FIXME: madvise away the ring buffer?
134 RemoveThreadStats(t);
136 SpinMutexLock l(&list_mutex_);
137 live_list_.Remove(t);
141 Thread *GetThreadByBufferAddress(uptr p) {
142 uptr align = ring_buffer_size_ * 2;
143 return (Thread *)(RoundDownTo(p, align) - sizeof(Thread));
146 uptr MemoryUsedPerThread() {
147 uptr res = sizeof(Thread) + ring_buffer_size_;
148 if (auto sz = flags()->heap_history_size)
149 res += HeapAllocationsRingBuffer::SizeInBytes(sz);
154 void VisitAllLiveThreads(CB cb) {
155 SpinMutexLock l(&list_mutex_);
156 live_list_.ForEach(cb);
159 void AddThreadStats(Thread *t) {
160 SpinMutexLock l(&stats_mutex_);
161 stats_.n_live_threads++;
162 stats_.total_stack_size += t->stack_size();
165 void RemoveThreadStats(Thread *t) {
166 SpinMutexLock l(&stats_mutex_);
167 stats_.n_live_threads--;
168 stats_.total_stack_size -= t->stack_size();
171 ThreadStats GetThreadStats() {
172 SpinMutexLock l(&stats_mutex_);
177 Thread *AllocThread() {
178 uptr align = ring_buffer_size_ * 2;
179 uptr ring_buffer_start = RoundUpTo(free_space_ + sizeof(Thread), align);
180 free_space_ = ring_buffer_start + ring_buffer_size_;
181 CHECK(free_space_ <= free_space_end_ && "out of thread memory");
182 return (Thread *)(ring_buffer_start - sizeof(Thread));
186 uptr free_space_end_;
187 uptr ring_buffer_size_;
189 ThreadListHead free_list_;
190 ThreadListHead live_list_;
191 SpinMutex list_mutex_;
194 SpinMutex stats_mutex_;
197 void InitThreadList(uptr storage, uptr size);
198 HwasanThreadList &hwasanThreadList();