//===-- msan_linux.cc -----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Linux- and FreeBSD-specific code.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX

#include "msan_thread.h"

#include <pthread.h>
#include <stdlib.h>
#include <sys/resource.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"
38 void ReportMapRange(const char *descr, uptr beg, uptr size) {
40 uptr end = beg + size - 1;
41 VPrintf(1, "%s : %p - %p\n", descr, beg, end);
45 static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
47 uptr end = beg + size - 1;
48 if (!MemoryRangeIsAvailable(beg, end)) {
49 Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
56 static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
58 void *addr = MmapFixedNoAccess(beg, size, name);
59 if (beg == 0 && addr) {
60 // Depending on the kernel configuration, we may not be able to protect
61 // the page at address zero.
62 uptr gap = 16 * GetPageSizeCached();
65 addr = MmapFixedNoAccess(beg, size, name);
67 if ((uptr)addr != beg) {
68 uptr end = beg + size - 1;
69 Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
77 static void CheckMemoryLayoutSanity() {
79 for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
80 uptr start = kMemoryLayout[i].start;
81 uptr end = kMemoryLayout[i].end;
82 MappingDesc::Type type = kMemoryLayout[i].type;
84 CHECK_EQ(prev_end, start);
85 CHECK(addr_is_type(start, type));
86 CHECK(addr_is_type((start + end) / 2, type));
87 CHECK(addr_is_type(end - 1, type));
88 if (type == MappingDesc::APP) {
90 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
91 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
92 CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
94 addr = (start + end) / 2;
95 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
96 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
97 CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
100 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
101 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
102 CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
108 bool InitShadow(bool init_origins) {
109 // Let user know mapping parameters first.
110 VPrintf(1, "__msan_init %p\n", &__msan_init);
111 for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
112 VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
113 kMemoryLayout[i].end - 1);
115 CheckMemoryLayoutSanity();
117 if (!MEM_IS_APP(&__msan_init)) {
118 Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
123 const uptr maxVirtualAddress = GetMaxVirtualAddress();
125 for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
126 uptr start = kMemoryLayout[i].start;
127 uptr end = kMemoryLayout[i].end;
128 uptr size= end - start;
129 MappingDesc::Type type = kMemoryLayout[i].type;
131 // Check if the segment should be mapped based on platform constraints.
132 if (start >= maxVirtualAddress)
135 bool map = type == MappingDesc::SHADOW ||
136 (init_origins && type == MappingDesc::ORIGIN);
137 bool protect = type == MappingDesc::INVALID ||
138 (!init_origins && type == MappingDesc::ORIGIN);
139 CHECK(!(map && protect));
140 if (!map && !protect)
141 CHECK(type == MappingDesc::APP);
143 if (!CheckMemoryRangeAvailability(start, size))
145 if ((uptr)MmapFixedNoReserve(start, size, kMemoryLayout[i].name) != start)
147 if (common_flags()->use_madv_dontdump)
148 DontDumpShadowMemory(start, size);
151 if (!CheckMemoryRangeAvailability(start, size))
153 if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
161 static void MsanAtExit(void) {
162 if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
164 if (msan_report_count > 0) {
165 ReportAtExitStatistics();
166 if (common_flags()->exitcode)
167 internal__exit(common_flags()->exitcode);
171 void InstallAtExitHandler() {
// ---------------------- TSD ---------------- {{{1
177 static pthread_key_t tsd_key;
178 static bool tsd_key_inited = false;
180 void MsanTSDInit(void (*destructor)(void *tsd)) {
181 CHECK(!tsd_key_inited);
182 tsd_key_inited = true;
183 CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
186 static THREADLOCAL MsanThread* msan_current_thread;
188 MsanThread *GetCurrentThread() {
189 return msan_current_thread;
192 void SetCurrentThread(MsanThread *t) {
193 // Make sure we do not reset the current MsanThread.
194 CHECK_EQ(0, msan_current_thread);
195 msan_current_thread = t;
196 // Make sure that MsanTSDDtor gets called at the end.
197 CHECK(tsd_key_inited);
198 pthread_setspecific(tsd_key, (void *)t);
201 void MsanTSDDtor(void *tsd) {
202 MsanThread *t = (MsanThread*)tsd;
203 if (t->destructor_iterations_ > 1) {
204 t->destructor_iterations_--;
205 CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
208 msan_current_thread = nullptr;
209 // Make sure that signal handler can not see a stale current thread pointer.
210 atomic_signal_fence(memory_order_seq_cst);
211 MsanThread::TSDDtor(tsd);
214 } // namespace __msan
216 #endif // SANITIZER_FREEBSD || SANITIZER_LINUX