1 //===-- asan_noinst_test.cc -----------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of AddressSanitizer, an address sanity checker.
12 // This test file should be compiled w/o asan instrumentation.
13 //===----------------------------------------------------------------------===//
15 #include "asan_allocator.h"
16 #include "asan_internal.h"
17 #include "asan_mapping.h"
18 #include "asan_test_utils.h"
19 #include <sanitizer/allocator_interface.h>
24 #include <string.h> // for memset()
30 // Please don't call intercepted functions (including malloc() and friends)
31 // in this test. The static runtime library is linked explicitly (without
32 // -fsanitize=address), thus the interceptors do not work correctly on OS X.
36 // Set specific ASan options for uninstrumented unittest.
// Overrides the default ASan flags for this uninstrumented test binary.
// allow_reexec=0: prevent ASan from re-exec'ing the process (OS X behavior
// when the runtime dylib is not preloaded), which would disturb the test.
// NOTE(review): this listing is a sampled excerpt — the closing brace of
// this function is not shown; confirm against the full file.
37 const char* __asan_default_options() {
38 return "allow_reexec=0";
43 // Make sure __asan_init is called before any test case is run.
// Global whose constructor runs at static-init time, guaranteeing
// __asan_init() is called before any test case executes.
44 struct AsanInitCaller {
45 AsanInitCaller() { __asan_init(); }
// NOTE(review): the struct's closing "};" is omitted in this sampled
// listing; the global instance below triggers the constructor.
47 static AsanInitCaller asan_init_caller;
// Sanity check that EXPECT_DEATH works in this (uninstrumented) binary:
// exit(1) must terminate the child process spawned by the death test.
49 TEST(AddressSanitizer, InternalSimpleDeathTest) {
50 EXPECT_DEATH(exit(1), "");
// Randomized allocator stress: performs `n` iterations, each either freeing
// a random live allocation or allocating a new randomly-sized, randomly-
// aligned block directly through the internal __asan allocator entry points
// (bypassing the intercepted libc malloc/free — see header comment).
// Uses three distinct fake stack traces so alloc/free sites are
// distinguishable in allocator bookkeeping.
// NOTE(review): this listing is a sampled excerpt — the `seed` declaration,
// the `void *ptr = vec[idx];` / `vec.pop_back();` pair, the `vec.push_back`
// of the new allocation, and several closing braces are not shown; confirm
// against the full file before editing.
53 static void MallocStress(size_t n) {
55 BufferedStackTrace stack1;
56 stack1.trace_buffer[0] = 0xa123;
57 stack1.trace_buffer[1] = 0xa456;
60 BufferedStackTrace stack2;
61 stack2.trace_buffer[0] = 0xb123;
62 stack2.trace_buffer[1] = 0xb456;
65 BufferedStackTrace stack3;
66 stack3.trace_buffer[0] = 0xc123;
67 stack3.trace_buffer[1] = 0xc456;
70 std::vector<void *> vec;
71 for (size_t i = 0; i < n; i++) {
// Free path: pick a random live pointer, swap-remove it from vec, free it.
73 if (vec.empty()) continue;
74 size_t idx = my_rand_r(&seed) % vec.size();
76 vec[idx] = vec.back();
78 __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
// Allocation path: size in [1,1000], occasionally bumped (~3/128 of the
// time) to exercise larger size classes.
80 size_t size = my_rand_r(&seed) % 1000 + 1;
81 switch ((my_rand_r(&seed) % 128)) {
82 case 0: size += 1024; break;
83 case 1: size += 2048; break;
84 case 2: size += 4096; break;
// Random power-of-two alignment in [2, 1024].
86 size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
87 char *ptr = (char*)__asan::asan_memalign(alignment, size,
88 &stack2, __asan::FROM_MALLOC);
// usable-size must report exactly the requested size for ASan allocations.
89 EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
// Drain everything that is still live so the stress leaks nothing.
96 for (size_t i = 0; i < vec.size(); i++)
97 __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
// Single-threaded allocator stress; iteration count is reduced on
// low-memory configurations.
101 TEST(AddressSanitizer, NoInstMallocTest) {
102 MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
// Runs MallocStress concurrently on 4 threads to exercise the allocator's
// thread-safety. The iteration count is smuggled through the void* thread
// argument (cast back inside MallocStress's thread-entry signature).
105 TEST(AddressSanitizer, ThreadedMallocStressTest) {
106 const int kNumThreads = 4;
107 const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
108 pthread_t t[kNumThreads];
109 for (int i = 0; i < kNumThreads; i++) {
// Casting a void(size_t) function to a pthread entry point is deliberate
// here; the iteration count rides in the void* parameter.
110 PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
111 (void*)kNumIterations);
113 for (int i = 0; i < kNumThreads; i++) {
114 PTHREAD_JOIN(t[i], 0);
// Debug helper: dumps the shadow bytes covering [ptr-32, ptr+size+32) to
// stderr, printing each shadow byte once (only when the shadow address
// changes) and a '.' at the start and end of the user region.
// NOTE(review): closing braces of the loop/if bodies are omitted in this
// sampled listing; confirm the exact nesting against the full file.
118 static void PrintShadow(const char *tag, uptr ptr, size_t size) {
119 fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
120 uptr prev_shadow = 0;
121 for (sptr i = -32; i < (sptr)size + 32; i++) {
122 uptr shadow = __asan::MemToShadow(ptr + i);
// Mark the boundaries of the user-addressable region.
123 if (i == 0 || i == (sptr)size)
124 fprintf(stderr, ".");
125 if (shadow != prev_shadow) {
126 prev_shadow = shadow;
// Print the raw shadow byte value at this shadow address.
127 fprintf(stderr, "%02x", (int)*(u8*)shadow);
130 fprintf(stderr, "\n");
// Manual/diagnostic test (DISABLED_ prefix keeps it out of normal runs):
// prints shadow state for freshly allocated ("m") and freed ("f") buffers
// of every size from 1 to 513.
// NOTE(review): the delete[] between the two PrintShadow calls is omitted
// in this sampled listing — confirm against the full file.
133 TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
134 for (size_t size = 1; size <= 513; size++) {
135 char *ptr = new char[size];
136 PrintShadow("m", (uptr)ptr, size);
138 PrintShadow("f", (uptr)ptr, size);
// Verifies that freed memory is quarantined: after freeing `p`, keep
// malloc/free-ing same-sized blocks; `p` must not be handed out again
// for a substantial number of iterations (EXPECT_GE(i, 10000U)).
// NOTE(review): sampled listing — the declaration of `i`, and the loop
// body's check that compares p1 against p / breaks out, are omitted;
// confirm against the full file.
142 TEST(AddressSanitizer, QuarantineTest) {
143 BufferedStackTrace stack;
144 stack.trace_buffer[0] = 0x890;
147 const int size = 1024;
148 void *p = __asan::asan_malloc(size, &stack);
149 __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
// Upper bound prevents an infinite loop if the quarantine never recycles p.
151 size_t max_i = 1 << 30;
152 for (i = 0; i < max_i; i++) {
153 void *p1 = __asan::asan_malloc(size, &stack);
154 __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
// If p was recycled in fewer than 10000 iterations, quarantine is too weak.
157 EXPECT_GE(i, 10000U);
// Thread body for ThreadedQuarantineTest: performs 1000 malloc/free pairs
// of random sizes (1..4000) through the internal allocator, populating this
// thread's local quarantine/cache before the thread exits.
// NOTE(review): the `return` statement and closing brace are omitted in
// this sampled listing.
161 void *ThreadedQuarantineTestWorker(void *unused) {
163 u32 seed = my_rand();
164 BufferedStackTrace stack;
165 stack.trace_buffer[0] = 0x890;
168 for (size_t i = 0; i < 1000; i++) {
169 void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
170 __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
175 // Check that the thread local allocators are flushed when threads are
// destroyed: spawning 3000 short-lived worker threads must not grow the
// heap by more than 320 MB, i.e. per-thread caches must be returned on exit.
// NOTE(review): sampled listing — the `pthread_t t;` declaration and the
// PTHREAD_JOIN inside the loop are omitted; confirm against the full file.
177 TEST(AddressSanitizer, ThreadedQuarantineTest) {
178 const int n_threads = 3000;
179 size_t mmaped1 = __sanitizer_get_heap_size();
180 for (int i = 0; i < n_threads; i++) {
182 PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
184 size_t mmaped2 = __sanitizer_get_heap_size();
// Heap growth must stay bounded regardless of thread count.
185 EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
// Thread body: repeatedly allocates and frees batches of 1000 fixed-size
// (32-byte) blocks, stressing a single allocator size class.
// NOTE(review): the `return` statement and closing braces are omitted in
// this sampled listing.
189 void *ThreadedOneSizeMallocStress(void *unused) {
191 BufferedStackTrace stack;
192 stack.trace_buffer[0] = 0x890;
194 const size_t kNumMallocs = 1000;
195 for (int iter = 0; iter < 1000; iter++) {
196 void *p[kNumMallocs];
197 for (size_t i = 0; i < kNumMallocs; i++) {
198 p[i] = __asan::asan_malloc(32, &stack);
200 for (size_t i = 0; i < kNumMallocs; i++) {
201 __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
// Runs the single-size-class malloc stress on 4 concurrent threads to
// exercise contention on one allocator size class.
207 TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
208 const int kNumThreads = 4;
209 pthread_t t[kNumThreads];
210 for (int i = 0; i < kNumThreads; i++) {
211 PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
213 for (int i = 0; i < kNumThreads; i++) {
214 PTHREAD_JOIN(t[i], 0);
// __asan_region_is_poisoned must report the shadow regions themselves
// (low shadow, shadow gap, high shadow) as poisoned: for each region,
// querying 100 bytes starting 200 bytes in must return the start address
// as the first poisoned byte.
218 TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
219 using __asan::kHighMemEnd;
220 // Check that __asan_region_is_poisoned works for shadow regions.
221 uptr ptr = kLowShadowBeg + 200;
222 EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
223 ptr = kShadowGapBeg + 200;
224 EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
225 ptr = kHighShadowBeg + 200;
226 EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
229 // Test __asan_load1 & friends.
// Exercises the explicit load/store check callbacks across every access
// size (1..16 bytes) and every size-aligned offset within heap buffers of
// length 16..32. In-bounds accesses must leave the test-only
// __asan_test_only_reported_buggy_pointer sink at 0; the access straddling
// the end of the buffer must record the faulting address (p + offset)
// instead of aborting.
// NOTE(review): sampled listing — the declaration of the 2x5 `cb` callback
// array, the `uptr buggy_ptr;` declaration, the `buggy_ptr = 0; call(p +
// offset);` lines, the `else` keyword, and several closing braces are
// omitted; confirm against the full file before editing.
230 TEST(AddressSanitizer, LoadStoreCallbacks) {
231 typedef void (*CB)(uptr p);
234 __asan_load1, __asan_load2, __asan_load4, __asan_load8, __asan_load16,
236 __asan_store1, __asan_store2, __asan_store4, __asan_store8,
// Route reports into a variable instead of dying, so EXPECT_EQ can check.
243 __asan_test_only_reported_buggy_pointer = &buggy_ptr;
244 BufferedStackTrace stack;
245 stack.trace_buffer[0] = 0x890;
248 for (uptr len = 16; len <= 32; len++) {
249 char *ptr = (char*) __asan::asan_malloc(len, &stack);
250 uptr p = reinterpret_cast<uptr>(ptr);
251 for (uptr is_write = 0; is_write <= 1; is_write++) {
252 for (uptr size_log = 0; size_log <= 4; size_log++) {
253 uptr size = 1 << size_log;
254 CB call = cb[is_write][size_log];
255 // Iterate only size-aligned offsets.
256 for (uptr offset = 0; offset <= len; offset += size) {
// Fully in-bounds access: no report expected.
259 if (offset + size <= len)
260 EXPECT_EQ(buggy_ptr, 0U);
// Access crossing the end: report must carry the access address.
262 EXPECT_EQ(buggy_ptr, p + offset);
266 __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC);
// Restore global state so later tests die on bad accesses as usual.
268 __asan_test_only_reported_buggy_pointer = 0;