//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}
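
// Generic stress test for a size-class allocator: allocates chunks of many
// different sizes, checks block boundaries, pointer ownership and metadata
// round-trips, then frees everything and verifies that total memory usage
// stays stable across rounds.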
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}
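
// Allocates a batch of chunks from 50 different size classes, records the
// metadata pointer of each, then queries GetMetaData() many times to verify
// that the chunk-to-metadata mapping is stable.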
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
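
// Allocates roughly 8GB worth of chunks from the largest size class and
// checks that GetBlockBegin() returns the chunk itself each time, i.e. that
// the begin-of-block computation does not overflow.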
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    CHECK_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64
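
// Counts OnMap()/OnUnmap() invocations so the tests below can verify exactly
// how many system mappings each allocator operation performs.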
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}
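
// Keeps allocating batches from one size class until the allocator's region
// is exhausted; used below with EXPECT_DEATH to verify the "Out of memory"
// report.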
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that nothing is left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}
#endif
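
// Exercises the primary+secondary CombinedAllocator: verifies the
// allocator_may_return_null behavior on impossible sizes, then runs
// randomized allocate/deallocate rounds through a local cache.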
template
    <class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  bool allocator_may_return_null = common_flags()->allocator_may_return_null;
  common_flags()->allocator_may_return_null = true;
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  common_flags()->allocator_may_return_null = false;
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");
  // Restore the original value.
  common_flags()->allocator_may_return_null = allocator_may_return_null;

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}
#endif
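
// Verifies that repeated allocate/deallocate rounds through a local cache
// leave the allocator's total memory usage unchanged from one round to the
// next.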
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}
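
// Leak check: each worker thread allocates one chunk through a static cache
// and drains the cache back to the allocator, so spawning many such threads
// must not increase TotalMemoryUsed().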
#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread
// frees it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should
// be able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}
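
// Allocates a mix of chunk sizes and verifies that ForEachChunk() reports
// every live chunk.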
template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}
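
// Checks GetBlockBeginFastLocked() both for pointers into live blocks and
// for millions of addresses that must not belong to any block.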
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

#if SANITIZER_WORDSIZE == 64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif
TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.TestOnlyInit();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}

typedef TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback> TestByteMap;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.TestOnlyInit();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if TSAN_DEBUG == 0