//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <random>
#include <set>

using namespace __sanitizer;

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_WINDOWS
// On Windows 64-bit there is no easy way to find a large enough fixed address
// space that is always available. Thus, a dynamically allocated address space
// is used instead (i.e. ~(uptr)0).
static const uptr kAllocatorSpace = ~(uptr)0;
static const uptr kAllocatorSize = 0x8000000000ULL;  // 512G.
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#elif SANITIZER_ANDROID && defined(__aarch64__)
static const uptr kAllocatorSpace = 0x3000000000ULL;
static const uptr kAllocatorSize = 0x2000000000ULL;
static const u64 kAddressSpaceSize = 1ULL << 39;
typedef VeryCompactSizeClassMap SizeClassMap;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
typedef DefaultSizeClassMap SizeClassMap;
#endif

template <typename AddressSpaceViewTy>
struct AP64 {  // Allocator Params. Short name for shorter demangled names.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dyn {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Compact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64VeryCompact {
  static const uptr kSpaceBeg = ~(uptr)0;
  static const uptr kSpaceSize = 1ULL << 37;
  static const uptr kMetadataSize = 16;
  typedef VeryCompactSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceViewTy>
struct AP64Dense {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef DenseSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

template <typename AddressSpaceView>
using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64CompactASVT =
    SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64VeryCompactASVT =
    SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
using Allocator64VeryCompact =
    Allocator64VeryCompactASVT<LocalAddressSpaceView>;

template <typename AddressSpaceView>
using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;

#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
static const u64 kAddressSpaceSize = 1ULL << 39;
#elif defined(__s390x__)
static const u64 kAddressSpaceSize = 1ULL << 53;
#elif defined(__s390__)
static const u64 kAddressSpaceSize = 1ULL << 31;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

template <typename AddressSpaceViewTy>
struct AP32Compact {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
template <typename AddressSpaceView>
using Allocator32CompactASVT =
    SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
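
// Instantiates a size class map and runs its internal consistency checks.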
template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, VeryCompactSizeClassMap) {
  TestSizeClassMap<VeryCompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

TEST(SanitizerCommon, DenseSizeClassMap) {
  TestSizeClassMap<DenseSizeClassMap>();
}
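
// Generic round-trip stress test: allocates chunks of many sizes, checks
// block-begin/metadata invariants, frees everything, and verifies that total
// memory usage stays stable across iterations.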
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {
    1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
  };

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 4000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
  TestSizeClassAllocator<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
// FIXME(kostyak): find values so that those work on Android as well.
TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64Dense) {
  TestSizeClassAllocator<Allocator64Dense>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
  TestSizeClassAllocator<Allocator64VeryCompact>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

template <typename AddressSpaceViewTy>
struct AP32SeparateBatches {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef DefaultSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
template <typename AddressSpaceView>
using Allocator32SeparateBatchesASVT =
    SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
using Allocator32SeparateBatches =
    Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;

TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
  TestSizeClassAllocator<Allocator32SeparateBatches>();
}
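
// Allocates one chunk per size class repeatedly and re-queries GetMetaData
// many times to make sure the metadata address of a chunk never changes.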
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif

#endif
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
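
// Allocates many chunks of the largest size class and checks that
// GetBlockBegin keeps returning the chunk start without overflowing.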
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= TotalSize / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
      1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
  // Does not have > 4Gb for each class.
  SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
}
#endif  // !SANITIZER_WINDOWS
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
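
// Callback that counts map/unmap events so the tests below can verify how
// many system mappings each allocator operation performs.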
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS

template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64WithCallback {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 16;
  typedef ::SizeClassMap SizeClassMap;
  typedef TestMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
  // State + alloc + metadata + freearray.
  EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP32WithCallback {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = kAddressSpaceSize;
  static const uptr kMetadataSize = 16;
  typedef CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = ::kRegionSizeLog;
  using AddressSpaceView = AddressSpaceViewTy;
  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
  typedef TestMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  Allocator64 a;
  a.Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();

  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  bool allocation_failed = false;
  for (int i = 0; i < 1000000; i++) {
    if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
      allocation_failed = true;
      break;
    }
  }
  EXPECT_EQ(allocation_failed, true);

  a.TestOnlyUnmap();
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none is left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments. Test with 256MB alignment on x64 non-Windows machines.
  // Windows doesn't overcommit, and many machines do not have 25.6GB of swap.
  uptr max_alignment =
      (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}
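
// Exercises the combined (primary + secondary) allocator: rejects impossible
// sizes, then does randomized allocate/deallocate rounds and checks that
// ForEachChunk reports every live chunk exactly once.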
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  std::mt19937 r;

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    std::shuffle(allocated.begin(), allocated.end(), r);

    // Test ForEachChunk(...)
    {
      std::set<void *> reported_chunks;
      auto cb = [](uptr chunk, void *arg) {
        auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg);
        auto pair =
            reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk));
        // Check chunk is never reported more than once.
        ASSERT_TRUE(pair.second);
      };
      a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks));
      for (const auto &allocated_ptr : allocated) {
        ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end());
      }
    }

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
  TestCombinedAllocator<Allocator64Dynamic,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Dynamic> > ();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
  TestCombinedAllocator<Allocator64VeryCompact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64VeryCompact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}
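
// Repeatedly fills and drains a thread-local cache for several size classes
// and checks that the allocator's total memory usage stays constant.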
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init(kReleaseToOSIntervalNever);
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Dynamic> >();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64VeryCompact> >();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}
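
// Each worker thread allocates from a shared static cache and drains it;
// the test checks that repeated thread creation does not grow memory usage.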
#if SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init(kReleaseToOSIntervalNever);
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads. The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread. Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized. We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init(kReleaseToOSIntervalNever);
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);

  allocator.TestOnlyUnmap();
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  uint32_t rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalMmapVector<int> int_buf(kSize);
    EXPECT_EQ((uptr)kSize, int_buf.size());  // NOLINT
  }
  InternalMmapVector<char> char_buf(kSize);
  EXPECT_EQ((uptr)kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}
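
// Allocates a batch of chunks and verifies that ForEachChunk (with the
// allocator locked) reports every one of them.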
template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// These tests can fail on Windows if memory is somewhat full and lit happens
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
  TestSizeClassAllocatorIteration<Allocator64Dynamic>();
}
#endif
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<NoOpMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
typedef SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64_SpecialSizeClassMap {
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = kAllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef SpecialSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = AddressSpaceViewTy;
};

// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init(kReleaseToOSIntervalNever);
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.

  const uptr kClassID = 107;
  const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
  EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);

  const uptr Class2 = 100;
  const uptr Size2 = SpecialSizeClassMap::Size(Class2);
  ASSERT_EQ(Size2 * 8, kRegionSize);
  char *p[7];
  for (int i = 0; i < 7; i++) {
    p[i] = (char*)cache.Allocate(a, Class2);
    EXPECT_NE(p[i], nullptr);
    fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
    p[i][Size2 - 1] = 42;
    if (i) ASSERT_LT(p[i - 1], p[i]);
  }
  EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
  cache.Deallocate(a, Class2, p[0]);
  cache.Drain(a);
  ASSERT_EQ(p[6][Size2 - 1], 42);
  a->TestOnlyUnmap();
  delete a;
}

#endif

#if SANITIZER_CAN_USE_ALLOCATOR64
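
// Mock memory mappers for the PackedCounterArray tests below: NoMemoryMapper
// only records the requested buffer size, RedZoneMemoryMapper hands out a
// single page guarded by inaccessible pages on both sides.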
class NoMemoryMapper {
 public:
  uptr last_request_buffer_size;

  NoMemoryMapper() : last_request_buffer_size(0) {}

  uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
    last_request_buffer_size = buffer_size;
    return 0;
  }
  void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}
};

class RedZoneMemoryMapper {
 public:
  RedZoneMemoryMapper() {
    const auto page_size = GetPageSize();
    buffer = MmapOrDie(3ULL * page_size, "");
    MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
    MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
  }
  ~RedZoneMemoryMapper() {
    UnmapOrDie(buffer, 3 * GetPageSize());
  }

  uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
    const auto page_size = GetPageSize();
    CHECK_EQ(buffer_size, page_size);
    memset(reinterpret_cast<void*>(reinterpret_cast<uptr>(buffer) + page_size),
           0, page_size);
    return reinterpret_cast<uptr>(buffer) + page_size;
  }
  void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}

 private:
  void *buffer;
};

TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
  NoMemoryMapper no_memory_mapper;
  typedef Allocator64::PackedCounterArray<NoMemoryMapper>
      NoMemoryPackedCounterArray;

  for (int i = 0; i < 64; i++) {
    // Various valid counter's max values packed into one word.
    NoMemoryPackedCounterArray counters_2n(1, 1ULL << i, &no_memory_mapper);
    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);

    // Check the "all bit set" values too.
    NoMemoryPackedCounterArray counters_2n1_1(1, ~0ULL >> i, &no_memory_mapper);
    EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);

    // Verify the packing ratio, the counter is expected to be packed into the
    // closest power of 2 bits.
    NoMemoryPackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
    EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
              no_memory_mapper.last_request_buffer_size);
  }

  RedZoneMemoryMapper memory_mapper;
  typedef Allocator64::PackedCounterArray<RedZoneMemoryMapper>
      RedZonePackedCounterArray;
  // Go through 1, 2, 4, 8, .. 64 bits per counter.
  for (int i = 0; i < 7; i++) {
    // Make sure counters request one memory page for the buffer.
    const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
    RedZonePackedCounterArray counters(kNumCounters,
                                       1ULL << ((1 << i) - 1),
                                       &memory_mapper);
    counters.Inc(0);
    for (u64 c = 1; c < kNumCounters - 1; c++) {
      ASSERT_EQ(0ULL, counters.Get(c));
      counters.Inc(c);
      ASSERT_EQ(1ULL, counters.Get(c - 1));
    }
    ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
    counters.Inc(kNumCounters - 1);

    if (i > 0) {
      counters.IncRange(0, kNumCounters - 1);
      for (u64 c = 0; c < kNumCounters; c++)
        ASSERT_EQ(2ULL, counters.Get(c));
    }
  }
}
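
// Records released page ranges as a '.'/'x' string so the range tracker test
// below can compare them against its expected patterns.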
class RangeRecorder {
 public:
  std::string reported_pages;

  RangeRecorder()
      : page_size_scaled_log(
            Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
        last_page_reported(0) {}

  void ReleasePageRangeToOS(u32 from, u32 to) {
    from >>= page_size_scaled_log;
    to >>= page_size_scaled_log;
    ASSERT_LT(from, to);
    if (!reported_pages.empty())
      ASSERT_LT(last_page_reported, from);
    reported_pages.append(from - last_page_reported, '.');
    reported_pages.append(to - from, 'x');
    last_page_reported = to;
  }
 private:
  const uptr page_size_scaled_log;
  u32 last_page_reported;
};

TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
  typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;

  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
  const char* test_cases[] = {
      "..............xxxxx",
      "xxxxxxxxxxxxxxxxxx.....",
      "......xxxxxxxx........",
      "xxx..........xxxxxxxxxxxxxxx",
      "......xxxx....xxxx........",
      "xxx..........xxxxxxxx....xxxxxxx",
      "x.x.x.x.x.x.x.x.x.x.x.x.",
      ".x.x.x.x.x.x.x.x.x.x.x.x",
      ".x.x.x.x.x.x.x.x.x.x.x.x.",
      "x.x.x.x.x.x.x.x.x.x.x.x.x",
  };

  for (auto test_case : test_cases) {
    RangeRecorder range_recorder;
    RangeTracker tracker(&range_recorder);
    for (int i = 0; test_case[i] != 0; i++)
      tracker.NextPage(test_case[i] == 'x');
    tracker.Done();
    // Strip trailing '.'-pages before comparing the results as they are not
    // going to be reported to range_recorder anyway.
    const char* last_x = strrchr(test_case, 'x');
    std::string expected(
        test_case,
        last_x == nullptr ? 0 : (last_x - test_case + 1));
    EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
  }
}

class ReleasedPagesTrackingMemoryMapper {
 public:
  std::set<u32> reported_pages;

  uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
    reported_pages.clear();
    return reinterpret_cast<uptr>(calloc(1, buffer_size));
  }
  void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
    free(reinterpret_cast<void*>(buffer));
  }

  void ReleasePageRangeToOS(u32 from, u32 to) {
    uptr page_size_scaled =
        GetPageSizeCached() >> Allocator64::kCompactPtrScale;
    for (u32 i = from; i < to; i += page_size_scaled)
      reported_pages.insert(i);
  }
};
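
// Builds a random free list for every size class, calls ReleaseFreeMemoryToOS,
// and then verifies that exactly the fully-free pages (and no page touched by
// a live chunk) were reported as released.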
template <class Allocator>
void TestReleaseFreeMemoryToOS() {
  ReleasedPagesTrackingMemoryMapper memory_mapper;
  const uptr kAllocatedPagesCount = 1024;
  const uptr page_size = GetPageSizeCached();
  const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
  std::mt19937 r;
  uint32_t rnd_state = 42;

  for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
      class_id++) {
    const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
    const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
    const uptr max_chunks =
        kAllocatedPagesCount * GetPageSizeCached() / chunk_size;

    // Generate the random free list.
    std::vector<u32> free_array;
    bool in_free_range = false;
    uptr current_range_end = 0;
    for (uptr i = 0; i < max_chunks; i++) {
      if (i == current_range_end) {
        in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
        current_range_end += my_rand_r(&rnd_state) % 100 + 1;
      }
      if (in_free_range)
        free_array.push_back(i * chunk_size_scaled);
    }
    if (free_array.empty())
      continue;
    // Shuffle free_list to verify that ReleaseFreeMemoryToOS does not depend on
    // the list ordering.
    std::shuffle(free_array.begin(), free_array.end(), r);

    Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
                                     chunk_size, kAllocatedPagesCount,
                                     &memory_mapper);

    // Verify that there are no released pages touched by used chunks and all
    // ranges of free chunks big enough to contain the entire memory pages had
    // these pages released.
    uptr verified_released_pages = 0;
    std::set<u32> free_chunks(free_array.begin(), free_array.end());

    u32 current_chunk = 0;
    in_free_range = false;
    u32 current_free_range_start = 0;
    for (uptr i = 0; i <= max_chunks; i++) {
      bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();

      if (is_free_chunk) {
        if (!in_free_range) {
          in_free_range = true;
          current_free_range_start = current_chunk;
        }
      } else {
        // Verify that this used chunk does not touch any released page.
        for (uptr i_page = current_chunk / page_size_scaled;
             i_page <= (current_chunk + chunk_size_scaled - 1) /
                       page_size_scaled;
             i_page++) {
          bool page_released =
              memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
              memory_mapper.reported_pages.end();
          ASSERT_EQ(false, page_released);
        }

        if (in_free_range) {
          in_free_range = false;
          // Verify that all entire memory pages covered by this range of free
          // chunks were released.
          u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
          while (page + page_size_scaled <= current_chunk) {
            bool page_released =
                memory_mapper.reported_pages.find(page) !=
                memory_mapper.reported_pages.end();
            ASSERT_EQ(true, page_released);
            verified_released_pages++;
            page += page_size_scaled;
          }
        }
      }

      current_chunk += chunk_size_scaled;
    }

    ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
  }
}

TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64>();
}

#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
  TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
}
#endif  // !SANITIZER_ANDROID

#endif  // SANITIZER_CAN_USE_ALLOCATOR64

TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.Init();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7) {
      EXPECT_EQ(m[j], 0);
    } else {
      EXPECT_EQ(m[j], (j % 100) + 1);
    }
  }

  m.TestOnlyUnmap();
}

template <typename AddressSpaceView>
using TestByteMapASVT =
    TwoLevelByteMap<1 << 12, 1 << 13, AddressSpaceView, TestMapUnmapCallback>;
using TestByteMap = TestByteMapASVT<LocalAddressSpaceView>;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.Init();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if !SANITIZER_DEBUG