//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>

#include <atomic>
#include <cstring>

namespace __scudo {

// Hardware CRC32 is supported at compilation via the following:
// - for i386 & x86_64: -msse4.2
// - for ARM & AArch64: -march=armv8-a+crc
// An additional check must be performed at runtime as well to make sure the
// emitted instructions are valid on the target host.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
# ifdef __SSE4_2__
#  include <smmintrin.h>
#  define HW_CRC32 FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
# endif
# ifdef __ARM_FEATURE_CRC32
#  include <arm_acle.h>
#  define HW_CRC32 FIRST_32_SECOND_64(__crc32cw, __crc32cd)
# endif
#endif
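
// Note: FIRST_32_SECOND_64 expands to its first argument on 32-bit targets and
// to its second on 64-bit ones, so HW_CRC32 resolves to the CRC32 intrinsic
// matching the native word size.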

#if SANITIZER_CAN_USE_ALLOCATOR64
const uptr AllocatorSpace = ~0ULL;
const uptr AllocatorSize = 0x40000000000ULL;  // 4TB.
typedef DefaultSizeClassMap SizeClassMap;
struct AP {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP> PrimaryAllocator;
#else
// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
// security improvements brought to the 64-bit one. This makes the 32-bit
// version of Scudo slightly less toughened.
static const uptr RegionSizeLog = 20;
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
    RegionSizeLog, ByteMap> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
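
// The Primary services small allocations out of size-classed regions (with
// chunk randomization on 64-bit thanks to kRandomShuffleChunks); requests it
// cannot handle fall through to the Secondary (ScudoLargeMmapAllocator), which
// services large allocations with dedicated mappings.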

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
  ScudoAllocator;

static ScudoAllocator &getAllocator();

static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static uptr Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };
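
// Both the global Cookie and the chunk address are folded into the header
// checksum (see hashUptrs below), so a valid-looking header cannot simply be
// copied to another address or reused across runs.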

// Helper function that will compute the chunk checksum, being passed all
// the needed information as uptrs. It will opt for the hardware version of
// the checksumming function if available.
INLINE u32 hashUptrs(uptr Pointer, uptr *Array, uptr ArraySize, u8 HashType) {
  u32 Crc;
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  if (HashType == CRC32Hardware) {
    Crc = HW_CRC32(Cookie, Pointer);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = HW_CRC32(Crc, Array[i]);
    return Crc;
  }
#endif
  Crc = computeCRC32(Cookie, Pointer);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeCRC32(Crc, Array[i]);
  return Crc;
}

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with. To
  // prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the amount of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size = getAllocator().GetActuallyAllocatedSize(getAllocBeg(Header));
    if (Size == 0)
      return Size;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }
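
  // A backend allocation servicing a chunk is laid out as follows (the offset
  // is stored in the header in multiples of MinAlignment, see allocate()):
  //
  //   AllocBeg .. [Offset bytes][chunk header][user data][unused bytes]
  //                             ^
  //                             this == ChunkBeg - AlignedChunkHeaderSize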

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u32 Hash = hashUptrs(reinterpret_cast<uptr>(this),
                         HeaderHolder,
                         ARRAY_SIZE(HeaderHolder),
                         atomic_load_relaxed(&HashAlgorithm));
    return static_cast<u16>(Hash);
  }
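
  // Only 16 bits of the CRC32 are kept: the Checksum field has to fit in the
  // packed header alongside the other fields, so this is a probabilistic
  // integrity check rather than a cryptographic one.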

  // Checks the validity of a chunk by verifying its checksum.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader)) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
  }
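
  // The entire header fits in a single PackedHeader word, which is why it can
  // be loaded and stored with one relaxed atomic operation: readers never
  // observe a half-written header.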

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
                                               NewPackedHeader,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

static bool ScudoInitIsRunning = false;

static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t PThreadKey;

static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;

static void teardownThread(void *p) {
  uptr v = reinterpret_cast<uptr>(p);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(PThreadKey, reinterpret_cast<void *>(v + 1));
    return;
  }
  drainQuarantine();
  getAllocator().DestroyCache(&Cache);
  ThreadTornDown = true;
}

static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if hardware CRC32 is supported by the CPU; if so, opt for the CRC32
  // hardware version of the checksum.
  if (testCPUFeature(CRC32CPUFeature)) {
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
  }

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  MaybeStartBackgroudThread();

  ScudoInitIsRunning = false;
}

static void initGlobal() {
  pthread_key_create(&PThreadKey, teardownThread);
  initInternal();
}

static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}
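
// Note that there is no eager initialization: the allocation entry points
// check ThreadInited and call initThread() lazily, which in turn performs the
// global initialization exactly once via pthread_once.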

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    void *Ptr = Chunk->getAllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatch, that are 8KB for x64. Ideally
    // we would use mmap for those, and since Deallocate doesn't pass a size
    // in, enforce the size of the allocation to be sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap greatly impacts performance, we have
    //                to find another solution.
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
static thread_local QuarantineCache ThreadQuarantineCache;
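
// The quarantine implements the delayed free list mentioned at the top of the
// file: freed chunks are kept in the ChunkQuarantine state for a while before
// being recycled to the backend, which helps detect and mitigate use-after-free
// and double-free bugs.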

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}
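
  // The constructor above is intentionally empty: the global Instance is
  // declared with LINKER_INITIALIZED, so it relies on zero-initialized static
  // storage and is usable before C++ constructors run. Actual setup happens
  // in init() below.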

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
        MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum amount of unused bytes in the header.
    // Given that the Secondary fits the allocation to a page, the worst case
    // scenario happens in the Primary. It will depend on the second to last
    // and last class sizes, as well as the dynamic base for the Primary. The
    // following is an over-approximation that works for our needs.
    uptr MaxUnusedBytes = SizeClassMap::kMaxSize - 1 - AlignedChunkHeaderSize;
    Header.UnusedBytes = MaxUnusedBytes;
    if (Header.UnusedBytes != MaxUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.Next();
  }

  // Helper function that checks for a valid Scudo chunk.
  bool isValidPointer(const void *UserPtr) {
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      return false;
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    return Chunk->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();

    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    if (Alignment > MinAlignment)
      NeededSize += Alignment;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();

    // Primary backed and Secondary backed allocations have a different
    // treatment. We deal with alignment requirements of Primary serviced
    // allocations here, but the Secondary will take care of its own alignment
    // needs, which means we also have to work around some limitations of the
    // combined allocator to accommodate the situation.
    bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);

    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDieOnOOM();

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    // If the allocation was serviced by the secondary, the returned pointer
    // accounts for ChunkHeaderSize to pass the alignment check of the combined
    // allocator. Adjust it here.
    if (!FromPrimary) {
      AllocBeg -= AlignedChunkHeaderSize;
      if (Alignment > MinAlignment)
        NeededSize -= Alignment;
    }

    uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize(
        reinterpret_cast<void *>(AllocBeg));
    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && FromPrimary)
      memset(Ptr, 0, ActuallyAllocatedSize);

    uptr ChunkBeg = AllocBeg + AlignedChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    uptr Offset = ChunkBeg - AlignedChunkHeaderSize - AllocBeg;
    Header.Offset = Offset >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.UnusedBytes = ActuallyAllocatedSize - Offset -
        AlignedChunkHeaderSize - Size;
    Header.Salt = static_cast<u8>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security wise but might
    //                be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Chunk);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd Chunks, which can still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = UsableSize - OldHeader.UnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }
    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, UsableSize);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, UsableSize);
    }
  }
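
  // Note that deallocate() never returns memory straight to the backend: the
  // chunk is flipped to the Quarantine state and handed to the quarantine,
  // which recycles it later via QuarantineCallback::Recycle().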

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", Chunk);
    }
    uptr Size = Chunk->getUsableSize(&OldHeader);
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk.
    if (NewSize <= Size) {
      NewHeader.UnusedBytes = Size - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = Size - OldHeader.UnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, Size);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (Header.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB)  // Overflow check.
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, getUsableSize(Ptr));
    return Ptr;
  }

  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }
};

static Allocator Instance(LINKER_INITIALIZED);

static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // Size must be a multiple of the alignment. To avoid a division, we first
  // make sure that Alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}