//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_tls.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <errno.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static uptr Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(uptr Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason not to use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}
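
// Illustrative note: when the build enables the CRC32 instructions globally
// (e.g. -msse4.2 or the ARM CRC32 extension), the intrinsic path above is
// compiled in unconditionally; otherwise the choice between the hardware and
// software implementations is made once at start-up, when initScudo() probes
// the CPU feature.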

static ScudoBackendAllocator &getBackendAllocator();

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with. To
  // prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }
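  // Note (illustrative, assuming MinAlignmentLog == 4): a header whose Offset
  // field holds 3 indicates that the backend allocation begins 3 << 4 == 48
  // bytes before this header.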

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size =
        getBackendAllocator().getActuallyAllocatedSize(getAllocBeg(Header),
                                                       Header->FromPrimary);
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HeaderHolder,
                           ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }
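
  // Note (illustrative): since the chunk address and the global Cookie are
  // folded into the CRC32 along with the header contents, a header copied
  // verbatim to another address (or reused across runs) fails verification;
  // only the low 16 bits of the CRC are kept in the Checksum field.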

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // terminate the process in the event of an invalid chunk.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  void eraseHeader() {
    PackedHeader NullPackedHeader = 0;
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader,
                                                 &OldPackedHeader,
                                                 NewPackedHeader,
                                                 memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

ScudoChunk *getScudoChunk(uptr UserBeg) {
  return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
}

struct AllocatorOptions {
  u32 QuarantineSizeMb;
  u32 ThreadLocalQuarantineSizeKb;
  bool MayReturnNull;
  s32 ReleaseToOSIntervalMs;
  bool DeallocationTypeMismatch;
  bool DeleteSizeMismatch;
  bool ZeroContents;

  void setFrom(const Flags *f, const CommonFlags *cf);
  void copyTo(Flags *f, CommonFlags *cf) const;
};

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

static void initScudoInternal(const AllocatorOptions &Options);

static bool ScudoInitIsRunning = false;

void initScudo() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if hardware CRC32 is supported in the binary and by the platform;
  // if so, opt for the hardware version of the checksum.
  if (computeHardwareCRC32 && testCPUFeature(CRC32CPUFeature))
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initScudoInternal(Options);

  // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use.

  ScudoInitIsRunning = false;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    Chunk->eraseHeader();
    void *Ptr = Chunk->getAllocBeg(&Header);
    if (Header.FromPrimary)
      getBackendAllocator().deallocatePrimary(Cache_, Ptr);
    else
      getBackendAllocator().deallocateSecondary(Ptr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
  void *Allocate(uptr Size) {
    return getBackendAllocator().allocatePrimary(Cache_, Size);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().deallocatePrimary(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));

AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Cache;
}

ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
  return reinterpret_cast<
      ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
}

ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Prng;
}

struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  StaticSpinMutex GlobalPrngMutex;
  ScudoPrng GlobalPrng;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they
  // can be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  ScudoQuarantineCache FallbackQuarantineCache;
  ScudoPrng FallbackPrng;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment =
        1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset =
        (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the
    // worst case scenario happens in the Primary. It will depend on the second
    // to last and last class sizes, as well as the dynamic base for the
    // Primary. The following is an over-approximation that works for our
    // needs.
    uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    SetAllocatorMayReturnNull(Options.MayReturnNull);
    BackendAllocator.init(Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    GlobalPrng.init();
    Cookie = GlobalPrng.getU64();
    BackendAllocator.initCache(&FallbackAllocatorCache);
    FallbackPrng.init();
  }
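
  // Worked example with hypothetical parameters: for SizeClassMap::kMaxSize
  // == 1 << 17, MinAlignment == 16 (MinAlignmentLog == 4) and
  // AlignedChunkHeaderSize == 16, MaxPrimaryAlignment == 1 << 16 and
  // MaxOffset == (65536 - 16) >> 4 == 4095, which has to be representable in
  // the Offset bitfield of UnpackedHeader.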

  // Helper function that checks for a valid Scudo chunk. nullptr isn't one.
  bool isValidPointer(const void *UserPtr) {
    initThreadMaybe();
    if (UNLIKELY(!UserPtr))
      return false;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(UserBeg, MinAlignment))
      return false;
    return getScudoChunk(UserBeg)->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(Alignment > MaxAlignment))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;
    if (UNLIKELY(Size >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Size == 0))
      Size = 1;

    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
    if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);

    void *Ptr;
    u8 Salt;
    uptr AllocSize;
    if (FromPrimary) {
      AllocSize = AlignedSize;
      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
      if (LIKELY(ThreadContext)) {
        Salt = getPrng(ThreadContext)->getU8();
        Ptr = BackendAllocator.allocatePrimary(getAllocatorCache(ThreadContext),
                                               AllocSize);
        ThreadContext->unlock();
      } else {
        SpinMutexLock l(&FallbackMutex);
        Salt = FallbackPrng.getU8();
        Ptr = BackendAllocator.allocatePrimary(&FallbackAllocatorCache,
                                               AllocSize);
      }
    } else {
      {
        SpinMutexLock l(&GlobalPrngMutex);
        Salt = GlobalPrng.getU8();
      }
      AllocSize = NeededSize;
      Ptr = BackendAllocator.allocateSecondary(AllocSize, Alignment);
    }
    if (UNLIKELY(!Ptr))
      return FailureHandler::OnOOM();

    // If requested, we will zero out the entire contents of the returned
    // chunk.
    if ((ForceZeroContents || ZeroContents) && FromPrimary)
      memset(Ptr, 0, BackendAllocator.getActuallyAllocatedSize(
          Ptr, /*FromPrimary=*/true));

    UnpackedHeader Header = {};
    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
    if (UNLIKELY(!IsAligned(UserBeg, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the
      // offset field of the header would be non-zero.
      CHECK(FromPrimary);
      UserBeg = RoundUpTo(UserBeg, Alignment);
      uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
      Header.Offset = Offset >> MinAlignmentLog;
    }
    CHECK_LE(UserBeg + Size, AllocBeg + AllocSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (FromPrimary) {
      Header.FromPrimary = 1;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and
      // the next page boundary.
      uptr PageSize = GetPageSizeCached();
      uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    Header.Salt = Salt;
    getScudoChunk(UserBeg)->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(UserBeg);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }
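
  // Worked example for the Secondary case (assuming a 4096-byte page): if the
  // user allocation ends 100 bytes past a page boundary, TrailingBytes == 100
  // and SizeOrUnusedBytes records 4096 - 100 == 3996 bytes of slack; Primary
  // chunks store the requested Size directly instead.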

  // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
  // we directly deallocate the chunk, otherwise the flow would lead to the
  // chunk being loaded (and checked) twice, and stored (and checksummed) once,
  // with no additional security value.
  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                   uptr Size) {
    bool FromPrimary = Header->FromPrimary;
    bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
    if (BypassQuarantine) {
      Chunk->eraseHeader();
      void *Ptr = Chunk->getAllocBeg(Header);
      if (FromPrimary) {
        ScudoThreadContext *ThreadContext = getThreadContextAndLock();
        if (LIKELY(ThreadContext)) {
          getBackendAllocator().deallocatePrimary(
              getAllocatorCache(ThreadContext), Ptr);
          ThreadContext->unlock();
        } else {
          SpinMutexLock Lock(&FallbackMutex);
          getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
        }
      } else {
        getBackendAllocator().deallocateSecondary(Ptr);
      }
    } else {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, Header);
      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
      if (LIKELY(ThreadContext)) {
        AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
                                QuarantineCallback(
                                    getAllocatorCache(ThreadContext)),
                                Chunk, Size);
        ThreadContext->unlock();
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    initThreadMaybe();
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (UNLIKELY(!UserPtr))
      return;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (OldHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         UserPtr);
        }
      }
    }
    uptr Size = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
        Chunk->getUsableSize(&OldHeader) - OldHeader.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       UserPtr);
      }
    }

    // If a small memory amount was allocated with a larger alignment, we want
    // to take that into account. Otherwise the Quarantine would be filled with
    // tiny chunks, taking a lot of VA memory. This is an approximation of the
    // usable size that allows us to not call getActuallyAllocatedSize.
    uptr LiableSize = Size + (OldHeader.Offset << MinAlignmentLog);
    quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize);
  }
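
  // Worked example (assuming MinAlignmentLog == 4): a 16-byte allocation
  // carrying an Offset of 255 due to a large requested alignment yields
  // LiableSize == 16 + (255 << 4) == 4096, so roughly a page is charged
  // against the quarantine limit instead of just the 16 requested bytes.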

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     OldPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable: keep the chunk and only update its header.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize);
    }
    return NewPtr;
  }
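
  // Illustrative example: a Primary chunk with UsableSize == 112 absorbs a
  // realloc to NewSize == 96 in place (16 bytes of slack), while a realloc to
  // NewSize == 200 allocates a new chunk, copies the old contents, and sends
  // the old chunk through quarantineOrDeallocateChunk().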

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    uptr UserBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
      return FailureHandler::OnBadRequest();
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoThreadContext *ThreadContext) {
    AllocatorCache *Cache = getAllocatorCache(ThreadContext);
    AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
                              QuarantineCallback(Cache));
    BackendAllocator.destroyCache(Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.getStats(stats);
    return stats[StatType];
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

static void initScudoInternal(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void ScudoThreadContext::init() {
  getBackendAllocator().initCache(&Cache);
  Prng.init();
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoThreadContext::commitBack() {
  Instance.commitBack(this);
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}
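
// Illustrative example (assuming a 4096-byte page): pvalloc(0) returns one
// page, and pvalloc(5000) rounds the request up to 8192 bytes.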

void *scudoMemalign(uptr Alignment, uptr Size) {
  if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = errno_EINVAL;
    return ScudoAllocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    ScudoAllocator::FailureHandler::OnBadRequest();
    return errno_EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return errno_ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = errno_EINVAL;
    return ScudoAllocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}