//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
//
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_errors.h"
#include "scudo_flags.h"
#include "scudo_interface_internal.h"
#include "scudo_tsd.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#ifdef GWP_ASAN_HOOKS
# include "gwp_asan/guarded_pool_allocator.h"
# include "gwp_asan/optional/backtrace.h"
# include "gwp_asan/optional/options_parser.h"
#endif // GWP_ASAN_HOOKS

#include <errno.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static u32 Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason not to use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static BackendT &getBackend();

namespace Chunk {
  static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
    return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                  getHeaderSize());
  }
  static INLINE
  const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
    return reinterpret_cast<const AtomicPackedHeader *>(
        reinterpret_cast<uptr>(Ptr) - getHeaderSize());
  }

  static INLINE bool isAligned(const void *Ptr) {
    return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
  }

  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with. To
  // prevent this, we work with a local copy of the header.
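  //
  // A chunk, as set up by the allocator, is laid out as:
  //   BackendPtr -> [ padding | PackedHeader | user data ... ]
  // where Ptr (the user pointer) points just past the header, and the padding
  // (Offset << MinAlignmentLog bytes) is only non-zero for aligned
  // Primary-backed allocations.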
  static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
        getHeaderSize() - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr ClassId = Header->ClassId;
    if (ClassId)
      return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
          (Header->Offset << MinAlignmentLog);
    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize();
  }

  // Returns the size the user requested when allocating the chunk.
  static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (Header->ClassId)
      return SizeOrUnusedBytes;
    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
  }

  // Computes the checksum of the chunk pointer and its header.
  static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
                                 HeaderHolder, ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }
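  // Note: only the lower 16 bits of the CRC32 are stored in the header. The
  // checksum is keyed with the global Cookie, randomized at init, which makes
  // valid header values harder to forge.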

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // terminate the process in the event of an invalid chunk.
  static INLINE bool isValid(const void *Ptr) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    UnpackedHeader NewUnpackedHeader =
        bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum ==
            computeChecksum(Ptr, &NewUnpackedHeader));
  }

  // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid
  // for a fully nulled out header, its state will be available anyway.
  COMPILER_CHECK(ChunkAvailable == 0);

  // Loads and unpacks the header, verifying the checksum in the process.
  static INLINE
  void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(Ptr, NewUnpackedHeader)))
      dieWithMessage("corrupted chunk header at address %p\n", Ptr);
  }

  // Packs and stores the header, computing the checksum in the process.
  static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  static INLINE void compareExchangeHeader(void *Ptr,
                                           UnpackedHeader *NewUnpackedHeader,
                                           UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    if (UNLIKELY(!atomic_compare_exchange_strong(
            getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
            memory_order_relaxed)))
      dieWithMessage("race on chunk header at address %p\n", Ptr);
  }
}  // namespace Chunk

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCacheT *Cache)
      : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(void *Ptr) {
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkQuarantine))
      dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
    UnpackedHeader NewHeader = Header;
    NewHeader.State = ChunkAvailable;
    Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
    void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
    if (Header.ClassId)
      getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
    else
      getBackend().deallocateSecondary(BackendPtr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  void *Allocate(uptr Size) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    return getBackend().allocatePrimary(Cache_, BatchClassId);
  }

  void Deallocate(void *Ptr) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
  }

  AllocatorCacheT *Cache_;
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
};

typedef Quarantine<QuarantineCallback, void> QuarantineT;
typedef QuarantineT::Cache QuarantineCacheT;
COMPILER_CHECK(sizeof(QuarantineCacheT) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));
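
// The per-thread quarantine cache is not heap allocated: it lives directly in
// the ScudoTSD's QuarantineCachePlaceHolder buffer, which the check above
// guarantees is large enough.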
QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
  return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
}

#ifdef GWP_ASAN_HOOKS
static gwp_asan::GuardedPoolAllocator GuardedAlloc;
#endif // GWP_ASAN_HOOKS
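
// The global allocator object. It combines the backend (Primary + Secondary),
// the quarantine, and the flag-controlled consistency checks. Individual
// allocations are capped at MaxAllowedMallocSize (2G on 32-bit platforms, 1T
// on 64-bit).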
struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  BackendT Backend;
  QuarantineT Quarantine;

  u32 QuarantineChunksUpToSize;
  bool DeallocationTypeMismatch;
  bool DeleteSizeMismatch;
  bool ZeroContents;
  bool CheckRssLimit;
  uptr HardRssLimitMb;
  uptr SoftRssLimitMb;
  atomic_uint8_t RssLimitExceeded;
  atomic_uint64_t RssLastCheckedAtNS;

  explicit Allocator(LinkerInitialized)
      : Quarantine(LINKER_INITIALIZED) {}

  NOINLINE void performSanityChecks();

  void init() {
    SanitizerToolName = "Scudo";
    PrimaryAllocatorName = "ScudoPrimary";
    SecondaryAllocatorName = "ScudoSecondary";

    initFlags();

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform;
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    Backend.init(common_flags()->allocator_release_to_os_interval_ms);
    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
    Quarantine.Init(
        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
        getFlags()->QuarantineChunksUpToSize;
    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
    ZeroContents = getFlags()->ZeroContents;
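
    // Seed the checksum Cookie with random bytes. If no entropy is available,
    // fall back to a weaker value derived from the clock and the address of
    // this allocator instance.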
    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
                            /*blocking=*/false))) {
      Cookie = static_cast<u32>((NanoTime() >> 12) ^
                                (reinterpret_cast<uptr>(this) >> 4));
    }

    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
    if (CheckRssLimit)
      atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return false;
    if (!Chunk::isAligned(Ptr))
      return false;
    return Chunk::isValid(Ptr);
  }

  NOINLINE bool isRssLimitExceeded();

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(Size))
        return Ptr;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;
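
    // Reserve room for the header in front of the user data. For alignments
    // greater than MinAlignment, also reserve Alignment - getHeaderSize()
    // extra bytes so that the user pointer can always be rounded up to
    // Alignment past the header. Only Primary backed allocations need this;
    // the Secondary aligns on its own.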
    const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
        Chunk::getHeaderSize();
    const uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
    if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
        UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
    }

    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportRssLimitExceeded();
    }

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    void *BackendPtr;
    uptr BackendSize;
    u32 ClassId;
    if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
      BackendSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(BackendSize);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
      if (UnlockRequired)
        TSD->unlock();
    } else {
      BackendSize = NeededSize;
      ClassId = 0;
      BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
    }
    if (UNLIKELY(!BackendPtr)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      reportOutOfMemory(Size);
    }

    // If requested, we will zero out the entire contents of the returned
    // chunk.
    if ((ForceZeroContents || ZeroContents) && ClassId)
      memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));

    UnpackedHeader Header = {};
    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
    if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the
      // offset field of the header would be non-zero.
      const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
      Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
      UserPtr = AlignedUserPtr;
    }
    DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (ClassId) {
      Header.ClassId = ClassId;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The Secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and
      // the next page boundary.
      const uptr PageSize = GetPageSizeCached();
      const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Ptr, &Header);
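    // __sanitizer_malloc_hook is a weak interface symbol; only call it if the
    // platform supports hooks and something actually provides a definition.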
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
      __sanitizer_malloc_hook(Ptr, Size);
    return Ptr;
  }

  // Place a chunk in the quarantine or directly deallocate it in the event of
  // a zero-sized quarantine, or if the size of the chunk is greater than the
  // quarantine chunk size threshold.
  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
                                   uptr Size) {
    const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkAvailable;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
      if (Header->ClassId) {
        bool UnlockRequired;
        ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
        getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
                                       Header->ClassId);
        if (UnlockRequired)
          TSD->unlock();
      } else {
        getBackend().deallocateSecondary(BackendPtr);
      }
    } else {
      // If a small amount of memory was allocated with a larger alignment, we
      // want to take that into account. Otherwise the Quarantine would be
      // filled with tiny chunks, taking a lot of VA memory. This is an
      // approximation of the usable size, which allows us to avoid calling
      // GetActuallyAllocatedSize.
      const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
                     Ptr, EstimatedSize);
      if (UnlockRequired)
        TSD->unlock();
    }
  }

  // Deallocates a Chunk, which means either adding it to the quarantine or
  // directly returning it to the backend if criteria are met.
  void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
                  AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning
    // thread local data will be left uninitialized for now (when using ELF
    // TLS). The fallback cache will be used instead. This is a workaround for
    // a situation where the only heap operation performed in a thread would
    // be a free past the TLS destructors, ending up with thread specific data
    // that is initialized but never properly destroyed. Any other heap
    // operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
      __sanitizer_free_hook(Ptr);
    if (UNLIKELY(!Ptr))
      return;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!Chunk::isAligned(Ptr)))
      dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc)
          dieWithMessage("allocation type mismatch when deallocating address "
                         "%p\n", Ptr);
      }
    }
    const uptr Size = Chunk::getSize(Ptr, &Header);
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size)
        dieWithMessage("invalid sized delete when deallocating address %p\n",
                       Ptr);
    }
    (void)DeleteAlignment;  // TODO(kostyak): verify that the alignment matches.
    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      size_t OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!Chunk::isAligned(OldPtr)))
      dieWithMessage("misaligned address when reallocating address %p\n",
                     OldPtr);
    UnpackedHeader OldHeader;
    Chunk::loadHeader(OldPtr, &OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when reallocating address %p\n",
                     OldPtr);
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc))
        dieWithMessage("allocation type mismatch when reallocating address "
                       "%p\n", OldPtr);
    }
    const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable: just update the header and return the same pointer.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
      Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
    return Chunk::getUsableSize(Ptr, &Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportCallocOverflow(NMemB, Size);
    }
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoTSD *TSD) {
    Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
    Backend.destroyCache(&TSD->Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    uptr stats[AllocatorStatCount];
    Backend.getStats(stats);
    return stats[StatType];
  }

  bool canReturnNull() {
    return AllocatorMayReturnNull();
  }

  void setRssLimit(uptr LimitMb, bool HardLimit) {
    if (HardLimit)
      HardRssLimitMb = LimitMb;
    else
      SoftRssLimitMb = LimitMb;
    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
  }

  void printStats() {
    Backend.printStats();
  }
};

NOINLINE void Allocator::performSanityChecks() {
  // Verify that the header offset field can hold the maximum offset. In the
  // case of the Secondary allocator, it takes care of alignment and the
  // offset will always be 0. In the case of the Primary, the worst case
  // scenario happens in the last size class, when the backend allocation
  // would already be aligned on the requested alignment, which would happen
  // to be the maximum alignment that would fit in that size class. As a
  // result, the maximum offset will be at most the maximum alignment for the
  // last size class minus the header size, in multiples of MinAlignment.
  UnpackedHeader Header = {};
  const uptr MaxPrimaryAlignment =
      1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
  const uptr MaxOffset =
      (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
  Header.Offset = MaxOffset;
  if (Header.Offset != MaxOffset)
    dieWithMessage("maximum possible offset doesn't fit in header\n");
  // Verify that we can fit the maximum size or amount of unused bytes in the
  // header. Given that the Secondary fits the allocation to a page, the worst
  // case scenario happens in the Primary. It will depend on the second to
  // last and last class sizes, as well as the dynamic base for the Primary.
  // The following is an over-approximation that works for our needs.
  const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
  Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
  if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
    dieWithMessage("maximum possible unused bytes doesn't fit in header\n");

  const uptr LargestClassId = SizeClassMap::kLargestClassID;
  Header.ClassId = LargestClassId;
  if (Header.ClassId != LargestClassId)
    dieWithMessage("largest class ID doesn't fit in header\n");
}

// Opportunistic RSS limit check. This will update the RSS limit status at
// most every 250ms; otherwise it just returns the current one.
NOINLINE bool Allocator::isRssLimitExceeded() {
  u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
  const u64 CurrentCheck = MonotonicNanoTime();
  if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
    return atomic_load_relaxed(&RssLimitExceeded);
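  // Try to become the thread that refreshes the status for this 250ms window:
  // if the compare-exchange fails, another thread is already refreshing it,
  // so report the last known status.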
  if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
                                    CurrentCheck, memory_order_relaxed))
    return atomic_load_relaxed(&RssLimitExceeded);

  // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
  //                RSS from /proc/self/statm by default. We might want to
  //                call getrusage directly, even if it's less accurate.
  const uptr CurrentRssMb = GetRSS() >> 20;
  if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
    dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
                   HardRssLimitMb, CurrentRssMb);
  if (SoftRssLimitMb) {
    if (atomic_load_relaxed(&RssLimitExceeded)) {
      if (CurrentRssMb <= SoftRssLimitMb)
        atomic_store_relaxed(&RssLimitExceeded, false);
    } else {
      if (CurrentRssMb > SoftRssLimitMb) {
        atomic_store_relaxed(&RssLimitExceeded, true);
        Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
               SoftRssLimitMb, CurrentRssMb);
      }
    }
  }
  return atomic_load_relaxed(&RssLimitExceeded);
}

static Allocator Instance(LINKER_INITIALIZED);

static BackendT &getBackend() {
  return Instance.Backend;
}

void initScudo() {
  Instance.init();
#ifdef GWP_ASAN_HOOKS
  gwp_asan::options::initOptions();
  gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
  Opts.Backtrace = gwp_asan::options::getBacktraceFunction();
  Opts.PrintBacktrace = gwp_asan::options::getPrintBacktraceFunction();
  GuardedAlloc.init(Opts);
#endif // GWP_ASAN_HOOKS
}

void ScudoTSD::init() {
  getBackend().initCache(&Cache);
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoTSD::commitBack() {
  Instance.commitBack(this);
}

void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
  if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportAllocationAlignmentNotPowerOfTwo(Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
}

void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
  Instance.deallocate(Ptr, Size, Alignment, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  const uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = ENOMEM;
    if (Instance.canReturnNull())
      return nullptr;
    reportPvallocOverflow(Size);
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    if (!Instance.canReturnNull())
      reportInvalidPosixMemalignAlignment(Alignment);
    return EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportInvalidAlignedAllocAlignment(Size, Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
  return Size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *Ptr, uptr Size) {
  (void)Ptr;
  (void)Size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
  (void)Ptr;
}
#endif  // !SANITIZER_SUPPORTS_WEAK_HOOKS

// Interface functions

void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
  if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
    return;
  Instance.setRssLimit(LimitMb, !!HardLimit);
}

void __scudo_print_stats() {
  Instance.printStats();
}