//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_errors.h"
#include "scudo_flags.h"
#include "scudo_interface_internal.h"
#include "scudo_tsd.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <errno.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static u32 Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason to not use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static BackendT &getBackend();

namespace Chunk {
  static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
    return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                  getHeaderSize());
  }
  static INLINE
  const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
    return reinterpret_cast<const AtomicPackedHeader *>(
        reinterpret_cast<uptr>(Ptr) - getHeaderSize());
  }

  static INLINE bool isAligned(const void *Ptr) {
    return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
  }

  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with.
  // To prevent this, we work with a local copy of the header.
  static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
        getHeaderSize() - (Header->Offset << MinAlignmentLog));
  }
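
  // Layout sketch (illustrative, not normative): the distance from the start
  // of the backend allocation to the user pointer decomposes as
  //
  //   BackendPtr                          header                  UserPtr
  //      |<-- Offset << MinAlignmentLog -->|<-- getHeaderSize() -->|
  //
  // Note that a forged Offset cannot redirect frees arbitrarily: it lives in
  // the header and is covered by the checksum like every other field.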

  // Returns the usable size for a chunk, meaning the amount of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr ClassId = Header->ClassId;
    if (ClassId)
      return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
          (Header->Offset << MinAlignmentLog);
    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize();
  }

  // Returns the size the user requested when allocating the chunk.
  static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (Header->ClassId)
      return SizeOrUnusedBytes;
    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
  }

  // Compute the checksum of the chunk pointer and its header.
  static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
                                 HeaderHolder, ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }
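
  // Illustration: the stored checksum is the low 16 bits of a CRC32 seeded
  // with the global Cookie, fed the user pointer value, then each word of the
  // header with its Checksum field zeroed. Flipping any header bit, or
  // replaying a valid header at a different address, yields a mismatch that
  // loadHeader() below turns into immediate termination.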

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // incur termination in the event of an invalid chunk.
  static INLINE bool isValid(const void *Ptr) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    UnpackedHeader NewUnpackedHeader =
        bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum ==
            computeChecksum(Ptr, &NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  static INLINE void eraseHeader(void *Ptr) {
    const PackedHeader NullPackedHeader = 0;
    atomic_store_relaxed(getAtomicHeader(Ptr), NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  static INLINE
  void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(Ptr, NewUnpackedHeader)))
      dieWithMessage("corrupted chunk header at address %p\n", Ptr);
  }

  // Packs and stores the header, computing the checksum in the process.
  static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  static INLINE void compareExchangeHeader(void *Ptr,
                                           UnpackedHeader *NewUnpackedHeader,
                                           UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    if (UNLIKELY(!atomic_compare_exchange_strong(
            getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
            memory_order_relaxed)))
      dieWithMessage("race on chunk header at address %p\n", Ptr);
  }
}  // namespace Chunk

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCacheT *Cache)
      : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(void *Ptr) {
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkQuarantine))
      dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
    Chunk::eraseHeader(Ptr);
    void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
    if (Header.ClassId)
      getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
    else
      getBackend().deallocateSecondary(BackendPtr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  void *Allocate(uptr Size) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    return getBackend().allocatePrimary(Cache_, BatchClassId);
  }

  void Deallocate(void *Ptr) {
    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
    getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
  }

  AllocatorCacheT *Cache_;
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
};

typedef Quarantine<QuarantineCallback, void> QuarantineT;
typedef QuarantineT::Cache QuarantineCacheT;
COMPILER_CHECK(sizeof(QuarantineCacheT) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));

QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
  return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
}
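
// Lifecycle note: chunks enter the quarantine via Quarantine::Put in
// quarantineOrDeallocateChunk() below, accumulate in per-thread caches capped
// by ThreadLocalQuarantineSizeKb, and are recycled to the backend through
// QuarantineCallback::Recycle once the global QuarantineSizeKb budget forces
// a drain (commitBack() also drains a thread's cache on teardown).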

struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  BackendT Backend;
  QuarantineT Quarantine;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  bool CheckRssLimit;
  uptr HardRssLimitMb;
  uptr SoftRssLimitMb;
  atomic_uint8_t RssLimitExceeded;
  atomic_uint64_t RssLastCheckedAtNS;

  explicit Allocator(LinkerInitialized)
      : Quarantine(LINKER_INITIALIZED) {}

  NOINLINE void performSanityChecks();

  void init() {
    SanitizerToolName = "Scudo";
    PrimaryAllocatorName = "ScudoPrimary";
    SecondaryAllocatorName = "ScudoSecondary";

    initFlags();

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform,
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    Backend.init(common_flags()->allocator_release_to_os_interval_ms);
    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
    Quarantine.Init(
        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize;
    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
    ZeroContents = getFlags()->ZeroContents;

    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
                            /*blocking=*/false))) {
      Cookie = static_cast<u32>((NanoTime() >> 12) ^
                                (reinterpret_cast<uptr>(this) >> 4));
    }

    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
    if (CheckRssLimit)
      atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return false;
    if (!Chunk::isAligned(Ptr))
      return false;
    return Chunk::isValid(Ptr);
  }

  NOINLINE bool isRssLimitExceeded();

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;

    const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
        Chunk::getHeaderSize();
    const uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
    if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
        UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
    }

    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportRssLimitExceeded();
    }

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    void *BackendPtr;
    uptr BackendSize;
    u8 ClassId;
    if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
      BackendSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(BackendSize);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
      if (UnlockRequired)
        TSD->unlock();
    } else {
      BackendSize = NeededSize;
      ClassId = 0;
      BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
    }
    if (UNLIKELY(!BackendPtr)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      reportOutOfMemory(Size);
    }

    // If requested, we will zero out the entire contents of the returned
    // chunk.
    if ((ForceZeroContents || ZeroContents) && ClassId)
      memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));

    UnpackedHeader Header = {};
    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
    if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the
      // offset field of the header would be non-zero.
      DCHECK(ClassId);
      const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
      Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
      UserPtr = AlignedUserPtr;
    }
    DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (ClassId) {
      Header.ClassId = ClassId;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      const uptr PageSize = GetPageSizeCached();
      const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Ptr, &Header);
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
      __sanitizer_malloc_hook(Ptr, Size);
    return Ptr;
  }
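
  // Worked example (illustrative numbers): memalign(64, 40) with a 16-byte
  // MinAlignment needs RoundUpTo(40, 16) + getHeaderSize() bytes, plus
  // (64 - getHeaderSize()) bytes of slack for realignment. If the Primary
  // block leaves UserPtr short of 64-byte alignment, UserPtr is rounded up
  // and the distance moved is recorded in Header.Offset in MinAlignment
  // units.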

  // Place a chunk in the quarantine or directly deallocate it in the event of
  // a zero-sized quarantine, or if the size of the chunk is greater than the
  // quarantine chunk size threshold.
  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
                                   uptr Size) {
    const bool BypassQuarantine = (Quarantine.GetCacheSize() == 0) ||
        (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      Chunk::eraseHeader(Ptr);
      void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
      if (Header->ClassId) {
        bool UnlockRequired;
        ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
        getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
                                       Header->ClassId);
        if (UnlockRequired)
          TSD->unlock();
      } else {
        getBackend().deallocateSecondary(BackendPtr);
      }
    } else {
      // If a small memory amount was allocated with a larger alignment, we
      // want to take that into account. Otherwise the Quarantine would be
      // filled with tiny chunks, taking a lot of VA memory. This is an
      // approximation of the usable size, that allows us to not call
      // GetActuallyAllocatedSize.
      const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      bool UnlockRequired;
      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
      Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
                     Ptr, EstimatedSize);
      if (UnlockRequired)
        TSD->unlock();
    }
  }

  // Deallocates a Chunk, which means either adding it to the quarantine or
  // directly returning it to the backend if criteria are met.
  void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
                  AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning
    // thread local data will be left uninitialized for now (when using ELF
    // TLS). The fallback cache will be used instead. This is a workaround for
    // a situation where the only heap operation performed in a thread would
    // be a free past the TLS destructors, ending up in initialized thread
    // specific data never being destroyed properly. Any other heap operation
    // will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
      __sanitizer_free_hook(Ptr);
    if (UNLIKELY(!Ptr))
      return;
    if (UNLIKELY(!Chunk::isAligned(Ptr)))
      dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc)
          dieWithMessage("allocation type mismatch when deallocating address "
                         "%p\n", Ptr);
      }
    }
    const uptr Size = Chunk::getSize(Ptr, &Header);
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size)
        dieWithMessage("invalid sized delete when deallocating address %p\n",
                       Ptr);
    }
    (void)DeleteAlignment;  // TODO(kostyak): verify that the alignment matches.
    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }
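
  // Note on the type mismatch check: Type reflects the deallocation entry
  // point (free, delete, etc.) while Header.AllocType records the allocation
  // site. The single tolerated mismatch is free() on a FromMemalign chunk,
  // since C code legitimately releases memory obtained from posix_memalign()
  // and friends with plain free().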

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    if (UNLIKELY(!Chunk::isAligned(OldPtr)))
      dieWithMessage("misaligned address when reallocating address %p\n",
                     OldPtr);
    UnpackedHeader OldHeader;
    Chunk::loadHeader(OldPtr, &OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when reallocating address %p\n",
                     OldPtr);
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc))
        dieWithMessage("allocation type mismatch when reallocating address "
                       "%p\n", OldPtr);
    }
    const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
      Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }
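
  // For instance (illustrative numbers only), shrinking a chunk with a usable
  // size of 1024 bytes to 800 bytes stays in place: 800 <= 1024 and the
  // 224-byte difference is below SizeClassMap::kMaxSize / 2, so only
  // SizeOrUnusedBytes is rewritten. Growing past the usable size always takes
  // the allocate + memcpy + quarantine path above.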

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated))
      dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
    return Chunk::getUsableSize(Ptr, &Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      reportCallocOverflow(NMemB, Size);
    }
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoTSD *TSD) {
    Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
    Backend.destroyCache(&TSD->Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    Backend.getStats(stats);
    return stats[StatType];
  }

  bool canReturnNull() {
    initThreadMaybe();
    return AllocatorMayReturnNull();
  }

  void setRssLimit(uptr LimitMb, bool HardLimit) {
    if (HardLimit)
      HardRssLimitMb = LimitMb;
    else
      SoftRssLimitMb = LimitMb;
    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
  }

  void printStats() {
    initThreadMaybe();
    Backend.printStats();
  }
};

NOINLINE void Allocator::performSanityChecks() {
  // Verify that the header offset field can hold the maximum offset. In the
  // case of the Secondary allocator, it takes care of alignment and the
  // offset will always be 0. In the case of the Primary, the worst case
  // scenario happens in the last size class, when the backend allocation
  // would already be aligned on the requested alignment, which would happen
  // to be the maximum alignment that would fit in that size class. As a
  // result, the maximum offset will be at most the maximum alignment for the
  // last size class minus the header size, in multiples of MinAlignment.
  UnpackedHeader Header = {};
  const uptr MaxPrimaryAlignment =
      1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
  const uptr MaxOffset =
      (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
  Header.Offset = MaxOffset;
  if (Header.Offset != MaxOffset)
    dieWithMessage("maximum possible offset doesn't fit in header\n");
  // Verify that we can fit the maximum size or amount of unused bytes in the
  // header. Given that the Secondary fits the allocation to a page, the worst
  // case scenario happens in the Primary. It will depend on the second to
  // last and last class sizes, as well as the dynamic base for the Primary.
  // The following is an over-approximation that works for our needs.
  const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
  Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
  if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
    dieWithMessage("maximum possible unused bytes doesn't fit in header\n");

  const uptr LargestClassId = SizeClassMap::kLargestClassID;
  Header.ClassId = LargestClassId;
  if (Header.ClassId != LargestClassId)
    dieWithMessage("largest class ID doesn't fit in header\n");
}

// Opportunistic RSS limit check. This will update the RSS limit status, if
// it can, every 100ms, otherwise it will just return the current one.
NOINLINE bool Allocator::isRssLimitExceeded() {
  u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
  const u64 CurrentCheck = MonotonicNanoTime();
  if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
    return atomic_load_relaxed(&RssLimitExceeded);
  if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
                                    CurrentCheck, memory_order_relaxed))
    return atomic_load_relaxed(&RssLimitExceeded);
  // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
  //                RSS from /proc/self/statm by default. We might want to
  //                call getrusage directly, even if it's less accurate.
  const uptr CurrentRssMb = GetRSS() >> 20;
  if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
    dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
                   HardRssLimitMb, CurrentRssMb);
  if (SoftRssLimitMb) {
    if (atomic_load_relaxed(&RssLimitExceeded)) {
      if (CurrentRssMb <= SoftRssLimitMb)
        atomic_store_relaxed(&RssLimitExceeded, false);
    } else {
      if (CurrentRssMb > SoftRssLimitMb) {
        atomic_store_relaxed(&RssLimitExceeded, true);
        Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
               SoftRssLimitMb, CurrentRssMb);
      }
    }
  }
  return atomic_load_relaxed(&RssLimitExceeded);
}
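
// In other words, 100ULL * 1000000ULL is 100ms expressed in nanoseconds: the
// weak compare-exchange on RssLastCheckedAtNS elects a single thread per
// window to refresh the verdict, so GetRSS() (a /proc/self/statm read by
// default) stays off the allocation fast path.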

static Allocator Instance(LINKER_INITIALIZED);

static BackendT &getBackend() {
  return Instance.Backend;
}

void initScudo() {
  Instance.init();
}

void ScudoTSD::init() {
  getBackend().initCache(&Cache);
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoTSD::commitBack() {
  Instance.commitBack(this);
}

void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
  if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportAllocationAlignmentNotPowerOfTwo(Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
}

void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
  Instance.deallocate(Ptr, Size, Alignment, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}
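
// The two early returns above implement realloc's corner cases: a null Ptr
// degenerates to malloc(Size), and a zero Size frees Ptr and returns nullptr
// (one of the behaviors the C standard permits for realloc(ptr, 0)).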

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = ENOMEM;
    if (Instance.canReturnNull())
      return nullptr;
    reportPvallocOverflow(Size);
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    if (!Instance.canReturnNull())
      reportInvalidPosixMemalignAlignment(Alignment);
    return EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = EINVAL;
    if (Instance.canReturnNull())
      return nullptr;
    reportInvalidAlignedAllocAlignment(Size, Alignment);
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
  return Size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *Ptr, uptr Size) {
  (void)Ptr;
  (void)Size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
  (void)Ptr;
}
#endif

// Interface functions

void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
  if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
    return;
  Instance.setRssLimit(LimitMb, !!HardLimit);
}

void __scudo_print_stats() {
  Instance.printStats();
}