//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/options.h"

// RHEL creates the PRIu64 format macro (for printing uint64_t's) only when
// this macro is defined before including <inttypes.h>.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

using AllocationMetadata = gwp_asan::GuardedPoolAllocator::AllocationMetadata;
using Error = gwp_asan::GuardedPoolAllocator::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton instance of this class. Set up during
// initialisation, this allows the signal handler to find the allocator in
// order to deduce the root cause of failures. Must not be referenced by users
// outside this translation unit, in order to avoid an init-order fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;
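
// RAII helper that sets a bool for the lifetime of a scope. Used below to flag
// re-entry, so that code which may call back into the allocator or into a
// non-reentrant unwinder can detect and break the recursion.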
class ScopedBoolean {
public:
  ScopedBoolean(bool &B) : Bool(B) { Bool = true; }
  ~ScopedBoolean() { Bool = false; }

private:
  bool &Bool;
};

void defaultPrintStackTrace(uintptr_t *Trace, options::Printf_t Printf) {
  if (Trace[0] == 0)
    Printf("  <unknown (does your allocator support backtracing?)>\n");

  for (size_t i = 0; Trace[i] != 0; ++i) {
    Printf("  #%zu 0x%zx in <unknown>\n", i, Trace[i]);
  }
  Printf("\n");
}
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *getSingleton() { return SingletonPtr; }

void GuardedPoolAllocator::AllocationMetadata::RecordAllocation(
    uintptr_t AllocAddr, size_t AllocSize, options::Backtrace_t Backtrace) {
  Addr = AllocAddr;
  Size = AllocSize;
  IsDeallocated = false;

  // TODO(hctim): Ask the caller to provide the thread ID, so we don't waste
  // other threads' time getting the thread ID under lock.
  AllocationTrace.ThreadID = getThreadID();
  DeallocationTrace.ThreadID = kInvalidThreadID;
  if (Backtrace)
    Backtrace(AllocationTrace.Trace, kMaximumStackFrames);
  else
    AllocationTrace.Trace[0] = 0;
  DeallocationTrace.Trace[0] = 0;
}
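
// Note: both traces use a leading zero frame (Trace[0] == 0) as the
// empty-trace sentinel; defaultPrintStackTrace() above stops at the first
// zero frame.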

void GuardedPoolAllocator::AllocationMetadata::RecordDeallocation(
    options::Backtrace_t Backtrace) {
  IsDeallocated = true;
  // Ensure that the unwinder is not called if the recursive flag is set,
  // otherwise non-reentrant unwinders may deadlock.
  if (Backtrace && !ThreadLocals.RecursiveGuard) {
    ScopedBoolean B(ThreadLocals.RecursiveGuard);
    Backtrace(DeallocationTrace.Trace, kMaximumStackFrames);
  } else {
    DeallocationTrace.Trace[0] = 0;
  }
  DeallocationTrace.ThreadID = getThreadID();
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return early here if GWP-ASan is not enabled. This avoids
  // heap-allocation of class members, as well as mmap() of the guarded pool.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  // TODO(hctim): Add a death unit test for this.
  if (SingletonPtr) {
    (*SingletonPtr->Printf)(
        "GWP-ASan Error: init() has already been called.\n");
    exit(EXIT_FAILURE);
  }

  if (Opts.SampleRate < 0) {
    Opts.Printf("GWP-ASan Error: SampleRate is < 0.\n");
    exit(EXIT_FAILURE);
  }

  if (Opts.SampleRate > INT32_MAX) {
    Opts.Printf("GWP-ASan Error: SampleRate is > 2^31.\n");
    exit(EXIT_FAILURE);
  }

  if (Opts.MaxSimultaneousAllocations < 0) {
    Opts.Printf("GWP-ASan Error: MaxSimultaneousAllocations is < 0.\n");
    exit(EXIT_FAILURE);
  }

  SingletonPtr = this;

  MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  PageSize = getPlatformPageSize();

  PerfectlyRightAlign = Opts.PerfectlyRightAlign;
  Printf = Opts.Printf;
  Backtrace = Opts.Backtrace;
  if (Opts.PrintBacktrace)
    PrintBacktrace = Opts.PrintBacktrace;
  else
    PrintBacktrace = defaultPrintStackTrace;
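
  // The guarded pool is laid out as alternating guard pages and allocation
  // slots, with one extra guard page at the end:
  //
  //   [guard] [slot 0] [guard] [slot 1] [guard] ... [slot N-1] [guard]
  //
  // With N == MaxSimultaneousAllocations and each slot maximumAllocationSize()
  // (here equal to PageSize) bytes wide, that is (N + 1) guard pages plus N
  // slot pages. For example, with 4 KiB pages and N == 3, the pool occupies
  // 4 * 4 KiB + 3 * 4 KiB = 28 KiB.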
  size_t PoolBytesRequired =
      PageSize * (1 + MaxSimultaneousAllocations) +
      MaxSimultaneousAllocations * maximumAllocationSize();
  void *GuardedPoolMemory = mapMemory(PoolBytesRequired);

  size_t BytesRequired = MaxSimultaneousAllocations * sizeof(*Metadata);
  Metadata = reinterpret_cast<AllocationMetadata *>(mapMemory(BytesRequired));
  markReadWrite(Metadata, BytesRequired);

  // Allocate memory and set up the free pages queue.
  BytesRequired = MaxSimultaneousAllocations * sizeof(*FreeSlots);
  FreeSlots = reinterpret_cast<size_t *>(mapMemory(BytesRequired));
  markReadWrite(FreeSlots, BytesRequired);

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRate = static_cast<uint32_t>(Opts.SampleRate) * 2;
  else
    AdjustedSampleRate = 1;
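  // (A uniform draw from [1, 2 * SampleRate] has a mean of roughly SampleRate,
  // approximating the geometric inter-sample gap that a true independent
  // 1-in-SampleRate coin flip per allocation would produce.)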

  GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  // Ensure that signal handlers are installed as late as possible, as the
  // class is not thread-safe until init() is finished, and thus a SIGSEGV may
  // cause a race to members if received during init().
  if (Opts.InstallSignalHandlers)
    installSignalHandlers();
}

void *GuardedPoolAllocator::allocate(size_t Size) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled,
  // fall back to the supporting allocator.
  if (GuardedPagePoolEnd == 0)
    return nullptr;

  // Protect against recursive allocation.
  if (ThreadLocals.RecursiveGuard)
    return nullptr;
  ScopedBoolean SB(ThreadLocals.RecursiveGuard);

  if (Size == 0 || Size > maximumAllocationSize())
    return nullptr;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t Ptr = slotToAddr(Index);
  Ptr += allocationSlotOffset(Size);
  AllocationMetadata *Meta = addrToMetadata(Ptr);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages
  // inaccessible.
  markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr)), Size);

  Meta->RecordAllocation(Ptr, Size, Backtrace);

  return reinterpret_cast<void *>(Ptr);
}
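
// A minimal sketch of how a supporting allocator is expected to call into
// GWP-ASan on its fast path, using shouldSample() as declared in
// guarded_pool_allocator.h. Here `GPA` is the embedder's GuardedPoolAllocator
// instance and `backing_malloc` stands in for its real allocation routine:
//
//   if (GPA.shouldSample())
//     if (void *P = GPA.allocate(Size))
//       return P;
//   return backing_malloc(Size);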

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  uintptr_t SlotStart = slotToAddr(addrToSlot(UPtr));
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    reportError(UPtr, Error::INVALID_FREE);
    exit(EXIT_FAILURE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      reportError(UPtr, Error::DOUBLE_FREE);
      exit(EXIT_FAILURE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation(Backtrace);
  }

  markInaccessible(reinterpret_cast<void *>(SlotStart),
                   maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(addrToSlot(UPtr));
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->Size;
}

size_t GuardedPoolAllocator::maximumAllocationSize() const { return PageSize; }

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[addrToSlot(Ptr)];
}

size_t GuardedPoolAllocator::addrToSlot(uintptr_t Ptr) const {
  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
  size_t ByteOffsetFromPoolStart = Ptr - GuardedPagePool;
  return ByteOffsetFromPoolStart / (maximumAllocationSize() + PageSize);
}

uintptr_t GuardedPoolAllocator::slotToAddr(size_t N) const {
  return GuardedPagePool + (PageSize * (1 + N)) + (maximumAllocationSize() * N);
}
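
// Worked example for slotToAddr() with 4 KiB pages (so maximumAllocationSize()
// == PageSize): slot 0 begins one page past the pool base, skipping the
// leading guard page, and each subsequent slot begins 8 KiB after the previous
// one (one slot plus one guard page).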

uintptr_t GuardedPoolAllocator::getPageAddr(uintptr_t Ptr) const {
  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
  return Ptr & ~(static_cast<uintptr_t>(PageSize) - 1);
}
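
// A page is a guard page iff its page index within the pool is a multiple of
// (PagesPerSlot + 1): page 0 is the leading guard page, and every
// (PagesPerSlot + 1)-th page after that separates two slots.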
bool GuardedPoolAllocator::isGuardPage(uintptr_t Ptr) const {
  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
  size_t PageOffsetFromPoolStart = (Ptr - GuardedPagePool) / PageSize;
  size_t PagesPerSlot = maximumAllocationSize() / PageSize;
  return (PageOffsetFromPoolStart % (PagesPerSlot + 1)) == 0;
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  // Pick a free slot uniformly at random, then remove it from FreeSlots in
  // O(1) by overwriting it with the last element.
  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

uintptr_t GuardedPoolAllocator::allocationSlotOffset(size_t Size) const {
  assert(Size > 0);

  // Right-align roughly half of all allocations so that overflows hit the
  // trailing guard page; left-aligned allocations catch underflows instead.
  bool ShouldRightAlign = getRandomUnsigned32() % 2 == 0;
  if (!ShouldRightAlign)
    return 0;

  uintptr_t Offset = maximumAllocationSize();
  if (!PerfectlyRightAlign) {
    // Round the request up so the right-aligned pointer keeps an alignment
    // appropriate to its size: 4 bytes for a 3-byte request, 8 bytes for
    // 5-8 bytes, and multiples of 16 above that.
    if (Size == 3)
      Size = 4;
    else if (Size > 4 && Size <= 8)
      Size = 8;
    else if (Size > 8 && (Size % 16) != 0)
      Size += 16 - (Size % 16);
  }
  Offset -= Size;
  return Offset;
}
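
// Example: a right-aligned 13-byte allocation without PerfectlyRightAlign is
// rounded up to 16 bytes, so the returned pointer is 16-byte aligned and 3
// bytes of slack remain between the end of the allocation and the trailing
// guard page. With PerfectlyRightAlign, the allocation abuts the guard page
// exactly, at the cost of pointer alignment.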

void GuardedPoolAllocator::reportError(uintptr_t AccessPtr, Error E) {
  if (SingletonPtr)
    SingletonPtr->reportErrorInternal(AccessPtr, E);
}

size_t GuardedPoolAllocator::getNearestSlot(uintptr_t Ptr) const {
  if (Ptr <= GuardedPagePool + PageSize)
    return 0;
  if (Ptr > GuardedPagePoolEnd - PageSize)
    return MaxSimultaneousAllocations - 1;

  if (!isGuardPage(Ptr))
    return addrToSlot(Ptr);

  // The access is on a guard page. Attribute the first half of the page to the
  // preceding slot (likely an overflow), and the second half to the following
  // slot (likely an underflow).
  if (Ptr % PageSize <= PageSize / 2)
    return addrToSlot(Ptr - PageSize); // Round down.
  return addrToSlot(Ptr + PageSize);   // Round up.
}
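
// Classifies a fault whose cause is not already known: a guard-page hit to the
// right of its nearest slot is reported as a buffer overflow and to the left
// as an underflow, while a fault inside a slot whose metadata is marked
// deallocated is reported as a use-after-free.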
Error GuardedPoolAllocator::diagnoseUnknownError(uintptr_t AccessPtr,
                                                 AllocationMetadata **Meta) {
  // Let's try and figure out what the source of this error is.
  if (isGuardPage(AccessPtr)) {
    size_t Slot = getNearestSlot(AccessPtr);
    AllocationMetadata *SlotMeta = addrToMetadata(slotToAddr(Slot));

    // Ensure that this slot was allocated once upon a time.
    if (!SlotMeta->Addr)
      return Error::UNKNOWN;
    *Meta = SlotMeta;

    if (SlotMeta->Addr < AccessPtr)
      return Error::BUFFER_OVERFLOW;
    return Error::BUFFER_UNDERFLOW;
  }

  // Access wasn't a guard page, check for use-after-free.
  AllocationMetadata *SlotMeta = addrToMetadata(AccessPtr);
  if (SlotMeta->IsDeallocated) {
    *Meta = SlotMeta;
    return Error::USE_AFTER_FREE;
  }

  // If we have reached here, the error is still unknown. There is no metadata
  // available.
  return Error::UNKNOWN;
}

namespace {
// Prints the provided error and metadata information.
void printErrorType(Error E, uintptr_t AccessPtr, AllocationMetadata *Meta,
                    options::Printf_t Printf, uint64_t ThreadID) {
  // Print using intermediate strings. Platforms like Android don't like when
  // you print multiple times to the same line, as there may be a newline
  // appended to a log file automatically per Printf() call.
  const char *ErrorString;
  switch (E) {
  case Error::UNKNOWN:
    ErrorString = "GWP-ASan couldn't automatically determine the source of "
                  "the memory error. It was likely caused by a wild memory "
                  "access into the GWP-ASan pool. The error occurred";
    break;
  case Error::USE_AFTER_FREE:
    ErrorString = "Use after free";
    break;
  case Error::DOUBLE_FREE:
    ErrorString = "Double free";
    break;
  case Error::INVALID_FREE:
    ErrorString = "Invalid (wild) free";
    break;
  case Error::BUFFER_OVERFLOW:
    ErrorString = "Buffer overflow";
    break;
  case Error::BUFFER_UNDERFLOW:
    ErrorString = "Buffer underflow";
    break;
  }

  constexpr size_t kDescriptionBufferLen = 128;
  // Zero-initialise so nothing stale is printed when Meta is null.
  char DescriptionBuffer[kDescriptionBufferLen] = "";
  if (Meta) {
    if (E == Error::USE_AFTER_FREE) {
      snprintf(DescriptionBuffer, kDescriptionBufferLen,
               "(%zu byte%s into a %zu-byte allocation at 0x%zx)",
               AccessPtr - Meta->Addr, (AccessPtr - Meta->Addr == 1) ? "" : "s",
               Meta->Size, Meta->Addr);
    } else if (AccessPtr < Meta->Addr) {
      snprintf(DescriptionBuffer, kDescriptionBufferLen,
               "(%zu byte%s to the left of a %zu-byte allocation at 0x%zx)",
               Meta->Addr - AccessPtr, (Meta->Addr - AccessPtr == 1) ? "" : "s",
               Meta->Size, Meta->Addr);
    } else if (AccessPtr > Meta->Addr) {
      snprintf(DescriptionBuffer, kDescriptionBufferLen,
               "(%zu byte%s to the right of a %zu-byte allocation at 0x%zx)",
               AccessPtr - Meta->Addr, (AccessPtr - Meta->Addr == 1) ? "" : "s",
               Meta->Size, Meta->Addr);
    } else {
      snprintf(DescriptionBuffer, kDescriptionBufferLen,
               "(a %zu-byte allocation)", Meta->Size);
    }
  }

  // Possible number of digits of a 64-bit number: ceil(log10(2^64)) == 20. Add
  // a null terminator, and round to the nearest 8-byte boundary.
  constexpr size_t kThreadBufferLen = 24;
  char ThreadBuffer[kThreadBufferLen];
  if (ThreadID == GuardedPoolAllocator::kInvalidThreadID)
    snprintf(ThreadBuffer, kThreadBufferLen, "<unknown>");
  else
    snprintf(ThreadBuffer, kThreadBufferLen, "%" PRIu64, ThreadID);

  Printf("%s at 0x%zx %s by thread %s here:\n", ErrorString, AccessPtr,
         DescriptionBuffer, ThreadBuffer);
}
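
// For instance (addresses and thread ID purely illustrative), the header that
// printErrorType() emits looks like:
//
//   Buffer overflow at 0x7f0000001008 (16 bytes to the right of a 8-byte
//   allocation at 0x7f0000000ff8) by thread 12345 here: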

void printAllocDeallocTraces(uintptr_t AccessPtr, AllocationMetadata *Meta,
                             options::Printf_t Printf,
                             options::PrintBacktrace_t PrintBacktrace) {
  assert(Meta != nullptr && "Metadata is non-null for printAllocDeallocTraces");

  if (Meta->IsDeallocated) {
    if (Meta->DeallocationTrace.ThreadID ==
        GuardedPoolAllocator::kInvalidThreadID)
      Printf("0x%zx was deallocated by thread <unknown> here:\n", AccessPtr);
    else
      Printf("0x%zx was deallocated by thread %" PRIu64 " here:\n", AccessPtr,
             Meta->DeallocationTrace.ThreadID);

    PrintBacktrace(Meta->DeallocationTrace.Trace, Printf);
  }

  if (Meta->AllocationTrace.ThreadID == GuardedPoolAllocator::kInvalidThreadID)
    Printf("0x%zx was allocated by thread <unknown> here:\n", Meta->Addr);
  else
    Printf("0x%zx was allocated by thread %" PRIu64 " here:\n", Meta->Addr,
           Meta->AllocationTrace.ThreadID);

  PrintBacktrace(Meta->AllocationTrace.Trace, Printf);
}

struct ScopedEndOfReportDecorator {
  ScopedEndOfReportDecorator(options::Printf_t Printf) : Printf(Printf) {}
  ~ScopedEndOfReportDecorator() { Printf("*** End GWP-ASan report ***\n"); }
  options::Printf_t Printf;
};
} // anonymous namespace

void GuardedPoolAllocator::reportErrorInternal(uintptr_t AccessPtr, Error E) {
  if (!pointerIsMine(reinterpret_cast<void *>(AccessPtr))) {
    return;
  }

  // Attempt to prevent races to re-use the same slot that triggered this
  // error. This does not guarantee that there are no races, because another
  // thread can take the locks during the time that the signal handler is
  // being called.
  PoolMutex.tryLock();
  ThreadLocals.RecursiveGuard = true;

  Printf("*** GWP-ASan detected a memory error ***\n");
  ScopedEndOfReportDecorator Decorator(Printf);

  AllocationMetadata *Meta = nullptr;

  if (E == Error::UNKNOWN) {
    E = diagnoseUnknownError(AccessPtr, &Meta);
  } else {
    size_t Slot = getNearestSlot(AccessPtr);
    Meta = addrToMetadata(slotToAddr(Slot));
    // Ensure that this slot has been previously allocated.
    if (!Meta->Addr)
      Meta = nullptr;
  }

  // Print the error information.
  uint64_t ThreadID = getThreadID();
  printErrorType(E, AccessPtr, Meta, Printf, ThreadID);

  if (Backtrace) {
    static constexpr unsigned kMaximumStackFramesForCrashTrace = 128;
    uintptr_t Trace[kMaximumStackFramesForCrashTrace];
    Backtrace(Trace, kMaximumStackFramesForCrashTrace);

    PrintBacktrace(Trace, Printf);
  } else {
    Printf("  <unknown (does your allocator support backtracing?)>\n\n");
  }

  if (Meta)
    printAllocDeallocTraces(AccessPtr, Meta, Printf, PrintBacktrace);
}

// TLS_INITIAL_EXEC comes from the GWP-ASan headers and expands to the
// thread_local specifier (pinned to the initial-exec TLS model).
TLS_INITIAL_EXEC
GuardedPoolAllocator::ThreadLocalPackedVariables
    GuardedPoolAllocator::ThreadLocals;
} // namespace gwp_asan