//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif  // SCUDO_ALLOCATOR_H_
24 class ScudoLargeMmapAllocator {
27 void Init(bool AllocatorMayReturnNull) {
28 PageSize = GetPageSizeCached();
29 atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_relaxed);
32 void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
33 // The Scudo frontend prevents us from allocating more than
34 // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
35 uptr MapSize = Size + SecondaryHeaderSize;
36 MapSize = RoundUpTo(MapSize, PageSize);
37 // Account for 2 guard pages, one before and one after the chunk.
38 MapSize += 2 * PageSize;
39 // The size passed to the Secondary comprises the alignment, if large
40 // enough. Subtract it here to get the requested size, including header.
41 if (Alignment > MinAlignment)
44 uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
45 if (MapBeg == ~static_cast<uptr>(0))
46 return ReturnNullOrDieOnOOM();
47 // A page-aligned pointer is assumed after that, so check it now.
48 CHECK(IsAligned(MapBeg, PageSize));
49 uptr MapEnd = MapBeg + MapSize;
50 // The beginning of the user area for that allocation comes after the
51 // initial guard page, and both headers. This is the pointer that has to
52 // abide by alignment requirements.
53 uptr UserBeg = MapBeg + PageSize + HeadersSize;
55 // In the rare event of larger alignments, we will attempt to fit the mmap
56 // area better and unmap extraneous memory. This will also ensure that the
57 // offset and unused bytes field of the header stay small.
58 if (Alignment > MinAlignment) {
59 if (UserBeg & (Alignment - 1))
60 UserBeg += Alignment - (UserBeg & (Alignment - 1));
61 CHECK_GE(UserBeg, MapBeg);
62 uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
63 CHECK_GE(NewMapBeg, MapBeg);
64 uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
66 CHECK_LE(NewMapEnd, MapEnd);
67 // Unmap the extra memory if it's large enough, on both sides.
68 uptr Diff = NewMapBeg - MapBeg;
70 UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
71 Diff = MapEnd - NewMapEnd;
73 UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
76 MapSize = NewMapEnd - NewMapBeg;
79 uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
80 CHECK_LE(UserEnd, MapEnd - PageSize);
81 // Actually mmap the memory, preserving the guard pages on either side.
82 CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
83 MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
84 uptr Ptr = UserBeg - AlignedChunkHeaderSize;
85 SecondaryHeader *Header = getHeader(Ptr);
86 Header->MapBeg = MapBeg;
87 Header->MapSize = MapSize;
88 // The primary adds the whole class size to the stats when allocating a
89 // chunk, so we will do something similar here. But we will not account for
91 Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
92 Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
94 return reinterpret_cast<void *>(UserBeg);
97 void *ReturnNullOrDieOnBadRequest() {
98 if (atomic_load(&MayReturnNull, memory_order_acquire))
100 ReportAllocatorCannotReturnNull(false);
103 void *ReturnNullOrDieOnOOM() {
104 if (atomic_load(&MayReturnNull, memory_order_acquire))
106 ReportAllocatorCannotReturnNull(true);
109 void SetMayReturnNull(bool AllocatorMayReturnNull) {
110 atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_release);
113 void Deallocate(AllocatorStats *Stats, void *Ptr) {
114 SecondaryHeader *Header = getHeader(Ptr);
115 Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
116 Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
117 UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
120 uptr TotalMemoryUsed() {
124 bool PointerIsMine(const void *Ptr) {
128 uptr GetActuallyAllocatedSize(void *Ptr) {
129 SecondaryHeader *Header = getHeader(Ptr);
130 // Deduct PageSize as MapEnd includes the trailing guard page.
131 uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
132 return MapEnd - reinterpret_cast<uptr>(Ptr);
135 void *GetMetaData(const void *Ptr) {
139 void *GetBlockBegin(const void *Ptr) {
143 void *GetBlockBeginFastLocked(void *Ptr) {
159 void ForEachChunk(ForEachChunkCallback Callback, void *Arg) {
164 // A Secondary allocated chunk header contains the base of the mapping and
165 // its size. Currently, the base is always a page before the header, but
166 // we might want to extend that number in the future based on the size of
168 struct SecondaryHeader {
172 // Check that sizeof(SecondaryHeader) is a multiple of MinAlignment.
173 COMPILER_CHECK((sizeof(SecondaryHeader) & (MinAlignment - 1)) == 0);
175 SecondaryHeader *getHeader(uptr Ptr) {
176 return reinterpret_cast<SecondaryHeader*>(Ptr - sizeof(SecondaryHeader));
178 SecondaryHeader *getHeader(const void *Ptr) {
179 return getHeader(reinterpret_cast<uptr>(Ptr));
182 const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
183 const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
185 atomic_uint8_t MayReturnNull;
#endif  // SCUDO_ALLOCATOR_SECONDARY_H_