1 //===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 /// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
14 //===----------------------------------------------------------------------===//
16 #ifndef SCUDO_ALLOCATOR_SECONDARY_H_
17 #define SCUDO_ALLOCATOR_SECONDARY_H_
#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif  // SCUDO_ALLOCATOR_H_
23 // Secondary backed allocations are standalone chunks that contain extra
24 // information stored in a LargeChunk::Header prior to the frontend's header.
26 // The secondary takes care of alignment requirements (so that it can release
27 // unnecessary pages in the rare event of larger alignments), and as such must
28 // know about the frontend's header size.
30 // Since Windows doesn't support partial releasing of a reserved memory region,
31 // we have to keep track of both the reserved and the committed memory.
// The resulting chunk resembles the following:
//
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
//   | Unused space       |
//   +--------------------+
//   | LargeChunk::Header |
//   +--------------------+
//   | {Unp,P}ackedHeader |
//   +--------------------+
//   | Data (aligned)     |
//   +--------------------+
//   | Unused space       |
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
51 namespace LargeChunk {
53 ReservedAddressRange StoredRange;
57 constexpr uptr getHeaderSize() {
58 return RoundUpTo(sizeof(Header), MinAlignment);
60 static Header *getHeader(uptr Ptr) {
61 return reinterpret_cast<Header *>(Ptr - getHeaderSize());
63 static Header *getHeader(const void *Ptr) {
64 return getHeader(reinterpret_cast<uptr>(Ptr));
66 } // namespace LargeChunk
68 class LargeMmapAllocator {
71 internal_memset(this, 0, sizeof(*this));
74 void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
75 const uptr UserSize = Size - Chunk::getHeaderSize();
76 // The Scudo frontend prevents us from allocating more than
77 // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
78 uptr ReservedSize = Size + LargeChunk::getHeaderSize();
79 if (UNLIKELY(Alignment > MinAlignment))
80 ReservedSize += Alignment;
81 const uptr PageSize = GetPageSizeCached();
82 ReservedSize = RoundUpTo(ReservedSize, PageSize);
83 // Account for 2 guard pages, one before and one after the chunk.
84 ReservedSize += 2 * PageSize;
86 ReservedAddressRange AddressRange;
87 uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
88 if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
90 // A page-aligned pointer is assumed after that, so check it now.
91 DCHECK(IsAligned(ReservedBeg, PageSize));
92 uptr ReservedEnd = ReservedBeg + ReservedSize;
93 // The beginning of the user area for that allocation comes after the
94 // initial guard page, and both headers. This is the pointer that has to
95 // abide by alignment requirements.
96 uptr CommittedBeg = ReservedBeg + PageSize;
97 uptr UserBeg = CommittedBeg + HeadersSize;
98 uptr UserEnd = UserBeg + UserSize;
99 uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);
101 // In the rare event of larger alignments, we will attempt to fit the mmap
102 // area better and unmap extraneous memory. This will also ensure that the
103 // offset and unused bytes field of the header stay small.
104 if (UNLIKELY(Alignment > MinAlignment)) {
105 if (!IsAligned(UserBeg, Alignment)) {
106 UserBeg = RoundUpTo(UserBeg, Alignment);
107 CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
108 const uptr NewReservedBeg = CommittedBeg - PageSize;
109 DCHECK_GE(NewReservedBeg, ReservedBeg);
110 if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
111 AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
112 ReservedBeg = NewReservedBeg;
114 UserEnd = UserBeg + UserSize;
115 CommittedEnd = RoundUpTo(UserEnd, PageSize);
117 const uptr NewReservedEnd = CommittedEnd + PageSize;
118 DCHECK_LE(NewReservedEnd, ReservedEnd);
119 if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
120 AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
121 ReservedEnd = NewReservedEnd;
125 DCHECK_LE(UserEnd, CommittedEnd);
126 const uptr CommittedSize = CommittedEnd - CommittedBeg;
127 // Actually mmap the memory, preserving the guard pages on either sides.
128 CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
129 const uptr Ptr = UserBeg - Chunk::getHeaderSize();
130 LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
131 H->StoredRange = AddressRange;
132 H->Size = CommittedEnd - Ptr;
133 H->CommittedSize = CommittedSize;
135 // The primary adds the whole class size to the stats when allocating a
136 // chunk, so we will do something similar here. But we will not account for
139 SpinMutexLock l(&StatsMutex);
140 Stats->Add(AllocatorStatAllocated, CommittedSize);
141 Stats->Add(AllocatorStatMapped, CommittedSize);
142 AllocatedBytes += CommittedSize;
143 if (LargestSize < CommittedSize)
144 LargestSize = CommittedSize;
148 return reinterpret_cast<void *>(Ptr);
151 void Deallocate(AllocatorStats *Stats, void *Ptr) {
152 LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
153 // Since we're unmapping the entirety of where the ReservedAddressRange
154 // actually is, copy onto the stack.
155 ReservedAddressRange AddressRange = H->StoredRange;
156 const uptr Size = H->CommittedSize;
158 SpinMutexLock l(&StatsMutex);
159 Stats->Sub(AllocatorStatAllocated, Size);
160 Stats->Sub(AllocatorStatMapped, Size);
164 AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
165 AddressRange.size());
168 static uptr GetActuallyAllocatedSize(void *Ptr) {
169 return LargeChunk::getHeader(Ptr)->Size;
173 Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
174 "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
175 NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
176 FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
177 (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
181 static constexpr uptr HeadersSize =
182 LargeChunk::getHeaderSize() + Chunk::getHeaderSize();
184 StaticSpinMutex StatsMutex;
192 #endif // SCUDO_ALLOCATOR_SECONDARY_H_