//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocation that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//
17 #ifndef SCUDO_ALLOCATOR_SECONDARY_H_
18 #define SCUDO_ALLOCATOR_SECONDARY_H_
20 #ifndef SCUDO_ALLOCATOR_H_
21 # error "This file must be included inside scudo_allocator.h."
// Secondary backed allocations are standalone chunks that contain extra
// information stored in a LargeChunk::Header prior to the frontend's header.
//
// The secondary takes care of alignment requirements (so that it can release
// unnecessary pages in the rare event of larger alignments), and as such must
// know about the frontend's header size.
//
// Since Windows doesn't support partial releasing of a reserved memory region,
// we have to keep track of both the reserved and the committed memory.
//
// The resulting chunk resembles the following:
//
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
//   | Unused space*      |
//   +--------------------+
//   | LargeChunk::Header |
//   +--------------------+
//   | {Unp,P}ackedHeader |
//   +--------------------+
//   | Data (aligned)     |
//   +--------------------+
//   | Unused space**     |
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
52 namespace LargeChunk {
54 ReservedAddressRange StoredRange;
58 constexpr uptr getHeaderSize() {
59 return RoundUpTo(sizeof(Header), MinAlignment);
61 static Header *getHeader(uptr Ptr) {
62 return reinterpret_cast<Header *>(Ptr - getHeaderSize());
64 static Header *getHeader(const void *Ptr) {
65 return getHeader(reinterpret_cast<uptr>(Ptr));
67 } // namespace LargeChunk
69 class LargeMmapAllocator {
72 internal_memset(this, 0, sizeof(*this));
75 void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
76 const uptr UserSize = Size - Chunk::getHeaderSize();
77 // The Scudo frontend prevents us from allocating more than
78 // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
79 uptr ReservedSize = Size + LargeChunk::getHeaderSize();
80 if (UNLIKELY(Alignment > MinAlignment))
81 ReservedSize += Alignment;
82 const uptr PageSize = GetPageSizeCached();
83 ReservedSize = RoundUpTo(ReservedSize, PageSize);
84 // Account for 2 guard pages, one before and one after the chunk.
85 ReservedSize += 2 * PageSize;
87 ReservedAddressRange AddressRange;
88 uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
89 if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
91 // A page-aligned pointer is assumed after that, so check it now.
92 DCHECK(IsAligned(ReservedBeg, PageSize));
93 uptr ReservedEnd = ReservedBeg + ReservedSize;
94 // The beginning of the user area for that allocation comes after the
95 // initial guard page, and both headers. This is the pointer that has to
96 // abide by alignment requirements.
97 uptr CommittedBeg = ReservedBeg + PageSize;
98 uptr UserBeg = CommittedBeg + HeadersSize;
99 uptr UserEnd = UserBeg + UserSize;
100 uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);
102 // In the rare event of larger alignments, we will attempt to fit the mmap
103 // area better and unmap extraneous memory. This will also ensure that the
104 // offset and unused bytes field of the header stay small.
105 if (UNLIKELY(Alignment > MinAlignment)) {
106 if (!IsAligned(UserBeg, Alignment)) {
107 UserBeg = RoundUpTo(UserBeg, Alignment);
108 CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
109 const uptr NewReservedBeg = CommittedBeg - PageSize;
110 DCHECK_GE(NewReservedBeg, ReservedBeg);
111 if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
112 AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
113 ReservedBeg = NewReservedBeg;
115 UserEnd = UserBeg + UserSize;
116 CommittedEnd = RoundUpTo(UserEnd, PageSize);
118 const uptr NewReservedEnd = CommittedEnd + PageSize;
119 DCHECK_LE(NewReservedEnd, ReservedEnd);
120 if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
121 AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
122 ReservedEnd = NewReservedEnd;
126 DCHECK_LE(UserEnd, CommittedEnd);
127 const uptr CommittedSize = CommittedEnd - CommittedBeg;
128 // Actually mmap the memory, preserving the guard pages on either sides.
129 CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
130 const uptr Ptr = UserBeg - Chunk::getHeaderSize();
131 LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
132 H->StoredRange = AddressRange;
133 H->Size = CommittedEnd - Ptr;
134 H->CommittedSize = CommittedSize;
136 // The primary adds the whole class size to the stats when allocating a
137 // chunk, so we will do something similar here. But we will not account for
140 SpinMutexLock l(&StatsMutex);
141 Stats->Add(AllocatorStatAllocated, CommittedSize);
142 Stats->Add(AllocatorStatMapped, CommittedSize);
143 AllocatedBytes += CommittedSize;
144 if (LargestSize < CommittedSize)
145 LargestSize = CommittedSize;
149 return reinterpret_cast<void *>(Ptr);
152 void Deallocate(AllocatorStats *Stats, void *Ptr) {
153 LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
154 // Since we're unmapping the entirety of where the ReservedAddressRange
155 // actually is, copy onto the stack.
156 ReservedAddressRange AddressRange = H->StoredRange;
157 const uptr Size = H->CommittedSize;
159 SpinMutexLock l(&StatsMutex);
160 Stats->Sub(AllocatorStatAllocated, Size);
161 Stats->Sub(AllocatorStatMapped, Size);
165 AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
166 AddressRange.size());
169 static uptr GetActuallyAllocatedSize(void *Ptr) {
170 return LargeChunk::getHeader(Ptr)->Size;
174 Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
175 "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
176 NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
177 FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
178 (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
182 static constexpr uptr HeadersSize =
183 LargeChunk::getHeaderSize() + Chunk::getHeaderSize();
185 StaticSpinMutex StatsMutex;
193 #endif // SCUDO_ALLOCATOR_SECONDARY_H_