//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocation that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//
17 #ifndef SCUDO_ALLOCATOR_SECONDARY_H_
18 #define SCUDO_ALLOCATOR_SECONDARY_H_
20 #ifndef SCUDO_ALLOCATOR_H_
21 # error "This file must be included inside scudo_allocator.h."
24 class ScudoLargeMmapAllocator {
28 PageSize = GetPageSizeCached();
31 void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
32 uptr UserSize = Size - AlignedChunkHeaderSize;
33 // The Scudo frontend prevents us from allocating more than
34 // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
35 uptr MapSize = Size + SecondaryHeaderSize;
36 if (Alignment > MinAlignment)
38 MapSize = RoundUpTo(MapSize, PageSize);
39 // Account for 2 guard pages, one before and one after the chunk.
40 MapSize += 2 * PageSize;
42 uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
43 if (MapBeg == ~static_cast<uptr>(0))
44 return ReturnNullOrDieOnFailure::OnOOM();
45 // A page-aligned pointer is assumed after that, so check it now.
46 CHECK(IsAligned(MapBeg, PageSize));
47 uptr MapEnd = MapBeg + MapSize;
48 // The beginning of the user area for that allocation comes after the
49 // initial guard page, and both headers. This is the pointer that has to
50 // abide by alignment requirements.
51 uptr UserBeg = MapBeg + PageSize + HeadersSize;
52 uptr UserEnd = UserBeg + UserSize;
54 // In the rare event of larger alignments, we will attempt to fit the mmap
55 // area better and unmap extraneous memory. This will also ensure that the
56 // offset and unused bytes field of the header stay small.
57 if (Alignment > MinAlignment) {
58 if (!IsAligned(UserBeg, Alignment)) {
59 UserBeg = RoundUpTo(UserBeg, Alignment);
60 CHECK_GE(UserBeg, MapBeg);
61 uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) -
63 CHECK_GE(NewMapBeg, MapBeg);
64 if (NewMapBeg != MapBeg) {
65 UnmapOrDie(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
68 UserEnd = UserBeg + UserSize;
70 uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
71 if (NewMapEnd != MapEnd) {
72 UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
75 MapSize = MapEnd - MapBeg;
78 CHECK_LE(UserEnd, MapEnd - PageSize);
79 // Actually mmap the memory, preserving the guard pages on either side.
80 CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
81 MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
82 uptr Ptr = UserBeg - AlignedChunkHeaderSize;
83 SecondaryHeader *Header = getHeader(Ptr);
84 Header->MapBeg = MapBeg;
85 Header->MapSize = MapSize;
86 // The primary adds the whole class size to the stats when allocating a
87 // chunk, so we will do something similar here. But we will not account for
90 SpinMutexLock l(&StatsMutex);
91 Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
92 Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
95 return reinterpret_cast<void *>(Ptr);
98 void Deallocate(AllocatorStats *Stats, void *Ptr) {
99 SecondaryHeader *Header = getHeader(Ptr);
101 SpinMutexLock l(&StatsMutex);
102 Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
103 Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
105 UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
108 uptr GetActuallyAllocatedSize(void *Ptr) {
109 SecondaryHeader *Header = getHeader(Ptr);
110 // Deduct PageSize as MapSize includes the trailing guard page.
111 uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
112 return MapEnd - reinterpret_cast<uptr>(Ptr);
116 // A Secondary allocated chunk header contains the base of the mapping and
117 // its size, which comprises the guard pages.
118 struct SecondaryHeader {
122 // Check that sizeof(SecondaryHeader) is a multiple of MinAlignment.
123 COMPILER_CHECK((sizeof(SecondaryHeader) & (MinAlignment - 1)) == 0);
125 SecondaryHeader *getHeader(uptr Ptr) {
126 return reinterpret_cast<SecondaryHeader*>(Ptr - sizeof(SecondaryHeader));
128 SecondaryHeader *getHeader(const void *Ptr) {
129 return getHeader(reinterpret_cast<uptr>(Ptr));
132 const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
133 const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
135 SpinMutex StatsMutex;
138 #endif // SCUDO_ALLOCATOR_SECONDARY_H_