//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif

class ScudoLargeMmapAllocator {
 public:
  void Init() {
    PageSizeCached = GetPageSizeCached();
  }

  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
    const uptr UserSize = Size - AlignedChunkHeaderSize;
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr MapSize = Size + AlignedReservedAddressRangeSize;
    if (Alignment > MinAlignment)
      MapSize += Alignment;
    const uptr PageSize = PageSizeCached;
    MapSize = RoundUpTo(MapSize, PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
    MapSize += 2 * PageSize;
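    // As an illustration, assuming 4096-byte pages, minimal alignment, and a
    // sub-page aligned range header: a 1048576-byte Size rounds up to 1052672
    // bytes, and the two guard pages bring MapSize to 1060864 bytes.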

    ReservedAddressRange AddressRange;
    uptr MapBeg = AddressRange.Init(MapSize);
    if (MapBeg == ~static_cast<uptr>(0))
      return ReturnNullOrDieOnFailure::OnOOM();
    // A page-aligned pointer is assumed after that, so check it now.
    CHECK(IsAligned(MapBeg, PageSize));
    uptr MapEnd = MapBeg + MapSize;
    // The beginning of the user area for that allocation comes after the
    // initial guard page, and both headers. This is the pointer that has to
    // abide by alignment requirements.
    uptr UserBeg = MapBeg + PageSize + HeadersSize;
    uptr UserEnd = UserBeg + UserSize;
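    // At this point the layout of the mapping is:
    //   [guard page][ReservedAddressRange][chunk header][user area][guard page]
    // and UserBeg is the only address that has to satisfy the alignment.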

    // In the rare event of larger alignments, we will attempt to fit the mmap
    // area better and unmap extraneous memory. This will also ensure that the
    // offset and unused bytes fields of the header stay small.
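    // For example, assuming 4096-byte pages and a 65536-byte alignment, up to
    // 64 KB of now-superfluous pages can sit before the rounded-up UserBeg and
    // after UserEnd; the Unmap calls below return them to the system.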
    if (Alignment > MinAlignment) {
      if (!IsAligned(UserBeg, Alignment)) {
        UserBeg = RoundUpTo(UserBeg, Alignment);
        CHECK_GE(UserBeg, MapBeg);
        uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) -
            PageSize;
        CHECK_GE(NewMapBeg, MapBeg);
        if (NewMapBeg != MapBeg) {
          AddressRange.Unmap(MapBeg, NewMapBeg - MapBeg);
          MapBeg = NewMapBeg;
        }
        UserEnd = UserBeg + UserSize;
      }
      uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
      if (NewMapEnd != MapEnd) {
        AddressRange.Unmap(NewMapEnd, MapEnd - NewMapEnd);
        MapEnd = NewMapEnd;
      }
      MapSize = MapEnd - MapBeg;
    }

    CHECK_LE(UserEnd, MapEnd - PageSize);
    // Actually mmap the memory, preserving the guard pages on either side.
    CHECK_EQ(MapBeg + PageSize,
             AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
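    // Only the interior of the range is committed here; both guard pages stay
    // reserved and inaccessible, so a linear overflow or underflow out of the
    // chunk faults rather than corrupting adjacent memory.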
    const uptr Ptr = UserBeg - AlignedChunkHeaderSize;
    ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
    *StoredRange = AddressRange;
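    // The range is stored in the slot right below Ptr so that Deallocate and
    // GetActuallyAllocatedSize can retrieve it from the pointer alone (see
    // getReservedAddressRange below).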

    // The primary adds the whole class size to the stats when allocating a
    // chunk, so we will do something similar here. But we will not account for
    // the guard pages.
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
      Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
    }

    return reinterpret_cast<void *>(Ptr);
  }

  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    // Since we're unmapping the entirety of where the ReservedAddressRange
    // actually is, copy onto the stack.
    const uptr PageSize = PageSizeCached;
    ReservedAddressRange AddressRange = *getReservedAddressRange(Ptr);
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Sub(AllocatorStatAllocated, AddressRange.size() - 2 * PageSize);
      Stats->Sub(AllocatorStatMapped, AddressRange.size() - 2 * PageSize);
    }
    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
                       AddressRange.size());
  }

  uptr GetActuallyAllocatedSize(void *Ptr) {
    ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
    // Deduct PageSize as ReservedAddressRange size includes the trailing guard
    // page.
    uptr MapEnd = reinterpret_cast<uptr>(StoredRange->base()) +
        StoredRange->size() - PageSizeCached;
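    // Note that this spans from the backend pointer to the last mapped byte,
    // so it can exceed the originally requested Size due to page rounding.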
    return MapEnd - reinterpret_cast<uptr>(Ptr);
  }

 private:
  ReservedAddressRange *getReservedAddressRange(uptr Ptr) {
    return reinterpret_cast<ReservedAddressRange*>(
        Ptr - sizeof(ReservedAddressRange));
  }
  ReservedAddressRange *getReservedAddressRange(const void *Ptr) {
    return getReservedAddressRange(reinterpret_cast<uptr>(Ptr));
  }
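
  // Since MinAlignment is a power of two, adding (MinAlignment - 1) and then
  // masking with ~(MinAlignment - 1) rounds the size up to the next multiple
  // of MinAlignment, keeping the chunk header that follows the slot aligned.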
  static constexpr uptr AlignedReservedAddressRangeSize =
      (sizeof(ReservedAddressRange) + MinAlignment - 1) & ~(MinAlignment - 1);
  static constexpr uptr HeadersSize =
      AlignedReservedAddressRangeSize + AlignedChunkHeaderSize;

  uptr PageSizeCached;
  SpinMutex StatsMutex;
};

#endif  // SCUDO_ALLOCATOR_SECONDARY_H_