//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
/// of these conform to an LLVM "Allocator" concept which consists of an
/// Allocate method accepting a size and alignment, and a Deallocate method
/// accepting a pointer and size. Further, the LLVM "Allocator" concept has
/// overloads of Allocate and Deallocate for setting the size and alignment
/// based on the final type. These overloads are typically provided by a base
/// class template \c AllocatorBase.
///
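/// A minimal usage sketch of the concept (MallocAllocator here; any
/// conforming allocator works the same way):
///
/// \code
///   llvm::MallocAllocator A;
///   void *Mem = A.Allocate(64, 8);   // Core overload: raw size and alignment.
///   int *Ints = A.Allocate<int>(16); // Typed overload from AllocatorBase.
///   A.Deallocate(Ints, 16);          // Typed overload computes the size.
///   A.Deallocate(Mem, 64);
/// \endcode
///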
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <type_traits>
#include <utility>

namespace llvm {

/// \brief CRTP base class providing obvious overloads for the core \c
/// Allocate() methods of LLVM-style allocators.
///
/// This base class both documents the full public interface exposed by all
/// LLVM-style allocators, and redirects all of the overloads to a single core
/// set of methods which the derived class must define.
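///
/// A minimal sketch of a conforming derived class (the name MyMallocator is
/// illustrative only):
///
/// \code
///   class MyMallocator : public AllocatorBase<MyMallocator> {
///   public:
///     // The two core methods every LLVM-style allocator must define.
///     void *Allocate(size_t Size, size_t /*Alignment*/) {
///       return malloc(Size);
///     }
///     void Deallocate(const void *Ptr, size_t /*Size*/) {
///       free(const_cast<void *>(Ptr));
///     }
///     // Pull in the typed convenience overloads from the CRTP base.
///     using AllocatorBase<MyMallocator>::Allocate;
///     using AllocatorBase<MyMallocator>::Deallocate;
///   };
/// \endcode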
template <typename DerivedT> class AllocatorBase {
public:
  /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
  /// must be implemented by \c DerivedT.
  void *Allocate(size_t Size, size_t Alignment) {
#ifdef __clang__
    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                      &AllocatorBase::Allocate) !=
                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
                          &DerivedT::Allocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Allocate(size_t, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
  }

  /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
  /// allocator.
  void Deallocate(const void *Ptr, size_t Size) {
#ifdef __clang__
    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
                      &AllocatorBase::Deallocate) !=
                      static_cast<void (DerivedT::*)(const void *, size_t)>(
                          &DerivedT::Deallocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Deallocate(const void *, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
  }

  // The rest of these methods are helpers that redirect to one of the above
  // core methods.

  /// \brief Allocate space for a sequence of objects without constructing them.
  template <typename T> T *Allocate(size_t Num = 1) {
    return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
  }

  /// \brief Deallocate space for a sequence of objects without destroying them.
  template <typename T>
  typename std::enable_if<
      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
  Deallocate(T *Ptr, size_t Num = 1) {
    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
  }
};

class MallocAllocator : public AllocatorBase<MallocAllocator> {
public:
  void Reset() {}

  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
                                                size_t /*Alignment*/) {
    return malloc(Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Allocate;

  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Deallocate;

  void PrintStats() const {}
};

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
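///
/// A short usage sketch (sizes are illustrative):
///
/// \code
///   llvm::BumpPtrAllocator BPA;             // 4096-byte slabs by default.
///   void *Small = BPA.Allocate(128, 8);     // Bumps within the current slab.
///   void *Big = BPA.Allocate(1 << 20, 16);  // Above SizeThreshold: gets its
///                                           // own custom-sized slab.
///   BPA.Reset();  // Frees all but the first slab; outstanding pointers die.
/// \endcode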
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl
    : public AllocatorBase<
          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : Allocator(std::forward<T>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize),
        Allocator(std::move(Old.Allocator)) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    Allocator = std::move(RHS.Allocator);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// \brief Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// \brief Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originating here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// \brief The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// \brief The end of the current slab.
  char *End = nullptr;

  /// \brief The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// \brief Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// \brief How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// \brief The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  /// \brief The allocator instance we use to get slabs of memory.
  AllocatorT Allocator;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every 128 slabs allocated, we double the allocated size to
    // reduce allocation frequency, but saturate at multiplying the slab size
    // by 2^30.
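    // E.g. with the default 4096-byte SlabSize: slabs 0..127 are 4 KiB each,
    // slabs 128..255 are 8 KiB, slabs 256..383 are 16 KiB, and so on.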
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }

  /// \brief Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// \brief Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      Allocator.Deallocate(*I, AllocatedSlabSize);
    }
  }

  /// \brief Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      Allocator.Deallocate(Ptr, Size);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// \brief The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// \brief A BumpPtrAllocator that allows only elements of a specific type to
/// be allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator
/// is destroyed.
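///
/// A brief usage sketch (the Widget type is illustrative):
///
/// \code
///   llvm::SpecificBumpPtrAllocator<Widget> Pool;
///   Widget *W = new (Pool.Allocate()) Widget();
///   // ~Widget() runs for every object when Pool is destroyed or when
///   // DestroyAll() is called; no per-object delete is needed.
/// \endcode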
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object, then deallocate all but the
  /// current slab and reset the current pointer to its beginning, freeing all
  /// memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, alignof(T)));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, alignof(T));
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, alignof(T)), (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// \brief Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};

} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void *operator new(size_t Size,
                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                              SizeThreshold> &Allocator) {
  // A type whose member offset, offsetof(S, x), is the strictest fundamental
  // alignment the allocation might require.
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void *P;
    } x;
  };
  return Allocator.Allocate(
      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}

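// A usage sketch for the placement form above (Node is an illustrative type):
//
//   llvm::BumpPtrAllocator Alloc;
//   Node *N = new (Alloc) Node();
//   // Do not plain-delete N; if needed, call ~Node() explicitly and let the
//   // allocator reclaim all memory at once.
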
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void operator delete(
    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H