//===-- xray_allocator.h ---------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Defines the allocator interface for an arena allocator, used primarily for
// the profiling runtime.
//
//===----------------------------------------------------------------------===//
16 #ifndef XRAY_ALLOCATOR_H
17 #define XRAY_ALLOCATOR_H
19 #include "sanitizer_common/sanitizer_common.h"
20 #include "sanitizer_common/sanitizer_internal_defs.h"
21 #include "sanitizer_common/sanitizer_mutex.h"
22 #include "sanitizer_common/sanitizer_posix.h"
23 #include "xray_utils.h"
29 // no-op on NetBSD (at least), unsupported flag on FreeBSD basically because unneeded
30 #define MAP_NORESERVE 0
/// The Allocator type hands out fixed-sized chunks of memory that are
/// cache-line aligned and sized. This is useful for placement of
/// performance-sensitive data in memory that's frequently accessed. The
/// allocator also self-limits the peak memory usage to a dynamically defined
/// maximum.
///
/// N is the lower-bound size of the block of memory to return from the
/// allocation function. N is used to compute the size of a block, which is
/// cache-line-size multiples worth of memory. We compute the size of a block by
/// determining how many cache lines worth of memory is required to subsume N.
///
/// The Allocator instance will manage its own memory acquired through mmap.
/// This severely constrains the platforms on which this can be used to POSIX
/// systems where mmap semantics are well-defined.
///
/// FIXME: Isolate the lower-level memory management to a different abstraction
/// that can be platform-specific.
52 template <size_t N> struct Allocator {
53 // The Allocator returns memory as Block instances.
55 /// Compute the minimum cache-line size multiple that is >= N.
56 static constexpr auto Size = nearest_boundary(N, kCacheLineSize);
61 const size_t MaxMemory{0};
62 void *BackingStore = nullptr;
63 void *AlignedNextBlock = nullptr;
64 size_t AllocatedBlocks = 0;
68 SpinMutexLock Lock(&Mutex);
69 if (UNLIKELY(BackingStore == nullptr)) {
70 BackingStore = reinterpret_cast<void *>(
71 internal_mmap(NULL, MaxMemory, PROT_READ | PROT_WRITE,
72 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, 0, 0));
73 if (BackingStore == MAP_FAILED) {
74 BackingStore = nullptr;
76 Report("XRay Profiling: Failed to allocate memory for allocator.\n");
80 AlignedNextBlock = BackingStore;
82 // Ensure that NextBlock is aligned appropriately.
83 auto BackingStoreNum = reinterpret_cast<uintptr_t>(BackingStore);
84 auto AlignedNextBlockNum = nearest_boundary(
85 reinterpret_cast<uintptr_t>(AlignedNextBlock), kCacheLineSize);
86 if (diff(AlignedNextBlockNum, BackingStoreNum) > ptrdiff_t(MaxMemory)) {
87 munmap(BackingStore, MaxMemory);
88 AlignedNextBlock = BackingStore = nullptr;
90 Report("XRay Profiling: Cannot obtain enough memory from "
91 "preallocated region.\n");
95 AlignedNextBlock = reinterpret_cast<void *>(AlignedNextBlockNum);
97 // Assert that AlignedNextBlock is cache-line aligned.
98 DCHECK_EQ(reinterpret_cast<uintptr_t>(AlignedNextBlock) % kCacheLineSize,
102 if ((AllocatedBlocks * Block::Size) >= MaxMemory)
105 // Align the pointer we'd like to return to an appropriate alignment, then
106 // advance the pointer from where to start allocations.
107 void *Result = AlignedNextBlock;
108 AlignedNextBlock = reinterpret_cast<void *>(
109 reinterpret_cast<char *>(AlignedNextBlock) + N);
115 explicit Allocator(size_t M)
116 : MaxMemory(nearest_boundary(M, kCacheLineSize)) {}
118 Block Allocate() { return {Alloc()}; }
120 ~Allocator() NOEXCEPT {
121 if (BackingStore != nullptr) {
122 internal_munmap(BackingStore, MaxMemory);
127 } // namespace __xray
129 #endif // XRAY_ALLOCATOR_H