//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
13 #ifndef SANITIZER_ALLOCATOR_H
14 #error This file must be included inside sanitizer_allocator.h
17 // This class implements a complete memory allocator by using two
18 // internal allocators:
19 // PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
20 // When allocating 2^x bytes it should return 2^x aligned chunk.
21 // PrimaryAllocator is used via a local AllocatorCache.
22 // SecondaryAllocator can allocate anything, but is not efficient.
23 template <class PrimaryAllocator, class AllocatorCache,
24 class SecondaryAllocator,
25 typename AddressSpaceViewTy = LocalAddressSpaceView> // NOLINT
26 class CombinedAllocator {
28 using AddressSpaceView = AddressSpaceViewTy;
29 static_assert(is_same<AddressSpaceView,
30 typename PrimaryAllocator::AddressSpaceView>::value,
31 "PrimaryAllocator is using wrong AddressSpaceView");
32 static_assert(is_same<AddressSpaceView,
33 typename SecondaryAllocator::AddressSpaceView>::value,
34 "SecondaryAllocator is using wrong AddressSpaceView");
36 void InitLinkerInitialized(s32 release_to_os_interval_ms) {
37 stats_.InitLinkerInitialized();
38 primary_.Init(release_to_os_interval_ms);
39 secondary_.InitLinkerInitialized();
42 void Init(s32 release_to_os_interval_ms) {
44 primary_.Init(release_to_os_interval_ms);
48 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
49 // Returning 0 on malloc(0) may break a lot of code.
52 if (size + alignment < size) {
53 Report("WARNING: %s: CombinedAllocator allocation overflow: "
54 "0x%zx bytes with 0x%zx alignment requested\n",
55 SanitizerToolName, size, alignment);
58 uptr original_size = size;
59 // If alignment requirements are to be fulfilled by the frontend allocator
60 // rather than by the primary or secondary, passing an alignment lower than
61 // or equal to 8 will prevent any further rounding up, as well as the later
64 size = RoundUpTo(size, alignment);
65 // The primary allocator should return a 2^x aligned allocation when
66 // requested 2^x bytes, hence using the rounded up 'size' when being
67 // serviced by the primary (this is no longer true when the primary is
68 // using a non-fixed base address). The secondary takes care of the
69 // alignment without such requirement, and allocating 'size' would use
70 // extraneous memory, so we employ 'original_size'.
72 if (primary_.CanAllocate(size, alignment))
73 res = cache->Allocate(&primary_, primary_.ClassID(size));
75 res = secondary_.Allocate(&stats_, original_size, alignment);
77 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
81 s32 ReleaseToOSIntervalMs() const {
82 return primary_.ReleaseToOSIntervalMs();
85 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
86 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
89 void ForceReleaseToOS() {
90 primary_.ForceReleaseToOS();
93 void Deallocate(AllocatorCache *cache, void *p) {
95 if (primary_.PointerIsMine(p))
96 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
98 secondary_.Deallocate(&stats_, p);
101 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
104 return Allocate(cache, new_size, alignment);
106 Deallocate(cache, p);
109 CHECK(PointerIsMine(p));
110 uptr old_size = GetActuallyAllocatedSize(p);
111 uptr memcpy_size = Min(new_size, old_size);
112 void *new_p = Allocate(cache, new_size, alignment);
114 internal_memcpy(new_p, p, memcpy_size);
115 Deallocate(cache, p);
119 bool PointerIsMine(void *p) {
120 if (primary_.PointerIsMine(p))
122 return secondary_.PointerIsMine(p);
125 bool FromPrimary(void *p) {
126 return primary_.PointerIsMine(p);
129 void *GetMetaData(const void *p) {
130 if (primary_.PointerIsMine(p))
131 return primary_.GetMetaData(p);
132 return secondary_.GetMetaData(p);
135 void *GetBlockBegin(const void *p) {
136 if (primary_.PointerIsMine(p))
137 return primary_.GetBlockBegin(p);
138 return secondary_.GetBlockBegin(p);
141 // This function does the same as GetBlockBegin, but is much faster.
142 // Must be called with the allocator locked.
143 void *GetBlockBeginFastLocked(void *p) {
144 if (primary_.PointerIsMine(p))
145 return primary_.GetBlockBegin(p);
146 return secondary_.GetBlockBeginFastLocked(p);
149 uptr GetActuallyAllocatedSize(void *p) {
150 if (primary_.PointerIsMine(p))
151 return primary_.GetActuallyAllocatedSize(p);
152 return secondary_.GetActuallyAllocatedSize(p);
155 uptr TotalMemoryUsed() {
156 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
159 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
161 void InitCache(AllocatorCache *cache) {
162 cache->Init(&stats_);
165 void DestroyCache(AllocatorCache *cache) {
166 cache->Destroy(&primary_, &stats_);
169 void SwallowCache(AllocatorCache *cache) {
170 cache->Drain(&primary_);
173 void GetStats(AllocatorStatCounters s) const {
178 primary_.PrintStats();
179 secondary_.PrintStats();
182 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
183 // introspection API.
185 primary_.ForceLock();
186 secondary_.ForceLock();
190 secondary_.ForceUnlock();
191 primary_.ForceUnlock();
194 // Iterate over all existing chunks.
195 // The allocator must be locked when calling this function.
196 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
197 primary_.ForEachChunk(callback, arg);
198 secondary_.ForEachChunk(callback, arg);
202 PrimaryAllocator primary_;
203 SecondaryAllocator secondary_;
204 AllocatorGlobalStats stats_;