//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
13 #ifndef SANITIZER_ALLOCATOR_H
14 #error This file must be included inside sanitizer_allocator.h
// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//  When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
23 template <class PrimaryAllocator, class AllocatorCache,
24 class SecondaryAllocator> // NOLINT
25 class CombinedAllocator {
27 typedef typename SecondaryAllocator::FailureHandler FailureHandler;
29 void InitLinkerInitialized(s32 release_to_os_interval_ms) {
30 primary_.Init(release_to_os_interval_ms);
31 secondary_.InitLinkerInitialized();
32 stats_.InitLinkerInitialized();
35 void Init(s32 release_to_os_interval_ms) {
36 primary_.Init(release_to_os_interval_ms);
41 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
42 // Returning 0 on malloc(0) may break a lot of code.
45 if (size + alignment < size)
46 return FailureHandler::OnBadRequest();
47 uptr original_size = size;
48 // If alignment requirements are to be fulfilled by the frontend allocator
49 // rather than by the primary or secondary, passing an alignment lower than
50 // or equal to 8 will prevent any further rounding up, as well as the later
53 size = RoundUpTo(size, alignment);
54 // The primary allocator should return a 2^x aligned allocation when
55 // requested 2^x bytes, hence using the rounded up 'size' when being
56 // serviced by the primary (this is no longer true when the primary is
57 // using a non-fixed base address). The secondary takes care of the
58 // alignment without such requirement, and allocating 'size' would use
59 // extraneous memory, so we employ 'original_size'.
61 if (primary_.CanAllocate(size, alignment))
62 res = cache->Allocate(&primary_, primary_.ClassID(size));
64 res = secondary_.Allocate(&stats_, original_size, alignment);
66 return FailureHandler::OnOOM();
68 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
72 s32 ReleaseToOSIntervalMs() const {
73 return primary_.ReleaseToOSIntervalMs();
76 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
77 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
80 void Deallocate(AllocatorCache *cache, void *p) {
82 if (primary_.PointerIsMine(p))
83 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
85 secondary_.Deallocate(&stats_, p);
88 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
91 return Allocate(cache, new_size, alignment);
96 CHECK(PointerIsMine(p));
97 uptr old_size = GetActuallyAllocatedSize(p);
98 uptr memcpy_size = Min(new_size, old_size);
99 void *new_p = Allocate(cache, new_size, alignment);
101 internal_memcpy(new_p, p, memcpy_size);
102 Deallocate(cache, p);
106 bool PointerIsMine(void *p) {
107 if (primary_.PointerIsMine(p))
109 return secondary_.PointerIsMine(p);
112 bool FromPrimary(void *p) {
113 return primary_.PointerIsMine(p);
116 void *GetMetaData(const void *p) {
117 if (primary_.PointerIsMine(p))
118 return primary_.GetMetaData(p);
119 return secondary_.GetMetaData(p);
122 void *GetBlockBegin(const void *p) {
123 if (primary_.PointerIsMine(p))
124 return primary_.GetBlockBegin(p);
125 return secondary_.GetBlockBegin(p);
128 // This function does the same as GetBlockBegin, but is much faster.
129 // Must be called with the allocator locked.
130 void *GetBlockBeginFastLocked(void *p) {
131 if (primary_.PointerIsMine(p))
132 return primary_.GetBlockBegin(p);
133 return secondary_.GetBlockBeginFastLocked(p);
136 uptr GetActuallyAllocatedSize(void *p) {
137 if (primary_.PointerIsMine(p))
138 return primary_.GetActuallyAllocatedSize(p);
139 return secondary_.GetActuallyAllocatedSize(p);
142 uptr TotalMemoryUsed() {
143 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
146 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
148 void InitCache(AllocatorCache *cache) {
149 cache->Init(&stats_);
152 void DestroyCache(AllocatorCache *cache) {
153 cache->Destroy(&primary_, &stats_);
156 void SwallowCache(AllocatorCache *cache) {
157 cache->Drain(&primary_);
160 void GetStats(AllocatorStatCounters s) const {
165 primary_.PrintStats();
166 secondary_.PrintStats();
169 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
170 // introspection API.
172 primary_.ForceLock();
173 secondary_.ForceLock();
177 secondary_.ForceUnlock();
178 primary_.ForceUnlock();
181 // Iterate over all existing chunks.
182 // The allocator must be locked when calling this function.
183 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
184 primary_.ForEachChunk(callback, arg);
185 secondary_.ForEachChunk(callback, arg);
189 PrimaryAllocator primary_;
190 SecondaryAllocator secondary_;
191 AllocatorGlobalStats stats_;