//=-- lsan_common.h -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//
17 #include "sanitizer_common/sanitizer_allocator.h"
18 #include "sanitizer_common/sanitizer_common.h"
19 #include "sanitizer_common/sanitizer_internal_defs.h"
20 #include "sanitizer_common/sanitizer_platform.h"
21 #include "sanitizer_common/sanitizer_stoptheworld.h"
22 #include "sanitizer_common/sanitizer_symbolizer.h"
// LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) on Linux.
// Also, LSan doesn't like 32 bit architectures
// because of "small" (4 bytes) pointer size that leads to high false negative
// ratio on large leaks. But we still want to have it for some 32 bit arches
// (e.g. x86), see https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
    (SANITIZER_WORDSIZE == 64) &&                               \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
     defined(__powerpc64__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
    (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && \
    SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD
#define CAN_SANITIZE_LEAKS 1
#else
// Unsupported platform/arch combination: build LSan as a no-op.
#define CAN_SANITIZE_LEAKS 0
#endif
49 namespace __sanitizer {
// Chunk classification values (the enum's declaration and remaining
// enumerators are elided in this copy — verify against the full header).
59 kDirectlyLeaked = 0, // default
60 kIndirectlyLeaked = 1,
// Sentinel meaning "no/unknown thread id".
65 const u32 kInvalidTid = (u32) -1;
// X-macro expansion: declares one struct field per flag listed in
// lsan_flags.inc (Type Name;).
68 #define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
69 #include "lsan_flags.inc"
73 uptr pointer_alignment() const {
74 return use_unaligned ? 1 : sizeof(uptr);
78 extern Flags lsan_flags;
79 inline Flags *flags() { return &lsan_flags; }
80 void RegisterLsanFlags(FlagParser *parser, Flags *f);
// NOTE(review): the enclosing struct/class headers and several member lines
// are elided in this copy; code below is kept verbatim.
87 bool is_directly_leaked;
97 // Aggregates leaks by stack trace prefix.
// NOTE(review): AddLeakedChunk's parameter list appears truncated here —
// verify the trailing parameters against the full header.
101 void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
103 void ReportTopLeaks(uptr max_leaks);
// Suppression handling: drop leaks matching user suppressions; count the rest.
105 void ApplySuppressions();
106 uptr UnsuppressedLeakCount();
109 void PrintReportForLeak(uptr index);
110 void PrintLeakedObjectsForLeak(uptr index);
// Storage for aggregated leaks and their individual leaked objects.
113 InternalMmapVector<Leak> leaks_;
114 InternalMmapVector<LeakedObject> leaked_objects_;
117 typedef InternalMmapVector<uptr> Frontier;
119 // Platform-specific functions.
120 void InitializePlatformSpecificModules();
121 void ProcessGlobalRegions(Frontier *frontier);
122 void ProcessPlatformSpecificAllocations(Frontier *frontier);
129 InternalMmapVector<RootRegion> const *GetRootRegions();
130 void ScanRootRegion(Frontier *frontier, RootRegion const ®ion,
131 uptr region_begin, uptr region_end, bool is_readable);
132 // Run stoptheworld while holding any platform-specific locks, as well as the
133 // allocator and thread registry locks.
134 void LockStuffAndStopTheWorld(StopTheWorldCallback callback, void* argument);
136 void ScanRangeForPointers(uptr begin, uptr end,
138 const char *region_type, ChunkTag tag);
139 void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
// Result codes for ignoring a heap object (see IgnoreObjectLocked below).
// NOTE(review): the enum's closing brace (and possibly further enumerators)
// are elided in this copy.
141 enum IgnoreObjectResult {
142 kIgnoreObjectSuccess,
143 kIgnoreObjectAlreadyIgnored,
147 // Functions called from the parent tool.
// Presumably forwards to the weak __lsan_default_options() hook if the user
// defined one — confirm against lsan_common.cpp.
148 const char *MaybeCallLsanDefaultOptions();
149 void InitCommonLsan();
151 void DoRecoverableLeakCheckVoid();
152 void DisableCounterUnderflow();
// Whether leak checking is currently disabled in the calling thread (paired
// with DisableInThisThread/EnableInThisThread below).
153 bool DisabledInThisThread();
155 // Used to implement __lsan::ScopedDisabler.
156 void DisableInThisThread();
157 void EnableInThisThread();
158 // Can be used to ignore memory allocated by an intercepted
160 struct ScopedInterceptorDisabler {
161 ScopedInterceptorDisabler() { DisableInThisThread(); }
162 ~ScopedInterceptorDisabler() { EnableInThisThread(); }
165 // According to Itanium C++ ABI array cookie is a one word containing
166 // size of allocated array.
167 static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
169 return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
170 *reinterpret_cast<uptr *>(chunk_beg) == 0;
173 // According to ARM C++ ABI array cookie consists of two words:
174 // struct array_cookie {
175 // std::size_t element_size; // element_size != 0
176 // std::size_t element_count;
178 static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
180 return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
181 *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
184 // Special case for "new T[0]" where T is a type with DTOR.
185 // new T[0] will allocate a cookie (one or two words) for the array size (0)
186 // and store a pointer to the end of allocated chunk. The actual cookie layout
187 // varies between platforms according to their C++ ABI implementation.
188 inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
191 return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
193 return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
197 // The following must be implemented in the parent tool.
// Invokes `callback(chunk, arg)` for every allocator chunk.
199 void ForEachChunk(ForEachChunkCallback callback, void *arg);
200 // Returns the address range occupied by the global allocator object.
201 void GetAllocatorGlobalRange(uptr *begin, uptr *end);
202 // Wrappers for allocator's ForceLock()/ForceUnlock().
203 void LockAllocator();
204 void UnlockAllocator();
205 // Returns true if [addr, addr + sizeof(void *)) is poisoned.
206 bool WordIsPoisoned(uptr addr);
207 // Wrappers for ThreadRegistry access.
208 void LockThreadRegistry();
209 void UnlockThreadRegistry();
210 ThreadRegistry *GetThreadRegistryLocked();
// Retrieves the stack, TLS, allocator-cache and DTLS ranges for the thread
// with the given OS id; presumably returns false if the thread is unknown —
// confirm against the implementation.
211 bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
212 uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
213 uptr *cache_end, DTLS **dtls);
214 void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
216 // If called from the main thread, updates the main thread's TID in the thread
217 // registry. We need this to handle processes that fork() without a subsequent
218 // exec(), which invalidates the recorded TID. To update it, we must call
219 // gettid() from the main thread. Our solution is to call this function before
220 // leak checking and also before every call to pthread_create() (to handle cases
221 // where leak checking is initiated from a non-main thread).
222 void EnsureMainThreadIDIsCorrect();
223 // If p points into a chunk that has been allocated to the user, returns its
224 // user-visible address. Otherwise, returns 0.
225 uptr PointsIntoChunk(void *p);
226 // Returns address of user-visible chunk contained in this allocator chunk.
227 uptr GetUserBegin(uptr chunk);
228 // Helper for __lsan_ignore_object().
229 IgnoreObjectResult IgnoreObjectLocked(const void *p);
231 // Return the linker module, if valid for the platform.
// May return null when no linker module is available — confirm per platform.
232 LoadedModule *GetLinker();
234 // Return true if LSan has finished leak checking and reported leaks.
235 bool HasReportedLeaks();
237 // Run platform-specific leak handlers.
// NOTE(review): the declaration this comment documents is elided in this copy.
240 // Wrapper for chunk metadata operations.
// NOTE(review): the class header (`class LsanMetadata {`), access specifiers,
// and private members are elided in this copy; only the visible interface is
// kept below.
243 // Constructor accepts address of user-visible chunk.
244 explicit LsanMetadata(uptr chunk);
// Chunk state accessors: allocation status, classification tag (readable and
// writable), requested size, and the allocation stack trace id.
245 bool allocated() const;
246 ChunkTag tag() const;
247 void set_tag(ChunkTag value);
248 uptr requested_size() const;
249 u32 stack_trace_id() const;
254 } // namespace __lsan
// User-overridable hooks: declared weak (SANITIZER_WEAK_ATTRIBUTE) so the
// instrumented program may define them to customize LSan behavior.
// NOTE(review): the enclosing `extern "C" {` block markers are elided in this
// copy.
257 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
258 const char *__lsan_default_options();
260 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
261 int __lsan_is_turned_off();
263 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
264 const char *__lsan_default_suppressions();
267 #endif // LSAN_COMMON_H