1 //=-- lsan_common.h -------------------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of LeakSanitizer.
11 // Private LSan header.
13 //===----------------------------------------------------------------------===//
18 #include "sanitizer_common/sanitizer_allocator.h"
19 #include "sanitizer_common/sanitizer_common.h"
20 #include "sanitizer_common/sanitizer_internal_defs.h"
21 #include "sanitizer_common/sanitizer_platform.h"
22 #include "sanitizer_common/sanitizer_stoptheworld.h"
23 #include "sanitizer_common/sanitizer_symbolizer.h"
25 // LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) thus
26 // supported for Linux only. Also, LSan doesn't like 32 bit architectures
27 // because of "small" (4 bytes) pointer size that leads to high false negative
28 // ratio on large leaks. But we still want to have it for some 32 bit arches
29 // (e.g. x86), see https://github.com/google/sanitizers/issues/403.
30 // To enable LeakSanitizer on a new architecture, one needs to implement the
31 // internal_clone function as well as (probably) adjust the TLS machinery for
32 // the new architecture inside the sanitizer library.
// Supported configurations, in order of preference:
//   1. 64-bit x86_64/mips64/aarch64/powerpc64 on Linux (non-Android) or Mac.
//   2. 32-bit i386 on Linux (non-Android) or Mac.
//   3. 32-bit ARM on Linux (non-Android) only.
33 #if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
34 (SANITIZER_WORDSIZE == 64) && \
35 (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
36 defined(__powerpc64__))
37 #define CAN_SANITIZE_LEAKS 1
38 #elif defined(__i386__) && \
39 (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
40 #define CAN_SANITIZE_LEAKS 1
41 #elif defined(__arm__) && \
42 SANITIZER_LINUX && !SANITIZER_ANDROID
43 #define CAN_SANITIZE_LEAKS 1
// NOTE(review): an '#else' line (and a trailing '#endif') appears to be
// elided from this excerpt before/after the fallback definition below --
// confirm against the full header.
45 #define CAN_SANITIZE_LEAKS 0
48 namespace __sanitizer {
// Chunk classification after the leak-detection reachability scan.
// NOTE(review): the 'enum ChunkTag {' opener and the remaining enumerators /
// closing brace are elided from this excerpt.
57 kDirectlyLeaked = 0, // default
58 kIndirectlyLeaked = 1,
// Sentinel thread id meaning "no/unknown thread".
63 const u32 kInvalidTid = (u32) -1;
// Flags struct members are generated by X-macro expansion of lsan_flags.inc:
// each LSAN_FLAG entry becomes one data member.
66 #define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
67 #include "lsan_flags.inc"
// Alignment assumed when scanning memory for pointers: 1 byte when the
// use_unaligned flag is set, otherwise natural pointer alignment.
// NOTE(review): the closing braces of this method/struct are elided here.
71 uptr pointer_alignment() const {
72 return use_unaligned ? 1 : sizeof(uptr);
// Global flag storage and its accessor.
76 extern Flags lsan_flags;
77 inline Flags *flags() { return &lsan_flags; }
// Registers all LSan flags with the common flag parser.
78 void RegisterLsanFlags(FlagParser *parser, Flags *f);
// Leak record field (enclosing struct declaration elided in this excerpt).
85 bool is_directly_leaked;
95 // Aggregates leaks by stack trace prefix.
// Starts with id counter 0 and initial capacity 1 for both vectors.
98 LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
// Records one leaked chunk under the given allocation stack trace id.
99 void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
// Prints the largest leaks, limited by max_leaks.
101 void ReportTopLeaks(uptr max_leaks);
// Marks leaks matching user-provided suppressions.
103 void ApplySuppressions();
// Number of leaks not matched by any suppression.
104 uptr UnsuppressedLeakCount();
108 void PrintReportForLeak(uptr index);
109 void PrintLeakedObjectsForLeak(uptr index);
112 InternalMmapVector<Leak> leaks_;
113 InternalMmapVector<LeakedObject> leaked_objects_;
// Worklist of reachable-but-not-yet-scanned addresses used by the scanner.
116 typedef InternalMmapVector<uptr> Frontier;
118 // Platform-specific functions.
// One-time platform setup for the leak checker.
119 void InitializePlatformSpecificModules();
// Scans global variable regions for pointers into the heap.
120 void ProcessGlobalRegions(Frontier *frontier);
// Handles allocations reachable only through platform-private data.
121 void ProcessPlatformSpecificAllocations(Frontier *frontier);
// Root regions registered by the user via the public LSan interface.
128 InternalMmapVector<RootRegion> const *GetRootRegions();
// Scans the intersection of a user-registered root region with
// [region_begin, region_end) for pointers, if that memory is readable.
// NOTE(review): the original excerpt had the '&region' parameter corrupted
// into the '®' character (HTML-entity mangling of '&reg'); restored here.
129 void ScanRootRegion(Frontier *frontier, RootRegion const &region,
130 uptr region_begin, uptr region_end, bool is_readable);
131 // Run stoptheworld while holding any platform-specific locks.
132 void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
// Scans [begin, end) for aligned pointer-sized values and tags the chunks
// they point into. region_type is used in verbose reports.
// NOTE(review): one parameter line of this declaration is elided here.
134 void ScanRangeForPointers(uptr begin, uptr end,
136 const char *region_type, ChunkTag tag);
// Like ScanRangeForPointers, specialized for global data ranges.
137 void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
// Result of attempting to ignore a heap object for leak reporting.
// NOTE(review): additional enumerators and the closing brace are elided
// from this excerpt.
139 enum IgnoreObjectResult {
140 kIgnoreObjectSuccess,
141 kIgnoreObjectAlreadyIgnored,
145 // Functions called from the parent tool.
// Returns the tool-provided default options string (may be null).
146 const char *MaybeCallLsanDefaultOptions();
147 void InitCommonLsan();
149 void DoRecoverableLeakCheckVoid();
150 void DisableCounterUnderflow();
// True if leak checking is disabled for the current thread.
151 bool DisabledInThisThread();
153 // Used to implement __lsan::ScopedDisabler.
154 void DisableInThisThread();
155 void EnableInThisThread();
156 // Can be used to ignore memory allocated by an intercepted
// RAII guard: disables leak tracking for the current thread for the
// guard's lifetime. NOTE(review): the struct's closing '};' is elided here.
158 struct ScopedInterceptorDisabler {
159 ScopedInterceptorDisabler() { DisableInThisThread(); }
160 ~ScopedInterceptorDisabler() { EnableInThisThread(); }
163 // According to Itanium C++ ABI array cookie is a one word containing
164 // size of allocated array.
// Returns true if the chunk at chunk_beg looks like an Itaniun-ABI array
// cookie whose stored element count is 0 and which is immediately followed
// by addr. NOTE(review): the 'uptr addr)' parameter line and the function's
// closing brace are elided from this excerpt.
165 static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
167 return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
168 *reinterpret_cast<uptr *>(chunk_beg) == 0;
171 // According to ARM C++ ABI array cookie consists of two words:
172 // struct array_cookie {
173 //   std::size_t element_size; // element_size != 0
174 //   std::size_t element_count;
// Same check for the two-word ARM-ABI cookie layout: element_count (second
// word) must be 0 and the cookie must end exactly at addr.
// NOTE(review): parameter continuation and closing brace elided here.
176 static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
178 return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
179 *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
182 // Special case for "new T[0]" where T is a type with DTOR.
183 // new T[0] will allocate a cookie (one or two words) for the array size (0)
184 // and store a pointer to the end of allocated chunk. The actual cookie layout
185 // varies between platforms according to their C++ ABI implementation.
// NOTE(review): the '#if'/'#else'/'#endif' lines selecting between the two
// ABI checks (and the closing brace) are elided from this excerpt -- only
// one of the two returns below is compiled on a given target.
186 inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
189 return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
191 return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
195 // The following must be implemented in the parent tool.
// Iterates over every allocator chunk, invoking callback(chunk, arg).
197 void ForEachChunk(ForEachChunkCallback callback, void *arg);
198 // Returns the address range occupied by the global allocator object.
199 void GetAllocatorGlobalRange(uptr *begin, uptr *end);
200 // Wrappers for allocator's ForceLock()/ForceUnlock().
201 void LockAllocator();
202 void UnlockAllocator();
203 // Returns true if [addr, addr + sizeof(void *)) is poisoned.
204 bool WordIsPoisoned(uptr addr);
205 // Wrappers for ThreadRegistry access.
206 void LockThreadRegistry();
207 void UnlockThreadRegistry();
// Fills in the stack/TLS/allocator-cache ranges for the thread with the
// given OS tid; returns false if the thread is unknown. Caller must hold
// the thread registry lock.
208 bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
209 uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
210 uptr *cache_end, DTLS **dtls);
// Invokes callback for extra stack ranges (e.g. fake stacks) of the thread.
// NOTE(review): the trailing parameter line of this declaration is elided.
211 void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
213 // If called from the main thread, updates the main thread's TID in the thread
214 // registry. We need this to handle processes that fork() without a subsequent
215 // exec(), which invalidates the recorded TID. To update it, we must call
216 // gettid() from the main thread. Our solution is to call this function before
217 // leak checking and also before every call to pthread_create() (to handle cases
218 // where leak checking is initiated from a non-main thread).
219 void EnsureMainThreadIDIsCorrect();
220 // If p points into a chunk that has been allocated to the user, returns its
221 // user-visible address. Otherwise, returns 0.
222 uptr PointsIntoChunk(void *p);
223 // Returns address of user-visible chunk contained in this allocator chunk.
224 uptr GetUserBegin(uptr chunk);
225 // Helper for __lsan_ignore_object().
226 IgnoreObjectResult IgnoreObjectLocked(const void *p);
228 // Return the linker module, if valid for the platform.
229 LoadedModule *GetLinker();
231 // Return true if LSan has finished leak checking and reported leaks.
232 bool HasReportedLeaks();
234 // Run platform-specific leak handlers.
234 // Run platform-specific leak handlers.
237 // Wrapper for chunk metadata operations.
240 // Constructor accepts address of user-visible chunk.
241 explicit LsanMetadata(uptr chunk);
242 bool allocated() const;
243 ChunkTag tag() const;
244 void set_tag(ChunkTag value);
245 uptr requested_size() const;
246 u32 stack_trace_id() const;
251 } // namespace __lsan
// Weak hooks the user program may define to customize LSan behavior.
// NOTE(review): the surrounding 'extern "C" { ... }' block is elided from
// this excerpt.
// Returns a flags string parsed in addition to LSAN_OPTIONS.
254 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
255 const char *__lsan_default_options();
// Nonzero return disables leak checking entirely.
257 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
258 int __lsan_is_turned_off();
// Returns extra suppression rules appended to the built-in set.
260 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
261 const char *__lsan_default_suppressions();
264 #endif // LSAN_COMMON_H