//=-- lsan_common.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
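
// Example (illustrative, not part of the build): the flags declared in
// lsan_flags.inc are normally supplied through the LSAN_OPTIONS environment
// variable and parsed by the FlagParser registered above, e.g.
//   LSAN_OPTIONS=log_pointers=1:log_threads=1:report_objects=1 ./a.out
// log_pointers, log_threads and report_objects are flags referenced elsewhere
// in this file; the binary name is hypothetical.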

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // definition.
  "leak:*pthread_exit*\n"
#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // TLS leak in some glibc versions, described in
  // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
  "leak:*tls_get_addr*\n";
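
// Example (illustrative): a user suppressions file, passed as
// LSAN_OPTIONS=suppressions=<path>, uses the same one-pattern-per-line format
// as kStdSuppressions above. Patterns are matched against module, function and
// source-file names (see GetSuppressionForAddr below):
//   leak:libfoo.so
//   leak:BarFunction
// libfoo.so and BarFunction are hypothetical names.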

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
  suppression_ctx->Parse(kStdSuppressions);
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVector<RootRegion> *root_regions;

InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical-form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  // Infer the effective virtual address width from a current frame address.
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
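
// Worked example (illustrative): on x86_64 a typical user-space value such as
// 0x00007f0012345678 is below 2^47, so (p >> 47) == 0 and the word is passed
// on to PointsIntoChunk(); a kernel-half value such as 0xffff800000000000
// fails the shift test and is rejected without touching allocator metadata.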

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
        // If the tls and cache ranges don't overlap, scan the full tls range;
        // otherwise, scan only the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
      if (dtls && !DTLSInDestruction(dtls)) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
      }
    }
  }
}

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, uptr prot) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end) return;
  bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               root_region.begin, root_region.begin + root_region.size,
               region_begin, region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    ScanRootRegion(frontier, root_region, begin, end, prot);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    ProcessRootRegion(frontier, (*root_regions)[i]);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct InvalidPCParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. If the caller pc is invalid or is within the linker,
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) {
  CHECK(arg);
  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           GetLinker()->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// On Linux, handles dynamically allocated TLS blocks by treating all chunks
// allocated from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
void ProcessPC(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  InvalidPCParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
  ForEachChunk(MarkInvalidPCCb, &arg);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  CHECK_EQ(0, frontier.size());
  ProcessPC(&frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      // Truncate the stack to its top |resolution| frames, so that leaks
      // differing only below that depth are merged into one report.
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}
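
// Example (illustrative) of the table printed above, assuming one suppression
// "leak:libfoo.so" matched a single 16-byte allocation:
//   -----------------------------------------------------
//   Suppressions used:
//     count      bytes template
//         1         16 libfoo.so
//   -----------------------------------------------------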

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (common_flags()->exitcode) {
    Die();
  }
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}
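
// Example (illustrative) of the report printed for one leak; the header comes
// from the Printf above and the frames from PrintStackTraceById(). Addresses
// and symbols are hypothetical:
//   Direct leak of 16 byte(s) in 1 object(s) allocated from:
//       #0 0x4af01a in malloc
//       #1 0x4f2bc3 in main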

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                           leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan

#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan; // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
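
// Example (illustrative): excluding an intentionally leaked heap object from
// reports. The pointer must come from the instrumented allocator:
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // tagged kIgnored at the next leak check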

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}
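
// Example (illustrative): making a custom memory area visible to the leak
// scanner. g_arena and kArenaSize are hypothetical user definitions:
//   __lsan_register_root_region(g_arena, kArenaSize);
//   // ... pointers stored in the arena now keep chunks reachable ...
//   __lsan_unregister_root_region(g_arena, kArenaSize);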

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}
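
// Example (illustrative): __lsan_disable()/__lsan_enable() bracket code whose
// allocations should not be reported. Calls must be balanced, otherwise
// DisableCounterUnderflow() above aborts. leaky_init() is hypothetical:
//   __lsan_disable();
//   leaky_init();
//   __lsan_enable();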

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}
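
// Example (illustrative): a long-running process can poll for leaks without
// terminating, since the recoverable variant only returns a status:
//   if (__lsan_do_recoverable_leak_check())
//     fprintf(stderr, "LeakSanitizer: leaks detected\n");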

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
} // extern "C"