//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags(bool standalone) {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_root_regions = true;
  f->use_unaligned = false;
  f->use_poisoned = false;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers", "");
    ParseFlag(options, &f->use_globals, "use_globals", "");
    ParseFlag(options, &f->use_stacks, "use_stacks", "");
    ParseFlag(options, &f->use_tls, "use_tls", "");
    ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
    ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
    ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
    ParseFlag(options, &f->report_objects, "report_objects", "");
    ParseFlag(options, &f->resolution, "resolution", "");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks", "");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->log_pointers, "log_pointers", "");
    ParseFlag(options, &f->log_threads, "log_threads", "");
    ParseFlag(options, &f->exitcode, "exitcode", "");
  }

  // Set defaults for common flags (only in standalone mode) and parse
  // them from LSAN_OPTIONS.
  CommonFlags *cf = common_flags();
  if (standalone) {
    SetCommonFlagsDefaults(cf);
    cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
    cf->malloc_context_size = 30;
    cf->detect_leaks = true;
  }
  ParseCommonFlagsFromString(cf, options);
}
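
// Example (illustrative): the flags above are read from the LSAN_OPTIONS
// environment variable as colon-separated name=value pairs, e.g.:
//
//   LSAN_OPTIONS="report_objects=1:max_leaks=2:log_pointers=1" ./a.out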

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0);

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0);

static bool suppressions_inited = false;

void InitializeSuppressions() {
  CHECK(!suppressions_inited);
  SuppressionContext::InitIfNecessary();
  if (&__lsan_default_suppressions)
    SuppressionContext::Get()->Parse(__lsan_default_suppressions());
  suppressions_inited = true;
}
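
// Example (illustrative): a program may supply default suppressions by
// defining the weak hook checked above (declared in
// <sanitizer/lsan_interface.h>), using the "leak:<pattern>" syntax:
//
//   extern "C" const char *__lsan_default_suppressions() {
//     return "leak:libfontconfig\n";
//   }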

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan(bool standalone) {
  InitializeFlags(standalone);
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
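
// Example (illustrative): after
//
//   void *p = malloc(16);  // allocator chunk C
//
// any suitably aligned word in a scanned region whose value points into C
// (a copy of p on a stack, in a register, a global, or another chunk) causes
// C to be tagged and, when a frontier is supplied, queued for scanning of its
// own payload by FloodFillTag() below.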

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
                        &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// the frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  LOG_POINTERS("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}
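
// Example (illustrative): for a heap list "g -> A -> B" rooted in a global g,
// the flood fill above tags A and B kReachable. If g is nulled out before the
// check, A stays kDirectlyLeaked and MarkIndirectlyLeakedCb retags B as
// kIndirectlyLeaked, since it is reachable only through a leaked chunk.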

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  SuppressionContext::Get()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  param->success = true;
}

void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    if (flags()->exitcode) {
      if (common_flags()->coverage)
        __sanitizer_cov_dump();
      internal__exit(flags()->exitcode);
    }
  }
}
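
// Example (illustrative) shape of the report assembled above, assuming one
// unsuppressed direct leak:
//
//   =================================================================
//   ==1234==ERROR: LeakSanitizer: detected memory leaks
//
//   Direct leak of 16 byte(s) in 1 object(s) allocated from:
//     #0 0x... in malloc
//     #1 0x... in main
//
//   SUMMARY: LeakSanitizer: 16 byte(s) leaked in 1 allocation(s).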

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name;
  uptr module_offset;
  if (Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(addr, &module_name,
                                                           &module_offset) &&
      SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (SuppressionContext::Get()->Match(cur->info.function, SuppressionLeak,
                                         &s) ||
        SuppressionContext::Get()->Match(cur->info.file, SuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
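
// Example (illustrative) use from instrumented code, assuming
// <sanitizer/lsan_interface.h> is included:
//
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // p is tagged kIgnored and never reported,
//                             // even if it becomes unreachable.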

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}
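
// Example (illustrative): treat a custom memory pool as a source of live
// pointers while it is in use:
//
//   __lsan_register_root_region(pool, pool_size);
//   // ... pool holds the only pointers to some heap objects ...
//   __lsan_unregister_root_region(pool, pool_size);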

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
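
// Example (illustrative): allocations made while the counter is nonzero are
// excluded from leak reports; C++ code can use the RAII wrapper
// __lsan::ScopedDisabler from <sanitizer/lsan_interface.h> instead of the
// raw calls:
//
//   __lsan_disable();
//   leaky_init();  // allocations here are treated as intentionally leaked
//   __lsan_enable();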

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}
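
// Example (illustrative): a program can trigger the check early, e.g. before
// entering a long-running event loop; note that DoLeakCheck() runs at most
// once per process, so later calls (including the atexit one) are no-ops:
//
//   __lsan_do_leak_check();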

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {