//===-- sanitizer_coverage.cc ---------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage.
// This file implements run-time support for a poor man's coverage tool.
//
// Compiler instrumentation:
// For every interesting basic block the compiler injects the following code:
// if (Guard < 0) {
//    __sanitizer_cov(&Guard);
// }
// At module start-up time __sanitizer_cov_module_init sets the guards
// to consecutive negative numbers (-1, -2, -3, ...).
// It's fine to call __sanitizer_cov more than once for a given block.
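//
// For example (illustrative only), a module with three instrumented blocks
// has its guards initialized to -1, -2, -3; when the second block first
// executes, its guard flips from -2 to 2 and pc_array[1] records its PC.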
//
// Run-time:
//  - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC)
//    and atomically set Guard to -Guard.
//  - __sanitizer_cov_dump: dump the coverage data to disk.
//  For every module of the current process that has coverage data
//  this will create a file module_name.PID.sancov.
//
// The file format is simple: the first 8 bytes is the magic,
// one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the
// magic defines the size of the following offsets.
// The rest of the data is the offsets in the module.
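//
// For example, a 64-bit module with two visited PCs at offsets 0x4f0 and
// 0x6a2 would produce a 24-byte .sancov file (an illustrative sketch, not a
// real dump):
//   0xC0BFFFFFFFFFFF64   <- magic; last byte 0x64 selects 64-bit offsets
//   0x00000000000004f0   <- sorted offsets within the module
//   0x00000000000006a2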
//
// Eventually, this coverage implementation should be obsoleted by a more
// powerful general purpose Clang/LLVM coverage instrumentation.
// Consider this implementation a prototype.
//
// FIXME: support (or at least test with) dlclose.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"

static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;

static atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.

static atomic_uintptr_t coverage_counter;

// pc_array is the array containing the covered PCs.
// To make the pc_array thread- and async-signal-safe it has to be large enough.
// 128M counters "ought to be enough for anybody" (4M on 32-bit).
//
// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
// dumps the current memory layout to another file.
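//
// Example (typical invocation, assuming an ASan build):
//   ASAN_OPTIONS=coverage=1:coverage_direct=1 ./a.out
// writes <pid>.sancov.raw and a matching .sancov.map into coverage_dir.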

static bool cov_sandboxed = false;
static fd_t cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0;
static bool coverage_enabled = false;
static const char *coverage_dir;

namespace __sanitizer {

class CoverageData {
 public:
  void Init();
  void Enable();
  void Disable();
  void ReInit();
  void BeforeFork();
  void AfterFork(int child_pid);
  void Extend(uptr npcs);
  void Add(uptr pc, u32 *guard);
  void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                 uptr cache_size);
  void DumpCallerCalleePairs();
  void DumpTrace();
  void DumpAsBitSet();
  void DumpCounters();
  void DumpOffsets();
  void DumpAll();

  ALWAYS_INLINE
  void TraceBasicBlock(s32 *id);

  void InitializeGuardArray(s32 *guards);
  void InitializeGuards(s32 *guards, uptr n, const char *comp_unit_name,
                        uptr caller_pc);
  void InitializeCounters(u8 *counters, uptr n);
  void ReinitializeGuards();
  uptr GetNumberOf8bitCounters();
  uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);

  uptr *data();
  uptr size();

 private:
  void DirectOpen();
  void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);

  // Maximal size the pc array may ever grow to.
  // We MmapNoReserve this space to ensure that the array is contiguous.
  static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
      1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
      1 << 27);
  // The amount by which the file mapping for the pc array is grown.
  static const uptr kPcArrayMmapSize = 64 * 1024;

  // pc_array is allocated with MmapNoReserveOrDie and so it uses only as
  // much RAM as it really needs.
  uptr *pc_array;
  // Index of the first available pc_array slot.
  atomic_uintptr_t pc_array_index;
  // Size of the pc_array.
  atomic_uintptr_t pc_array_size;
  // Current file mapped size of the pc array.
  uptr pc_array_mapped_size;
  // Descriptor of the file mapped pc array.
  fd_t pc_fd;

  // Vector of coverage guard arrays, protected by mu.
  InternalMmapVectorNoCtor<s32*> guard_array_vec;

  struct NamedPcRange {
    const char *copied_module_name;
    uptr beg, end;  // elements [beg,end) in pc_array.
  };

  // Vector of module and compilation unit pc ranges.
  InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
  InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;

  struct CounterAndSize {
    u8 *counters;
    uptr n;
  };

  InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
  uptr num_8bit_counters;

  // Caller-Callee (cc) array, size and current index.
  static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
  uptr **cc_array;
  atomic_uintptr_t cc_array_index;
  atomic_uintptr_t cc_array_size;

  // Tracing event array, size and current pointer.
  // We record all events (basic block entries) in a global buffer of u32
  // values. Each such value is the index in pc_array.
  // So far the tracing is highly experimental:
  //  - not thread-safe;
  //  - does not support long traces;
  //  - not tuned for performance.
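  //
  // For example (a hypothetical trace): if the blocks at pc_array indices
  // 5, 7 and 5 execute in that order, tr_event_array receives {5, 7, 5}.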
  static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
  u32 *tr_event_array;
  uptr tr_event_array_size;
  u32 *tr_event_pointer;
  static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);

  StaticSpinMutex mu;
};

static CoverageData coverage_data;

void CovUpdateMapping(const char *path, uptr caller_pc = 0);

void CoverageData::DirectOpen() {
  InternalScopedString path(kMaxPathLength);
  internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
                    coverage_dir, internal_getpid());
  pc_fd = OpenFile(path.data(), RdWr);
  if (pc_fd == kInvalidFd) {
    Report("Coverage: failed to open %s for reading/writing\n", path.data());
    Die();
  }

  pc_array_mapped_size = 0;
  CovUpdateMapping(coverage_dir);
}

void CoverageData::Init() {
  pc_fd = kInvalidFd;
}

void CoverageData::Enable() {
  if (pc_array)
    return;
  pc_array = reinterpret_cast<uptr *>(
      MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  if (common_flags()->coverage_direct) {
    atomic_store(&pc_array_size, 0, memory_order_relaxed);
  } else {
    atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
  }

  cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
      sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
  atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
  atomic_store(&cc_array_index, 0, memory_order_relaxed);

  // Allocate tr_event_array with a guard page at the end.
  tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
      sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
      "CovInit::tr_event_array"));
  MprotectNoAccess(
      reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
      GetMmapGranularity());
  tr_event_array_size = kTrEventArrayMaxSize;
  tr_event_pointer = tr_event_array;

  num_8bit_counters = 0;
}

void CoverageData::InitializeGuardArray(s32 *guards) {
  Enable();  // Make sure coverage is enabled at this point.
  s32 n = guards[0];
  for (s32 j = 1; j <= n; j++) {
    uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
    guards[j] = -static_cast<s32>(idx + 1);
  }
}

void CoverageData::Disable() {
  if (pc_array) {
    UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
    pc_array = nullptr;
  }
  if (cc_array) {
    UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
    cc_array = nullptr;
  }
  if (tr_event_array) {
    UnmapOrDie(tr_event_array,
               sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
                   GetMmapGranularity());
    tr_event_array = nullptr;
    tr_event_pointer = nullptr;
  }
  if (pc_fd != kInvalidFd) {
    CloseFile(pc_fd);
    pc_fd = kInvalidFd;
  }
}

void CoverageData::ReinitializeGuards() {
  // Assuming single thread.
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  for (uptr i = 0; i < guard_array_vec.size(); i++)
    InitializeGuardArray(guard_array_vec[i]);
}

void CoverageData::ReInit() {
  Disable();
  if (coverage_enabled) {
    if (common_flags()->coverage_direct) {
      // In memory-mapped mode we must extend the new file to the known array
      // size.
      uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
      uptr npcs = size / sizeof(uptr);
      Enable();
      if (size) Extend(npcs);
      if (coverage_enabled) CovUpdateMapping(coverage_dir);
    } else {
      Enable();
    }
  }
  // Re-initialize the guards.
  // We are single-threaded now, no need to grab any lock.
  CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
  ReinitializeGuards();
}

void CoverageData::BeforeFork() {
  mu.Lock();
}

void CoverageData::AfterFork(int child_pid) {
  // We are single-threaded so it's OK to release the lock early.
  mu.Unlock();
  if (child_pid == 0) ReInit();
}

// Extend coverage PC array to fit additional npcs elements.
void CoverageData::Extend(uptr npcs) {
  if (!common_flags()->coverage_direct) return;
  SpinMutexLock l(&mu);

  uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
  size += npcs * sizeof(uptr);

  if (coverage_enabled && size > pc_array_mapped_size) {
    if (pc_fd == kInvalidFd) DirectOpen();
    CHECK_NE(pc_fd, kInvalidFd);

    uptr new_mapped_size = pc_array_mapped_size;
    while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
    CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);

    // Extend the file and map the new space at the end of pc_array.
    uptr res = internal_ftruncate(pc_fd, new_mapped_size);
    int err;
    if (internal_iserror(res, &err)) {
      Printf("failed to extend raw coverage file: %d\n", err);
      Die();
    }

    uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
    void *p = MapWritableFileToMemory((void *)next_map_base,
                                      new_mapped_size - pc_array_mapped_size,
                                      pc_fd, pc_array_mapped_size);
    CHECK_EQ((uptr)p, next_map_base);
    pc_array_mapped_size = new_mapped_size;
  }

  atomic_store(&pc_array_size, size, memory_order_release);
}

void CoverageData::InitializeCounters(u8 *counters, uptr n) {
  if (!counters) return;
  CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
  n = RoundUpTo(n, 16);  // The compiler must ensure that counters is 16-aligned.
  SpinMutexLock l(&mu);
  counters_vec.push_back({counters, n});
  num_8bit_counters += n;
}

void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
                                       uptr range_end) {
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  const char *module_name = sym->GetModuleNameForPc(caller_pc);
  if (!module_name) return;
  if (module_name_vec.empty() ||
      module_name_vec.back().copied_module_name != module_name)
    module_name_vec.push_back({module_name, range_beg, range_end});
  else
    module_name_vec.back().end = range_end;
}

void CoverageData::InitializeGuards(s32 *guards, uptr n,
                                    const char *comp_unit_name,
                                    uptr caller_pc) {
  // The array 'guards' has n+1 elements, we use the element zero
  // to store 'n'.
  CHECK_LT(n, 1 << 30);
  guards[0] = static_cast<s32>(n);
  InitializeGuardArray(guards);
  SpinMutexLock l(&mu);
  uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
  uptr range_beg = range_end - n;
  comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
  guard_array_vec.push_back(guards);
  UpdateModuleNameVec(caller_pc, range_beg, range_end);
}

static const uptr kBundleCounterBits = 16;

// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64
// we insert the global counter into the upper 16 bits of the PC.
uptr BundlePcAndCounter(uptr pc, uptr counter) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return pc;
  static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1;
  if (counter > kMaxCounter)
    counter = kMaxCounter;
  CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits));
  return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits));
}

uptr UnbundlePc(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return bundle;
  return (bundle << kBundleCounterBits) >> kBundleCounterBits;
}

uptr UnbundleCounter(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return 0;
  return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits);
}
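
// A worked example with hypothetical values: for pc = 0x0000004000001234 and
// counter = 5, BundlePcAndCounter() yields 0x0005004000001234;
// UnbundlePc() masks the upper 16 bits off again and UnbundleCounter()
// shifts them down, recovering 0x4000001234 and 5.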

// If guard is negative, atomically set it to -guard and store the PC in
// pc_array.
void CoverageData::Add(uptr pc, u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
  if (guard_value >= 0) return;  // Guard already disabled.

  atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
  if (!pc_array) return;

  uptr idx = -guard_value - 1;
  if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
    return;  // May happen after fork when pc_array_index becomes 0.
  CHECK_LT(idx * sizeof(uptr),
           atomic_load(&pc_array_size, memory_order_acquire));
  uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
  pc_array[idx] = BundlePcAndCounter(pc, counter);
}

// Registers a pair caller=>callee.
// When a given caller is seen for the first time, the callee_cache is added
// to the global array cc_array, callee_cache[0] is set to caller and
// callee_cache[1] is set to cache_size.
// Then we try to add callee to callee_cache [2,cache_size) if it is
// not there yet.
// If the cache is full we drop the callee (may want to fix this later).
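//
// For example (a hypothetical run with cache_size = 16): after caller A calls
// B and then C, callee_cache is {A, 16, B, C, 0, ...} and cc_array holds a
// pointer to it.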
void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                             uptr cache_size) {
  if (!cc_array) return;
  atomic_uintptr_t *atomic_callee_cache =
      reinterpret_cast<atomic_uintptr_t *>(callee_cache);
  uptr zero = 0;
  if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
                                     memory_order_seq_cst)) {
    uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
    CHECK_LT(idx * sizeof(uptr),
             atomic_load(&cc_array_size, memory_order_acquire));
    callee_cache[1] = cache_size;
    cc_array[idx] = callee_cache;
  }
  CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
  for (uptr i = 2; i < cache_size; i++) {
    uptr was = 0;
    if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
                                       memory_order_seq_cst)) {
      atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
      return;
    }
    if (was == callee)  // Already have this callee.
      return;
  }
}

uptr CoverageData::GetNumberOf8bitCounters() {
  return num_8bit_counters;
}

// Map every 8-bit counter to an 8-bit bitset and clear the counter.
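// Counter values are bucketed roughly logarithmically; e.g. a counter value
// of 5 falls into the [4,7] bucket and sets bit value 8:
//   [1,1] -> 1, [2,2] -> 2, [3,3] -> 4, [4,7] -> 8,
//   [8,15] -> 16, [16,31] -> 32, [32,127] -> 64, [128,255] -> 128.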
uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
  uptr num_new_bits = 0;
  uptr cur = 0;
  // For better speed we map 8 counters to 8 bytes of bitset at once.
  static const uptr kBatchSize = 8;
  CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
  for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
    u8 *c = counters_vec[i].counters;
    uptr n = counters_vec[i].n;
    CHECK_EQ(n % 16, 0);
    CHECK_EQ(cur % kBatchSize, 0);
    CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
    if (!bitset) {
      internal_bzero_aligned16(c, n);
      cur += n;
      continue;
    }
    for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
      CHECK_LT(cur, num_8bit_counters);
      u64 *pc64 = reinterpret_cast<u64*>(c + j);
      u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
      u64 c64 = *pc64;
      u64 old_bits_64 = *pb64;
      u64 new_bits_64 = old_bits_64;
      if (c64) {
        *pc64 = 0;
        for (uptr k = 0; k < kBatchSize; k++) {
          u64 x = (c64 >> (8 * k)) & 0xff;
          if (x) {
            u64 bit = 0;
            /**/ if (x >= 128) bit = 128;
            else if (x >= 32) bit = 64;
            else if (x >= 16) bit = 32;
            else if (x >= 8) bit = 16;
            else if (x >= 4) bit = 8;
            else if (x >= 3) bit = 4;
            else if (x >= 2) bit = 2;
            else if (x >= 1) bit = 1;
            u64 mask = bit << (8 * k);
            if (!(new_bits_64 & mask)) {
              num_new_bits++;
              new_bits_64 |= mask;
            }
          }
        }
        *pb64 = new_bits_64;
      }
    }
  }
  CHECK_EQ(cur, num_8bit_counters);
  return num_new_bits;
}

uptr *CoverageData::data() {
  return pc_array;
}

uptr CoverageData::size() {
  return atomic_load(&pc_array_index, memory_order_relaxed);
}

// Block layout for packed file format: header, followed by module name (no
// trailing zero), followed by data blob.
struct CovHeader {
  int pid;
  unsigned int module_name_length;
  unsigned int data_length;
};
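
// For example (an illustrative sketch): for module "libfoo.so" and a 16-byte
// blob written in one block, the stream is {pid, 9, 16} "libfoo.so" <blob>.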

static void CovWritePacked(int pid, const char *module, const void *blob,
                           unsigned int blob_size) {
  if (cov_fd == kInvalidFd) return;
  unsigned module_name_length = internal_strlen(module);
  CovHeader header = {pid, module_name_length, blob_size};

  if (cov_max_block_size == 0) {
    // Writing to a file. Just go ahead.
    WriteToFile(cov_fd, &header, sizeof(header));
    WriteToFile(cov_fd, module, module_name_length);
    WriteToFile(cov_fd, blob, blob_size);
  } else {
    // Writing to a socket. We want to split the data into appropriately sized
    // blocks.
    InternalScopedBuffer<char> block(cov_max_block_size);
    CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
    uptr header_size_with_module = sizeof(header) + module_name_length;
    CHECK_LT(header_size_with_module, cov_max_block_size);
    unsigned int max_payload_size =
        cov_max_block_size - header_size_with_module;
    char *block_pos = block.data();
    internal_memcpy(block_pos, &header, sizeof(header));
    block_pos += sizeof(header);
    internal_memcpy(block_pos, module, module_name_length);
    block_pos += module_name_length;
    char *block_data_begin = block_pos;
    const char *blob_pos = (const char *)blob;
    while (blob_size > 0) {
      unsigned int payload_size = Min(blob_size, max_payload_size);
      blob_size -= payload_size;
      internal_memcpy(block_data_begin, blob_pos, payload_size);
      blob_pos += payload_size;
      ((CovHeader *)block.data())->data_length = payload_size;
      WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size);
    }
  }
}

// If packed = false: <name>.<pid>.<sancov> (name = module name).
// If packed = true and name == 0: <pid>.<sancov>.<packed>.
// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
// user-supplied).
static fd_t CovOpenFile(InternalScopedString *path, bool packed,
                        const char *name, const char *extension = "sancov") {
  path->clear();
  if (!packed) {
    CHECK(name);
    path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
                 extension);
  } else {
    if (!name)
      path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
                   extension);
    else
      path->append("%s/%s.%s.packed", coverage_dir, name, extension);
  }
  error_t err;
  fd_t fd = OpenFile(path->data(), WrOnly, &err);
  if (fd == kInvalidFd)
    Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
           path->data(), err);
  return fd;
}

// Dump trace PCs and trace events into two separate files.
void CoverageData::DumpTrace() {
  uptr max_idx = tr_event_pointer - tr_event_array;
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  for (uptr i = 0, n = size(); i < n; i++) {
    const char *module_name = "<unknown>";
    uptr module_address = 0;
    sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
                                     &module_address);
    out.append("%s 0x%zx\n", module_name, module_address);
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "trace-points");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-compunits");
  if (fd == kInvalidFd) return;
  out.clear();
  for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
    out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-events");
  if (fd == kInvalidFd) return;
  uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
  u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
  // The trace file could be huge, and may not be written with a single syscall.
  while (bytes_to_write) {
    uptr actually_written;
    if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) &&
        actually_written <= bytes_to_write) {
      bytes_to_write -= actually_written;
      event_bytes += actually_written;
    } else {
      break;
    }
  }
  CloseFile(fd);
  VReport(1, " CovDump: Trace: %zd PCs written\n", size());
  VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}

// This function dumps the caller=>callee pairs into a file as a sequence of
// lines like "module_name offset".
void CoverageData::DumpCallerCalleePairs() {
  uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  uptr total = 0;
  for (uptr i = 0; i < max_idx; i++) {
    uptr *cc_cache = cc_array[i];
    CHECK(cc_cache);
    uptr caller = cc_cache[0];
    uptr n_callees = cc_cache[1];
    const char *caller_module_name = "<unknown>";
    uptr caller_module_address = 0;
    sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
                                     &caller_module_address);
    for (uptr j = 2; j < n_callees; j++) {
      uptr callee = cc_cache[j];
      if (!callee) break;
      total++;
      const char *callee_module_name = "<unknown>";
      uptr callee_module_address = 0;
      sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
                                       &callee_module_address);
      out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
                 caller_module_address, callee_module_name,
                 callee_module_address);
    }
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "caller-callee");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);
  VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
}

// Record the current PC into the event buffer.
// Every event is a u32 value (index in pc_array) so we compute
// it once and then cache it in the provided 'cache' storage.
//
// This function will eventually be inlined by the compiler.
void CoverageData::TraceBasicBlock(s32 *id) {
  // Will trap here if
  //  1. coverage is not enabled at run-time.
  //  2. The array tr_event_array is full.
  *tr_event_pointer = static_cast<u32>(*id - 1);
  tr_event_pointer++;
}

void CoverageData::DumpCounters() {
  if (!common_flags()->coverage_counters) return;
  uptr n = coverage_data.GetNumberOf8bitCounters();
  if (!n) return;
  InternalScopedBuffer<u8> bitset(n);
  coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data());
  InternalScopedString path(kMaxPathLength);

  for (uptr m = 0; m < module_name_vec.size(); m++) {
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    const char *base_name = StripModuleName(r.copied_module_name);
    fd_t fd =
        CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov");
    if (fd == kInvalidFd) return;
    WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg);
    CloseFile(fd);
    VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg,
            base_name);
  }
}

void CoverageData::DumpAsBitSet() {
  if (!common_flags()->coverage_bitset) return;
  if (!size()) return;
  InternalScopedBuffer<char> out(size());
  InternalScopedString path(kMaxPathLength);
  for (uptr m = 0; m < module_name_vec.size(); m++) {
    uptr n_set_bits = 0;
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    for (uptr i = r.beg; i < r.end; i++) {
      uptr pc = UnbundlePc(pc_array[i]);
      out[i] = pc ? '1' : '0';
      if (pc)
        n_set_bits++;
    }
    const char *base_name = StripModuleName(r.copied_module_name);
    fd_t fd = CovOpenFile(&path, /* packed */ false, base_name, "bitset-sancov");
    if (fd == kInvalidFd) return;
    WriteToFile(fd, out.data() + r.beg, r.end - r.beg);
    CloseFile(fd);
    VReport(1,
            " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
            r.end - r.beg, base_name, n_set_bits);
  }
}

void CoverageData::DumpOffsets() {
  auto sym = Symbolizer::GetOrInit();
  if (!common_flags()->coverage_pcs) return;
  CHECK_NE(sym, nullptr);
  InternalMmapVector<uptr> offsets(0);
  InternalScopedString path(kMaxPathLength);
  for (uptr m = 0; m < module_name_vec.size(); m++) {
    offsets.clear();
    uptr num_words_for_magic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
    for (uptr i = 0; i < num_words_for_magic; i++)
      offsets.push_back(0);
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    for (uptr i = r.beg; i < r.end; i++) {
      uptr pc = UnbundlePc(pc_array[i]);
      uptr counter = UnbundleCounter(pc_array[i]);
      if (!pc) continue;  // Not visited.
      uptr offset = 0;
      sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
      offsets.push_back(BundlePcAndCounter(offset, counter));
    }

    CHECK_GE(offsets.size(), num_words_for_magic);
    SortArray(offsets.data(), offsets.size());
    for (uptr i = 0; i < offsets.size(); i++)
      offsets[i] = UnbundlePc(offsets[i]);

    uptr num_offsets = offsets.size() - num_words_for_magic;
    u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
    CHECK_EQ(*magic_p, 0ULL);
    // FIXME: we may want to write 32-bit offsets even in 64-bit mode
    // if all the offsets are small enough.
    *magic_p = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;

    const char *module_name = StripModuleName(r.copied_module_name);
    if (cov_sandboxed) {
      if (cov_fd != kInvalidFd) {
        CovWritePacked(internal_getpid(), module_name, offsets.data(),
                       offsets.size() * sizeof(offsets[0]));
        VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets);
      }
    } else {
      // One file per module per process.
      fd_t fd = CovOpenFile(&path, false /* packed */, module_name);
      if (fd == kInvalidFd) continue;
      WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
      CloseFile(fd);
      VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
    }
  }
  if (cov_fd != kInvalidFd)
    CloseFile(cov_fd);
}

void CoverageData::DumpAll() {
  if (!coverage_enabled || common_flags()->coverage_direct) return;
  if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
    return;
  DumpAsBitSet();
  DumpCounters();
  DumpTrace();
  DumpOffsets();
  DumpCallerCalleePairs();
}

void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
  if (!args) return;
  if (!coverage_enabled) return;
  cov_sandboxed = args->coverage_sandboxed;
  if (!cov_sandboxed) return;
  cov_max_block_size = args->coverage_max_block_size;
  if (args->coverage_fd >= 0) {
    cov_fd = (fd_t)args->coverage_fd;
  } else {
    InternalScopedString path(kMaxPathLength);
    // Pre-open the file now. The sandbox won't allow us to do it later.
    cov_fd = CovOpenFile(&path, true /* packed */, 0);
  }
}

fd_t MaybeOpenCovFile(const char *name) {
  CHECK(name);
  if (!coverage_enabled) return kInvalidFd;
  InternalScopedString path(kMaxPathLength);
  return CovOpenFile(&path, true /* packed */, name);
}

void CovBeforeFork() {
  coverage_data.BeforeFork();
}

void CovAfterFork(int child_pid) {
  coverage_data.AfterFork(child_pid);
}

void InitializeCoverage(bool enabled, const char *dir) {
  if (coverage_enabled)
    return;  // May happen if two sanitizers enable coverage in the same process.
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.Init();
  if (enabled) coverage_data.Enable();
  if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
}

void ReInitializeCoverage(bool enabled, const char *dir) {
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.ReInit();
}

void CoverageUpdateMapping() {
  if (coverage_enabled)
    CovUpdateMapping(coverage_dir);
}

}  // namespace __sanitizer

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                    guard);
}
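
// Like __sanitizer_cov(), but checks the guard first so that already-visited
// blocks avoid the call into the runtime (CoverageData::Add would perform the
// same negative-guard check anyway).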
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  if (static_cast<s32>(
          __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
    __sanitizer_cov(guard);
}

SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
  coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                          callee, callee_cache16, 16);
}

SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
  coverage_enabled = true;
  coverage_dir = common_flags()->coverage_dir;
  coverage_data.Init();
}

SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
  coverage_data.DumpAll();
}

SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
                            const char *comp_unit_name) {
  coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
  coverage_data.InitializeCounters(counters, npcs);
  if (!common_flags()->coverage_direct) return;
  if (SANITIZER_ANDROID && coverage_enabled) {
    // dlopen/dlclose interceptors do not work on Android, so we rely on
    // Extend() calls to update .sancov.map.
    CovUpdateMapping(coverage_dir, GET_CALLER_PC());
  }
  coverage_data.Extend(npcs);
}

SANITIZER_INTERFACE_ATTRIBUTE
sptr __sanitizer_maybe_open_cov_file(const char *name) {
  return (sptr)MaybeOpenCovFile(name);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_coverage() {
  return atomic_load(&coverage_counter, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_func_enter(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_basic_block(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}
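
// Re-arms all guards and zeroes pc_array so that coverage accumulates from
// scratch, e.g. between runs in the same process.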
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_reset_coverage() {
  coverage_data.ReinitializeGuards();
  internal_bzero_aligned16(
      coverage_data.data(),
      RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_guards(uptr **data) {
  *data = coverage_data.data();
  return coverage_data.size();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
  return coverage_data.GetNumberOf8bitCounters();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
  return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}

// Default empty implementation (weak). Users should redefine it.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
}  // extern "C"