//===-- sanitizer_coverage.cc ---------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage.
// This file implements run-time support for a poor man's coverage tool.
//
// Compiler instrumentation:
// For every interesting basic block the compiler injects the following code:
// if (Guard < 0) {
//    __sanitizer_cov(&Guard);
// }
// At the module start up time __sanitizer_cov_module_init sets the guards
// to consecutive negative numbers (-1, -2, -3, ...).
// It's fine to call __sanitizer_cov more than once for a given block.
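//
// For illustration (example values, not actual code): a module with three
// instrumented blocks starts with guards {-1, -2, -3}; once the second
// block executes, its guard is flipped to +2 and the block's PC is
// recorded in pc_array slot 1.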
// Run-time:
//  - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC)
//    and atomically set Guard to -Guard.
//  - __sanitizer_cov_dump: dump the coverage data to disk.
//  For every module of the current process that has coverage data
//  this will create a file module_name.PID.sancov.
//
// The file format is simple: the first 8 bytes is the magic,
// one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the
// magic defines the size of the following offsets.
// The rest of the data is the offsets in the module.
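//
// An illustrative 64-bit .sancov file with three visited offsets (example
// values) is just four native-endian 8-byte words:
//   0xC0BFFFFFFFFFFF64   magic (last byte 0x64 => 64-bit offsets)
//   0x0000000000004420   offset of visited PC #1
//   0x0000000000004470   offset of visited PC #2
//   0x00000000000044E0   offset of visited PC #3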
//
// Eventually, this coverage implementation should be obsoleted by a more
// powerful general purpose Clang/LLVM coverage instrumentation.
// Consider this implementation as a prototype.
//
// FIXME: support (or at least test with) dlclose.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"

using namespace __sanitizer;

static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
static const uptr kNumWordsForMagic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
static const u64 kMagic = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;
static atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.

static atomic_uintptr_t coverage_counter;
static atomic_uintptr_t caller_callee_counter;

static void ResetGlobalCounters() {
  atomic_store(&coverage_counter, 0, memory_order_relaxed);
  atomic_store(&caller_callee_counter, 0, memory_order_relaxed);
}
// pc_array is the array containing the covered PCs.
// To make the pc_array thread- and async-signal-safe it has to be large enough.
// 128M counters "ought to be enough for anybody" (4M on 32-bit).
//
// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
// dumps the current memory layout to another file.

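// For example (the directory is an illustrative value), direct coverage is
// typically enabled like this:
//   ASAN_OPTIONS=coverage=1:coverage_direct=1:coverage_dir=/tmp/cov ./a.out
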
static bool cov_sandboxed = false;
static fd_t cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0;
static bool coverage_enabled = false;
static const char *coverage_dir;

namespace __sanitizer {

class CoverageData {
 public:
  void Init();
  void Enable();
  void Disable();
  void ReInit();
  void BeforeFork();
  void AfterFork(int child_pid);
  void Extend(uptr npcs);
  void Add(uptr pc, u32 *guard);
  void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                 uptr cache_size);
  void DumpCallerCalleePairs();
  void DumpTrace();
  void DumpAsBitSet();
  void DumpCounters();
  void DumpOffsets();
  void DumpAll();

  void TraceBasicBlock(u32 *id);

  void InitializeGuardArray(s32 *guards);
  void InitializeGuards(s32 *guards, uptr n, const char *module_name,
                        uptr caller_pc);
  void InitializeCounters(u8 *counters, uptr n);
  void ReinitializeGuards();
  uptr GetNumberOf8bitCounters();
  uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);

  uptr *data();
  uptr size() const;
  uptr *buffer() const { return pc_buffer; }

 private:
  struct NamedPcRange {
    const char *copied_module_name;
    uptr beg, end;  // elements [beg,end) in pc_array.
  };

  void DirectOpen();
  void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);
  void GetRangeOffsets(const NamedPcRange& r, Symbolizer* s,
                       InternalMmapVector<uptr>* offsets) const;

  // Maximal size pc array may ever grow.
  // We MmapNoReserve this space to ensure that the array is contiguous.
  static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
      1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
      1 << 27);
  // The amount file mapping for the pc array is grown by.
  static const uptr kPcArrayMmapSize = 64 * 1024;

  // pc_array is allocated with MmapNoReserveOrDie and so it uses only as
  // much RAM as it really needs.
  uptr *pc_array;
  // Index of the first available pc_array slot.
  atomic_uintptr_t pc_array_index;
  // Array size.
  atomic_uintptr_t pc_array_size;
  // Current file mapped size of the pc array.
  uptr pc_array_mapped_size;
  // Descriptor of the file mapped pc array.
  fd_t pc_fd;

  // Vector of coverage guard arrays, protected by mu.
  InternalMmapVectorNoCtor<s32*> guard_array_vec;

  // Vector of module and compilation unit pc ranges.
  InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
  InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;

  struct CounterAndSize {
    u8 *counters;
    uptr n;
  };

  InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
  uptr num_8bit_counters;

  // Caller-Callee (cc) array, size and current index.
  static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
  uptr **cc_array;
  atomic_uintptr_t cc_array_index;
  atomic_uintptr_t cc_array_size;

  // Tracing event array, size and current pointer.
  // We record all events (basic block entries) in a global buffer of u32
  // values. Each such value is the index in pc_array.
  // So far the tracing is highly experimental:
  //  - not thread-safe;
  //  - does not support long traces;
  //  - not tuned for performance.
  static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
  u32 *tr_event_array;
  uptr tr_event_array_size;
  u32 *tr_event_pointer;
  static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);

  uptr *pc_buffer;

  StaticSpinMutex mu;
};

static CoverageData coverage_data;

void CovUpdateMapping(const char *path, uptr caller_pc = 0);

void CoverageData::DirectOpen() {
  InternalScopedString path(kMaxPathLength);
  internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
                    coverage_dir, internal_getpid());
  pc_fd = OpenFile(path.data(), RdWr);
  if (pc_fd == kInvalidFd) {
    Report("Coverage: failed to open %s for reading/writing\n", path.data());
    Die();
  }

  pc_array_mapped_size = 0;
  CovUpdateMapping(coverage_dir);
}

void CoverageData::Init() {
  pc_fd = kInvalidFd;
}

void CoverageData::Enable() {
  if (pc_array)
    return;
  pc_array = reinterpret_cast<uptr *>(
      MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  if (common_flags()->coverage_direct) {
    atomic_store(&pc_array_size, 0, memory_order_relaxed);
  } else {
    atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
  }

  pc_buffer = nullptr;
  if (common_flags()->coverage_pc_buffer)
    pc_buffer = reinterpret_cast<uptr *>(MmapNoReserveOrDie(
        sizeof(uptr) * kPcArrayMaxSize, "CovInit::pc_buffer"));

  cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
      sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
  atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
  atomic_store(&cc_array_index, 0, memory_order_relaxed);

  // Allocate tr_event_array with a guard page at the end.
  tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
      sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
      "CovInit::tr_event_array"));
  MprotectNoAccess(
      reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
      GetMmapGranularity());
  tr_event_array_size = kTrEventArrayMaxSize;
  tr_event_pointer = tr_event_array;

  num_8bit_counters = 0;
}

void CoverageData::InitializeGuardArray(s32 *guards) {
  Enable();  // Make sure coverage is enabled at this point.
  s32 n = guards[0];
  for (s32 j = 1; j <= n; j++) {
    uptr idx = atomic_load_relaxed(&pc_array_index);
    atomic_store_relaxed(&pc_array_index, idx + 1);
    guards[j] = -static_cast<s32>(idx + 1);
  }
}

void CoverageData::Disable() {
  if (pc_array) {
    UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
    pc_array = nullptr;
  }
  if (cc_array) {
    UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
    cc_array = nullptr;
  }
  if (pc_buffer) {
    UnmapOrDie(pc_buffer, sizeof(uptr) * kPcArrayMaxSize);
    pc_buffer = nullptr;
  }
  if (tr_event_array) {
    UnmapOrDie(tr_event_array,
               sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
                   GetMmapGranularity());
    tr_event_array = nullptr;
    tr_event_pointer = nullptr;
  }
  if (pc_fd != kInvalidFd) {
    CloseFile(pc_fd);
    pc_fd = kInvalidFd;
  }
}

void CoverageData::ReinitializeGuards() {
  // Assuming single thread.
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  for (uptr i = 0; i < guard_array_vec.size(); i++)
    InitializeGuardArray(guard_array_vec[i]);
}

void CoverageData::ReInit() {
  Disable();
  if (coverage_enabled) {
    if (common_flags()->coverage_direct) {
      // In memory-mapped mode we must extend the new file to the known array
      // size.
      uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
      uptr npcs = size / sizeof(uptr);
      Enable();
      if (size) Extend(npcs);
      if (coverage_enabled) CovUpdateMapping(coverage_dir);
    } else {
      Enable();
    }
  }
  // Re-initialize the guards.
  // We are single-threaded now, no need to grab any lock.
  CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
  ReinitializeGuards();
}

void CoverageData::BeforeFork() {
  mu.Lock();
}

void CoverageData::AfterFork(int child_pid) {
  // We are single-threaded so it's OK to release the lock early.
  mu.Unlock();
  if (child_pid == 0) ReInit();
}

// Extend coverage PC array to fit additional npcs elements.
void CoverageData::Extend(uptr npcs) {
  if (!common_flags()->coverage_direct) return;
  SpinMutexLock l(&mu);

  uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
  size += npcs * sizeof(uptr);

  if (coverage_enabled && size > pc_array_mapped_size) {
    if (pc_fd == kInvalidFd) DirectOpen();
    CHECK_NE(pc_fd, kInvalidFd);

    uptr new_mapped_size = pc_array_mapped_size;
    while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
    CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);

    // Extend the file and map the new space at the end of pc_array.
    uptr res = internal_ftruncate(pc_fd, new_mapped_size);
    int err;
    if (internal_iserror(res, &err)) {
      Printf("failed to extend raw coverage file: %d\n", err);
      Die();
    }

    uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
    void *p = MapWritableFileToMemory((void *)next_map_base,
                                      new_mapped_size - pc_array_mapped_size,
                                      pc_fd, pc_array_mapped_size);
    CHECK_EQ((uptr)p, next_map_base);
    pc_array_mapped_size = new_mapped_size;
  }

  atomic_store(&pc_array_size, size, memory_order_release);
}

void CoverageData::InitializeCounters(u8 *counters, uptr n) {
  if (!counters) return;
  CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
  n = RoundUpTo(n, 16);  // The compiler must ensure that counters is 16-aligned.
  SpinMutexLock l(&mu);
  counters_vec.push_back({counters, n});
  num_8bit_counters += n;
}

void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
                                       uptr range_end) {
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  const char *module_name = sym->GetModuleNameForPc(caller_pc);
  if (!module_name) return;
  if (module_name_vec.empty() ||
      module_name_vec.back().copied_module_name != module_name)
    module_name_vec.push_back({module_name, range_beg, range_end});
  else
    module_name_vec.back().end = range_end;
}

void CoverageData::InitializeGuards(s32 *guards, uptr n,
                                    const char *comp_unit_name,
                                    uptr caller_pc) {
  // The array 'guards' has n+1 elements, we use the element zero
  // to store 'n'.
  CHECK_LT(n, 1 << 30);
  guards[0] = static_cast<s32>(n);
  InitializeGuardArray(guards);
  SpinMutexLock l(&mu);
  uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
  uptr range_beg = range_end - n;
  comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
  guard_array_vec.push_back(guards);
  UpdateModuleNameVec(caller_pc, range_beg, range_end);
}

static const uptr kBundleCounterBits = 16;

// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64
// we pack the global counter into the upper 16 bits of the PC word.
uptr BundlePcAndCounter(uptr pc, uptr counter) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return pc;
  static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1;
  if (counter > kMaxCounter)
    counter = kMaxCounter;
  CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits));
  return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits));
}

uptr UnbundlePc(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return bundle;
  return (bundle << kBundleCounterBits) >> kBundleCounterBits;
}

uptr UnbundleCounter(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return 0;
  return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits);
}

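// Illustrative round-trip (example values): with coverage_order_pcs=1 on
// 64-bit, pc = 0x4004f0 and counter = 5 bundle into
//   0x00050000004004f0
// i.e. the counter occupies bits [48,64) and the PC keeps bits [0,48);
// UnbundlePc() and UnbundleCounter() recover 0x4004f0 and 5 respectively.
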
// If guard is negative, atomically set it to -guard and store the PC in
// pc_array.
void CoverageData::Add(uptr pc, u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
  if (guard_value >= 0) return;

  atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
  if (!pc_array) return;

  uptr idx = -guard_value - 1;
  if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
    return;  // May happen after fork when pc_array_index becomes 0.
  CHECK_LT(idx * sizeof(uptr),
           atomic_load(&pc_array_size, memory_order_acquire));
  uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
  pc_array[idx] = BundlePcAndCounter(pc, counter);
  if (pc_buffer) pc_buffer[counter] = pc;
}

// Registers a pair caller=>callee.
// When a given caller is seen for the first time, the callee_cache is added
// to the global array cc_array, callee_cache[0] is set to caller and
// callee_cache[1] is set to cache_size.
// Then we try to add the callee to callee_cache[2, cache_size) if it is
// not there yet.
// If the cache is full we drop the callee (may want to fix this later).
void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                             uptr cache_size) {
  if (!cc_array) return;
  atomic_uintptr_t *atomic_callee_cache =
      reinterpret_cast<atomic_uintptr_t *>(callee_cache);
  uptr zero = 0;
  if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
                                     memory_order_seq_cst)) {
    uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
    CHECK_LT(idx * sizeof(uptr),
             atomic_load(&cc_array_size, memory_order_acquire));
    callee_cache[1] = cache_size;
    cc_array[idx] = callee_cache;
  }
  CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
  for (uptr i = 2; i < cache_size; i++) {
    uptr was = 0;
    if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
                                       memory_order_seq_cst)) {
      atomic_fetch_add(&caller_callee_counter, 1, memory_order_relaxed);
      return;
    }
    if (was == callee)  // Already have this callee.
      return;
  }
}

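// Illustrative cache evolution (example values): with cache_size = 16, an
// all-zero cache becomes {caller, 16, 0, ...} when the caller is first
// seen; subsequent calls CAS callees into slots [2,16), and a duplicate is
// detected when the CAS fails with was == callee.
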
uptr CoverageData::GetNumberOf8bitCounters() {
  return num_8bit_counters;
}

// Map every 8-bit counter to an 8-bit bitset and clear the counter.
uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
  uptr num_new_bits = 0;
  uptr cur = 0;
  // For better speed we map 8 counters to 8 bytes of bitset at once.
  static const uptr kBatchSize = 8;
  CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
  for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
    u8 *c = counters_vec[i].counters;
    uptr n = counters_vec[i].n;
    CHECK_EQ(n % 16, 0);
    CHECK_EQ(cur % kBatchSize, 0);
    CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
    if (!bitset) {
      internal_bzero_aligned16(c, n);
      cur += n;
      continue;
    }
    for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
      CHECK_LT(cur, num_8bit_counters);
      u64 *pc64 = reinterpret_cast<u64*>(c + j);
      u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
      u64 c64 = *pc64;
      u64 old_bits_64 = *pb64;
      u64 new_bits_64 = old_bits_64;
      if (c64) {
        *pc64 = 0;
        for (uptr k = 0; k < kBatchSize; k++) {
          u64 x = (c64 >> (8 * k)) & 0xff;
          if (x) {
            u64 bit = 0;
            /**/ if (x >= 128) bit = 128;
            else if (x >= 32) bit = 64;
            else if (x >= 16) bit = 32;
            else if (x >= 8) bit = 16;
            else if (x >= 4) bit = 8;
            else if (x >= 3) bit = 4;
            else if (x >= 2) bit = 2;
            else if (x >= 1) bit = 1;
            u64 mask = bit << (8 * k);
            if (!(new_bits_64 & mask)) {
              num_new_bits++;
              new_bits_64 |= mask;
            }
          }
        }
        *pb64 = new_bits_64;
      }
    }
  }
  CHECK_EQ(cur, num_8bit_counters);
  return num_new_bits;
}

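// The counter-to-bit mapping above is logarithmic; for reference (derived
// from the if-chain in Update8bitCounterBitsetAndClearCounters):
//   counter value: 1  2  3  4..7  8..15  16..31  32..127  128..255
//   bitset bit:    1  2  4  8     16     32      64       128
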
uptr *CoverageData::data() {
  return pc_array;
}

uptr CoverageData::size() const {
  return atomic_load(&pc_array_index, memory_order_relaxed);
}

// Block layout for packed file format: header, followed by module name (no
// trailing zero), followed by data blob.
struct CovHeader {
  int pid;
  unsigned int module_name_length;
  unsigned int data_length;
};

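// An illustrative packed block (example values) for pid 1234, module
// "libfoo.so" (9 chars) and a 24-byte blob:
//   CovHeader{1234, 9, 24} | "libfoo.so" | 24 blob bytes
// When writing to a socket, CovWritePacked() below splits the blob across
// several such blocks of at most cov_max_block_size bytes each.
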
static void CovWritePacked(int pid, const char *module, const void *blob,
                           unsigned int blob_size) {
  if (cov_fd == kInvalidFd) return;
  unsigned module_name_length = internal_strlen(module);
  CovHeader header = {pid, module_name_length, blob_size};

  if (cov_max_block_size == 0) {
    // Writing to a file. Just go ahead.
    WriteToFile(cov_fd, &header, sizeof(header));
    WriteToFile(cov_fd, module, module_name_length);
    WriteToFile(cov_fd, blob, blob_size);
  } else {
    // Writing to a socket. We want to split the data into appropriately sized
    // blocks.
    InternalScopedBuffer<char> block(cov_max_block_size);
    CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
    uptr header_size_with_module = sizeof(header) + module_name_length;
    CHECK_LT(header_size_with_module, cov_max_block_size);
    unsigned int max_payload_size =
        cov_max_block_size - header_size_with_module;
    char *block_pos = block.data();
    internal_memcpy(block_pos, &header, sizeof(header));
    block_pos += sizeof(header);
    internal_memcpy(block_pos, module, module_name_length);
    block_pos += module_name_length;
    char *block_data_begin = block_pos;
    const char *blob_pos = (const char *)blob;
    while (blob_size > 0) {
      unsigned int payload_size = Min(blob_size, max_payload_size);
      blob_size -= payload_size;
      internal_memcpy(block_data_begin, blob_pos, payload_size);
      blob_pos += payload_size;
      ((CovHeader *)block.data())->data_length = payload_size;
      WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size);
    }
  }
}

// If packed = false: <name>.<pid>.<sancov> (name = module name).
// If packed = true and name == 0: <pid>.<sancov>.<packed>.
// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
// user-supplied).
static fd_t CovOpenFile(InternalScopedString *path, bool packed,
                        const char *name, const char *extension = "sancov") {
  path->clear();
  if (!packed) {
    CHECK(name);
    path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
                 extension);
  } else {
    if (!name)
      path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
                   extension);
    else
      path->append("%s/%s.%s.packed", coverage_dir, name, extension);
  }
  error_t err;
  fd_t fd = OpenFile(path->data(), WrOnly, &err);
  if (fd == kInvalidFd)
    Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
           path->data(), err);
  return fd;
}

// Dump trace PCs and trace events into two separate files.
void CoverageData::DumpTrace() {
  uptr max_idx = tr_event_pointer - tr_event_array;
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  for (uptr i = 0, n = size(); i < n; i++) {
    const char *module_name = "<unknown>";
    uptr module_address = 0;
    sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
                                     &module_address);
    out.append("%s 0x%zx\n", module_name, module_address);
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "trace-points");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-compunits");
  if (fd == kInvalidFd) return;
  out.clear();
  for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
    out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-events");
  if (fd == kInvalidFd) return;
  uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
  u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
  // The trace file could be huge, and may not be written with a single syscall.
  while (bytes_to_write) {
    uptr actually_written;
    if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) &&
        actually_written <= bytes_to_write) {
      bytes_to_write -= actually_written;
      event_bytes += actually_written;
    } else {
      break;
    }
  }
  CloseFile(fd);
  VReport(1, " CovDump: Trace: %zd PCs written\n", size());
  VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}

// This function dumps the caller=>callee pairs into a file as a sequence of
// lines like "module_name offset".
void CoverageData::DumpCallerCalleePairs() {
  uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  uptr total = 0;
  for (uptr i = 0; i < max_idx; i++) {
    uptr *cc_cache = cc_array[i];
    CHECK(cc_cache);
    uptr caller = cc_cache[0];
    uptr n_callees = cc_cache[1];
    const char *caller_module_name = "<unknown>";
    uptr caller_module_address = 0;
    sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
                                     &caller_module_address);
    for (uptr j = 2; j < n_callees; j++) {
      uptr callee = cc_cache[j];
      if (!callee) break;
      total++;
      const char *callee_module_name = "<unknown>";
      uptr callee_module_address = 0;
      sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
                                       &callee_module_address);
      out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
                 caller_module_address, callee_module_name,
                 callee_module_address);
    }
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "caller-callee");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);
  VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
}

// Record the current PC into the event buffer.
// Every event is a u32 value: the index of the basic block's guard in
// pc_array, i.e. *id - 1.
//
// This function will eventually be inlined by the compiler.
void CoverageData::TraceBasicBlock(u32 *id) {
  // Will trap here if
  //  1. coverage is not enabled at run-time.
  //  2. The array tr_event_array is full.
  *tr_event_pointer = *id - 1;
  tr_event_pointer++;
}

void CoverageData::DumpCounters() {
  if (!common_flags()->coverage_counters) return;
  uptr n = coverage_data.GetNumberOf8bitCounters();
  if (!n) return;
  InternalScopedBuffer<u8> bitset(n);
  coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data());
  InternalScopedString path(kMaxPathLength);

  for (uptr m = 0; m < module_name_vec.size(); m++) {
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    const char *base_name = StripModuleName(r.copied_module_name);
    fd_t fd =
        CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov");
    if (fd == kInvalidFd) return;
    WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg);
    CloseFile(fd);
    VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg,
            base_name);
  }
}

void CoverageData::DumpAsBitSet() {
  if (!common_flags()->coverage_bitset) return;
  if (!size()) return;
  InternalScopedBuffer<char> out(size());
  InternalScopedString path(kMaxPathLength);
  for (uptr m = 0; m < module_name_vec.size(); m++) {
    uptr n_set_bits = 0;
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    for (uptr i = r.beg; i < r.end; i++) {
      uptr pc = UnbundlePc(pc_array[i]);
      out[i] = pc ? '1' : '0';
      if (pc)
        n_set_bits++;
    }
    const char *base_name = StripModuleName(r.copied_module_name);
    fd_t fd = CovOpenFile(&path, /* packed */ false, base_name, "bitset-sancov");
    if (fd == kInvalidFd) return;
    WriteToFile(fd, out.data() + r.beg, r.end - r.beg);
    CloseFile(fd);
    VReport(1,
            " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
            r.end - r.beg, base_name, n_set_bits);
  }
}

void CoverageData::GetRangeOffsets(const NamedPcRange& r, Symbolizer* sym,
                                   InternalMmapVector<uptr>* offsets) const {
  offsets->clear();
  for (uptr i = 0; i < kNumWordsForMagic; i++)
    offsets->push_back(0);
  CHECK(r.copied_module_name);
  CHECK_LE(r.beg, r.end);
  CHECK_LE(r.end, size());
  for (uptr i = r.beg; i < r.end; i++) {
    uptr pc = UnbundlePc(pc_array[i]);
    uptr counter = UnbundleCounter(pc_array[i]);
    if (!pc) continue;  // Not visited.
    uptr offset = 0;
    sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
    offsets->push_back(BundlePcAndCounter(offset, counter));
  }

  CHECK_GE(offsets->size(), kNumWordsForMagic);
  SortArray(offsets->data(), offsets->size());
  for (uptr i = 0; i < offsets->size(); i++)
    (*offsets)[i] = UnbundlePc((*offsets)[i]);
}

static void GenerateHtmlReport(const InternalMmapVector<char *> &cov_files) {
  if (!common_flags()->html_cov_report) {
    return;
  }
  char *sancov_path = FindPathToBinary(common_flags()->sancov_path);
  if (sancov_path == nullptr) {
    return;
  }

  InternalMmapVector<char *> sancov_argv(cov_files.size() * 2 + 3);
  sancov_argv.push_back(sancov_path);
  sancov_argv.push_back(internal_strdup("-html-report"));
  auto argv_deleter = at_scope_exit([&] {
    for (uptr i = 0; i < sancov_argv.size(); ++i) {
      InternalFree(sancov_argv[i]);
    }
  });

  for (const auto &cov_file : cov_files) {
    sancov_argv.push_back(internal_strdup(cov_file));
  }

  ListOfModules modules;
  modules.init();
  for (const LoadedModule &module : modules) {
    sancov_argv.push_back(internal_strdup(module.full_name()));
  }
  sancov_argv.push_back(nullptr);

  // Invoke sancov to generate the html report.
  InternalScopedString report_path(kMaxPathLength);
  fd_t report_fd =
      CovOpenFile(&report_path, false /* packed */, GetProcessName(), "html");
  int pid = StartSubprocess(sancov_argv[0], sancov_argv.data(),
                            kInvalidFd /* stdin */, report_fd /* std_out */);
  if (pid > 0) {
    int result = WaitForProcess(pid);
    if (result == 0)
      Printf("coverage report generated to %s\n", report_path.data());
  }
}

void CoverageData::DumpOffsets() {
  auto sym = Symbolizer::GetOrInit();
  if (!common_flags()->coverage_pcs) return;
  CHECK_NE(sym, nullptr);
  InternalMmapVector<uptr> offsets(0);
  InternalScopedString path(kMaxPathLength);

  InternalMmapVector<char *> cov_files(module_name_vec.size());
  auto cov_files_deleter = at_scope_exit([&] {
    for (uptr i = 0; i < cov_files.size(); ++i) {
      InternalFree(cov_files[i]);
    }
  });

  for (uptr m = 0; m < module_name_vec.size(); m++) {
    auto r = module_name_vec[m];
    GetRangeOffsets(r, sym, &offsets);

    uptr num_offsets = offsets.size() - kNumWordsForMagic;
    u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
    CHECK_EQ(*magic_p, 0ULL);
    // FIXME: we may want to write 32-bit offsets even in 64-mode
    // if all the offsets are small enough.
    *magic_p = kMagic;

    const char *module_name = StripModuleName(r.copied_module_name);
    if (cov_sandboxed) {
      if (cov_fd != kInvalidFd) {
        CovWritePacked(internal_getpid(), module_name, offsets.data(),
                       offsets.size() * sizeof(offsets[0]));
        VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets);
      }
    } else {
      // One file per module per process.
      fd_t fd = CovOpenFile(&path, false /* packed */, module_name);
      if (fd == kInvalidFd) continue;
      WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
      CloseFile(fd);
      cov_files.push_back(internal_strdup(path.data()));
      VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
    }
  }
  if (cov_fd != kInvalidFd)
    CloseFile(cov_fd);

  GenerateHtmlReport(cov_files);
}

void CoverageData::DumpAll() {
  if (!coverage_enabled || common_flags()->coverage_direct) return;
  if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
    return;
  DumpAsBitSet();
  DumpCounters();
  DumpTrace();
  DumpOffsets();
  DumpCallerCalleePairs();
}

void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
  if (!args) return;
  if (!coverage_enabled) return;
  cov_sandboxed = args->coverage_sandboxed;
  if (!cov_sandboxed) return;
  cov_max_block_size = args->coverage_max_block_size;
  if (args->coverage_fd >= 0) {
    cov_fd = (fd_t)args->coverage_fd;
  } else {
    InternalScopedString path(kMaxPathLength);
    // Pre-open the file now. The sandbox won't allow us to do it later.
    cov_fd = CovOpenFile(&path, true /* packed */, nullptr);
  }
}

fd_t MaybeOpenCovFile(const char *name) {
  CHECK(name);
  if (!coverage_enabled) return kInvalidFd;
  InternalScopedString path(kMaxPathLength);
  return CovOpenFile(&path, true /* packed */, name);
}

void CovBeforeFork() {
  coverage_data.BeforeFork();
}

void CovAfterFork(int child_pid) {
  coverage_data.AfterFork(child_pid);
}

static void MaybeDumpCoverage() {
  if (common_flags()->coverage)
    __sanitizer_cov_dump();
}

void InitializeCoverage(bool enabled, const char *dir) {
  if (coverage_enabled)
    return;  // May happen if two sanitizers enable coverage in the same process.
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.Init();
  if (enabled) coverage_data.Enable();
  if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
  AddDieCallback(MaybeDumpCoverage);
}

void ReInitializeCoverage(bool enabled, const char *dir) {
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.ReInit();
}

void CoverageUpdateMapping() {
  if (coverage_enabled)
    CovUpdateMapping(coverage_dir);
}

}  // namespace __sanitizer

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                    guard);
}

SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  if (static_cast<s32>(
          __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
    __sanitizer_cov(guard);
}

SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
  coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                          callee, callee_cache16, 16);
}

SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
  coverage_enabled = true;
  coverage_dir = common_flags()->coverage_dir;
  coverage_data.Init();
}

SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
  coverage_data.DumpAll();
}

SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
                            const char *comp_unit_name) {
  coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
  coverage_data.InitializeCounters(counters, npcs);
  if (!common_flags()->coverage_direct) return;
  if (SANITIZER_ANDROID && coverage_enabled) {
    // dlopen/dlclose interceptors do not work on Android, so we rely on
    // Extend() calls to update .sancov.map.
    CovUpdateMapping(coverage_dir, GET_CALLER_PC());
  }
  coverage_data.Extend(npcs);
}

SANITIZER_INTERFACE_ATTRIBUTE
sptr __sanitizer_maybe_open_cov_file(const char *name) {
  return (sptr)MaybeOpenCovFile(name);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_coverage() {
  return atomic_load(&coverage_counter, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_caller_callee_pairs() {
  return atomic_load(&caller_callee_counter, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_func_enter(u32 *id) {
  __sanitizer_cov_with_check(id);
  coverage_data.TraceBasicBlock(id);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_basic_block(u32 *id) {
  __sanitizer_cov_with_check(id);
  coverage_data.TraceBasicBlock(id);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_reset_coverage() {
  ResetGlobalCounters();
  coverage_data.ReinitializeGuards();
  internal_bzero_aligned16(
      coverage_data.data(),
      RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
}

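// A minimal sketch of how an external driver (hypothetical code, not part
// of this file) might use the reset/query interface to measure per-input
// coverage:
//   RunOneInput(data);                                  // hypothetical
//   uptr cov = __sanitizer_get_total_unique_coverage();
//   if (cov > best) { best = cov; SaveInput(data); }    // hypothetical
//   __sanitizer_reset_coverage();  // start the next input from scratch
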
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_guards(uptr **data) {
  *data = coverage_data.data();
  return coverage_data.size();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_pc_buffer(uptr **data) {
  *data = coverage_data.buffer();
  return __sanitizer_get_total_unique_coverage();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
  return coverage_data.GetNumberOf8bitCounters();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
  return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}

// Default empty implementations (weak). Users should redefine them.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch() {}
}  // extern "C"