//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif
namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb

static const uptr kErrorMessageBufferSize = 1 << 16;
// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.
extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
#if SANITIZER_ANDROID
INLINE uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
INLINE uptr GetPageSizeCached() { return 4096; }
#else
uptr GetPageSize();
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();

int TgKill(pid_t pid, tid_t tid, int sig);
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates out of memory condition, in that
// case returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
// that case returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out of memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// inaccessible memory range.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
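// Usage sketch (illustrative only, not part of this interface): a typical
// guard-page pattern built from the primitives above; local names are
// hypothetical.
//   uptr page = GetPageSizeCached();
//   void *p = MmapOrDie(page, "guard example");  // dies on failure
//   MprotectNoAccess((uptr)p, page);             // any access now faults
//   UnmapOrDie(p, page);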
void MprotectMallocZones(void *addr, int prot);
// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};
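// Usage sketch (illustrative only): reserve a range, then commit and release
// a page-sized piece of it; the size and name are hypothetical.
//   ReservedAddressRange range;
//   uptr base = range.Init(1 << 20, "example range");
//   range.MapOrDie(base, GetPageSizeCached());
//   range.Unmap(base, GetPageSizeCached());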
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows tools to register a callback for LowLevelAllocator allocations.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
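// Usage sketch (illustrative only): LowLevelAllocator is typically paired
// with the placement operator new declared at the bottom of this header. The
// allocator must live in a linker-initialized global, and callers must
// serialize access externally; names below are hypothetical.
//   static LowLevelAllocator g_alloc;          // linker initialized
//   Foo *obj = new (g_alloc) Foo(arg);         // never freed individually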
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                   \
  do {                                                        \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__);    \
  } while (0)
#define VPrintf(level, ...)                                   \
  do {                                                        \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__);    \
  } while (0)
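// Usage sketch (illustrative only; variable names are hypothetical). Both
// macros expand to a single do/while statement, so they nest safely inside
// unbraced if/else.
//   VReport(1, "%s: initialized\n", SanitizerToolName);
//   VPrintf(2, "mapped %zd bytes\n", size);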
// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
void CheckMPROTECT();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks are run in the reverse order of registration. Tools
// are strongly advised to set up all callbacks during initialization, when
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
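// Registration sketch (illustrative only; the callback name is hypothetical):
//   static void RssLimitCallback(bool exceeded) {
//     Report("soft_rss_limit_mb %s\n", exceeded ? "exceeded" : "restored");
//   }
//   // Once, during tool initialization:
//   SetSoftRssLimitExceededCallback(RssLimitCallback);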
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
// Each sanitizer uses slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);
// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
void ReportMmapWriteExec(int prot);
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#ifdef _WIN64
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif
INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}
INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
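// Worked examples (illustrative) for the helpers above:
//   IsPowerOfTwo(64)        -> true   (64 & 63 == 0)
//   RoundUpToPowerOfTwo(33) -> 64
//   RoundUpTo(13, 8)        -> 16     ((13 + 7) & ~7)
//   RoundDownTo(13, 8)      -> 8
//   IsAligned(16, 8)        -> true
//   Log2(32)                -> 5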
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const { return size_; }
  const T *data() const { return data_; }
  T *data() { return data_; }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const { return data(); }
  T *begin() { return data(); }
  const T *end() const { return data() + size(); }
  T *end() { return data() + size(); }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}
template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
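// Usage sketch (illustrative only): a growable, mmap-backed vector of POD
// values; names are hypothetical.
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(0x1000);
//   addrs.push_back(0x2000);
//   for (uptr a : addrs) VPrintf(3, "addr: %zx\n", a);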
class InternalScopedString : public InternalMmapVector<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalMmapVector<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};
template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};
// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind == j)
        break;
      Swap(v[j], v[max_ind]);
    }
  }
}
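// Usage sketch (illustrative only): heap-sort a vector in place; a custom
// comparator yields descending order.
//   Sort(v.data(), v.size());  // ascending, via CompareLess<T>
//   Sort(v.data(), v.size(),
//        [](const u32 &a, const u32 &b) { return a > b; });  // descending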
// Works like std::lower_bound: finds the first element that is not less
// than the val.
template <class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
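// Usage sketch (illustrative only): binary-search a sorted vector for the
// first element that is >= 42; 'v' is hypothetical.
//   uptr idx = InternalLowerBound(v, 0, v.size(), (u32)42, CompareLess<u32>());
//   bool found = idx < v.size() && v[idx] == 42;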
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);
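// Usage sketch (illustrative only): slurp a pseudo-file into an mmap-backed
// buffer; the path is just an example. Note the NoCtor vector must be
// Initialize()d first.
//   InternalMmapVectorNoCtor<char> text;
//   text.Initialize(0);
//   if (ReadFileToVector("/proc/self/cmdline", &text))
//     VPrintf(2, "read %zd bytes\n", text.size());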
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64
};

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}
const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
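// Usage sketch (illustrative only): enumerate loaded modules and report the
// ones built with instrumentation.
//   ListOfModules modules;
//   modules.init();
//   for (const LoadedModule &mod : modules)
//     if (mod.instrumented())
//       VReport(2, "instrumented: %s\n", mod.full_name());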
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};
void WriteToSyslog(const char *buffer);
#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif
#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif
#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif
#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}
void *internal_start_thread(void *(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
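// Usage sketch (illustrative only), per the comment above: defeat the
// compiler's memset idiom recognition in a hand-rolled zeroing loop; 'p' and
// 'size' are hypothetical.
//   for (uptr i = 0; i < size; i++) {
//     ((char *)p)[i] = 0;
//     SanitizerBreakOptimization(p);  // loop no longer matches the idiom
//   }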
struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows us to distinguish between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};
void InitializePlatformEarly();
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}
// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif
struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};
// The default value for allocator_release_to_os_interval_ms common flag to
// indicate that sanitizer allocator should not attempt to release memory to OS.
const s32 kReleaseToOSIntervalNever = -1;
void CheckNoDeepBind(const char *filename, int flag);
// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);
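// Usage sketch (illustrative only): seed a PRNG, falling back to the clock
// if non-blocking entropy is unavailable.
//   u64 seed;
//   if (!GetRandom(&seed, sizeof(seed), /*blocking=*/false))
//     seed = MonotonicNanoTime();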
// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
INLINE u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}
template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}

  T *begin() { return begin_; }
  T *end() { return end_; }

 private:
  T *begin_ = nullptr;
  T *end_ = nullptr;
};
}  // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {  // NOLINT
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H