//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementation of some functions are provided in sanitizer_common, while
// others must be defined by run-time library itself.
//===----------------------------------------------------------------------===//
16 #ifndef SANITIZER_COMMON_H
17 #define SANITIZER_COMMON_H
19 #include "sanitizer_flags.h"
20 #include "sanitizer_interface_internal.h"
21 #include "sanitizer_internal_defs.h"
22 #include "sanitizer_libc.h"
23 #include "sanitizer_list.h"
24 #include "sanitizer_mutex.h"
26 #if defined(_MSC_VER) && !defined(__clang__)
27 extern "C" void _ReadWriteBarrier();
28 #pragma intrinsic(_ReadWriteBarrier)
31 namespace __sanitizer {
36 const uptr kWordSize = SANITIZER_WORDSIZE / 8;
37 const uptr kWordSizeInBits = 8 * kWordSize;
39 #if defined(__powerpc__) || defined(__powerpc64__)
40 const uptr kCacheLineSize = 128;
42 const uptr kCacheLineSize = 64;
45 const uptr kMaxPathLength = 4096;
47 const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
49 static const uptr kErrorMessageBufferSize = 1 << 16;
51 // Denotes fake PC values that come from JIT/JAVA/etc.
52 // For such PC values __tsan_symbolize_external() will be called.
53 const u64 kExternalPCBit = 1ULL << 60;
55 extern const char *SanitizerToolName; // Can be changed by the tool.
57 extern atomic_uint32_t current_verbosity;
58 INLINE void SetVerbosity(int verbosity) {
59 atomic_store(¤t_verbosity, verbosity, memory_order_relaxed);
61 INLINE int Verbosity() {
62 return atomic_load(¤t_verbosity, memory_order_relaxed);
66 extern uptr PageSizeCached;
67 INLINE uptr GetPageSizeCached() {
69 PageSizeCached = GetPageSize();
70 return PageSizeCached;
72 uptr GetMmapGranularity();
73 uptr GetMaxVirtualAddress();
77 void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
79 void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
80 uptr *tls_addr, uptr *tls_size);
83 void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
84 INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
85 return MmapOrDie(size, mem_type, /*raw_report*/ true);
87 void UnmapOrDie(void *addr, uptr size);
88 // Behaves just like MmapOrDie, but tolerates out of memory condition, in that
89 // case returns nullptr.
90 void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
91 void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
92 const char *name = nullptr);
93 void *MmapNoReserveOrDie(uptr size, const char *mem_type);
94 void *MmapFixedOrDie(uptr fixed_addr, uptr size);
95 // Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
96 // that case returns nullptr.
97 void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
98 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
99 void *MmapNoAccess(uptr size);
100 // Map aligned chunk of address space; size and alignment are powers of two.
101 // Dies on all but out of memory errors, in the latter case returns nullptr.
102 void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
103 const char *mem_type);
104 // Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
105 // unaccessible memory.
106 bool MprotectNoAccess(uptr addr, uptr size);
107 bool MprotectReadOnly(uptr addr, uptr size);
109 // Find an available address space.
110 uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
111 uptr *largest_gap_found);
113 // Used to check if we can map shadow memory to a fixed location.
114 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
115 // Releases memory pages entirely within the [beg, end] address range. Noop if
116 // the provided range does not contain at least one entire page.
117 void ReleaseMemoryPagesToOS(uptr beg, uptr end);
118 void IncreaseTotalMmap(uptr size);
119 void DecreaseTotalMmap(uptr size);
121 void NoHugePagesInRegion(uptr addr, uptr length);
122 void DontDumpShadowMemory(uptr addr, uptr length);
123 // Check if the built VMA size matches the runtime one.
125 void RunMallocHooks(const void *ptr, uptr size);
126 void RunFreeHooks(const void *ptr);
128 // InternalScopedBuffer can be used instead of large stack arrays to
129 // keep frame size low.
130 // FIXME: use InternalAlloc instead of MmapOrDie once
131 // InternalAlloc is made libc-free.
132 template <typename T>
133 class InternalScopedBuffer {
135 explicit InternalScopedBuffer(uptr cnt) {
137 ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
139 ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
140 T &operator[](uptr i) { return ptr_[i]; }
141 T *data() { return ptr_; }
142 uptr size() { return cnt_ * sizeof(T); }
147 // Disallow copies and moves.
148 InternalScopedBuffer(const InternalScopedBuffer &) = delete;
149 InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
150 InternalScopedBuffer(InternalScopedBuffer &&) = delete;
151 InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
154 class InternalScopedString : public InternalScopedBuffer<char> {
156 explicit InternalScopedString(uptr max_length)
157 : InternalScopedBuffer<char>(max_length), length_(0) {
160 uptr length() { return length_; }
165 void append(const char *format, ...);
171 // Simple low-level (mmap-based) allocator for internal use. Doesn't have
172 // constructor, so all instances of LowLevelAllocator should be
173 // linker initialized.
174 class LowLevelAllocator {
176 // Requires an external lock.
177 void *Allocate(uptr size);
179 char *allocated_end_;
180 char *allocated_current_;
182 typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
183 // Allows to register tool-specific callbacks for LowLevelAllocator.
184 // Passing NULL removes the callback.
185 void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
188 void RawWrite(const char *buffer);
189 bool ColorizeReports();
190 void RemoveANSIEscapeSequencesFromString(char *buffer);
191 void Printf(const char *format, ...);
192 void Report(const char *format, ...);
193 void SetPrintfAndReportCallback(void (*callback)(const char *));
194 #define VReport(level, ...) \
196 if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
198 #define VPrintf(level, ...) \
200 if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
203 // Can be used to prevent mixing error reports from different sanitizers.
204 extern StaticSpinMutex CommonSanitizerReportMutex;
207 void Write(const char *buffer, uptr length);
208 bool SupportsColors();
209 void SetReportPath(const char *path);
211 // Don't use fields directly. They are only declared public to allow
212 // aggregate initialization.
214 // Protects fields below.
216 // Opened file descriptor. Defaults to stderr. It may be equal to
217 // kInvalidFd, in which case new file will be opened when necessary.
219 // Path prefix of report file, set via __sanitizer_set_report_path.
220 char path_prefix[kMaxPathLength];
221 // Full path to report, obtained as <path_prefix>.PID
222 char full_path[kMaxPathLength];
223 // PID of the process that opened fd. If a fork() occurs,
224 // the PID of child will be different from fd_pid.
228 void ReopenIfNecessary();
230 extern ReportFile report_file;
232 extern uptr stoptheworld_tracer_pid;
233 extern uptr stoptheworld_tracer_ppid;
235 enum FileAccessMode {
241 // Returns kInvalidFd on error.
242 fd_t OpenFile(const char *filename, FileAccessMode mode,
243 error_t *errno_p = nullptr);
244 void CloseFile(fd_t);
246 // Return true on success, false on error.
247 bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
248 uptr *bytes_read = nullptr, error_t *error_p = nullptr);
249 bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
250 uptr *bytes_written = nullptr, error_t *error_p = nullptr);
252 bool RenameFile(const char *oldpath, const char *newpath,
253 error_t *error_p = nullptr);
255 // Scoped file handle closer.
257 explicit FileCloser(fd_t fd) : fd(fd) {}
258 ~FileCloser() { CloseFile(fd); }
262 bool SupportsColoredOutput(fd_t fd);
264 // Opens the file 'file_name" and reads up to 'max_len' bytes.
265 // The resulting buffer is mmaped and stored in '*buff'.
266 // The size of the mmaped region is stored in '*buff_size'.
267 // The total number of read bytes is stored in '*read_len'.
268 // Returns true if file was successfully opened and read.
269 bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
270 uptr *read_len, uptr max_len = 1 << 26,
271 error_t *errno_p = nullptr);
272 // Maps given file to virtual memory, and returns pointer to it
273 // (or NULL if mapping fails). Stores the size of mmaped region
275 void *MapFileToMemory(const char *file_name, uptr *buff_size);
276 void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);
278 bool IsAccessibleMemoryRange(uptr beg, uptr size);
280 // Error report formatting.
281 const char *StripPathPrefix(const char *filepath,
282 const char *strip_file_prefix);
283 // Strip the directories from the module name.
284 const char *StripModuleName(const char *module);
287 uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
288 uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
289 uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
290 const char *GetProcessName();
291 void UpdateProcessName();
292 void CacheBinaryName();
293 void DisableCoreDumperIfNecessary();
294 void DumpProcessMap();
295 void PrintModuleMap();
296 bool FileExists(const char *filename);
297 const char *GetEnv(const char *name);
298 bool SetEnv(const char *name, const char *value);
299 const char *GetPwd();
300 char *FindPathToBinary(const char *name);
301 bool IsPathSeparator(const char c);
302 bool IsAbsolutePath(const char *path);
303 // Starts a subprocess and returs its pid.
304 // If *_fd parameters are not kInvalidFd their corresponding input/output
305 // streams will be redirect to the file. The files will always be closed
306 // in parent process even in case of an error.
307 // The child process will close all fds after STDERR_FILENO
308 // before passing control to a program.
309 pid_t StartSubprocess(const char *filename, const char *const argv[],
310 fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd,
311 fd_t stderr_fd = kInvalidFd);
312 // Checks if specified process is still running
313 bool IsProcessRunning(pid_t pid);
314 // Waits for the process to finish and returns its exit code.
315 // Returns -1 in case of an error.
316 int WaitForProcess(pid_t pid);
322 bool StackSizeIsUnlimited();
323 uptr GetStackSizeLimitInBytes();
324 void SetStackSizeLimitInBytes(uptr limit);
325 bool AddressSpaceIsUnlimited();
326 void SetAddressSpaceUnlimited();
327 void AdjustStackSize(void *attr);
328 void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
329 void SetSandboxingCallback(void (*f)());
331 void InitializeCoverage(bool enabled, const char *coverage_dir);
337 void SleepForSeconds(int seconds);
338 void SleepForMillis(int millis);
340 int Atexit(void (*function)(void));
341 void SortArray(uptr *array, uptr size);
342 void SortArray(u32 *array, uptr size);
343 bool TemplateMatch(const char *templ, const char *str);
346 void NORETURN Abort();
349 CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
350 void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
351 const char *mmap_type, error_t err,
352 bool raw_report = false);
354 // Set the name of the current thread to 'name', return true on succees.
355 // The name may be truncated to a system-dependent limit.
356 bool SanitizerSetThreadName(const char *name);
357 // Get the name of the current thread (no more than max_len bytes),
358 // return true on succees. name should have space for at least max_len+1 bytes.
359 bool SanitizerGetThreadName(char *name, int max_len);
361 // Specific tools may override behavior of "Die" and "CheckFailed" functions
362 // to do tool-specific job.
363 typedef void (*DieCallbackType)(void);
365 // It's possible to add several callbacks that would be run when "Die" is
366 // called. The callbacks will be run in the opposite order. The tools are
367 // strongly recommended to setup all callbacks during initialization, when there
368 // is only a single thread.
369 bool AddDieCallback(DieCallbackType callback);
370 bool RemoveDieCallback(DieCallbackType callback);
372 void SetUserDieCallback(DieCallbackType callback);
374 typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
376 void SetCheckFailedCallback(CheckFailedCallbackType callback);
378 // Callback will be called if soft_rss_limit_mb is given and the limit is
379 // exceeded (exceeded==true) or if rss went down below the limit
380 // (exceeded==false).
381 // The callback should be registered once at the tool init time.
382 void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
384 // Functions related to signal handling.
385 typedef void (*SignalHandlerType)(int, void *, void *);
386 HandleSignalMode GetHandleSignalMode(int signum);
387 void InstallDeadlySignalHandlers(SignalHandlerType handler);
388 const char *DescribeSignalOrException(int signo);
389 // Alternative signal stack (POSIX-only).
390 void SetAlternateSignalStack();
391 void UnsetAlternateSignalStack();
393 // We don't want a summary too long.
394 const int kMaxSummaryLength = 1024;
395 // Construct a one-line string:
396 // SUMMARY: SanitizerToolName: error_message
397 // and pass it to __sanitizer_report_error_summary.
398 // If alt_tool_name is provided, it's used in place of SanitizerToolName.
399 void ReportErrorSummary(const char *error_message,
400 const char *alt_tool_name = nullptr);
401 // Same as above, but construct error_message as:
402 // error_type file:line[:column][ function]
403 void ReportErrorSummary(const char *error_type, const AddressInfo &info,
404 const char *alt_tool_name = nullptr);
405 // Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
406 void ReportErrorSummary(const char *error_type, const StackTrace *trace,
407 const char *alt_tool_name = nullptr);
// Math
// MSVC (without clang/GCC builtins) provides bit-scan intrinsics instead of
// __builtin_clz/__builtin_ctz; declare them here.
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#ifdef _WIN64
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif
421 INLINE uptr MostSignificantSetBitIndex(uptr x) {
423 unsigned long up; // NOLINT
424 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
426 up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
428 up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
430 #elif defined(_WIN64)
431 _BitScanReverse64(&up, x);
433 _BitScanReverse(&up, x);
438 INLINE uptr LeastSignificantSetBitIndex(uptr x) {
440 unsigned long up; // NOLINT
441 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
443 up = __builtin_ctzll(x);
445 up = __builtin_ctzl(x);
447 #elif defined(_WIN64)
448 _BitScanForward64(&up, x);
450 _BitScanForward(&up, x);
455 INLINE bool IsPowerOfTwo(uptr x) {
456 return (x & (x - 1)) == 0;
459 INLINE uptr RoundUpToPowerOfTwo(uptr size) {
461 if (IsPowerOfTwo(size)) return size;
463 uptr up = MostSignificantSetBitIndex(size);
464 CHECK_LT(size, (1ULL << (up + 1)));
465 CHECK_GT(size, (1ULL << up));
466 return 1ULL << (up + 1);
469 INLINE uptr RoundUpTo(uptr size, uptr boundary) {
470 RAW_CHECK(IsPowerOfTwo(boundary));
471 return (size + boundary - 1) & ~(boundary - 1);
474 INLINE uptr RoundDownTo(uptr x, uptr boundary) {
475 return x & ~(boundary - 1);
478 INLINE bool IsAligned(uptr a, uptr alignment) {
479 return (a & (alignment - 1)) == 0;
482 INLINE uptr Log2(uptr x) {
483 CHECK(IsPowerOfTwo(x));
484 return LeastSignificantSetBitIndex(x);
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}
498 INLINE bool IsSpace(int c) {
499 return (c == ' ') || (c == '\n') || (c == '\t') ||
500 (c == '\f') || (c == '\r') || (c == '\v');
502 INLINE bool IsDigit(int c) {
503 return (c >= '0') && (c <= '9');
505 INLINE int ToLower(int c) {
506 return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
509 // A low-level vector based on mmap. May incur a significant memory overhead for
511 // WARNING: The current implementation supports only POD types.
513 class InternalMmapVectorNoCtor {
515 void Initialize(uptr initial_capacity) {
516 capacity_ = Max(initial_capacity, (uptr)1);
518 data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
521 UnmapOrDie(data_, capacity_ * sizeof(T));
523 T &operator[](uptr i) {
527 const T &operator[](uptr i) const {
531 void push_back(const T &element) {
532 CHECK_LE(size_, capacity_);
533 if (size_ == capacity_) {
534 uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
535 Resize(new_capacity);
537 internal_memcpy(&data_[size_++], &element, sizeof(T));
541 return data_[size_ - 1];
550 const T *data() const {
556 uptr capacity() const {
559 void resize(uptr new_size) {
561 if (new_size > size_) {
562 internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
567 void clear() { size_ = 0; }
568 bool empty() const { return size() == 0; }
570 const T *begin() const {
576 const T *end() const {
577 return data() + size();
580 return data() + size();
584 void Resize(uptr new_capacity) {
585 CHECK_GT(new_capacity, 0);
586 CHECK_LE(size_, new_capacity);
587 T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
588 "InternalMmapVector");
589 internal_memcpy(new_data, data_, size_ * sizeof(T));
592 UnmapOrDie(old_data, capacity_ * sizeof(T));
593 capacity_ = new_capacity;
602 class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
604 explicit InternalMmapVector(uptr initial_capacity) {
605 InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
607 ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
608 // Disallow evil constructors.
609 InternalMmapVector(const InternalMmapVector&);
610 void operator=(const InternalMmapVector&);
613 // HeapSort for arrays and InternalMmapVector.
614 template<class Container, class Compare>
615 void InternalSort(Container *v, uptr size, Compare comp) {
618 // Stage 1: insert elements to the heap.
619 for (uptr i = 1; i < size; i++) {
621 for (j = i; j > 0; j = p) {
623 if (comp((*v)[p], (*v)[j]))
624 Swap((*v)[j], (*v)[p]);
629 // Stage 2: swap largest element with the last one,
630 // and sink the new top.
631 for (uptr i = size - 1; i > 0; i--) {
632 Swap((*v)[0], (*v)[i]);
634 for (j = 0; j < i; j = max_ind) {
635 uptr left = 2 * j + 1;
636 uptr right = 2 * j + 2;
638 if (left < i && comp((*v)[max_ind], (*v)[left]))
640 if (right < i && comp((*v)[max_ind], (*v)[right]))
643 Swap((*v)[j], (*v)[max_ind]);
650 // Works like std::lower_bound: finds the first element that is not less
652 template <class Container, class Value, class Compare>
653 uptr InternalLowerBound(const Container &v, uptr first, uptr last,
654 const Value &val, Compare comp) {
655 while (last > first) {
656 uptr mid = (first + last) / 2;
657 if (comp(v[mid], val))
677 // When adding a new architecture, don't forget to also update
678 // script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
679 inline const char *ModuleArchToString(ModuleArch arch) {
681 case kModuleArchUnknown:
683 case kModuleArchI386:
685 case kModuleArchX86_64:
687 case kModuleArchX86_64H:
689 case kModuleArchARMV6:
691 case kModuleArchARMV7:
693 case kModuleArchARMV7S:
695 case kModuleArchARMV7K:
697 case kModuleArchARM64:
700 CHECK(0 && "Invalid module arch");
704 const uptr kModuleUUIDSize = 16;
706 // Represents a binary loaded into virtual memory (e.g. this can be an
707 // executable or a shared object).
711 : full_name_(nullptr),
713 max_executable_address_(0),
714 arch_(kModuleArchUnknown),
715 instrumented_(false) {
716 internal_memset(uuid_, 0, kModuleUUIDSize);
719 void set(const char *module_name, uptr base_address);
720 void set(const char *module_name, uptr base_address, ModuleArch arch,
721 u8 uuid[kModuleUUIDSize], bool instrumented);
723 void addAddressRange(uptr beg, uptr end, bool executable, bool writable);
724 bool containsAddress(uptr address) const;
726 const char *full_name() const { return full_name_; }
727 uptr base_address() const { return base_address_; }
728 uptr max_executable_address() const { return max_executable_address_; }
729 ModuleArch arch() const { return arch_; }
730 const u8 *uuid() const { return uuid_; }
731 bool instrumented() const { return instrumented_; }
733 struct AddressRange {
740 AddressRange(uptr beg, uptr end, bool executable, bool writable)
744 executable(executable),
745 writable(writable) {}
748 const IntrusiveList<AddressRange> &ranges() const { return ranges_; }
751 char *full_name_; // Owned.
753 uptr max_executable_address_;
755 u8 uuid_[kModuleUUIDSize];
757 IntrusiveList<AddressRange> ranges_;
760 // List of LoadedModules. OS-dependent implementation is responsible for
761 // filling this information.
762 class ListOfModules {
764 ListOfModules() : modules_(kInitialCapacity) {}
765 ~ListOfModules() { clear(); }
767 const LoadedModule *begin() const { return modules_.begin(); }
768 LoadedModule *begin() { return modules_.begin(); }
769 const LoadedModule *end() const { return modules_.end(); }
770 LoadedModule *end() { return modules_.end(); }
771 uptr size() const { return modules_.size(); }
772 const LoadedModule &operator[](uptr i) const {
773 CHECK_LT(i, modules_.size());
779 for (auto &module : modules_) module.clear();
783 InternalMmapVector<LoadedModule> modules_;
784 // We rarely have more than 16K loaded modules.
785 static const uptr kInitialCapacity = 1 << 14;
788 // Callback type for iterating over a set of memory ranges.
789 typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
791 enum AndroidApiLevel {
792 ANDROID_NOT_ANDROID = 0,
794 ANDROID_LOLLIPOP_MR1 = 22,
795 ANDROID_POST_LOLLIPOP = 23
798 void WriteToSyslog(const char *buffer);
801 void LogFullErrorReport(const char *buffer);
803 INLINE void LogFullErrorReport(const char *buffer) {}
806 #if SANITIZER_LINUX || SANITIZER_MAC
807 void WriteOneLineToSyslog(const char *s);
808 void LogMessageOnPrintf(const char *str);
810 INLINE void WriteOneLineToSyslog(const char *s) {}
811 INLINE void LogMessageOnPrintf(const char *str) {}
815 // Initialize Android logging. Any writes before this are silently lost.
816 void AndroidLogInit();
817 void SetAbortMessage(const char *);
819 INLINE void AndroidLogInit() {}
820 // FIXME: MacOS implementation could use CRSetCrashLogMessage.
821 INLINE void SetAbortMessage(const char *) {}
824 #if SANITIZER_ANDROID
825 void SanitizerInitializeUnwinder();
826 AndroidApiLevel AndroidGetApiLevel();
828 INLINE void AndroidLogWrite(const char *buffer_unused) {}
829 INLINE void SanitizerInitializeUnwinder() {}
830 INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
833 INLINE uptr GetPthreadDestructorIterations() {
834 #if SANITIZER_ANDROID
835 return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
836 #elif SANITIZER_POSIX
839 // Unused on Windows.
void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
860 struct SignalContext {
866 bool is_memory_access;
868 enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;
870 SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp,
871 bool is_memory_access, WriteFlag write_flag)
877 is_memory_access(is_memory_access),
878 write_flag(write_flag) {}
880 static void DumpAllRegisters(void *context);
882 // Creates signal context in a platform-specific manner.
883 static SignalContext Create(void *siginfo, void *context);
885 // Returns true if the "context" indicates a memory write.
886 static WriteFlag GetWriteFlag(void *context);
889 void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
// Invokes the stored callable when the object goes out of scope.
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//  auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}
910 // Linux on 64-bit s390 had a nasty bug that crashes the whole machine
911 // if a process uses virtual memory over 4TB (as many sanitizers like
912 // to do). This function will abort the process if running on a kernel
913 // that looks vulnerable.
914 #if SANITIZER_LINUX && SANITIZER_S390_64
915 void AvoidCVE_2016_2143();
917 INLINE void AvoidCVE_2016_2143() {}
920 struct StackDepotStats {
925 // The default value for allocator_release_to_os_interval_ms common flag to
926 // indicate that sanitizer allocator should not attempt to release memory to OS.
927 const s32 kReleaseToOSIntervalNever = -1;
929 void CheckNoDeepBind(const char *filename, int flag);
931 // Returns the requested amount of random data (up to 256 bytes) that can then
932 // be used to seed a PRNG.
933 bool GetRandom(void *buffer, uptr length);
935 } // namespace __sanitizer
937 inline void *operator new(__sanitizer::operator_new_size_type size,
938 __sanitizer::LowLevelAllocator &alloc) {
939 return alloc.Allocate(size);
942 #endif // SANITIZER_COMMON_H