//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementation of some functions are provided in sanitizer_common, while
// others must be defined by run-time library itself.
//===----------------------------------------------------------------------===//
16 #ifndef SANITIZER_COMMON_H
17 #define SANITIZER_COMMON_H
19 #include "sanitizer_flags.h"
20 #include "sanitizer_interface_internal.h"
21 #include "sanitizer_internal_defs.h"
22 #include "sanitizer_libc.h"
23 #include "sanitizer_list.h"
24 #include "sanitizer_mutex.h"
26 namespace __sanitizer {
30 const uptr kWordSize = SANITIZER_WORDSIZE / 8;
31 const uptr kWordSizeInBits = 8 * kWordSize;
33 #if defined(__powerpc__) || defined(__powerpc64__)
34 const uptr kCacheLineSize = 128;
36 const uptr kCacheLineSize = 64;
39 const uptr kMaxPathLength = 4096;
41 const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
43 extern const char *SanitizerToolName; // Can be changed by the tool.
45 extern atomic_uint32_t current_verbosity;
46 INLINE void SetVerbosity(int verbosity) {
47 atomic_store(¤t_verbosity, verbosity, memory_order_relaxed);
49 INLINE int Verbosity() {
50 return atomic_load(¤t_verbosity, memory_order_relaxed);
54 uptr GetPageSizeCached();
55 uptr GetMmapGranularity();
56 uptr GetMaxVirtualAddress();
60 void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
62 void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
63 uptr *tls_addr, uptr *tls_size);
66 void *MmapOrDie(uptr size, const char *mem_type);
67 void UnmapOrDie(void *addr, uptr size);
68 void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
69 void *MmapNoReserveOrDie(uptr size, const char *mem_type);
70 void *MmapFixedOrDie(uptr fixed_addr, uptr size);
71 void *Mprotect(uptr fixed_addr, uptr size);
72 // Map aligned chunk of address space; size and alignment are powers of two.
73 void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
74 // Used to check if we can map shadow memory to a fixed location.
75 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
76 void FlushUnneededShadowMemory(uptr addr, uptr size);
77 void IncreaseTotalMmap(uptr size);
78 void DecreaseTotalMmap(uptr size);
80 void NoHugePagesInRegion(uptr addr, uptr length);
81 void DontDumpShadowMemory(uptr addr, uptr length);
83 // InternalScopedBuffer can be used instead of large stack arrays to
84 // keep frame size low.
85 // FIXME: use InternalAlloc instead of MmapOrDie once
86 // InternalAlloc is made libc-free.
88 class InternalScopedBuffer {
90 explicit InternalScopedBuffer(uptr cnt) {
92 ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
94 ~InternalScopedBuffer() {
95 UnmapOrDie(ptr_, cnt_ * sizeof(T));
97 T &operator[](uptr i) { return ptr_[i]; }
98 T *data() { return ptr_; }
99 uptr size() { return cnt_ * sizeof(T); }
104 // Disallow evil constructors.
105 InternalScopedBuffer(const InternalScopedBuffer&);
106 void operator=(const InternalScopedBuffer&);
109 class InternalScopedString : public InternalScopedBuffer<char> {
111 explicit InternalScopedString(uptr max_length)
112 : InternalScopedBuffer<char>(max_length), length_(0) {
115 uptr length() { return length_; }
120 void append(const char *format, ...);
126 // Simple low-level (mmap-based) allocator for internal use. Doesn't have
127 // constructor, so all instances of LowLevelAllocator should be
128 // linker initialized.
129 class LowLevelAllocator {
131 // Requires an external lock.
132 void *Allocate(uptr size);
134 char *allocated_end_;
135 char *allocated_current_;
137 typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
138 // Allows to register tool-specific callbacks for LowLevelAllocator.
139 // Passing NULL removes the callback.
140 void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
143 void RawWrite(const char *buffer);
144 bool ColorizeReports();
145 void Printf(const char *format, ...);
146 void Report(const char *format, ...);
147 void SetPrintfAndReportCallback(void (*callback)(const char *));
148 #define VReport(level, ...) \
150 if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
152 #define VPrintf(level, ...) \
154 if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
157 // Can be used to prevent mixing error reports from different sanitizers.
158 extern StaticSpinMutex CommonSanitizerReportMutex;
161 void Write(const char *buffer, uptr length);
163 void SetReportPath(const char *path);
165 // Don't use fields directly. They are only declared public to allow
166 // aggregate initialization.
168 // Protects fields below.
170 // Opened file descriptor. Defaults to stderr. It may be equal to
171 // kInvalidFd, in which case new file will be opened when necessary.
173 // Path prefix of report file, set via __sanitizer_set_report_path.
174 char path_prefix[kMaxPathLength];
175 // Full path to report, obtained as <path_prefix>.PID
176 char full_path[kMaxPathLength];
177 // PID of the process that opened fd. If a fork() occurs,
178 // the PID of child will be different from fd_pid.
182 void ReopenIfNecessary();
184 extern ReportFile report_file;
186 extern uptr stoptheworld_tracer_pid;
187 extern uptr stoptheworld_tracer_ppid;
189 uptr OpenFile(const char *filename, bool write);
190 // Opens the file 'file_name" and reads up to 'max_len' bytes.
191 // The resulting buffer is mmaped and stored in '*buff'.
192 // The size of the mmaped region is stored in '*buff_size',
193 // Returns the number of read bytes or 0 if file can not be opened.
194 uptr ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
195 uptr max_len, int *errno_p = nullptr);
196 // Maps given file to virtual memory, and returns pointer to it
197 // (or NULL if the mapping failes). Stores the size of mmaped region
199 void *MapFileToMemory(const char *file_name, uptr *buff_size);
200 void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset);
202 bool IsAccessibleMemoryRange(uptr beg, uptr size);
204 // Error report formatting.
205 const char *StripPathPrefix(const char *filepath,
206 const char *strip_file_prefix);
207 // Strip the directories from the module name.
208 const char *StripModuleName(const char *module);
211 void DisableCoreDumperIfNecessary();
212 void DumpProcessMap();
213 bool FileExists(const char *filename);
214 const char *GetEnv(const char *name);
215 bool SetEnv(const char *name, const char *value);
216 const char *GetPwd();
217 char *FindPathToBinary(const char *name);
220 bool StackSizeIsUnlimited();
221 void SetStackSizeLimitInBytes(uptr limit);
222 bool AddressSpaceIsUnlimited();
223 void SetAddressSpaceUnlimited();
224 void AdjustStackSize(void *attr);
225 void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
226 void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
227 void SetSandboxingCallback(void (*f)());
229 void CoverageUpdateMapping();
230 void CovBeforeFork();
231 void CovAfterFork(int child_pid);
233 void InitializeCoverage(bool enabled, const char *coverage_dir);
234 void ReInitializeCoverage(bool enabled, const char *coverage_dir);
240 void SleepForSeconds(int seconds);
241 void SleepForMillis(int millis);
243 int Atexit(void (*function)(void));
244 void SortArray(uptr *array, uptr size);
245 bool TemplateMatch(const char *templ, const char *str);
248 void NORETURN Abort();
251 CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
253 // Set the name of the current thread to 'name', return true on succees.
254 // The name may be truncated to a system-dependent limit.
255 bool SanitizerSetThreadName(const char *name);
256 // Get the name of the current thread (no more than max_len bytes),
257 // return true on succees. name should have space for at least max_len+1 bytes.
258 bool SanitizerGetThreadName(char *name, int max_len);
260 // Specific tools may override behavior of "Die" and "CheckFailed" functions
261 // to do tool-specific job.
262 typedef void (*DieCallbackType)(void);
263 void SetDieCallback(DieCallbackType);
264 void SetUserDieCallback(DieCallbackType);
265 DieCallbackType GetDieCallback();
266 typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
268 void SetCheckFailedCallback(CheckFailedCallbackType callback);
270 // Callback will be called if soft_rss_limit_mb is given and the limit is
271 // exceeded (exceeded==true) or if rss went down below the limit
272 // (exceeded==false).
273 // The callback should be registered once at the tool init time.
274 void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
276 // Functions related to signal handling.
277 typedef void (*SignalHandlerType)(int, void *, void *);
278 bool IsDeadlySignal(int signum);
279 void InstallDeadlySignalHandlers(SignalHandlerType handler);
280 // Alternative signal stack (POSIX-only).
281 void SetAlternateSignalStack();
282 void UnsetAlternateSignalStack();
284 // We don't want a summary too long.
285 const int kMaxSummaryLength = 1024;
286 // Construct a one-line string:
287 // SUMMARY: SanitizerToolName: error_message
288 // and pass it to __sanitizer_report_error_summary.
289 void ReportErrorSummary(const char *error_message);
290 // Same as above, but construct error_message as:
291 // error_type file:line function
292 void ReportErrorSummary(const char *error_type, const char *file,
293 int line, const char *function);
294 void ReportErrorSummary(const char *error_type, StackTrace *trace);
// Math
// MSVC provides no GCC builtins; declare its bit-scan intrinsics instead.
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif
308 INLINE uptr MostSignificantSetBitIndex(uptr x) {
310 unsigned long up; // NOLINT
311 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
312 up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
313 #elif defined(_WIN64)
314 _BitScanReverse64(&up, x);
316 _BitScanReverse(&up, x);
321 INLINE uptr LeastSignificantSetBitIndex(uptr x) {
323 unsigned long up; // NOLINT
324 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
325 up = __builtin_ctzl(x);
326 #elif defined(_WIN64)
327 _BitScanForward64(&up, x);
329 _BitScanForward(&up, x);
334 INLINE bool IsPowerOfTwo(uptr x) {
335 return (x & (x - 1)) == 0;
338 INLINE uptr RoundUpToPowerOfTwo(uptr size) {
340 if (IsPowerOfTwo(size)) return size;
342 uptr up = MostSignificantSetBitIndex(size);
343 CHECK(size < (1ULL << (up + 1)));
344 CHECK(size > (1ULL << up));
345 return 1UL << (up + 1);
348 INLINE uptr RoundUpTo(uptr size, uptr boundary) {
349 CHECK(IsPowerOfTwo(boundary));
350 return (size + boundary - 1) & ~(boundary - 1);
353 INLINE uptr RoundDownTo(uptr x, uptr boundary) {
354 return x & ~(boundary - 1);
357 INLINE bool IsAligned(uptr a, uptr alignment) {
358 return (a & (alignment - 1)) == 0;
361 INLINE uptr Log2(uptr x) {
362 CHECK(IsPowerOfTwo(x));
363 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
364 return __builtin_ctzl(x);
365 #elif defined(_WIN64)
366 unsigned long ret; // NOLINT
367 _BitScanForward64(&ret, x);
370 unsigned long ret; // NOLINT
371 _BitScanForward(&ret, x);
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}
387 INLINE bool IsSpace(int c) {
388 return (c == ' ') || (c == '\n') || (c == '\t') ||
389 (c == '\f') || (c == '\r') || (c == '\v');
391 INLINE bool IsDigit(int c) {
392 return (c >= '0') && (c <= '9');
394 INLINE int ToLower(int c) {
395 return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
398 // A low-level vector based on mmap. May incur a significant memory overhead for
400 // WARNING: The current implementation supports only POD types.
402 class InternalMmapVectorNoCtor {
404 void Initialize(uptr initial_capacity) {
405 capacity_ = Max(initial_capacity, (uptr)1);
407 data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
410 UnmapOrDie(data_, capacity_ * sizeof(T));
412 T &operator[](uptr i) {
416 const T &operator[](uptr i) const {
420 void push_back(const T &element) {
421 CHECK_LE(size_, capacity_);
422 if (size_ == capacity_) {
423 uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
424 Resize(new_capacity);
426 data_[size_++] = element;
430 return data_[size_ - 1];
439 const T *data() const {
442 uptr capacity() const {
446 void clear() { size_ = 0; }
449 void Resize(uptr new_capacity) {
450 CHECK_GT(new_capacity, 0);
451 CHECK_LE(size_, new_capacity);
452 T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
453 "InternalMmapVector");
454 internal_memcpy(new_data, data_, size_ * sizeof(T));
457 UnmapOrDie(old_data, capacity_ * sizeof(T));
458 capacity_ = new_capacity;
467 class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
469 explicit InternalMmapVector(uptr initial_capacity) {
470 InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
472 ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
473 // Disallow evil constructors.
474 InternalMmapVector(const InternalMmapVector&);
475 void operator=(const InternalMmapVector&);
478 // HeapSort for arrays and InternalMmapVector.
479 template<class Container, class Compare>
480 void InternalSort(Container *v, uptr size, Compare comp) {
483 // Stage 1: insert elements to the heap.
484 for (uptr i = 1; i < size; i++) {
486 for (j = i; j > 0; j = p) {
488 if (comp((*v)[p], (*v)[j]))
489 Swap((*v)[j], (*v)[p]);
494 // Stage 2: swap largest element with the last one,
495 // and sink the new top.
496 for (uptr i = size - 1; i > 0; i--) {
497 Swap((*v)[0], (*v)[i]);
499 for (j = 0; j < i; j = max_ind) {
500 uptr left = 2 * j + 1;
501 uptr right = 2 * j + 2;
503 if (left < i && comp((*v)[max_ind], (*v)[left]))
505 if (right < i && comp((*v)[max_ind], (*v)[right]))
508 Swap((*v)[j], (*v)[max_ind]);
515 template<class Container, class Value, class Compare>
516 uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
517 const Value &val, Compare comp) {
518 uptr not_found = last + 1;
519 while (last >= first) {
520 uptr mid = (first + last) / 2;
521 if (comp(v[mid], val))
523 else if (comp(val, v[mid]))
531 // Represents a binary loaded into virtual memory (e.g. this can be an
532 // executable or a shared object).
535 LoadedModule(const char *module_name, uptr base_address);
537 void addAddressRange(uptr beg, uptr end, bool executable);
538 bool containsAddress(uptr address) const;
540 const char *full_name() const { return full_name_; }
541 uptr base_address() const { return base_address_; }
543 struct AddressRange {
549 AddressRange(uptr beg, uptr end, bool executable)
550 : next(nullptr), beg(beg), end(end), executable(executable) {}
553 typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
554 Iterator ranges() const { return Iterator(&ranges_); }
557 char *full_name_; // Owned.
559 IntrusiveList<AddressRange> ranges_;
562 // OS-dependent function that fills array with descriptions of at most
563 // "max_modules" currently loaded modules. Returns the number of
564 // initialized modules. If filter is nonzero, ignores modules for which
565 // filter(full_name) is false.
566 typedef bool (*string_predicate_t)(const char *);
567 uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
568 string_predicate_t filter);
571 const uptr kPthreadDestructorIterations = 4;
573 // Unused on Windows.
574 const uptr kPthreadDestructorIterations = 0;
577 // Callback type for iterating over a set of memory ranges.
578 typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
580 #if SANITIZER_ANDROID
581 // Initialize Android logging. Any writes before this are silently lost.
582 void AndroidLogInit();
583 void AndroidLogWrite(const char *buffer);
584 void GetExtraActivationFlags(char *buf, uptr size);
585 void SanitizerInitializeUnwinder();
587 INLINE void AndroidLogInit() {}
588 INLINE void AndroidLogWrite(const char *buffer_unused) {}
589 INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
590 INLINE void SanitizerInitializeUnwinder() {}
593 void *internal_start_thread(void(*func)(void*), void *arg);
594 void internal_join_thread(void *th);
595 void MaybeStartBackgroudThread();
597 // Make the compiler think that something is going on there.
598 // Use this inside a loop that looks like memset/memcpy/etc to prevent the
599 // compiler from recognising it and turning it into an actual call to
600 // memset/memcpy/etc.
601 static inline void SanitizerBreakOptimization(void *arg) {
603 // FIXME: make sure this is actually enough.
606 __asm__ __volatile__("" : : "r" (arg) : "memory");
610 } // namespace __sanitizer
612 inline void *operator new(__sanitizer::operator_new_size_type size,
613 __sanitizer::LowLevelAllocator &alloc) {
614 return alloc.Allocate(size);
617 struct StackDepotStats {
622 #endif // SANITIZER_COMMON_H