//===-- sanitizer_common_libcdep.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_common.h"

#include "sanitizer_allocator_interface.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_report_decorator.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"

#if SANITIZER_POSIX
#include "sanitizer_posix.h"
#endif

namespace __sanitizer {

#if !SANITIZER_FUCHSIA

bool ReportFile::SupportsColors() {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  return SupportsColoredOutput(fd);
}

static INLINE bool ReportSupportsColors() {
  return report_file.SupportsColors();
}

#else  // SANITIZER_FUCHSIA

// Fuchsia's logs always go through post-processing that handles colorization.
static INLINE bool ReportSupportsColors() { return true; }

#endif  // !SANITIZER_FUCHSIA

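// Decides whether reports should be colorized based on the common "color"
// flag: "always" forces colors on, "auto" defers to the output terminal,
// and any other value (e.g. "never") disables them.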
bool ColorizeReports() {
  // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable
  // color printing on Windows.
  if (SANITIZER_WINDOWS)
    return false;

  const char *flag = common_flags()->color;
  return internal_strcmp(flag, "always") == 0 ||
         (internal_strcmp(flag, "auto") == 0 && ReportSupportsColors());
}

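// The sandboxing callback is registered by the tool and invoked from
// __sanitizer_sandbox_on_notify() at the bottom of this file, right after
// PrepareForSandboxing().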
static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
  sandboxing_callback = f;
}

void ReportErrorSummary(const char *error_type, const StackTrace *stack,
                        const char *alt_tool_name) {
#if !SANITIZER_GO
  if (!common_flags()->print_summary)
    return;
  if (stack->size == 0) {
    ReportErrorSummary(error_type);
    return;
  }
  // Currently, we include only the first stack frame in the report summary.
  // It may sometimes be better to pick a different frame (e.g. to skip
  // memcpy and similar interceptors).
  uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
  SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
  ReportErrorSummary(error_type, frame->info, alt_tool_name);
  frame->ClearAll();
#endif
}

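// A tool may register at most one callback (enforced by the CHECK_EQ below).
// BackgroundThread() fires it with `true` when the soft RSS limit is first
// exceeded and with `false` once RSS drops back under the limit.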
static void (*SoftRssLimitExceededCallback)(bool exceeded);
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
  CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
  SoftRssLimitExceededCallback = Callback;
}

#if SANITIZER_LINUX && !SANITIZER_GO
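// Monitoring thread started by MaybeStartBackgroudThread(): wakes up every
// 100ms, optionally logs RSS and stack depot growth (at verbosity > 0),
// enforces the hard/soft RSS limits, and dumps a heap profile whenever RSS
// has grown by more than 10% since the last reported profile.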
void BackgroundThread(void *arg) {
  uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
  uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
  bool heap_profile = common_flags()->heap_profile;
  uptr prev_reported_rss = 0;
  uptr prev_reported_stack_depot_size = 0;
  bool reached_soft_rss_limit = false;
  uptr rss_during_last_reported_profile = 0;
  while (true) {
    SleepForMillis(100);
    uptr current_rss_mb = GetRSS() >> 20;
    if (Verbosity()) {
      // If RSS has grown 10% since last time, print some information.
      if (prev_reported_rss * 11 / 10 < current_rss_mb) {
        Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
        prev_reported_rss = current_rss_mb;
      }
      // If stack depot has grown 10% since last time, print it too.
      StackDepotStats *stack_depot_stats = StackDepotGetStats();
      if (prev_reported_stack_depot_size * 11 / 10 <
          stack_depot_stats->allocated) {
        Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
               SanitizerToolName,
               stack_depot_stats->n_uniq_ids,
               stack_depot_stats->allocated >> 20);
        prev_reported_stack_depot_size = stack_depot_stats->allocated;
      }
    }
    // Check RSS against the limit.
    if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
      Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
             SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
      DumpProcessMap();
      Die();
    }
    if (soft_rss_limit_mb) {
      if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
        reached_soft_rss_limit = true;
        Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
               SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(true);
      } else if (soft_rss_limit_mb >= current_rss_mb &&
                 reached_soft_rss_limit) {
        reached_soft_rss_limit = false;
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(false);
      }
    }
    if (heap_profile &&
        current_rss_mb > rss_during_last_reported_profile * 1.1) {
      Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
      __sanitizer_print_memory_profile(90, 20);
      rss_during_last_reported_profile = current_rss_mb;
    }
  }
}
#endif

#if !SANITIZER_FUCHSIA && !SANITIZER_GO
void StartReportDeadlySignal() {
  // Write the first message using fd=2, just in case. This may fail if
  // stderr has been closed.
  CatastrophicErrorWrite(SanitizerToolName, internal_strlen(SanitizerToolName));
  static const char kDeadlySignal[] = ":DEADLYSIGNAL\n";
  CatastrophicErrorWrite(kDeadlySignal, sizeof(kDeadlySignal) - 1);
}

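// On Linux/FreeBSD/NetBSD, walk the process memory map and warn if the
// faulting pc lies inside a mapping that is not executable (a likely sign
// of a wild jump). On other platforms this is a no-op.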
static void MaybeReportNonExecRegion(uptr pc) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (pc >= segment.start && pc < segment.end && !segment.IsExecutable())
      Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n");
  }
#endif
}

static void PrintMemoryByte(InternalScopedString *str, const char *before,
                            u8 byte) {
  SanitizerCommonDecorator d;
  str->append("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
              d.Default());
}

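// When the dump_instruction_bytes flag is set, print the 16 bytes at the
// faulting pc as hex, guarding against the zero page and unreadable memory.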
static void MaybeDumpInstructionBytes(uptr pc) {
  if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
    return;
  InternalScopedString str(1024);
  str.append("First 16 instruction bytes at pc: ");
  if (IsAccessibleMemoryRange(pc, 16)) {
    for (int i = 0; i < 16; ++i) {
      PrintMemoryByte(&str, "", ((u8 *)pc)[i]);
    }
    str.append("\n");
  } else {
    str.append("inaccessible\n");
  }
  Report("%s", str.data());
}

static void MaybeDumpRegisters(void *context) {
  if (!common_flags()->dump_registers) return;
  SignalContext::DumpAllRegisters(context);
}

static void ReportStackOverflowImpl(const SignalContext &sig, u32 tid,
                                    UnwindSignalStackCallbackType unwind,
                                    const void *unwind_context) {
  SanitizerCommonDecorator d;
  Printf("%s", d.Warning());
  static const char kDescription[] = "stack-overflow";
  Report("ERROR: %s: %s on address %p (pc %p bp %p sp %p T%d)\n",
         SanitizerToolName, kDescription, (void *)sig.addr, (void *)sig.pc,
         (void *)sig.bp, (void *)sig.sp, tid);
  Printf("%s", d.Default());
  InternalScopedBuffer<BufferedStackTrace> stack_buffer(1);
  BufferedStackTrace *stack = stack_buffer.data();
  stack->Reset();
  unwind(sig, unwind_context, stack);
  stack->Print();
  ReportErrorSummary(kDescription, stack);
}

static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid,
                                   UnwindSignalStackCallbackType unwind,
                                   const void *unwind_context) {
  SanitizerCommonDecorator d;
  Printf("%s", d.Warning());
  const char *description = sig.Describe();
  Report("ERROR: %s: %s on unknown address %p (pc %p bp %p sp %p T%d)\n",
         SanitizerToolName, description, (void *)sig.addr, (void *)sig.pc,
         (void *)sig.bp, (void *)sig.sp, tid);
  Printf("%s", d.Default());
  if (sig.pc < GetPageSizeCached())
    Report("Hint: pc points to the zero page.\n");
  if (sig.is_memory_access) {
    const char *access_type =
        sig.write_flag == SignalContext::WRITE
            ? "WRITE"
            : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
    Report("The signal is caused by a %s memory access.\n", access_type);
    if (sig.addr < GetPageSizeCached())
      Report("Hint: address points to the zero page.\n");
  }
  MaybeReportNonExecRegion(sig.pc);
  InternalScopedBuffer<BufferedStackTrace> stack_buffer(1);
  BufferedStackTrace *stack = stack_buffer.data();
  stack->Reset();
  unwind(sig, unwind_context, stack);
  stack->Print();
  MaybeDumpInstructionBytes(sig.pc);
  MaybeDumpRegisters(sig.context);
  Printf("%s cannot provide additional info.\n", SanitizerToolName);
  ReportErrorSummary(description, stack);
}

void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {
  if (sig.IsStackOverflow())
    ReportStackOverflowImpl(sig, tid, unwind, unwind_context);
  else
    ReportDeadlySignalImpl(sig, tid, unwind, unwind_context);
}

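// Top-level deadly-signal handler: prints the ":DEADLYSIGNAL" banner,
// serializes the report with ScopedErrorReportLock, and never returns.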
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {
  StartReportDeadlySignal();
  ScopedErrorReportLock rl;
  SignalContext sig(siginfo, context);
  ReportDeadlySignal(sig, tid, unwind, unwind_context);
  Report("ABORTING\n");
  Die();
}

#endif  // !SANITIZER_FUCHSIA && !SANITIZER_GO

void WriteToSyslog(const char *msg) {
  InternalScopedString msg_copy(kErrorMessageBufferSize);
  msg_copy.append("%s", msg);
  char *p = msg_copy.data();
  char *q;

  // Print one line at a time.
  // syslog, at least on Android, has an implicit message length limit.
  do {
    q = internal_strchr(p, '\n');
    if (q)
      *q = '\0';
    WriteOneLineToSyslog(p);
    if (q)
      p = q + 1;
  } while (q);
}

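// Spawn BackgroundThread (currently Linux-only) when any RSS limit or heap
// profiling is requested; if real_pthread_create is unavailable the thread
// cannot be spawned and this silently does nothing.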
void MaybeStartBackgroudThread() {
#if SANITIZER_LINUX && \
    !SANITIZER_GO  // Need to implement/test on other platforms.
  // Start the background thread if one of the rss limits is given.
  if (!common_flags()->hard_rss_limit_mb &&
      !common_flags()->soft_rss_limit_mb &&
      !common_flags()->heap_profile) return;
  if (!&real_pthread_create) return;  // Can't spawn the thread anyway.
  internal_start_thread(BackgroundThread, nullptr);
#endif
}

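// Error reports are serialized: the first thread to claim reporting_thread
// also takes CommonSanitizerReportMutex; other threads spin until it is
// released. A nested report on the same thread aborts immediately to avoid
// deadlocking inside Report().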
static atomic_uintptr_t reporting_thread = {0};
static StaticSpinMutex CommonSanitizerReportMutex;

ScopedErrorReportLock::ScopedErrorReportLock() {
  uptr current = GetThreadSelf();
  for (;;) {
    uptr expected = 0;
    if (atomic_compare_exchange_strong(&reporting_thread, &expected, current,
                                       memory_order_relaxed)) {
      // We've claimed reporting_thread, so proceed.
      CommonSanitizerReportMutex.Lock();
      return;
    }

    if (expected == current) {
      // This is either an async signal or a nested error during error
      // reporting. Fail fast to avoid deadlocks in Report().

      // Can't use Report() here because of potential deadlocks in nested
      // signal handlers.
      CatastrophicErrorWrite(SanitizerToolName,
                             internal_strlen(SanitizerToolName));
      static const char msg[] = ": nested bug in the same thread, aborting.\n";
      CatastrophicErrorWrite(msg, sizeof(msg) - 1);

      internal__exit(common_flags()->exitcode);
    }

    internal_sched_yield();
  }
}

ScopedErrorReportLock::~ScopedErrorReportLock() {
  CommonSanitizerReportMutex.Unlock();
  atomic_store_relaxed(&reporting_thread, 0);
}

void ScopedErrorReportLock::CheckLocked() {
  CommonSanitizerReportMutex.CheckLocked();
}

}  // namespace __sanitizer

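// Exported as a weak interface symbol so the embedding application can
// override it; the default implementation prepares the runtime for
// sandboxing and then invokes the tool-registered sandboxing callback.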
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
                             __sanitizer_sandbox_arguments *args) {
  __sanitizer::PrepareForSandboxing(args);
  if (__sanitizer::sandboxing_callback)
    __sanitizer::sandboxing_callback();
}