//===-- xray_fdr_logging.cc ------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Here we implement the Flight Data Recorder mode for XRay, where we use
// compact structures to store records in memory as well as when writing out
// the data to files.
//
//===----------------------------------------------------------------------===//
#include "xray_fdr_logging.h"
#include <cassert>
#include <errno.h>
#include <limits>
#include <memory>
#include <pthread.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "xray/xray_interface.h"
#include "xray/xray_records.h"
#include "xray_buffer_queue.h"
#include "xray_defs.h"
#include "xray_fdr_flags.h"
#include "xray_flags.h"
#include "xray_recursion_guard.h"
#include "xray_tsc.h"
#include "xray_utils.h"

namespace __xray {

atomic_sint32_t LoggingStatus = {XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};
// Group together thread-local-data in a struct, then hide it behind a function
// call so that it can be initialized on first use instead of as a global. We
// force the alignment to 64-bytes for x86 cache line alignment, as this
// structure is used in the hot path of implementation.
struct alignas(64) ThreadLocalData {
  BufferQueue::Buffer Buffer;
  char *RecordPtr = nullptr;
  // The number of FunctionEntry records immediately preceding RecordPtr.
  uint8_t NumConsecutiveFnEnters = 0;

  // The number of adjacent, consecutive pairs of FunctionEntry, Tail Exit
  // records preceding RecordPtr.
  uint8_t NumTailCalls = 0;

  // We use a thread_local variable to keep track of which CPUs we've already
  // run, and the TSC times for these CPUs. This allows us to stop repeating
  // the CPU field in the function records.
  //
  // We assume that we'll support only 65536 CPUs for x86_64.
  uint16_t CurrentCPU = std::numeric_limits<uint16_t>::max();
  uint64_t LastTSC = 0;
  uint64_t LastFunctionEntryTSC = 0;

  // Make sure a thread that's ever called handleArg0 has a thread-local
  // live reference to the buffer queue for this particular instance of
  // FDRLogging, and that we're going to clean it up when the thread exits.
  BufferQueue *BQ = nullptr;
};

static_assert(std::is_trivially_destructible<ThreadLocalData>::value,
              "ThreadLocalData must be trivially destructible");
static constexpr auto MetadataRecSize = sizeof(MetadataRecord);
static constexpr auto FunctionRecSize = sizeof(FunctionRecord);
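// For orientation when reading the arithmetic below: in the FDR wire format a
// MetadataRecord is 16 bytes (one type/kind byte plus 15 payload bytes) and a
// FunctionRecord is 8 bytes. The buffer-space math in processFunctionHook
// below depends on these sizes.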
// Use a global pthread key to identify thread-local data for logging.
static pthread_key_t Key;

// Global BufferQueue.
static BufferQueue *BQ = nullptr;

static atomic_sint32_t LogFlushStatus = {
    XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};

static FDRLoggingOptions FDROptions;

static SpinMutex FDROptionsMutex;
// This function will initialize the thread-local data structure used by the
// FDR logging implementation and return a reference to it. The implementation
// details require a bit of care to maintain.
//
// First, some requirements on the implementation in general:
//
// - XRay handlers should not call any memory allocation routines that may
//   delegate to an instrumented implementation. This means functions like
//   malloc() and free() should not be called while instrumenting.
//
// - We would like to use some thread-local data initialized on first-use of
//   the XRay instrumentation. These allow us to implement unsynchronized
//   routines that access resources associated with the thread.
//
// The implementation here uses a few mechanisms that allow us to provide both
// the requirements listed above. We do this by:
//
// 1. Using a thread-local aligned storage buffer for representing the
//    ThreadLocalData struct. This data will be uninitialized memory by
//    design.
//
// 2. Not requiring a thread exit handler/implementation, keeping the
//    thread-local as purely a collection of references/data that do not
//    require cleanup.
//
// We're doing this to avoid using a `thread_local` object that has a
// non-trivial destructor, because the C++ runtime might call std::malloc(...)
// to register calls to destructors. Deadlocks may arise when, for example, an
// externally provided malloc implementation is XRay instrumented, and
// initializing the thread-locals involves calling into malloc. A malloc
// implementation that does global synchronization might be holding a lock for
// a critical section, calling a function that might be XRay instrumented (and
// thus in turn calling into malloc by virtue of registration of the
// thread_local's destructor).
static_assert(alignof(ThreadLocalData) >= 64,
              "ThreadLocalData must be cache line aligned.");
static ThreadLocalData &getThreadLocalData() {
  thread_local typename std::aligned_storage<
      sizeof(ThreadLocalData), alignof(ThreadLocalData)>::type TLDStorage{};

  if (pthread_getspecific(Key) == NULL) {
    new (reinterpret_cast<ThreadLocalData *>(&TLDStorage)) ThreadLocalData{};
    pthread_setspecific(Key, &TLDStorage);
  }

  return *reinterpret_cast<ThreadLocalData *>(&TLDStorage);
}
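// Note: the matching per-thread cleanup is registered lazily via
// pthread_key_create() in fdrLoggingInit() below; its destructor releases the
// thread's buffer back to the queue at thread exit, which is why the struct
// itself can stay trivially destructible.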
static void writeNewBufferPreamble(tid_t Tid, timespec TS,
                                   pid_t Pid) XRAY_NEVER_INSTRUMENT {
  static constexpr int InitRecordsCount = 3;
  auto &TLD = getThreadLocalData();
  MetadataRecord Metadata[InitRecordsCount];
  {
    // Write out a MetadataRecord to signify that this is the start of a new
    // buffer, associated with a particular thread, with a new CPU. For the
    // data, we have 15 bytes to squeeze as much information as we can. At this
    // point we only write down the following bytes:
    //   - Thread ID (tid_t, cast to a 4 byte type due to Darwin being 8 bytes)
    auto &NewBuffer = Metadata[0];
    NewBuffer.Type = uint8_t(RecordType::Metadata);
    NewBuffer.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewBuffer);
    int32_t tid = static_cast<int32_t>(Tid);
    internal_memcpy(&NewBuffer.Data, &tid, sizeof(tid));
  }

  // Also write the WalltimeMarker record.
  {
    static_assert(sizeof(time_t) <= 8, "time_t needs to be at most 8 bytes");
    auto &WalltimeMarker = Metadata[1];
    WalltimeMarker.Type = uint8_t(RecordType::Metadata);
    WalltimeMarker.RecordKind =
        uint8_t(MetadataRecord::RecordKinds::WalltimeMarker);

    // We only really need microsecond precision here, and enforce across
    // platforms that we need 64-bit seconds and 32-bit microseconds encoded in
    // the Metadata record.
    int32_t Micros = TS.tv_nsec / 1000;
    int64_t Seconds = TS.tv_sec;
    internal_memcpy(WalltimeMarker.Data, &Seconds, sizeof(Seconds));
    internal_memcpy(WalltimeMarker.Data + sizeof(Seconds), &Micros,
                    sizeof(Micros));
  }

  // Also write the Pid record.
  {
    // Write out a MetadataRecord that contains the current pid.
    auto &PidMetadata = Metadata[2];
    PidMetadata.Type = uint8_t(RecordType::Metadata);
    PidMetadata.RecordKind = uint8_t(MetadataRecord::RecordKinds::Pid);
    int32_t pid = static_cast<int32_t>(Pid);
    internal_memcpy(&PidMetadata.Data, &pid, sizeof(pid));
  }

  TLD.NumConsecutiveFnEnters = 0;
  TLD.NumTailCalls = 0;
  if (TLD.BQ == nullptr || TLD.BQ->finalizing())
    return;
  internal_memcpy(TLD.RecordPtr, Metadata, sizeof(Metadata));
  TLD.RecordPtr += sizeof(Metadata);
  // Since we write out the extents as the first metadata record of the
  // buffer, we need to write out the extents including the extents record.
  atomic_store(&TLD.Buffer.Extents->Size, sizeof(Metadata),
               memory_order_release);
}
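// For reference, the preamble written above is three 16-byte MetadataRecords
// (NewBuffer, WalltimeMarker, Pid), i.e. sizeof(Metadata) ==
// 3 * sizeof(MetadataRecord) == 48 bytes, which is also the initial value
// stored in the buffer's extents.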
static void setupNewBuffer(int (*wall_clock_reader)(
    clockid_t, struct timespec *)) XRAY_NEVER_INSTRUMENT {
  auto &TLD = getThreadLocalData();
  auto &B = TLD.Buffer;
  TLD.RecordPtr = static_cast<char *>(B.Data);
  tid_t Tid = GetTid();
  timespec TS{0, 0};
  pid_t Pid = internal_getpid();
  // This is typically clock_gettime, but callers have injection ability.
  wall_clock_reader(CLOCK_MONOTONIC, &TS);
  writeNewBufferPreamble(Tid, TS, Pid);
  TLD.NumConsecutiveFnEnters = 0;
  TLD.NumTailCalls = 0;
}
static void incrementExtents(size_t Add) {
  auto &TLD = getThreadLocalData();
  atomic_fetch_add(&TLD.Buffer.Extents->Size, Add, memory_order_acq_rel);
}

static void decrementExtents(size_t Subtract) {
  auto &TLD = getThreadLocalData();
  atomic_fetch_sub(&TLD.Buffer.Extents->Size, Subtract, memory_order_acq_rel);
}
static void writeNewCPUIdMetadata(uint16_t CPU,
                                  uint64_t TSC) XRAY_NEVER_INSTRUMENT {
  auto &TLD = getThreadLocalData();
  MetadataRecord NewCPUId;
  NewCPUId.Type = uint8_t(RecordType::Metadata);
  NewCPUId.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewCPUId);

  // The data for the New CPU will contain the following bytes:
  //   - CPU ID (uint16_t, 2 bytes)
  //   - Full TSC (uint64_t, 8 bytes)
  internal_memcpy(&NewCPUId.Data, &CPU, sizeof(CPU));
  internal_memcpy(&NewCPUId.Data[sizeof(CPU)], &TSC, sizeof(TSC));
  internal_memcpy(TLD.RecordPtr, &NewCPUId, sizeof(MetadataRecord));
  TLD.RecordPtr += sizeof(MetadataRecord);
  TLD.NumConsecutiveFnEnters = 0;
  TLD.NumTailCalls = 0;
  incrementExtents(sizeof(MetadataRecord));
}
static void writeTSCWrapMetadata(uint64_t TSC) XRAY_NEVER_INSTRUMENT {
  auto &TLD = getThreadLocalData();
  MetadataRecord TSCWrap;
  TSCWrap.Type = uint8_t(RecordType::Metadata);
  TSCWrap.RecordKind = uint8_t(MetadataRecord::RecordKinds::TSCWrap);

  // The data for the TSCWrap record contains the following bytes:
  //   - Full TSC (uint64_t, 8 bytes)
  internal_memcpy(&TSCWrap.Data, &TSC, sizeof(TSC));
  internal_memcpy(TLD.RecordPtr, &TSCWrap, sizeof(MetadataRecord));
  TLD.RecordPtr += sizeof(MetadataRecord);
  TLD.NumConsecutiveFnEnters = 0;
  TLD.NumTailCalls = 0;
  incrementExtents(sizeof(MetadataRecord));
}
// Call Argument metadata records store the arguments to a function in the
// order of their appearance; holes are not supported by the buffer format.
static void writeCallArgumentMetadata(uint64_t A) XRAY_NEVER_INSTRUMENT {
  auto &TLD = getThreadLocalData();
  MetadataRecord CallArg;
  CallArg.Type = uint8_t(RecordType::Metadata);
  CallArg.RecordKind = uint8_t(MetadataRecord::RecordKinds::CallArgument);

  internal_memcpy(CallArg.Data, &A, sizeof(A));
  internal_memcpy(TLD.RecordPtr, &CallArg, sizeof(MetadataRecord));
  TLD.RecordPtr += sizeof(MetadataRecord);
  incrementExtents(sizeof(MetadataRecord));
}
static void writeFunctionRecord(int FuncId, uint32_t TSCDelta,
                                XRayEntryType EntryType) XRAY_NEVER_INSTRUMENT {
  FunctionRecord FuncRecord;
  FuncRecord.Type = uint8_t(RecordType::Function);
  // Only take 28 bits of the function id.
  FuncRecord.FuncId = FuncId & ~(0x0F << 28);
  FuncRecord.TSCDelta = TSCDelta;

  auto &TLD = getThreadLocalData();
  switch (EntryType) {
  case XRayEntryType::ENTRY:
    ++TLD.NumConsecutiveFnEnters;
    FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter);
    break;
  case XRayEntryType::LOG_ARGS_ENTRY:
    // We should not rewind functions with logged args.
    TLD.NumConsecutiveFnEnters = 0;
    TLD.NumTailCalls = 0;
    FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter);
    break;
  case XRayEntryType::EXIT:
    // If we've decided to log the function exit, we will never erase the log
    // before it.
    TLD.NumConsecutiveFnEnters = 0;
    TLD.NumTailCalls = 0;
    FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionExit);
    break;
  case XRayEntryType::TAIL:
    // If we just entered the function we're tail exiting from or erased every
    // invocation since then, this function entry tail pair is a candidate to
    // be erased when the child function exits.
    if (TLD.NumConsecutiveFnEnters > 0) {
      ++TLD.NumTailCalls;
      TLD.NumConsecutiveFnEnters = 0;
    } else {
      // We will never be able to erase this tail call since we have logged
      // something in between the function entry and tail exit.
      TLD.NumTailCalls = 0;
      TLD.NumConsecutiveFnEnters = 0;
    }
    FuncRecord.RecordKind =
        uint8_t(FunctionRecord::RecordKinds::FunctionTailExit);
    break;
  case XRayEntryType::CUSTOM_EVENT: {
    // This is a bug in patching, so we'll report it once and move on.
    static atomic_uint8_t ErrorLatch{0};
    if (!atomic_exchange(&ErrorLatch, 1, memory_order_acq_rel))
      Report("Internal error: patched an XRay custom event call as a function; "
             "func id = %d\n",
             FuncId);
    return;
  }
  case XRayEntryType::TYPED_EVENT: {
    static atomic_uint8_t ErrorLatch{0};
    if (!atomic_exchange(&ErrorLatch, 1, memory_order_acq_rel))
      Report("Internal error: patched an XRay typed event call as a function; "
             "func id = %d\n",
             FuncId);
    return;
  }
  }

  internal_memcpy(TLD.RecordPtr, &FuncRecord, sizeof(FunctionRecord));
  TLD.RecordPtr += sizeof(FunctionRecord);
  incrementExtents(sizeof(FunctionRecord));
}
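// A short worked example of the bookkeeping above: the handler sequence
// ENTRY, ENTRY, TAIL leaves NumConsecutiveFnEnters == 0 and NumTailCalls == 1;
// the inner entry/tail-exit pair is now a candidate for erasure if the
// eventual EXIT arrives within the duration threshold (see rewindRecentCall).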
static atomic_uint64_t TicksPerSec{0};
static atomic_uint64_t ThresholdTicks{0};

// Re-point the thread local pointer into this thread's Buffer before the
// recent "Function Entry" record and any "Tail Call Exit" records after that.
static void rewindRecentCall(uint64_t TSC, uint64_t &LastTSC,
                             uint64_t &LastFunctionEntryTSC, int32_t FuncId) {
  auto &TLD = getThreadLocalData();
  TLD.RecordPtr -= FunctionRecSize;
  decrementExtents(FunctionRecSize);
  FunctionRecord FuncRecord;
  internal_memcpy(&FuncRecord, TLD.RecordPtr, FunctionRecSize);
  DCHECK(FuncRecord.RecordKind ==
             uint8_t(FunctionRecord::RecordKinds::FunctionEnter) &&
         "Expected to find function entry recording when rewinding.");
  DCHECK(FuncRecord.FuncId == (FuncId & ~(0x0F << 28)) &&
         "Expected matching function id when rewinding Exit");
  --TLD.NumConsecutiveFnEnters;
  LastTSC -= FuncRecord.TSCDelta;

  // We unwound one call. Update the state and return without writing a log.
  if (TLD.NumConsecutiveFnEnters != 0) {
    LastFunctionEntryTSC -= FuncRecord.TSCDelta;
    return;
  }

  // Otherwise we've rewound the stack of all function entries, we might be
  // able to rewind further by erasing tail call functions that are being
  // exited from via this exit.
  LastFunctionEntryTSC = 0;
  auto RewindingTSC = LastTSC;
  auto RewindingRecordPtr = TLD.RecordPtr - FunctionRecSize;
  while (TLD.NumTailCalls > 0) {
    // Rewind the TSC back over the TAIL EXIT record.
    FunctionRecord ExpectedTailExit;
    internal_memcpy(&ExpectedTailExit, RewindingRecordPtr, FunctionRecSize);

    DCHECK(ExpectedTailExit.RecordKind ==
               uint8_t(FunctionRecord::RecordKinds::FunctionTailExit) &&
           "Expected to find tail exit when rewinding.");
    RewindingRecordPtr -= FunctionRecSize;
    RewindingTSC -= ExpectedTailExit.TSCDelta;
    FunctionRecord ExpectedFunctionEntry;
    internal_memcpy(&ExpectedFunctionEntry, RewindingRecordPtr,
                    FunctionRecSize);
    DCHECK(ExpectedFunctionEntry.RecordKind ==
               uint8_t(FunctionRecord::RecordKinds::FunctionEnter) &&
           "Expected to find function entry when rewinding tail call.");
    DCHECK(ExpectedFunctionEntry.FuncId == ExpectedTailExit.FuncId &&
           "Expected funcids to match when rewinding tail call.");

    // This tail call exceeded the threshold duration. It will not be erased.
    if ((TSC - RewindingTSC) >= atomic_load_relaxed(&ThresholdTicks)) {
      TLD.NumTailCalls = 0;
      return;
    }

    // We can erase a tail exit pair that we're exiting through since
    // its duration is under threshold.
    --TLD.NumTailCalls;
    RewindingRecordPtr -= FunctionRecSize;
    RewindingTSC -= ExpectedFunctionEntry.TSCDelta;
    TLD.RecordPtr -= 2 * FunctionRecSize;
    LastTSC = RewindingTSC;
    decrementExtents(2 * FunctionRecSize);
  }
}
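// To illustrate: a function that enters and exits within ThresholdTicks has
// its FunctionEnter record erased by simply moving RecordPtr (and the
// extents) backwards, and any under-threshold entry/tail-exit pairs beneath
// it are unwound the same way, so trivial calls never reach the output log.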
static bool releaseThreadLocalBuffer(BufferQueue &BQArg) {
  auto &TLD = getThreadLocalData();
  auto EC = BQArg.releaseBuffer(TLD.Buffer);
  if (EC != BufferQueue::ErrorCode::Ok) {
    Report("Failed to release buffer at %p; error=%s\n", TLD.Buffer.Data,
           BufferQueue::getErrorString(EC));
    return false;
  }
  return true;
}
static bool prepareBuffer(uint64_t TSC, unsigned char CPU,
                          int (*wall_clock_reader)(clockid_t,
                                                   struct timespec *),
                          size_t MaxSize) XRAY_NEVER_INSTRUMENT {
  auto &TLD = getThreadLocalData();
  char *BufferStart = static_cast<char *>(TLD.Buffer.Data);
  if ((TLD.RecordPtr + MaxSize) > (BufferStart + TLD.Buffer.Size)) {
    if (!releaseThreadLocalBuffer(*TLD.BQ))
      return false;
    auto EC = TLD.BQ->getBuffer(TLD.Buffer);
    if (EC != BufferQueue::ErrorCode::Ok) {
      Report("Failed to prepare a buffer; error = '%s'\n",
             BufferQueue::getErrorString(EC));
      return false;
    }
    setupNewBuffer(wall_clock_reader);

    // Always write the CPU metadata as the first record in the buffer.
    writeNewCPUIdMetadata(CPU, TSC);
  }
  return true;
}
static bool
isLogInitializedAndReady(BufferQueue *LBQ, uint64_t TSC, unsigned char CPU,
                         int (*wall_clock_reader)(clockid_t, struct timespec *))
    XRAY_NEVER_INSTRUMENT {
  // Bail out right away if logging is not initialized yet.
  // We should take the opportunity to release the buffer though.
  auto Status = atomic_load(&LoggingStatus, memory_order_acquire);
  auto &TLD = getThreadLocalData();
  if (Status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) {
    if (TLD.RecordPtr != nullptr &&
        (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING ||
         Status == XRayLogInitStatus::XRAY_LOG_FINALIZED)) {
      if (!releaseThreadLocalBuffer(*LBQ))
        return false;
      TLD.RecordPtr = nullptr;
      return false;
    }
    return false;
  }

  if (atomic_load(&LoggingStatus, memory_order_acquire) !=
          XRayLogInitStatus::XRAY_LOG_INITIALIZED ||
      LBQ->finalizing()) {
    if (!releaseThreadLocalBuffer(*LBQ))
      return false;
    TLD.RecordPtr = nullptr;
  }

  if (TLD.Buffer.Data == nullptr) {
    auto EC = LBQ->getBuffer(TLD.Buffer);
    if (EC != BufferQueue::ErrorCode::Ok) {
      auto LS = atomic_load(&LoggingStatus, memory_order_acquire);
      if (LS != XRayLogInitStatus::XRAY_LOG_FINALIZING &&
          LS != XRayLogInitStatus::XRAY_LOG_FINALIZED)
        Report("Failed to acquire a buffer; error = '%s'\n",
               BufferQueue::getErrorString(EC));
      return false;
    }

    setupNewBuffer(wall_clock_reader);

    // Always write the CPU metadata as the first record in the buffer.
    writeNewCPUIdMetadata(CPU, TSC);
  }

  if (TLD.CurrentCPU == std::numeric_limits<uint16_t>::max()) {
    // This means this is the first CPU this thread has ever run on. We set
    // the current CPU and record this as the first TSC we've seen.
    TLD.CurrentCPU = CPU;
    writeNewCPUIdMetadata(CPU, TSC);
  }

  return true;
}
// Compute the TSC difference between the time of measurement and the previous
// event. There are a few interesting situations we need to account for:
//
// - The thread has migrated to a different CPU. If this is the case, then
//   we write down the following records:
//
//   1. A 'NewCPUId' Metadata record.
//   2. A FunctionRecord with a 0 for the TSCDelta field.
//
// - The TSC delta is greater than the 32 bits we can store in a
//   FunctionRecord. In this case we write down the following records:
//
//   1. A 'TSCWrap' Metadata record.
//   2. A FunctionRecord with a 0 for the TSCDelta field.
//
// - The TSC delta is representable within the 32 bits we can store in a
//   FunctionRecord. In this case we write down just a FunctionRecord with
//   the correct TSC delta.
static uint32_t writeCurrentCPUTSC(ThreadLocalData &TLD, uint64_t TSC,
                                   uint8_t CPU) {
  if (CPU != TLD.CurrentCPU) {
    // We've moved to a new CPU.
    writeNewCPUIdMetadata(CPU, TSC);
    return 0;
  }

  // If the delta is greater than the range for a uint32_t, then we write out
  // the TSC wrap metadata entry with the full TSC, and set the TSC delta for
  // the function record to 0.
  uint64_t Delta = TSC - TLD.LastTSC;
  if (Delta <= std::numeric_limits<uint32_t>::max())
    return Delta;

  writeTSCWrapMetadata(TSC);
  return 0;
}
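// Concretely: on a ~3GHz TSC, a thread that stays quiet for about 1.5 seconds
// accumulates a delta of roughly 4.5e9 ticks, which no longer fits in the
// 32-bit TSCDelta field (max ~4.29e9), so a TSCWrap record carrying the full
// 64-bit TSC is emitted and the following function record uses a delta of 0.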
static void endBufferIfFull() XRAY_NEVER_INSTRUMENT {
  auto &TLD = getThreadLocalData();
  auto BufferStart = static_cast<char *>(TLD.Buffer.Data);
  if ((TLD.RecordPtr + MetadataRecSize) - BufferStart <=
      ptrdiff_t{MetadataRecSize}) {
    if (!releaseThreadLocalBuffer(*TLD.BQ))
      return;
    TLD.RecordPtr = nullptr;
  }
}

thread_local atomic_uint8_t Running{0};
/// Here's where the meat of the processing happens. The writer captures
/// function entry, exit and tail exit points with a time and will create
/// TSCWrap, NewCPUId and Function records as necessary. The writer might
/// walk backward through its buffer and erase trivial functions to avoid
/// polluting the log and may use the buffer queue to obtain or release a
/// buffer.
static void processFunctionHook(int32_t FuncId, XRayEntryType Entry,
                                uint64_t TSC, unsigned char CPU, uint64_t Arg1,
                                int (*wall_clock_reader)(clockid_t,
                                                         struct timespec *))
    XRAY_NEVER_INSTRUMENT {
  __asm volatile("# LLVM-MCA-BEGIN processFunctionHook");
  // Prevent signal handler recursion, so in case we're already in a log writing
  // mode and the signal handler comes in (and is also instrumented) then we
  // don't want to be clobbering potentially partial writes already happening in
  // the thread. We use a simple thread_local latch to only allow one on-going
  // handleArg0 to happen at any given time.
  RecursionGuard Guard{Running};
  if (!Guard) {
    DCHECK(atomic_load_relaxed(&Running) && "RecursionGuard is buggy!");
    return;
  }

  auto &TLD = getThreadLocalData();

  if (TLD.BQ == nullptr)
    TLD.BQ = BQ;

  if (!isLogInitializedAndReady(TLD.BQ, TSC, CPU, wall_clock_reader))
    return;

  // Before we go setting up writing new function entries, we need to be really
  // careful about the pointer math we're doing. This means we need to ensure
  // that the record we are about to write is going to fit into the buffer,
  // without overflowing the buffer.
  //
  // To do this properly, we use the following assumptions:
  //
  //   - The least number of bytes we will ever write is 8
  //     (sizeof(FunctionRecord)) only if the delta between the previous entry
  //     and this entry is within 32 bits.
  //   - The most number of bytes we will ever write is 8 + 16 + 16 = 40.
  //     This is computed by:
  //
  //       MaxSize = sizeof(FunctionRecord) + 2 * sizeof(MetadataRecord)
  //
  //     These arise in the following cases:
  //
  //       1. When the delta between the TSC we get and the previous TSC for
  //          the same CPU is outside of the uint32_t range, we end up having
  //          to write a MetadataRecord to indicate a "tsc wrap" before the
  //          actual FunctionRecord.
  //       2. When we learn that we've moved CPUs, we need to write a
  //          MetadataRecord to indicate a "cpu change", and thus write out the
  //          current TSC for that CPU before writing out the actual
  //          FunctionRecord.
  //       3. When we learn about a new CPU ID, we need to write down a "new
  //          cpu id" MetadataRecord before writing out the actual
  //          FunctionRecord.
  //       4. The second MetadataRecord is the optional function call argument.
  //
  // So the math we need to do is to determine whether writing 40 bytes past the
  // current pointer exceeds the buffer's maximum size. If we don't have enough
  // space to write 40 bytes in the buffer, we need to get a new Buffer and set
  // it up properly before doing any further writing.
  size_t MaxSize = FunctionRecSize + 2 * MetadataRecSize;
  if (!prepareBuffer(TSC, CPU, wall_clock_reader, MaxSize)) {
    TLD.BQ = nullptr;
    return;
  }

  // By this point, we are now ready to write up to 40 bytes (explained above).
  DCHECK((TLD.RecordPtr + MaxSize) - static_cast<char *>(TLD.Buffer.Data) >=
             static_cast<ptrdiff_t>(MetadataRecSize) &&
         "Misconfigured BufferQueue provided; Buffer size not large enough.");

  auto RecordTSCDelta = writeCurrentCPUTSC(TLD, TSC, CPU);
  TLD.LastTSC = TSC;
  TLD.CurrentCPU = CPU;
  switch (Entry) {
  case XRayEntryType::ENTRY:
  case XRayEntryType::LOG_ARGS_ENTRY:
    // Update the thread local state for the next invocation.
    TLD.LastFunctionEntryTSC = TSC;
    break;
  case XRayEntryType::TAIL:
  case XRayEntryType::EXIT:
    // Break out and write the exit record if we can't erase any functions.
    if (TLD.NumConsecutiveFnEnters == 0 ||
        (TSC - TLD.LastFunctionEntryTSC) >=
            atomic_load_relaxed(&ThresholdTicks))
      break;
    rewindRecentCall(TSC, TLD.LastTSC, TLD.LastFunctionEntryTSC, FuncId);
    return; // without writing log.
  case XRayEntryType::CUSTOM_EVENT: {
    // This is a bug in patching, so we'll report it once and move on.
    static atomic_uint8_t ErrorLatch{0};
    if (!atomic_exchange(&ErrorLatch, 1, memory_order_acq_rel))
      Report("Internal error: patched an XRay custom event call as a function; "
             "func id = %d\n",
             FuncId);
    return;
  }
  case XRayEntryType::TYPED_EVENT: {
    static atomic_uint8_t ErrorLatch{0};
    if (!atomic_exchange(&ErrorLatch, 1, memory_order_acq_rel))
      Report("Internal error: patched an XRay typed event call as a function; "
             "func id = %d\n",
             FuncId);
    return;
  }
  }

  writeFunctionRecord(FuncId, RecordTSCDelta, Entry);
  if (Entry == XRayEntryType::LOG_ARGS_ENTRY)
    writeCallArgumentMetadata(Arg1);

  // If we've exhausted the buffer by this time, we then release the buffer to
  // make sure that other threads may start using this buffer.
  endBufferIfFull();
  __asm volatile("# LLVM-MCA-END");
}
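// Note: the "# LLVM-MCA-BEGIN/END" markers above are inert comments in the
// emitted assembly; llvm-mca recognizes them to delimit the region it
// analyzes when estimating the throughput of this hot path.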
static XRayFileHeader &fdrCommonHeaderInfo() {
  static std::aligned_storage<sizeof(XRayFileHeader)>::type HStorage;
  static pthread_once_t OnceInit = PTHREAD_ONCE_INIT;
  static bool TSCSupported = true;
  static uint64_t CycleFrequency = NanosecondsPerSecond;
  pthread_once(&OnceInit, +[] {
    XRayFileHeader &H = reinterpret_cast<XRayFileHeader &>(HStorage);
    // Version 2 of the log writes the extents of the buffer, instead of
    // relying on an end-of-buffer record.
    // Version 3 includes the PID metadata record.
    H.Version = 3;
    H.Type = FileTypes::FDR_LOG;

    // Test for required CPU features and cache the cycle frequency.
    TSCSupported = probeRequiredCPUFeatures();
    if (TSCSupported)
      CycleFrequency = getTSCFrequency();
    H.CycleFrequency = CycleFrequency;

    // FIXME: Actually check whether we have 'constant_tsc' and
    // 'nonstop_tsc' before setting the values in the header.
    H.ConstantTSC = 1;
    H.NonstopTSC = 1;
  });
  return reinterpret_cast<XRayFileHeader &>(HStorage);
}
// This is the iterator implementation, which knows how to handle FDR-mode
// specific buffers. This is used as an implementation of the iterator function
// needed by __xray_set_buffer_iterator(...). It maintains a global state of
// the buffer iteration for the currently installed FDR mode buffers. In
// particular:
//
//   - If the argument represents the initial state of XRayBuffer ({nullptr, 0})
//     then the iterator returns the header information.
//   - If the argument represents the header information ({address of header
//     info, size of the header info}) then it returns the first FDR buffer's
//     address and extents.
//   - It will keep returning the next buffer and extents as there are more
//     buffers to process. When the input represents the last buffer, it will
//     return the initial state to signal completion ({nullptr, 0}).
//
// See xray/xray_log_interface.h for more details on the requirements for the
// implementations of __xray_set_buffer_iterator(...) and
// __xray_log_process_buffers(...).
XRayBuffer fdrIterator(const XRayBuffer B) {
  DCHECK(internal_strcmp(__xray_log_get_current_mode(), "xray-fdr") == 0);
  DCHECK(BQ->finalizing());

  if (BQ == nullptr || !BQ->finalizing()) {
    if (Verbosity())
      Report(
          "XRay FDR: Failed global buffer queue is null or not finalizing!\n");
    return {nullptr, 0};
  }

  // We use a global scratch-pad for the header information, which only gets
  // initialized the first time this function is called. We'll update one part
  // of this information with some relevant data (in particular the number of
  // buffers to expect).
  static std::aligned_storage<sizeof(XRayFileHeader)>::type HeaderStorage;
  static pthread_once_t HeaderOnce = PTHREAD_ONCE_INIT;
  pthread_once(&HeaderOnce, +[] {
    reinterpret_cast<XRayFileHeader &>(HeaderStorage) = fdrCommonHeaderInfo();
  });

  // We use a convenience alias for code referring to Header from here on out.
  auto &Header = reinterpret_cast<XRayFileHeader &>(HeaderStorage);
  if (B.Data == nullptr && B.Size == 0) {
    Header.FdrData = FdrAdditionalHeaderData{BQ->ConfiguredBufferSize()};
    return XRayBuffer{static_cast<void *>(&Header), sizeof(Header)};
  }

  static BufferQueue::const_iterator It{};
  static BufferQueue::const_iterator End{};
  static void *CurrentBuffer{nullptr};
  if (B.Data == static_cast<void *>(&Header) && B.Size == sizeof(Header)) {
    // From this point on, we provide raw access to the raw buffer we're
    // getting from the BufferQueue. We're relying on the iterators from the
    // current buffer queue.
    It = BQ->cbegin();
    End = BQ->cend();
  }

  if (CurrentBuffer != nullptr) {
    InternalFree(CurrentBuffer);
    CurrentBuffer = nullptr;
  }

  if (It == End)
    return {nullptr, 0};

  // Set up the current buffer to contain the extents like we would when
  // writing out to disk. The difference here would be that we still write
  // "empty" buffers, or at least go through the iterators faithfully to let
  // the handlers see the empty buffers in the queue.
  auto BufferSize = atomic_load(&It->Extents->Size, memory_order_acquire);
  auto SerializedBufferSize = BufferSize + sizeof(MetadataRecord);
  CurrentBuffer = InternalAlloc(SerializedBufferSize);
  if (CurrentBuffer == nullptr)
    return {nullptr, 0};

  // Write out the extents as a Metadata Record into the CurrentBuffer.
  MetadataRecord ExtentsRecord;
  ExtentsRecord.Type = uint8_t(RecordType::Metadata);
  ExtentsRecord.RecordKind =
      uint8_t(MetadataRecord::RecordKinds::BufferExtents);
  internal_memcpy(ExtentsRecord.Data, &BufferSize, sizeof(BufferSize));
  auto AfterExtents =
      static_cast<char *>(internal_memcpy(CurrentBuffer, &ExtentsRecord,
                                          sizeof(MetadataRecord))) +
      sizeof(MetadataRecord);
  internal_memcpy(AfterExtents, It->Data, BufferSize);

  XRayBuffer Result;
  Result.Data = CurrentBuffer;
  Result.Size = SerializedBufferSize;
  ++It;
  return Result;
}
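// A consumer drives this iterator roughly as follows (a sketch; `consume` is
// a hypothetical callback):
//
//   auto B = fdrIterator(XRayBuffer{nullptr, 0}); // returns the file header
//   while (B.Data != nullptr) {
//     consume(B.Data, B.Size);
//     B = fdrIterator(B); // next buffer, or {nullptr, 0} when done
//   }
//
// In practice this happens through __xray_log_process_buffers(...), which
// walks the installed iterator on the caller's behalf.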
// Must finalize before flushing.
XRayLogFlushStatus fdrLoggingFlush() XRAY_NEVER_INSTRUMENT {
  if (atomic_load(&LoggingStatus, memory_order_acquire) !=
      XRayLogInitStatus::XRAY_LOG_FINALIZED) {
    if (Verbosity())
      Report("Not flushing log, implementation is not finalized.\n");
    return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
  }

  s32 Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
  if (!atomic_compare_exchange_strong(&LogFlushStatus, &Result,
                                      XRayLogFlushStatus::XRAY_LOG_FLUSHING,
                                      memory_order_release)) {
    if (Verbosity())
      Report("Not flushing log, implementation is still finalizing.\n");
    return static_cast<XRayLogFlushStatus>(Result);
  }

  if (BQ == nullptr) {
    if (Verbosity())
      Report("Cannot flush when global buffer queue is null.\n");
    return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
  }

  // We wait a number of milliseconds to allow threads to see that we've
  // finalised before attempting to flush the log.
  SleepForMillis(fdrFlags()->grace_period_ms);

  // At this point, we're going to uninstall the iterator implementation, before
  // we decide to do anything further with the global buffer queue.
  __xray_log_remove_buffer_iterator();

  // Once flushed, we should set the global status of the logging implementation
  // to "uninitialized" to allow for FDR-logging multiple runs.
  auto ResetToUninitialized = at_scope_exit([] {
    atomic_store(&LoggingStatus, XRayLogInitStatus::XRAY_LOG_UNINITIALIZED,
                 memory_order_release);
  });

  auto CleanupBuffers = at_scope_exit([] {
    // Remove the thread local data's reference to the BufferQueue.
    auto &TLD = getThreadLocalData();
    if (TLD.RecordPtr != nullptr && TLD.BQ != nullptr)
      releaseThreadLocalBuffer(*TLD.BQ);
    BQ->~BufferQueue();
    InternalFree(BQ);
    BQ = nullptr;
  });

  if (fdrFlags()->no_file_flush) {
    if (Verbosity())
      Report("XRay FDR: Not flushing to file, 'no_file_flush=true'.\n");

    atomic_store(&LogFlushStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED,
                 memory_order_release);
    return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
  }

  // We write out the file in the following format:
  //
  //   1) We write down the XRay file header, with the log version and type
  //      FDR_LOG.
  //   2) Then we use the 'apply' member of the BufferQueue that's live, to
  //      ensure that at this point in time we write down the buffers that have
  //      been released (and marked "used") -- we dump the full buffer for now
  //      (fixed-sized) and let the tools reading the buffers deal with the data
  //      afterwards.
  //
  int Fd = -1;
  {
    // FIXME: Remove this section of the code, when we remove the struct-based
    // configuration API.
    SpinMutexLock Guard(&FDROptionsMutex);
    Fd = FDROptions.Fd;
  }
  if (Fd == -1)
    Fd = getLogFD();
  if (Fd == -1) {
    auto Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
    atomic_store(&LogFlushStatus, Result, memory_order_release);
    return Result;
  }

  XRayFileHeader Header = fdrCommonHeaderInfo();
  Header.FdrData = FdrAdditionalHeaderData{BQ->ConfiguredBufferSize()};
  retryingWriteAll(Fd, reinterpret_cast<char *>(&Header),
                   reinterpret_cast<char *>(&Header) + sizeof(Header));

  BQ->apply([&](const BufferQueue::Buffer &B) {
    // Starting at version 2 of the FDR logging implementation, we only write
    // the records identified by the extents of the buffer. We use the Extents
    // from the Buffer and write that out as the first record in the buffer. We
    // still use a Metadata record, but fill in the extents instead for the
    // data.
    MetadataRecord ExtentsRecord;
    auto BufferExtents = atomic_load(&B.Extents->Size, memory_order_acquire);
    DCHECK(BufferExtents <= B.Size);
    ExtentsRecord.Type = uint8_t(RecordType::Metadata);
    ExtentsRecord.RecordKind =
        uint8_t(MetadataRecord::RecordKinds::BufferExtents);
    internal_memcpy(ExtentsRecord.Data, &BufferExtents, sizeof(BufferExtents));
    if (BufferExtents > 0) {
      retryingWriteAll(Fd, reinterpret_cast<char *>(&ExtentsRecord),
                       reinterpret_cast<char *>(&ExtentsRecord) +
                           sizeof(MetadataRecord));
      retryingWriteAll(Fd, reinterpret_cast<char *>(B.Data),
                       reinterpret_cast<char *>(B.Data) + BufferExtents);
    }
  });

  atomic_store(&LogFlushStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED,
               memory_order_release);
  return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
}
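// Resulting on-disk layout (sketch):
//
//   [XRayFileHeader]
//   [BufferExtents MetadataRecord][<extents> bytes of records]   // buffer 1
//   [BufferExtents MetadataRecord][<extents> bytes of records]   // buffer 2
//   ...
//
// Buffers whose extents are zero are skipped entirely.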
XRayLogInitStatus fdrLoggingFinalize() XRAY_NEVER_INSTRUMENT {
  s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED;
  if (!atomic_compare_exchange_strong(&LoggingStatus, &CurrentStatus,
                                      XRayLogInitStatus::XRAY_LOG_FINALIZING,
                                      memory_order_release)) {
    if (Verbosity())
      Report("Cannot finalize log, implementation not initialized.\n");
    return static_cast<XRayLogInitStatus>(CurrentStatus);
  }

  // Do special things to make the log finalize itself, and not allow any more
  // operations to be performed until re-initialized.
  BQ->finalize();

  atomic_store(&LoggingStatus, XRayLogInitStatus::XRAY_LOG_FINALIZED,
               memory_order_release);
  return XRayLogInitStatus::XRAY_LOG_FINALIZED;
}
struct TSCAndCPU {
  uint64_t TSC = 0;
  unsigned char CPU = 0;
};

static TSCAndCPU getTimestamp() XRAY_NEVER_INSTRUMENT {
  // We want to get the TSC as early as possible, so that we can check whether
  // we've seen this CPU before. We also do it before we load anything else,
  // to allow for forward progress with the scheduling.
  TSCAndCPU Result;

  // Test once for required CPU features.
  static pthread_once_t OnceProbe = PTHREAD_ONCE_INIT;
  static bool TSCSupported = true;
  pthread_once(&OnceProbe, +[] { TSCSupported = probeRequiredCPUFeatures(); });

  if (TSCSupported) {
    Result.TSC = __xray::readTSC(Result.CPU);
  } else {
    // FIXME: This code needs refactoring as it appears in multiple locations.
    timespec TS;
    int result = clock_gettime(CLOCK_REALTIME, &TS);
    if (result != 0) {
      Report("clock_gettime(2) returned %d, errno=%d", result, int(errno));
      TS = {0, 0};
    }
    Result.CPU = 0;
    Result.TSC = TS.tv_sec * __xray::NanosecondsPerSecond + TS.tv_nsec;
  }
  return Result;
}
void fdrLoggingHandleArg0(int32_t FuncId,
                          XRayEntryType Entry) XRAY_NEVER_INSTRUMENT {
  auto TC = getTimestamp();
  processFunctionHook(FuncId, Entry, TC.TSC, TC.CPU, 0, clock_gettime);
}

void fdrLoggingHandleArg1(int32_t FuncId, XRayEntryType Entry,
                          uint64_t Arg) XRAY_NEVER_INSTRUMENT {
  auto TC = getTimestamp();
  processFunctionHook(FuncId, Entry, TC.TSC, TC.CPU, Arg, clock_gettime);
}
void fdrLoggingHandleCustomEvent(void *Event,
                                 std::size_t EventSize) XRAY_NEVER_INSTRUMENT {
  auto TC = getTimestamp();
  auto &TSC = TC.TSC;
  auto &CPU = TC.CPU;
  RecursionGuard Guard{Running};
  if (!Guard)
    return;
  if (EventSize > std::numeric_limits<int32_t>::max()) {
    static pthread_once_t Once = PTHREAD_ONCE_INIT;
    pthread_once(&Once, +[] { Report("Event size too large.\n"); });
  }
  int32_t ReducedEventSize = static_cast<int32_t>(EventSize);
  auto &TLD = getThreadLocalData();
  if (!isLogInitializedAndReady(TLD.BQ, TSC, CPU, clock_gettime))
    return;

  // Here we need to prepare the log to handle:
  //   - The metadata record we're going to write. (16 bytes)
  //   - The additional data we're going to write. Currently, that's the size
  //     of the event we're going to dump into the log as free-form bytes.
  if (!prepareBuffer(TSC, CPU, clock_gettime, MetadataRecSize + EventSize)) {
    TLD.BQ = nullptr;
    return;
  }

  // Write the custom event metadata record, which consists of the following
  // information:
  //   - 8 bytes (64-bits) for the full TSC when the event started.
  //   - 4 bytes (32-bits) for the length of the data.
  MetadataRecord CustomEvent;
  CustomEvent.Type = uint8_t(RecordType::Metadata);
  CustomEvent.RecordKind =
      uint8_t(MetadataRecord::RecordKinds::CustomEventMarker);
  constexpr auto TSCSize = sizeof(TC.TSC);
  internal_memcpy(&CustomEvent.Data, &ReducedEventSize, sizeof(int32_t));
  internal_memcpy(&CustomEvent.Data[sizeof(int32_t)], &TSC, TSCSize);
  internal_memcpy(TLD.RecordPtr, &CustomEvent, sizeof(CustomEvent));
  TLD.RecordPtr += sizeof(CustomEvent);
  internal_memcpy(TLD.RecordPtr, Event, ReducedEventSize);
  // Advance past the event payload as well, so subsequent records do not
  // clobber it (the extents below already account for the payload).
  TLD.RecordPtr += ReducedEventSize;
  incrementExtents(MetadataRecSize + EventSize);
  endBufferIfFull();
}
void fdrLoggingHandleTypedEvent(
    uint16_t EventType, const void *Event,
    std::size_t EventSize) noexcept XRAY_NEVER_INSTRUMENT {
  auto TC = getTimestamp();
  auto &TSC = TC.TSC;
  auto &CPU = TC.CPU;
  RecursionGuard Guard{Running};
  if (!Guard)
    return;
  if (EventSize > std::numeric_limits<int32_t>::max()) {
    static pthread_once_t Once = PTHREAD_ONCE_INIT;
    pthread_once(&Once, +[] { Report("Event size too large.\n"); });
  }
  int32_t ReducedEventSize = static_cast<int32_t>(EventSize);
  auto &TLD = getThreadLocalData();
  if (!isLogInitializedAndReady(TLD.BQ, TSC, CPU, clock_gettime))
    return;

  // Here we need to prepare the log to handle:
  //   - The metadata record we're going to write. (16 bytes)
  //   - The additional data we're going to write. Currently, that's the size
  //     of the event we're going to dump into the log as free-form bytes.
  if (!prepareBuffer(TSC, CPU, clock_gettime, MetadataRecSize + EventSize)) {
    TLD.BQ = nullptr;
    return;
  }

  // Write the typed event metadata record, which consists of the following
  // information:
  //   - 8 bytes (64-bits) for the full TSC when the event started.
  //   - 4 bytes (32-bits) for the length of the data.
  //   - 2 bytes (16-bits) for the event type. 3 bytes remain since one of the
  //     bytes has the record type (Metadata Record) and kind (TypedEvent).
  //     We'll log the error if the event type is greater than 2 bytes.
  //     Event types are generated sequentially, so 2^16 is enough.
  MetadataRecord TypedEvent;
  TypedEvent.Type = uint8_t(RecordType::Metadata);
  TypedEvent.RecordKind =
      uint8_t(MetadataRecord::RecordKinds::TypedEventMarker);
  constexpr auto TSCSize = sizeof(TC.TSC);
  internal_memcpy(&TypedEvent.Data, &ReducedEventSize, sizeof(int32_t));
  internal_memcpy(&TypedEvent.Data[sizeof(int32_t)], &TSC, TSCSize);
  internal_memcpy(&TypedEvent.Data[sizeof(int32_t) + TSCSize], &EventType,
                  sizeof(EventType));
  internal_memcpy(TLD.RecordPtr, &TypedEvent, sizeof(TypedEvent));

  TLD.RecordPtr += sizeof(TypedEvent);
  internal_memcpy(TLD.RecordPtr, Event, ReducedEventSize);
  // Advance past the event payload as well, so subsequent records do not
  // clobber it (the extents below already account for the payload).
  TLD.RecordPtr += ReducedEventSize;
  incrementExtents(MetadataRecSize + EventSize);
  endBufferIfFull();
}
XRayLogInitStatus fdrLoggingInit(size_t BufferSize, size_t BufferMax,
                                 void *Options,
                                 size_t OptionsSize) XRAY_NEVER_INSTRUMENT {
  if (Options == nullptr)
    return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;

  s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
  if (!atomic_compare_exchange_strong(&LoggingStatus, &CurrentStatus,
                                      XRayLogInitStatus::XRAY_LOG_INITIALIZING,
                                      memory_order_release)) {
    if (Verbosity())
      Report("Cannot initialize already initialized implementation.\n");
    return static_cast<XRayLogInitStatus>(CurrentStatus);
  }

  // Because of __xray_log_init_mode(...) which guarantees that this will be
  // called with BufferSize == 0 and BufferMax == 0 we parse the configuration
  // provided in the Options pointer as a string instead.
  if (BufferSize == 0 && BufferMax == 0) {
    if (Verbosity())
      Report("Initializing FDR mode with options: %s\n",
             static_cast<const char *>(Options));

    // TODO: Factor out the flags specific to the FDR mode implementation. For
    // now, use the global/single definition of the flags, since the FDR mode
    // flags are already defined there.
    FlagParser FDRParser;
    FDRFlags FDRFlags;
    registerXRayFDRFlags(&FDRParser, &FDRFlags);
    FDRFlags.setDefaults();

    // Override first from the general XRAY_DEFAULT_OPTIONS compiler-provided
    // options until we migrate everyone to use the XRAY_FDR_OPTIONS
    // compiler-provided options.
    FDRParser.ParseString(useCompilerDefinedFlags());
    FDRParser.ParseString(useCompilerDefinedFDRFlags());
    auto *EnvOpts = GetEnv("XRAY_FDR_OPTIONS");
    if (EnvOpts == nullptr)
      EnvOpts = "";
    FDRParser.ParseString(EnvOpts);

    // FIXME: Remove this when we fully remove the deprecated flags.
    if (internal_strlen(EnvOpts) == 0) {
      FDRFlags.func_duration_threshold_us =
          flags()->xray_fdr_log_func_duration_threshold_us;
      FDRFlags.grace_period_ms = flags()->xray_fdr_log_grace_period_ms;
    }

    // The provided options should always override the compiler-provided and
    // environment-variable defined options.
    FDRParser.ParseString(static_cast<const char *>(Options));
    *fdrFlags() = FDRFlags;
    BufferSize = FDRFlags.buffer_size;
    BufferMax = FDRFlags.buffer_max;
    SpinMutexLock Guard(&FDROptionsMutex);
    FDROptions.Fd = -1;
    FDROptions.ReportErrors = true;
  } else if (OptionsSize != sizeof(FDRLoggingOptions)) {
    // FIXME: This is deprecated, and should really be removed.
    // At this point we use the flag parser specific to the FDR mode
    // implementation.
    if (Verbosity())
      Report("Cannot initialize FDR logging; wrong size for options: %d\n",
             OptionsSize);
    return static_cast<XRayLogInitStatus>(
        atomic_load(&LoggingStatus, memory_order_acquire));
  } else {
    if (Verbosity())
      Report("XRay FDR: struct-based init is deprecated, please use "
             "string-based configuration instead.\n");
    SpinMutexLock Guard(&FDROptionsMutex);
    internal_memcpy(&FDROptions, Options, OptionsSize);
  }

  bool Success = false;

  if (BQ != nullptr) {
    BQ->~BufferQueue();
    InternalFree(BQ);
    BQ = nullptr;
  }

  if (BQ == nullptr) {
    BQ = reinterpret_cast<BufferQueue *>(
        InternalAlloc(sizeof(BufferQueue), nullptr, 64));
    new (BQ) BufferQueue(BufferSize, BufferMax, Success);
  }

  if (!Success) {
    Report("BufferQueue init failed.\n");
    if (BQ != nullptr) {
      BQ->~BufferQueue();
      InternalFree(BQ);
      BQ = nullptr;
    }
    return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
  }

  static pthread_once_t OnceInit = PTHREAD_ONCE_INIT;
  pthread_once(&OnceInit, +[] {
    atomic_store(&TicksPerSec,
                 probeRequiredCPUFeatures() ? getTSCFrequency()
                                            : __xray::NanosecondsPerSecond,
                 memory_order_release);
    pthread_key_create(&Key, +[](void *TLDPtr) {
      if (TLDPtr == nullptr)
        return;
      auto &TLD = *reinterpret_cast<ThreadLocalData *>(TLDPtr);
      if (TLD.BQ == nullptr)
        return;
      auto EC = TLD.BQ->releaseBuffer(TLD.Buffer);
      if (EC != BufferQueue::ErrorCode::Ok)
        Report("At thread exit, failed to release buffer at %p; error=%s\n",
               TLD.Buffer.Data, BufferQueue::getErrorString(EC));
    });
  });

  atomic_store(&ThresholdTicks,
               atomic_load_relaxed(&TicksPerSec) *
                   fdrFlags()->func_duration_threshold_us / 1000000,
               memory_order_release);
  // Arg1 handler should go in first to avoid concurrent code accidentally
  // falling back to arg0 when it should have run arg1.
  __xray_set_handler_arg1(fdrLoggingHandleArg1);
  // Install the actual handleArg0 handler after initialising the buffers.
  __xray_set_handler(fdrLoggingHandleArg0);
  __xray_set_customevent_handler(fdrLoggingHandleCustomEvent);
  __xray_set_typedevent_handler(fdrLoggingHandleTypedEvent);

  // Install the buffer iterator implementation.
  __xray_log_set_buffer_iterator(fdrIterator);

  atomic_store(&LoggingStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZED,
               memory_order_release);

  if (Verbosity())
    Report("XRay FDR init successful.\n");
  return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
}
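// Typical string-based initialization through the common XRay log interface
// (a sketch; the flag values are illustrative):
//
//   __xray_log_select_mode("xray-fdr"); // after registration, see below
//   __xray_log_init_mode("xray-fdr",
//                        "buffer_size=16384:buffer_max=10:no_file_flush=true");
//
// which reaches fdrLoggingInit with BufferSize == 0 and BufferMax == 0, and
// the configuration string in Options.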
bool fdrLogDynamicInitializer() XRAY_NEVER_INSTRUMENT {
  XRayLogImpl Impl{
      fdrLoggingInit,
      fdrLoggingFinalize,
      fdrLoggingHandleArg0,
      fdrLoggingFlush,
  };
  auto RegistrationResult = __xray_log_register_mode("xray-fdr", Impl);
  if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK &&
      Verbosity())
    Report("Cannot register XRay FDR mode to 'xray-fdr'; error = %d\n",
           RegistrationResult);
  if (flags()->xray_fdr_log ||
      !internal_strcmp(flags()->xray_mode, "xray-fdr"))
    __xray_set_log_impl(Impl);
  return true;
}

} // namespace __xray

static auto UNUSED Unused = __xray::fdrLogDynamicInitializer();