1 //===-- xray_fdr_logging_impl.h ---------------------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of XRay, a dynamic runtime instrumentation system.
12 // Here we implement the thread local state management and record i/o for Flight
13 // Data Recorder mode for XRay, where we use compact structures to store records
14 // in memory as well as when writing out the data to files.
16 //===----------------------------------------------------------------------===//
17 #ifndef XRAY_XRAY_FDR_LOGGING_IMPL_H
18 #define XRAY_XRAY_FDR_LOGGING_IMPL_H
25 #include <sys/syscall.h>
27 #include <type_traits>
30 #include "sanitizer_common/sanitizer_common.h"
31 #include "xray/xray_log_interface.h"
32 #include "xray_buffer_queue.h"
33 #include "xray_defs.h"
34 #include "xray_fdr_log_records.h"
35 #include "xray_flags.h"
// Global FDR logging status; holds one of the XRayLogInitStatus states.
// Stored as a __sanitizer atomic so instrumentation handlers can poll it
// without taking a lock.
__sanitizer::atomic_sint32_t LoggingStatus = {
    XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};
43 /// We expose some of the state transitions when FDR logging mode is operating
/// such that we can simulate a series of log events that may occur, and test
/// with determinism, without worrying about the real CPU time.
47 /// Because the code uses thread_local allocation extensively as part of its
/// design, callers that wish to test events occurring on different threads
49 /// will actually have to run them on different threads.
51 /// This also means that it is possible to break invariants maintained by
52 /// cooperation with xray_fdr_logging class, so be careful and think twice.
53 namespace __xray_fdr_internal {
55 /// Writes the new buffer record and wallclock time that begin a buffer for the
57 static void writeNewBufferPreamble(pid_t Tid, timespec TS);
59 /// Writes a Function Record to the buffer associated with the current thread.
60 static void writeFunctionRecord(int FuncId, uint32_t TSCDelta,
61 XRayEntryType EntryType);
63 /// Sets up a new buffer in thread_local storage and writes a preamble. The
64 /// wall_clock_reader function is used to populate the WallTimeRecord entry.
65 static void setupNewBuffer(int (*wall_clock_reader)(clockid_t,
68 /// TSC Wrap records are written when a TSC delta encoding scheme overflows.
69 static void writeTSCWrapMetadata(uint64_t TSC);
71 // Group together thread-local-data in a struct, then hide it behind a function
72 // call so that it can be initialized on first use instead of as a global. We
73 // force the alignment to 64-bytes for x86 cache line alignment, as this
74 // structure is used in the hot path of implementation.
75 struct alignas(64) ThreadLocalData {
76 BufferQueue::Buffer Buffer;
77 char *RecordPtr = nullptr;
78 // The number of FunctionEntry records immediately preceding RecordPtr.
79 uint8_t NumConsecutiveFnEnters = 0;
81 // The number of adjacent, consecutive pairs of FunctionEntry, Tail Exit
82 // records preceding RecordPtr.
83 uint8_t NumTailCalls = 0;
85 // We use a thread_local variable to keep track of which CPUs we've already
86 // run, and the TSC times for these CPUs. This allows us to stop repeating the
87 // CPU field in the function records.
89 // We assume that we'll support only 65536 CPUs for x86_64.
90 uint16_t CurrentCPU = std::numeric_limits<uint16_t>::max();
92 uint64_t LastFunctionEntryTSC = 0;
94 // Make sure a thread that's ever called handleArg0 has a thread-local
95 // live reference to the buffer queue for this particular instance of
96 // FDRLogging, and that we're going to clean it up when the thread exits.
97 BufferQueue *BQ = nullptr;
100 static_assert(std::is_trivially_destructible<ThreadLocalData>::value,
101 "ThreadLocalData must be trivially destructible");
// Record sizes are fixed by the FDR binary format; cached here for the pointer
// arithmetic done by the writer functions below.
static constexpr auto MetadataRecSize = sizeof(MetadataRecord);
static constexpr auto FunctionRecSize = sizeof(FunctionRecord);

// Use a global pthread key to identify thread-local data for logging.
// NOTE(review): the key is presumably created (with a destructor) elsewhere in
// this file or its .cc counterpart — confirm before relying on cleanup.
static pthread_key_t Key;
109 // This function will initialize the thread-local data structure used by the FDR
110 // logging implementation and return a reference to it. The implementation
111 // details require a bit of care to maintain.
113 // First, some requirements on the implementation in general:
115 // - XRay handlers should not call any memory allocation routines that may
116 // delegate to an instrumented implementation. This means functions like
117 // malloc() and free() should not be called while instrumenting.
119 // - We would like to use some thread-local data initialized on first-use of
120 // the XRay instrumentation. These allow us to implement unsynchronized
121 // routines that access resources associated with the thread.
123 // The implementation here uses a few mechanisms that allow us to provide both
124 // the requirements listed above. We do this by:
126 // 1. Using a thread-local aligned storage buffer for representing the
127 // ThreadLocalData struct. This data will be uninitialized memory by
130 // 2. Not requiring a thread exit handler/implementation, keeping the
131 // thread-local as purely a collection of references/data that do not
134 // We're doing this to avoid using a `thread_local` object that has a
135 // non-trivial destructor, because the C++ runtime might call std::malloc(...)
136 // to register calls to destructors. Deadlocks may arise when, for example, an
137 // externally provided malloc implementation is XRay instrumented, and
138 // initializing the thread-locals involves calling into malloc. A malloc
139 // implementation that does global synchronization might be holding a lock for a
140 // critical section, calling a function that might be XRay instrumented (and
141 // thus in turn calling into malloc by virtue of registration of the
142 // thread_local's destructor).
143 static ThreadLocalData &getThreadLocalData() {
144 static_assert(alignof(ThreadLocalData) >= 64,
145 "ThreadLocalData must be cache line aligned.");
146 thread_local ThreadLocalData TLD;
147 thread_local bool UNUSED ThreadOnce = [] {
148 pthread_setspecific(Key, &TLD);
154 //-----------------------------------------------------------------------------|
155 // The rest of the file is implementation. |
156 //-----------------------------------------------------------------------------|
157 // Functions are implemented in the header for inlining since we don't want |
158 // to grow the stack when we've hijacked the binary for logging. |
159 //-----------------------------------------------------------------------------|
// RAII latch over a thread-local `volatile bool`. Construction succeeds
// (Valid == true) only if the flag was clear; the flag is then held set until
// destruction. Used to suppress re-entrant logging on the same thread.
class RecursionGuard {
  volatile bool &Running;
  const bool Valid;

public:
  explicit RecursionGuard(volatile bool &R) : Running(R), Valid(!R) {
    if (Valid)
      Running = true;
  }

  RecursionGuard(const RecursionGuard &) = delete;
  RecursionGuard(RecursionGuard &&) = delete;
  RecursionGuard &operator=(const RecursionGuard &) = delete;
  RecursionGuard &operator=(RecursionGuard &&) = delete;

  // True when this guard acquired the latch and the caller may proceed.
  explicit operator bool() const { return Valid; }

  ~RecursionGuard() noexcept {
    if (Valid)
      Running = false;
  }
};
188 static void writeNewBufferPreamble(pid_t Tid,
189 timespec TS) XRAY_NEVER_INSTRUMENT {
190 static constexpr int InitRecordsCount = 2;
191 auto &TLD = getThreadLocalData();
192 MetadataRecord Metadata[InitRecordsCount];
194 // Write out a MetadataRecord to signify that this is the start of a new
195 // buffer, associated with a particular thread, with a new CPU. For the
196 // data, we have 15 bytes to squeeze as much information as we can. At this
197 // point we only write down the following bytes:
198 // - Thread ID (pid_t, 4 bytes)
199 auto &NewBuffer = Metadata[0];
200 NewBuffer.Type = uint8_t(RecordType::Metadata);
201 NewBuffer.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewBuffer);
202 std::memcpy(&NewBuffer.Data, &Tid, sizeof(pid_t));
205 // Also write the WalltimeMarker record.
207 static_assert(sizeof(time_t) <= 8, "time_t needs to be at most 8 bytes");
208 auto &WalltimeMarker = Metadata[1];
209 WalltimeMarker.Type = uint8_t(RecordType::Metadata);
210 WalltimeMarker.RecordKind =
211 uint8_t(MetadataRecord::RecordKinds::WalltimeMarker);
213 // We only really need microsecond precision here, and enforce across
214 // platforms that we need 64-bit seconds and 32-bit microseconds encoded in
215 // the Metadata record.
216 int32_t Micros = TS.tv_nsec / 1000;
217 int64_t Seconds = TS.tv_sec;
218 std::memcpy(WalltimeMarker.Data, &Seconds, sizeof(Seconds));
219 std::memcpy(WalltimeMarker.Data + sizeof(Seconds), &Micros, sizeof(Micros));
222 TLD.NumConsecutiveFnEnters = 0;
223 TLD.NumTailCalls = 0;
224 if (TLD.BQ == nullptr || TLD.BQ->finalizing())
226 std::memcpy(TLD.RecordPtr, Metadata, sizeof(Metadata));
227 TLD.RecordPtr += sizeof(Metadata);
228 // Since we write out the extents as the first metadata record of the
229 // buffer, we need to write out the extents including the extents record.
230 __sanitizer::atomic_store(&TLD.Buffer.Extents->Size, sizeof(Metadata),
231 __sanitizer::memory_order_release);
234 inline void setupNewBuffer(int (*wall_clock_reader)(
235 clockid_t, struct timespec *)) XRAY_NEVER_INSTRUMENT {
236 auto &TLD = getThreadLocalData();
237 auto &B = TLD.Buffer;
238 TLD.RecordPtr = static_cast<char *>(B.Buffer);
239 pid_t Tid = syscall(SYS_gettid);
241 // This is typically clock_gettime, but callers have injection ability.
242 wall_clock_reader(CLOCK_MONOTONIC, &TS);
243 writeNewBufferPreamble(Tid, TS);
244 TLD.NumConsecutiveFnEnters = 0;
245 TLD.NumTailCalls = 0;
248 static void incrementExtents(size_t Add) {
249 auto &TLD = getThreadLocalData();
250 __sanitizer::atomic_fetch_add(&TLD.Buffer.Extents->Size, Add,
251 __sanitizer::memory_order_acq_rel);
254 static void decrementExtents(size_t Subtract) {
255 auto &TLD = getThreadLocalData();
256 __sanitizer::atomic_fetch_sub(&TLD.Buffer.Extents->Size, Subtract,
257 __sanitizer::memory_order_acq_rel);
260 inline void writeNewCPUIdMetadata(uint16_t CPU,
261 uint64_t TSC) XRAY_NEVER_INSTRUMENT {
262 auto &TLD = getThreadLocalData();
263 MetadataRecord NewCPUId;
264 NewCPUId.Type = uint8_t(RecordType::Metadata);
265 NewCPUId.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewCPUId);
267 // The data for the New CPU will contain the following bytes:
268 // - CPU ID (uint16_t, 2 bytes)
269 // - Full TSC (uint64_t, 8 bytes)
271 std::memcpy(&NewCPUId.Data, &CPU, sizeof(CPU));
272 std::memcpy(&NewCPUId.Data[sizeof(CPU)], &TSC, sizeof(TSC));
273 std::memcpy(TLD.RecordPtr, &NewCPUId, sizeof(MetadataRecord));
274 TLD.RecordPtr += sizeof(MetadataRecord);
275 TLD.NumConsecutiveFnEnters = 0;
276 TLD.NumTailCalls = 0;
277 incrementExtents(sizeof(MetadataRecord));
280 inline void writeTSCWrapMetadata(uint64_t TSC) XRAY_NEVER_INSTRUMENT {
281 auto &TLD = getThreadLocalData();
282 MetadataRecord TSCWrap;
283 TSCWrap.Type = uint8_t(RecordType::Metadata);
284 TSCWrap.RecordKind = uint8_t(MetadataRecord::RecordKinds::TSCWrap);
286 // The data for the TSCWrap record contains the following bytes:
287 // - Full TSC (uint64_t, 8 bytes)
289 std::memcpy(&TSCWrap.Data, &TSC, sizeof(TSC));
290 std::memcpy(TLD.RecordPtr, &TSCWrap, sizeof(MetadataRecord));
291 TLD.RecordPtr += sizeof(MetadataRecord);
292 TLD.NumConsecutiveFnEnters = 0;
293 TLD.NumTailCalls = 0;
294 incrementExtents(sizeof(MetadataRecord));
297 // Call Argument metadata records store the arguments to a function in the
298 // order of their appearance; holes are not supported by the buffer format.
299 static inline void writeCallArgumentMetadata(uint64_t A) XRAY_NEVER_INSTRUMENT {
300 auto &TLD = getThreadLocalData();
301 MetadataRecord CallArg;
302 CallArg.Type = uint8_t(RecordType::Metadata);
303 CallArg.RecordKind = uint8_t(MetadataRecord::RecordKinds::CallArgument);
305 std::memcpy(CallArg.Data, &A, sizeof(A));
306 std::memcpy(TLD.RecordPtr, &CallArg, sizeof(MetadataRecord));
307 TLD.RecordPtr += sizeof(MetadataRecord);
308 incrementExtents(sizeof(MetadataRecord));
312 writeFunctionRecord(int FuncId, uint32_t TSCDelta,
313 XRayEntryType EntryType) XRAY_NEVER_INSTRUMENT {
314 FunctionRecord FuncRecord;
315 FuncRecord.Type = uint8_t(RecordType::Function);
316 // Only take 28 bits of the function id.
317 FuncRecord.FuncId = FuncId & ~(0x0F << 28);
318 FuncRecord.TSCDelta = TSCDelta;
320 auto &TLD = getThreadLocalData();
322 case XRayEntryType::ENTRY:
323 ++TLD.NumConsecutiveFnEnters;
324 FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter);
326 case XRayEntryType::LOG_ARGS_ENTRY:
327 // We should not rewind functions with logged args.
328 TLD.NumConsecutiveFnEnters = 0;
329 TLD.NumTailCalls = 0;
330 FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter);
332 case XRayEntryType::EXIT:
333 // If we've decided to log the function exit, we will never erase the log
335 TLD.NumConsecutiveFnEnters = 0;
336 TLD.NumTailCalls = 0;
337 FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionExit);
339 case XRayEntryType::TAIL:
340 // If we just entered the function we're tail exiting from or erased every
341 // invocation since then, this function entry tail pair is a candidate to
342 // be erased when the child function exits.
343 if (TLD.NumConsecutiveFnEnters > 0) {
345 TLD.NumConsecutiveFnEnters = 0;
347 // We will never be able to erase this tail call since we have logged
348 // something in between the function entry and tail exit.
349 TLD.NumTailCalls = 0;
350 TLD.NumConsecutiveFnEnters = 0;
352 FuncRecord.RecordKind =
353 uint8_t(FunctionRecord::RecordKinds::FunctionTailExit);
355 case XRayEntryType::CUSTOM_EVENT: {
356 // This is a bug in patching, so we'll report it once and move on.
357 static bool Once = [&] {
358 Report("Internal error: patched an XRay custom event call as a function; "
368 std::memcpy(TLD.RecordPtr, &FuncRecord, sizeof(FunctionRecord));
369 TLD.RecordPtr += sizeof(FunctionRecord);
370 incrementExtents(sizeof(FunctionRecord));
373 static uint64_t thresholdTicks() {
374 static uint64_t TicksPerSec = probeRequiredCPUFeatures()
376 : __xray::NanosecondsPerSecond;
377 static const uint64_t ThresholdTicks =
378 TicksPerSec * flags()->xray_fdr_log_func_duration_threshold_us / 1000000;
379 return ThresholdTicks;
382 // Re-point the thread local pointer into this thread's Buffer before the recent
383 // "Function Entry" record and any "Tail Call Exit" records after that.
384 static void rewindRecentCall(uint64_t TSC, uint64_t &LastTSC,
385 uint64_t &LastFunctionEntryTSC, int32_t FuncId) {
386 auto &TLD = getThreadLocalData();
387 TLD.RecordPtr -= FunctionRecSize;
388 decrementExtents(FunctionRecSize);
389 FunctionRecord FuncRecord;
390 std::memcpy(&FuncRecord, TLD.RecordPtr, FunctionRecSize);
391 assert(FuncRecord.RecordKind ==
392 uint8_t(FunctionRecord::RecordKinds::FunctionEnter) &&
393 "Expected to find function entry recording when rewinding.");
394 assert(FuncRecord.FuncId == (FuncId & ~(0x0F << 28)) &&
395 "Expected matching function id when rewinding Exit");
396 --TLD.NumConsecutiveFnEnters;
397 LastTSC -= FuncRecord.TSCDelta;
399 // We unwound one call. Update the state and return without writing a log.
400 if (TLD.NumConsecutiveFnEnters != 0) {
401 LastFunctionEntryTSC -= FuncRecord.TSCDelta;
405 // Otherwise we've rewound the stack of all function entries, we might be
406 // able to rewind further by erasing tail call functions that are being
407 // exited from via this exit.
408 LastFunctionEntryTSC = 0;
409 auto RewindingTSC = LastTSC;
410 auto RewindingRecordPtr = TLD.RecordPtr - FunctionRecSize;
411 while (TLD.NumTailCalls > 0) {
412 // Rewind the TSC back over the TAIL EXIT record.
413 FunctionRecord ExpectedTailExit;
414 std::memcpy(&ExpectedTailExit, RewindingRecordPtr, FunctionRecSize);
416 assert(ExpectedTailExit.RecordKind ==
417 uint8_t(FunctionRecord::RecordKinds::FunctionTailExit) &&
418 "Expected to find tail exit when rewinding.");
419 RewindingRecordPtr -= FunctionRecSize;
420 RewindingTSC -= ExpectedTailExit.TSCDelta;
421 FunctionRecord ExpectedFunctionEntry;
422 std::memcpy(&ExpectedFunctionEntry, RewindingRecordPtr, FunctionRecSize);
423 assert(ExpectedFunctionEntry.RecordKind ==
424 uint8_t(FunctionRecord::RecordKinds::FunctionEnter) &&
425 "Expected to find function entry when rewinding tail call.");
426 assert(ExpectedFunctionEntry.FuncId == ExpectedTailExit.FuncId &&
427 "Expected funcids to match when rewinding tail call.");
429 // This tail call exceeded the threshold duration. It will not be erased.
430 if ((TSC - RewindingTSC) >= thresholdTicks()) {
431 TLD.NumTailCalls = 0;
435 // We can erase a tail exit pair that we're exiting through since
436 // its duration is under threshold.
438 RewindingRecordPtr -= FunctionRecSize;
439 RewindingTSC -= ExpectedFunctionEntry.TSCDelta;
440 TLD.RecordPtr -= 2 * FunctionRecSize;
441 LastTSC = RewindingTSC;
442 decrementExtents(2 * FunctionRecSize);
446 inline bool releaseThreadLocalBuffer(BufferQueue &BQArg) {
447 auto &TLD = getThreadLocalData();
448 auto EC = BQArg.releaseBuffer(TLD.Buffer);
449 if (EC != BufferQueue::ErrorCode::Ok) {
450 Report("Failed to release buffer at %p; error=%s\n", TLD.Buffer.Buffer,
451 BufferQueue::getErrorString(EC));
457 inline bool prepareBuffer(uint64_t TSC, unsigned char CPU,
458 int (*wall_clock_reader)(clockid_t,
460 size_t MaxSize) XRAY_NEVER_INSTRUMENT {
461 auto &TLD = getThreadLocalData();
462 char *BufferStart = static_cast<char *>(TLD.Buffer.Buffer);
463 if ((TLD.RecordPtr + MaxSize) > (BufferStart + TLD.Buffer.Size)) {
464 if (!releaseThreadLocalBuffer(*TLD.BQ))
466 auto EC = TLD.BQ->getBuffer(TLD.Buffer);
467 if (EC != BufferQueue::ErrorCode::Ok) {
468 Report("Failed to acquire a buffer; error=%s\n",
469 BufferQueue::getErrorString(EC));
472 setupNewBuffer(wall_clock_reader);
474 // Always write the CPU metadata as the first record in the buffer.
475 writeNewCPUIdMetadata(CPU, TSC);
481 isLogInitializedAndReady(BufferQueue *LBQ, uint64_t TSC, unsigned char CPU,
482 int (*wall_clock_reader)(clockid_t, struct timespec *))
483 XRAY_NEVER_INSTRUMENT {
484 // Bail out right away if logging is not initialized yet.
485 // We should take the opportunity to release the buffer though.
486 auto Status = __sanitizer::atomic_load(&LoggingStatus,
487 __sanitizer::memory_order_acquire);
488 auto &TLD = getThreadLocalData();
489 if (Status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) {
490 if (TLD.RecordPtr != nullptr &&
491 (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING ||
492 Status == XRayLogInitStatus::XRAY_LOG_FINALIZED)) {
493 if (!releaseThreadLocalBuffer(*LBQ))
495 TLD.RecordPtr = nullptr;
501 if (__sanitizer::atomic_load(&LoggingStatus,
502 __sanitizer::memory_order_acquire) !=
503 XRayLogInitStatus::XRAY_LOG_INITIALIZED ||
505 if (!releaseThreadLocalBuffer(*LBQ))
507 TLD.RecordPtr = nullptr;
510 if (TLD.Buffer.Buffer == nullptr) {
511 auto EC = LBQ->getBuffer(TLD.Buffer);
512 if (EC != BufferQueue::ErrorCode::Ok) {
513 auto LS = __sanitizer::atomic_load(&LoggingStatus,
514 __sanitizer::memory_order_acquire);
515 if (LS != XRayLogInitStatus::XRAY_LOG_FINALIZING &&
516 LS != XRayLogInitStatus::XRAY_LOG_FINALIZED)
517 Report("Failed to acquire a buffer; error=%s\n",
518 BufferQueue::getErrorString(EC));
522 setupNewBuffer(wall_clock_reader);
524 // Always write the CPU metadata as the first record in the buffer.
525 writeNewCPUIdMetadata(CPU, TSC);
528 if (TLD.CurrentCPU == std::numeric_limits<uint16_t>::max()) {
529 // This means this is the first CPU this thread has ever run on. We set
530 // the current CPU and record this as the first TSC we've seen.
531 TLD.CurrentCPU = CPU;
532 writeNewCPUIdMetadata(CPU, TSC);
536 } // namespace __xray_fdr_internal
538 // Compute the TSC difference between the time of measurement and the previous
539 // event. There are a few interesting situations we need to account for:
541 // - The thread has migrated to a different CPU. If this is the case, then
542 // we write down the following records:
544 // 1. A 'NewCPUId' Metadata record.
545 // 2. A FunctionRecord with a 0 for the TSCDelta field.
547 // - The TSC delta is greater than the 32 bits we can store in a
548 // FunctionRecord. In this case we write down the following records:
550 // 1. A 'TSCWrap' Metadata record.
551 // 2. A FunctionRecord with a 0 for the TSCDelta field.
553 // - The TSC delta is representable within the 32 bits we can store in a
554 // FunctionRecord. In this case we write down just a FunctionRecord with
555 // the correct TSC delta.
556 inline uint32_t writeCurrentCPUTSC(ThreadLocalData &TLD, uint64_t TSC,
558 if (CPU != TLD.CurrentCPU) {
559 // We've moved to a new CPU.
560 writeNewCPUIdMetadata(CPU, TSC);
563 // If the delta is greater than the range for a uint32_t, then we write out
564 // the TSC wrap metadata entry with the full TSC, and the TSC for the
565 // function record be 0.
566 uint64_t Delta = TSC - TLD.LastTSC;
567 if (Delta <= std::numeric_limits<uint32_t>::max())
570 writeTSCWrapMetadata(TSC);
574 inline void endBufferIfFull() XRAY_NEVER_INSTRUMENT {
575 auto &TLD = getThreadLocalData();
576 auto BufferStart = static_cast<char *>(TLD.Buffer.Buffer);
577 if ((TLD.RecordPtr + MetadataRecSize) - BufferStart <=
578 ptrdiff_t{MetadataRecSize}) {
579 if (!releaseThreadLocalBuffer(*TLD.BQ))
581 TLD.RecordPtr = nullptr;
// Per-thread recursion latch used with RecursionGuard: set while this thread
// is inside the log-writing path so re-entrant calls (e.g. from an
// instrumented signal handler) bail out instead of clobbering partial writes.
thread_local volatile bool Running = false;
587 /// Here's where the meat of the processing happens. The writer captures
588 /// function entry, exit and tail exit points with a time and will create
589 /// TSCWrap, NewCPUId and Function records as necessary. The writer might
590 /// walk backward through its buffer and erase trivial functions to avoid
591 /// polluting the log and may use the buffer queue to obtain or release a
593 inline void processFunctionHook(int32_t FuncId, XRayEntryType Entry,
594 uint64_t TSC, unsigned char CPU, uint64_t Arg1,
595 int (*wall_clock_reader)(clockid_t,
597 BufferQueue *BQ) XRAY_NEVER_INSTRUMENT {
598 // Prevent signal handler recursion, so in case we're already in a log writing
599 // mode and the signal handler comes in (and is also instrumented) then we
600 // don't want to be clobbering potentially partial writes already happening in
601 // the thread. We use a simple thread_local latch to only allow one on-going
602 // handleArg0 to happen at any given time.
603 RecursionGuard Guard{Running};
605 assert(Running == true && "RecursionGuard is buggy!");
609 auto &TLD = getThreadLocalData();
611 // In case the reference has been cleaned up before, we make sure we
612 // initialize it to the provided BufferQueue.
613 if (TLD.BQ == nullptr)
616 if (!isLogInitializedAndReady(TLD.BQ, TSC, CPU, wall_clock_reader))
619 // Before we go setting up writing new function entries, we need to be really
620 // careful about the pointer math we're doing. This means we need to ensure
621 // that the record we are about to write is going to fit into the buffer,
622 // without overflowing the buffer.
624 // To do this properly, we use the following assumptions:
626 // - The least number of bytes we will ever write is 8
627 // (sizeof(FunctionRecord)) only if the delta between the previous entry
628 // and this entry is within 32 bits.
629 // - The most number of bytes we will ever write is 8 + 16 + 16 = 40.
630 // This is computed by:
632 // MaxSize = sizeof(FunctionRecord) + 2 * sizeof(MetadataRecord)
634 // These arise in the following cases:
636 // 1. When the delta between the TSC we get and the previous TSC for the
637 // same CPU is outside of the uint32_t range, we end up having to
638 // write a MetadataRecord to indicate a "tsc wrap" before the actual
640 // 2. When we learn that we've moved CPUs, we need to write a
641 // MetadataRecord to indicate a "cpu change", and thus write out the
642 // current TSC for that CPU before writing out the actual
644 // 3. When we learn about a new CPU ID, we need to write down a "new cpu
645 // id" MetadataRecord before writing out the actual FunctionRecord.
646 // 4. The second MetadataRecord is the optional function call argument.
648 // So the math we need to do is to determine whether writing 40 bytes past the
649 // current pointer exceeds the buffer's maximum size. If we don't have enough
650 // space to write 40 bytes in the buffer, we need get a new Buffer, set it up
651 // properly before doing any further writing.
652 size_t MaxSize = FunctionRecSize + 2 * MetadataRecSize;
653 if (!prepareBuffer(TSC, CPU, wall_clock_reader, MaxSize)) {
658 // By this point, we are now ready to write up to 40 bytes (explained above).
659 assert((TLD.RecordPtr + MaxSize) - static_cast<char *>(TLD.Buffer.Buffer) >=
660 static_cast<ptrdiff_t>(MetadataRecSize) &&
661 "Misconfigured BufferQueue provided; Buffer size not large enough.");
663 auto RecordTSCDelta = writeCurrentCPUTSC(TLD, TSC, CPU);
665 TLD.CurrentCPU = CPU;
667 case XRayEntryType::ENTRY:
668 case XRayEntryType::LOG_ARGS_ENTRY:
669 // Update the thread local state for the next invocation.
670 TLD.LastFunctionEntryTSC = TSC;
672 case XRayEntryType::TAIL:
673 case XRayEntryType::EXIT:
674 // Break out and write the exit record if we can't erase any functions.
675 if (TLD.NumConsecutiveFnEnters == 0 ||
676 (TSC - TLD.LastFunctionEntryTSC) >= thresholdTicks())
678 rewindRecentCall(TSC, TLD.LastTSC, TLD.LastFunctionEntryTSC, FuncId);
679 return; // without writing log.
680 case XRayEntryType::CUSTOM_EVENT: {
681 // This is a bug in patching, so we'll report it once and move on.
682 static bool Once = [&] {
683 Report("Internal error: patched an XRay custom event call as a function; "
693 writeFunctionRecord(FuncId, RecordTSCDelta, Entry);
694 if (Entry == XRayEntryType::LOG_ARGS_ENTRY)
695 writeCallArgumentMetadata(Arg1);
697 // If we've exhausted the buffer by this time, we then release the buffer to
698 // make sure that other threads may start using this buffer.
702 } // namespace __xray_fdr_internal
703 } // namespace __xray
705 #endif // XRAY_XRAY_FDR_LOGGING_IMPL_H