1 //===-- xray_buffer_queue.cc -----------------------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file is a part of XRay, a dynamic runtime instrumentation system.
12 // Defines the interface for a buffer queue implementation.
14 //===----------------------------------------------------------------------===//
#include "xray_buffer_queue.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#if !SANITIZER_FUCHSIA
#include "sanitizer_common/sanitizer_posix.h"
#endif
#include "xray_allocator.h"
#include "xray_defs.h"

#include <memory>
#include <sys/mman.h>
27 using namespace __xray;
31 BufferQueue::ControlBlock *allocControlBlock(size_t Size, size_t Count) {
33 allocateBuffer((sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
34 return B == nullptr ? nullptr
35 : reinterpret_cast<BufferQueue::ControlBlock *>(B);
38 void deallocControlBlock(BufferQueue::ControlBlock *C, size_t Size,
40 deallocateBuffer(reinterpret_cast<unsigned char *>(C),
41 (sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
44 void decRefCount(BufferQueue::ControlBlock *C, size_t Size, size_t Count) {
47 if (atomic_fetch_sub(&C->RefCount, 1, memory_order_acq_rel) == 1)
48 deallocControlBlock(C, Size, Count);
51 void incRefCount(BufferQueue::ControlBlock *C) {
54 atomic_fetch_add(&C->RefCount, 1, memory_order_acq_rel);
57 // We use a struct to ensure that we are allocating one atomic_uint64_t per
58 // cache line. This allows us to not worry about false-sharing among atomic
59 // objects being updated (constantly) by different threads.
60 struct ExtentsPadded {
62 atomic_uint64_t Extents;
63 unsigned char Storage[kCacheLineSize];
67 constexpr size_t kExtentsSize = sizeof(ExtentsPadded);
71 BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
72 SpinMutexLock Guard(&Mutex);
75 return BufferQueue::ErrorCode::AlreadyInitialized;
83 BackingStore = allocControlBlock(BufferSize, BufferCount);
84 if (BackingStore == nullptr)
85 return BufferQueue::ErrorCode::NotEnoughMemory;
87 auto CleanupBackingStore = at_scope_exit([&, this] {
90 deallocControlBlock(BackingStore, BufferSize, BufferCount);
91 BackingStore = nullptr;
94 // Initialize enough atomic_uint64_t instances, each
95 ExtentsBackingStore = allocControlBlock(kExtentsSize, BufferCount);
96 if (ExtentsBackingStore == nullptr)
97 return BufferQueue::ErrorCode::NotEnoughMemory;
99 auto CleanupExtentsBackingStore = at_scope_exit([&, this] {
102 deallocControlBlock(ExtentsBackingStore, kExtentsSize, BufferCount);
103 ExtentsBackingStore = nullptr;
106 Buffers = initArray<BufferRep>(BufferCount);
107 if (Buffers == nullptr)
108 return BufferQueue::ErrorCode::NotEnoughMemory;
110 // At this point we increment the generation number to associate the buffers
111 // to the new generation.
112 atomic_fetch_add(&Generation, 1, memory_order_acq_rel);
114 // First, we initialize the refcount in the ControlBlock, which we treat as
115 // being at the start of the BackingStore pointer.
116 atomic_store(&BackingStore->RefCount, 1, memory_order_release);
117 atomic_store(&ExtentsBackingStore->RefCount, 1, memory_order_release);
119 // Then we initialise the individual buffers that sub-divide the whole backing
120 // store. Each buffer will start at the `Data` member of the ControlBlock, and
121 // will be offsets from these locations.
122 for (size_t i = 0; i < BufferCount; ++i) {
123 auto &T = Buffers[i];
125 auto *E = reinterpret_cast<ExtentsPadded *>(&ExtentsBackingStore->Data +
127 Buf.Extents = &E->Extents;
128 atomic_store(Buf.Extents, 0, memory_order_release);
129 Buf.Generation = generation();
130 Buf.Data = &BackingStore->Data + (BufferSize * i);
131 Buf.Size = BufferSize;
132 Buf.BackingStore = BackingStore;
133 Buf.ExtentsBackingStore = ExtentsBackingStore;
134 Buf.Count = BufferCount;
141 atomic_store(&Finalizing, 0, memory_order_release);
143 return BufferQueue::ErrorCode::Ok;
146 BufferQueue::BufferQueue(size_t B, size_t N,
147 bool &Success) XRAY_NEVER_INSTRUMENT
152 BackingStore(nullptr),
153 ExtentsBackingStore(nullptr),
159 Success = init(B, N) == BufferQueue::ErrorCode::Ok;
162 BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
163 if (atomic_load(&Finalizing, memory_order_acquire))
164 return ErrorCode::QueueFinalizing;
166 BufferRep *B = nullptr;
168 SpinMutexLock Guard(&Mutex);
169 if (LiveBuffers == BufferCount)
170 return ErrorCode::NotEnoughMemory;
172 if (Next == (Buffers + BufferCount))
177 incRefCount(BackingStore);
178 incRefCount(ExtentsBackingStore);
180 Buf.Generation = generation();
182 return ErrorCode::Ok;
185 BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
186 // Check whether the buffer being referred to is within the bounds of the
187 // backing store's range.
188 BufferRep *B = nullptr;
190 SpinMutexLock Guard(&Mutex);
191 if (Buf.Generation != generation() || LiveBuffers == 0) {
193 decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
194 decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
195 return BufferQueue::ErrorCode::Ok;
198 if (Buf.Data < &BackingStore->Data ||
199 Buf.Data > &BackingStore->Data + (BufferCount * BufferSize))
200 return BufferQueue::ErrorCode::UnrecognizedBuffer;
204 if (First == (Buffers + BufferCount))
208 // Now that the buffer has been released, we mark it as "used".
211 decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
212 decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
213 atomic_store(B->Buff.Extents, atomic_load(Buf.Extents, memory_order_acquire),
214 memory_order_release);
216 return ErrorCode::Ok;
219 BufferQueue::ErrorCode BufferQueue::finalize() {
220 if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel))
221 return ErrorCode::QueueFinalizing;
222 return ErrorCode::Ok;
225 void BufferQueue::cleanupBuffers() {
226 for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B)
228 deallocateBuffer(Buffers, BufferCount);
229 decRefCount(BackingStore, BufferSize, BufferCount);
230 decRefCount(ExtentsBackingStore, kExtentsSize, BufferCount);
231 BackingStore = nullptr;
232 ExtentsBackingStore = nullptr;
238 BufferQueue::~BufferQueue() { cleanupBuffers(); }