//===-- tsan_fd.cc --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
15 #include <sanitizer_common/sanitizer_atomic.h>
19 const int kTableSizeL1 = 1024;
20 const int kTableSizeL2 = 1024;
21 const int kTableSize = kTableSizeL1 * kTableSizeL2;
34 atomic_uintptr_t tab[kTableSizeL1];
35 // Addresses used for synchronization.
42 static FdContext fdctx;
44 static bool bogusfd(int fd) {
45 // Apparently a bogus fd value.
46 return fd < 0 || fd >= kTableSize;
49 static FdSync *allocsync(ThreadState *thr, uptr pc) {
50 FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
51 kDefaultAlignment, false);
52 atomic_store(&s->rc, 1, memory_order_relaxed);
56 static FdSync *ref(FdSync *s) {
57 if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
58 atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
62 static void unref(ThreadState *thr, uptr pc, FdSync *s) {
63 if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
64 if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
65 CHECK_NE(s, &fdctx.globsync);
66 CHECK_NE(s, &fdctx.filesync);
67 CHECK_NE(s, &fdctx.socksync);
68 user_free(thr, pc, s, false);
73 static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
75 CHECK_LT(fd, kTableSize);
76 atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
77 uptr l1 = atomic_load(pl1, memory_order_consume);
79 uptr size = kTableSizeL2 * sizeof(FdDesc);
80 // We need this to reside in user memory to properly catch races on it.
81 void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
82 internal_memset(p, 0, size);
83 MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
84 if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
87 user_free(thr, pc, p, false);
89 return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
92 // pd must be already ref'ed.
93 static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
95 FdDesc *d = fddesc(thr, pc, fd);
96 // As a matter of fact, we don't intercept all close calls.
97 // See e.g. libc __res_iclose().
99 unref(thr, pc, d->sync);
102 if (flags()->io_sync == 0) {
104 } else if (flags()->io_sync == 1) {
106 } else if (flags()->io_sync == 2) {
108 d->sync = &fdctx.globsync;
110 d->creation_tid = thr->tid;
111 d->creation_stack = CurrentStackId(thr, pc);
113 // To catch races between fd usage and open.
114 MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
116 // See the dup-related comment in FdClose.
117 MemoryRead(thr, pc, (uptr)d, kSizeLog8);
122 atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
123 atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
124 atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
127 void FdOnFork(ThreadState *thr, uptr pc) {
128 // On fork() we need to reset all fd's, because the child is going
129 // close all them, and that will cause races between previous read/write
131 for (int l1 = 0; l1 < kTableSizeL1; l1++) {
132 FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
135 for (int l2 = 0; l2 < kTableSizeL2; l2++) {
136 FdDesc *d = &tab[l2];
137 MemoryResetRange(thr, pc, (uptr)d, 8);
142 bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
143 for (int l1 = 0; l1 < kTableSizeL1; l1++) {
144 FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
147 if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
148 int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
149 FdDesc *d = &tab[l2];
150 *fd = l1 * kTableSizeL1 + l2;
151 *tid = d->creation_tid;
152 *stack = d->creation_stack;
159 void FdAcquire(ThreadState *thr, uptr pc, int fd) {
162 FdDesc *d = fddesc(thr, pc, fd);
164 DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
165 MemoryRead(thr, pc, (uptr)d, kSizeLog8);
167 Acquire(thr, pc, (uptr)s);
170 void FdRelease(ThreadState *thr, uptr pc, int fd) {
173 FdDesc *d = fddesc(thr, pc, fd);
175 DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
176 MemoryRead(thr, pc, (uptr)d, kSizeLog8);
178 Release(thr, pc, (uptr)s);
181 void FdAccess(ThreadState *thr, uptr pc, int fd) {
182 DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
185 FdDesc *d = fddesc(thr, pc, fd);
186 MemoryRead(thr, pc, (uptr)d, kSizeLog8);
189 void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
190 DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
193 FdDesc *d = fddesc(thr, pc, fd);
195 // To catch races between fd usage and close.
196 MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
198 // This path is used only by dup2/dup3 calls.
199 // We do read instead of write because there is a number of legitimate
200 // cases where write would lead to false positives:
201 // 1. Some software dups a closed pipe in place of a socket before closing
202 // the socket (to prevent races actually).
203 // 2. Some daemons dup /dev/null in place of stdin/stdout.
204 // On the other hand we have not seen cases when write here catches real
206 MemoryRead(thr, pc, (uptr)d, kSizeLog8);
208 // We need to clear it, because if we do not intercept any call out there
209 // that creates fd, we will hit false postives.
210 MemoryResetRange(thr, pc, (uptr)d, 8);
211 unref(thr, pc, d->sync);
214 d->creation_stack = 0;
217 void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
218 DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
221 init(thr, pc, fd, &fdctx.filesync);
224 void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
225 DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
226 if (bogusfd(oldfd) || bogusfd(newfd))
228 // Ignore the case when user dups not yet connected socket.
229 FdDesc *od = fddesc(thr, pc, oldfd);
230 MemoryRead(thr, pc, (uptr)od, kSizeLog8);
231 FdClose(thr, pc, newfd, write);
232 init(thr, pc, newfd, ref(od->sync), write);
235 void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
236 DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
237 FdSync *s = allocsync(thr, pc);
238 init(thr, pc, rfd, ref(s));
239 init(thr, pc, wfd, ref(s));
243 void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
244 DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
247 init(thr, pc, fd, allocsync(thr, pc));
250 void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
251 DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
254 init(thr, pc, fd, 0);
257 void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
258 DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
261 init(thr, pc, fd, 0);
264 void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
265 DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
268 init(thr, pc, fd, allocsync(thr, pc));
271 void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
272 DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
275 // It can be a UDP socket.
276 init(thr, pc, fd, &fdctx.socksync);
279 void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
280 DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
283 // Synchronize connect->accept.
284 Acquire(thr, pc, (uptr)&fdctx.connectsync);
285 init(thr, pc, newfd, &fdctx.socksync);
288 void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
289 DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
292 // Synchronize connect->accept.
293 Release(thr, pc, (uptr)&fdctx.connectsync);
296 void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
297 DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
300 init(thr, pc, fd, &fdctx.socksync);
303 uptr File2addr(const char *path) {
309 uptr Dir2addr(const char *path) {
315 } // namespace __tsan