//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime.
// The deadlock detector maintains a directed graph of lock acquisitions.
// When a lock event happens, the detector checks if the locks already held by
// the current thread are reachable from the newly acquired lock.
//
// The detector can handle only a fixed number of simultaneously live locks
// (a lock is alive if it has been locked at least once and has not been
// destroyed). When the maximal number of locks is reached, the entire graph
// is flushed and a new lock epoch is started. Node ids from old epochs
// can not be used with any of the detector methods except for
// nodeBelongsToCurrentEpoch().
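//
// For illustration (concrete numbers, not stated in this file): with
// BV::kSize == 64 the first newNode() call flushes the empty graph and sets
// the epoch to 64, so live node ids are 64..127; after the next flush the
// epoch becomes 128, and an old id such as 70 no longer satisfies
// nodeBelongsToCurrentEpoch() and must not be passed to the other methods.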
//
// FIXME: this is work in progress, nothing really works yet.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DEADLOCK_DETECTOR_H
#define SANITIZER_DEADLOCK_DETECTOR_H

#include "sanitizer_bvgraph.h"
#include "sanitizer_common.h"

namespace __sanitizer {
// Thread-local state for DeadlockDetector.
// It contains the locks currently held by the owning thread.
template <class BV>
class DeadlockDetectorTLS {
 public:
  // No CTOR, use clear() to re-initialize.
  void clear() {
    bv_.clear();
    epoch_ = 0;
    n_recursive_locks = 0;
    n_all_locks_ = 0;
  }

  bool empty() const { return bv_.empty(); }

  void ensureCurrentEpoch(uptr current_epoch) {
    if (epoch_ == current_epoch) return;
    bv_.clear();
    epoch_ = current_epoch;
    n_recursive_locks = 0;
    n_all_locks_ = 0;
  }

  uptr getEpoch() const { return epoch_; }

  // Returns true if this is the first (non-recursive) acquisition of this lock.
  bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
    // Printf("addLock: %zx %zx stk %u\n", lock_id, current_epoch, stk);
    CHECK_EQ(epoch_, current_epoch);
    if (!bv_.setBit(lock_id)) {
      // The lock is already held by this thread, so it must be recursive.
      CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
      recursive_locks[n_recursive_locks++] = lock_id;
      return false;
    }
    CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
    // lock_id < BV::kSize, so we can cast to a smaller int.
    u32 lock_id_short = static_cast<u32>(lock_id);
    LockWithContext l = {lock_id_short, stk};
    all_locks_with_contexts_[n_all_locks_++] = l;
    return true;
  }

  void removeLock(uptr lock_id) {
    if (n_recursive_locks) {
      for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
        if (recursive_locks[i] == lock_id) {
          n_recursive_locks--;
          Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
          return;
        }
      }
    }
    // Printf("remLock: %zx %zx\n", lock_id, epoch_);
    if (!bv_.clearBit(lock_id))
      return;  // Probably addLock happened before the flush.
    for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
      if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
        Swap(all_locks_with_contexts_[i],
             all_locks_with_contexts_[n_all_locks_ - 1]);
        n_all_locks_--;
        break;
      }
    }
  }

  u32 findLockContext(uptr lock_id) {
    for (uptr i = 0; i < n_all_locks_; i++)
      if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
        return all_locks_with_contexts_[i].stk;
    return 0;
  }

  const BV &getLocks(uptr current_epoch) const {
    CHECK_EQ(epoch_, current_epoch);
    return bv_;
  }

  uptr getNumLocks() const { return n_all_locks_; }
  uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }

 private:
  BV bv_;
  uptr epoch_;
  uptr recursive_locks[64];
  uptr n_recursive_locks;
  struct LockWithContext {
    u32 lock;
    u32 stk;
  };
  LockWithContext all_locks_with_contexts_[64];
  uptr n_all_locks_;
};

// DeadlockDetector.
// For deadlock detection to work we need one global DeadlockDetector object
// and one DeadlockDetectorTLS object per thread.
// Most of the methods of this class are not thread-safe (i.e. they must be
// guarded by an external lock) unless explicitly stated otherwise.
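//
// A minimal usage sketch (hypothetical driver code, not part of this file;
// 'm', 'stk', 'unique_tid' and ReportDeadlock() are assumed names, and 'mu'
// stands for the external lock guarding the detector):
//
//   DeadlockDetector<BV> dd;         // one global instance
//   DeadlockDetectorTLS<BV> dtls;    // one instance per thread
//   uptr node = dd.newNode((uptr)m);           // on mutex creation, under mu
//   if (dd.onLockBefore(&dtls, node))          // on lock, under mu
//     ReportDeadlock();
//   dd.addEdges(&dtls, node, stk, unique_tid); // still under mu
//   dd.onLockAfter(&dtls, node, stk);
//   dd.onUnlock(&dtls, node);                  // on unlock, thread-safe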
template <class BV>
class DeadlockDetector {
 public:
  typedef BV BitVector;

  uptr size() const { return g_.size(); }

  // No CTOR, use clear() to re-initialize.
  void clear() {
    current_epoch_ = 0;
    available_nodes_.clear();
    recycled_nodes_.clear();
    g_.clear();
    n_edges_ = 0;
  }

  // Allocate a new deadlock detector node.
  // If we are out of available nodes, first try to recycle some.
  // If there is nothing to recycle, flush the graph and increment the epoch.
  // Associate 'data' (an opaque user object) with the new node.
  uptr newNode(uptr data) {
    if (!available_nodes_.empty())
      return getAvailableNode(data);
    if (!recycled_nodes_.empty()) {
      // Printf("recycling: n_edges_ %zd\n", n_edges_);
      for (sptr i = n_edges_ - 1; i >= 0; i--) {
        if (recycled_nodes_.getBit(edges_[i].from) ||
            recycled_nodes_.getBit(edges_[i].to)) {
          Swap(edges_[i], edges_[n_edges_ - 1]);
          n_edges_--;
        }
      }
      CHECK(available_nodes_.empty());
      // removeEdgesFrom was called in removeNode.
      g_.removeEdgesTo(recycled_nodes_);
      available_nodes_.setUnion(recycled_nodes_);
      recycled_nodes_.clear();
      return getAvailableNode(data);
    }
    // We are out of vacant nodes. Flush and increment the current_epoch_.
    current_epoch_ += size();
    recycled_nodes_.clear();
    available_nodes_.setAll();
    g_.clear();
    return getAvailableNode(data);
  }

  // Get the data associated with the node created by newNode().
  uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }

  bool nodeBelongsToCurrentEpoch(uptr node) {
    return node && (node / size() * size()) == current_epoch_;
  }

  void removeNode(uptr node) {
    uptr idx = nodeToIndex(node);
    CHECK(!available_nodes_.getBit(idx));
    CHECK(recycled_nodes_.setBit(idx));
    g_.removeEdgesFrom(idx);
  }

  void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
    dtls->ensureCurrentEpoch(current_epoch_);
  }

  // Returns true if there is a cycle in the graph after this lock event.
  // Ideally this should be called before the lock is acquired so that we can
  // report a deadlock before a real deadlock happens.
  bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
  }

  u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    return dtls->findLockContext(nodeToIndex(node));
  }

  // Add cur_node to the set of locks currently held by dtls.
  void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
  }

  // Experimental *racy* fast path function.
  // Returns true if all edges from the currently held locks to cur_node exist.
  bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    uptr local_epoch = dtls->getEpoch();
    // The read from current_epoch_ is racy.
    if (cur_node && local_epoch == current_epoch_ &&
        local_epoch == nodeToEpoch(cur_node)) {
      uptr cur_idx = nodeToIndexUnchecked(cur_node);
      for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
        if (!g_.hasEdge(dtls->getLock(i), cur_idx))
          return false;
      }
      return true;
    }
    return false;
  }

  // Adds edges from the currently held locks to cur_node,
  // returns the number of added edges, and puts the sources of the added
  // edges into added_edges[].
  // Should be called before onLockAfter.
  uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
                int unique_tid) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    uptr added_edges[40];
    uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
                                     added_edges, ARRAY_SIZE(added_edges));
    for (uptr i = 0; i < n_added_edges; i++) {
      if (n_edges_ < ARRAY_SIZE(edges_)) {
        Edge e = {(u16)added_edges[i], (u16)cur_idx,
                  dtls->findLockContext(added_edges[i]), stk,
                  unique_tid};
        edges_[n_edges_++] = e;
      }
      // Printf("Edge%zd: %u %zd=>%zd in T%d\n",
      //        n_edges_, stk, added_edges[i], cur_idx, unique_tid);
    }
    return n_added_edges;
  }

  bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
                int *unique_tid) {
    uptr from_idx = nodeToIndex(from_node);
    uptr to_idx = nodeToIndex(to_node);
    for (uptr i = 0; i < n_edges_; i++) {
      if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
        *stk_from = edges_[i].stk_from;
        *stk_to = edges_[i].stk_to;
        *unique_tid = edges_[i].unique_tid;
        return true;
      }
    }
    return false;
  }

  // Test-only function. Handles the before/after lock events,
  // returns true if there is a cycle.
  bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
    addEdges(dtls, cur_node, stk, 0);
    onLockAfter(dtls, cur_node, stk);
    return is_reachable;
  }

  // Handles the try_lock event; always returns false.
  // When a try_lock event happens (i.e. a try_lock call succeeds) we need
  // to add this lock to the currently held locks, but we should not try to
  // change the lock graph or to detect a cycle. We may want to investigate
  // whether a more aggressive strategy is possible for try_lock.
  bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
    return false;
  }

  // Returns true iff dtls is empty (no locks are currently held) and we can
  // add the node to the currently held locks w/o changing the global state.
  // This operation is thread-safe as it only touches the dtls.
  bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (!dtls->empty()) return false;
    if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }

  // Finds a path between the lock 'cur_node' (currently not held in dtls)
  // and some currently held lock; returns the length of the path
  // or 0 on failure.
  uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
                      uptr path_size) {
    tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
    uptr idx = nodeToIndex(cur_node);
    CHECK(!tmp_bv_.getBit(idx));
    uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
    for (uptr i = 0; i < res; i++)
      path[i] = indexToNode(path[i]);
    if (res)
      CHECK_EQ(path[0], cur_node);
    return res;
  }
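  // A hypothetical call site (names are placeholders, not from this file):
  //   uptr path[10];
  //   uptr len = dd.findPathToLock(&dtls, node, path, ARRAY_SIZE(path));
  //   if (len) { /* path[0] == node; the path leads to a held lock */ }
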
  // Handle the unlock event.
  // This operation is thread-safe as it only touches the dtls.
  void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    if (dtls->getEpoch() == nodeToEpoch(node))
      dtls->removeLock(nodeToIndexUnchecked(node));
  }

  // Tries to handle the lock event w/o writing to global state.
  // Returns true on success.
  // This operation is thread-safe as it only touches the dtls
  // (modulo the racy nature of hasAllEdges).
  bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (hasAllEdges(dtls, node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }
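  // A possible caller pattern (sketch only; 'dd', 'mu', 'unique_tid' and
  // ReportDeadlock() are assumed names): try the racy fast path first and
  // fall back to the externally locked slow path only when it fails:
  //   if (!dd.onLockFast(&dtls, node, stk)) {
  //     SpinMutexLock lock(&mu);
  //     if (dd.onLockBefore(&dtls, node)) ReportDeadlock();
  //     dd.addEdges(&dtls, node, stk, unique_tid);
  //     dd.onLockAfter(&dtls, node, stk);
  //   }
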
  bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
    return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
  }

  uptr testOnlyGetEpoch() const { return current_epoch_; }

  bool testOnlyHasEdge(uptr l1, uptr l2) {
    return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
  }

  // idx1 and idx2 are raw indices to g_, not lock IDs.
  bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
    return g_.hasEdge(idx1, idx2);
  }

  void Print() {
    for (uptr from = 0; from < size(); from++)
      for (uptr to = 0; to < size(); to++)
        if (g_.hasEdge(from, to))
          Printf("  %zx => %zx\n", from, to);
  }

 private:
  void check_idx(uptr idx) const { CHECK_LT(idx, size()); }

  void check_node(uptr node) const {
    CHECK_GE(node, size());
    CHECK_EQ(current_epoch_, nodeToEpoch(node));
  }

  uptr indexToNode(uptr idx) const {
    check_idx(idx);
    return idx + current_epoch_;
  }

  uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }

  uptr nodeToIndex(uptr node) const {
    check_node(node);
    return nodeToIndexUnchecked(node);
  }

  uptr nodeToEpoch(uptr node) const { return node / size() * size(); }
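  // Worked example (numbers are illustrative only): if size() == 64 and
  // current_epoch_ == 128, then indexToNode(2) == 130,
  // nodeToIndexUnchecked(130) == 130 % 64 == 2, and
  // nodeToEpoch(130) == 130 / 64 * 64 == 128.
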
  uptr getAvailableNode(uptr data) {
    uptr idx = available_nodes_.getAndClearFirstOne();
    data_[idx] = data;
    return indexToNode(idx);
  }

  struct Edge {
    u16 from;
    u16 to;
    u32 stk_from;
    u32 stk_to;
    int unique_tid;
  };

  uptr current_epoch_;
  BV available_nodes_;
  BV recycled_nodes_;
  BV tmp_bv_;
  BVGraph<BV> g_;
  uptr data_[BV::kSize];
  Edge edges_[BV::kSize * 32];
  uptr n_edges_;
};

}  // namespace __sanitizer

#endif  // SANITIZER_DEADLOCK_DETECTOR_H