1 //===-- sanitizer_stackdepot.cc -------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is shared between AddressSanitizer and ThreadSanitizer
11 // run-time libraries.
12 //===----------------------------------------------------------------------===//
14 #include "sanitizer_stackdepot.h"
16 #include "sanitizer_common.h"
17 #include "sanitizer_stackdepotbase.h"
19 namespace __sanitizer {
21 struct StackDepotNode {
24 atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
27 uptr stack[1]; // [size]
29 static const u32 kTabSizeLog = 20;
30 // Lower kTabSizeLog bits are equal for all items in one bucket.
31 // We use these bits to store the per-stack use counter.
32 static const u32 kUseCountBits = kTabSizeLog;
33 static const u32 kMaxUseCount = 1 << kUseCountBits;
34 static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
35 static const u32 kHashMask = ~kUseCountMask;
37 typedef StackTrace args_type;
38 bool eq(u32 hash, const args_type &args) const {
40 atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
41 if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
44 for (; i < size; i++) {
45 if (stack[i] != args.trace[i]) return false;
49 static uptr storage_size(const args_type &args) {
50 return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
52 static u32 hash(const args_type &args) {
54 const u32 m = 0x5bd1e995;
55 const u32 seed = 0x9747b28c;
57 u32 h = seed ^ (args.size * sizeof(uptr));
58 for (uptr i = 0; i < args.size; i++) {
59 u32 k = args.trace[i];
71 static bool is_valid(const args_type &args) {
72 return args.size > 0 && args.trace;
74 void store(const args_type &args, u32 hash) {
75 atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
78 internal_memcpy(stack, args.trace, size * sizeof(uptr));
80 args_type load() const {
81 return args_type(&stack[0], size, tag);
83 StackDepotHandle get_handle() { return StackDepotHandle(this); }
85 typedef StackDepotHandle handle_type;
// The use-count field must be exactly as wide as the externally declared
// kStackDepotMaxUseCount limit, or inc_use_count_unsafe's CHECK is wrong.
COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
// Returns the depot-unique id of the stack trace this handle refers to.
u32 StackDepotHandle::id() { return node_->id; }
91 int StackDepotHandle::use_count() {
92 return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
93 StackDepotNode::kUseCountMask;
95 void StackDepotHandle::inc_use_count_unsafe() {
97 atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
98 StackDepotNode::kUseCountMask;
99 CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
102 // FIXME(dvyukov): this single reserved bit is used in TSan.
103 typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
105 static StackDepot theDepot;
107 StackDepotStats *StackDepotGetStats() {
108 return theDepot.GetStats();
111 u32 StackDepotPut(StackTrace stack) {
112 StackDepotHandle h = theDepot.Put(stack);
113 return h.valid() ? h.id() : 0;
116 StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
117 return theDepot.Put(stack);
120 StackTrace StackDepotGet(u32 id) {
121 return theDepot.Get(id);
124 void StackDepotLockAll() {
128 void StackDepotUnlockAll() {
129 theDepot.UnlockAll();
132 bool StackDepotReverseMap::IdDescPair::IdComparator(
133 const StackDepotReverseMap::IdDescPair &a,
134 const StackDepotReverseMap::IdDescPair &b) {
138 StackDepotReverseMap::StackDepotReverseMap()
139 : map_(StackDepotGetStats()->n_uniq_ids + 100) {
140 for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
141 atomic_uintptr_t *p = &theDepot.tab[idx];
142 uptr v = atomic_load(p, memory_order_consume);
143 StackDepotNode *s = (StackDepotNode*)(v & ~1);
144 for (; s; s = s->link) {
145 IdDescPair pair = {s->id, s};
146 map_.push_back(pair);
149 InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
152 StackTrace StackDepotReverseMap::Get(u32 id) {
155 IdDescPair pair = {id, nullptr};
157 InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
158 if (idx > map_.size() || map_[idx].id != id)
160 return map_[idx].desc->load();
163 } // namespace __sanitizer