//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

struct StackDepotNode {
  StackDepotNode *link;  // next node in the same hash bucket
  u32 id;
  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
  u32 size;
  u32 tag;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;
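  // Illustrative packing (non-Android, kTabSizeLog == 20; values hypothetical):
  // the low 20 bits hold the use counter and the high 12 bits hold the hash,
  // so a node with hash 0xABCDE123 and use_count 5 stores
  // (0xABCDE123 & kHashMask) | 5 == 0xABC00005 in hash_and_use_count.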

  typedef StackTrace args_type;
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
      return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
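  // E.g. storage_size for a 3-frame trace on a 64-bit target is
  // sizeof(StackDepotNode) + 2 * sizeof(uptr): the trailing stack[1] member
  // already accounts for the first frame.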
  static u32 hash(const args_type &args) {
    MurMur2HashBuilder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    return H.get();
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    tag = args.tag;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  args_type load() const {
    return args_type(&stack[0], size, tag);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};

COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}

u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}
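
// Illustrative round trip (PC values hypothetical):
//
//   uptr pcs[] = {0x401000, 0x401234, 0x401568};
//   StackTrace trace(pcs, 3);
//   u32 id = StackDepotPut(trace);        // same trace always yields same id
//   StackTrace back = StackDepotGet(id);  // back.size == 3, frames match pcs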

void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}

bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}
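
// Note: constructing the reverse map takes a snapshot of the depot; traces
// stored after construction are not visible through Get(). The constructor
// walks every hash bucket and sorts all (id, node) pairs by id once, so
// lookups can then use binary search.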

StackDepotReverseMap::StackDepotReverseMap() {
  map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
    StackDepotNode *s = (StackDepotNode *)(v & ~1);  // strip the lock bit
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
}

StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, nullptr};
  uptr idx =
      InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
  // idx == map_.size() means id is greater than every stored id.
  if (idx >= map_.size() || map_[idx].id != id)
    return StackTrace();
  return map_[idx].desc->load();
}
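
// Illustrative lookup (assumes `id` came from an earlier StackDepotPut call):
//
//   StackDepotReverseMap reverse_map;  // snapshot of the current depot
//   StackTrace t = reverse_map.Get(id);
//   if (t.size) t.Print();             // an empty trace means id is unknown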

}  // namespace __sanitizer