2 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * This module holds the global variables and functions used to maintain
32 * lock_object structures.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 #include "opt_mprof.h"
41 #include <sys/param.h>
42 #include <sys/systm.h>
44 #include <sys/linker_set.h>
47 #include <sys/sysctl.h>
48 #include <sys/lock_profile.h>
/*
 * Compile-time sanity check on the number of lock classes; the class
 * index is packed into lo_flags (see LO_CLASSSHIFT in lock_init()), so
 * the count must fit that field.
 * NOTE(review): field width not visible in this view — confirm in lock.h.
 */
54 CTASSERT(LOCK_CLASS_MAX == 15);
/*
 * Table of all known lock classes, indexed by the class index stored in
 * a lock's lo_flags.  (Remaining entries and closing brace are elided
 * from this view.)
 */
56 struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
58 &lock_class_mtx_sleep,
65 #include <machine/cpufunc.h>
/* sysctl node debug.lock: parent for the lock debugging knobs below. */
67 SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
/* sysctl node debug.lock.prof: lock profiling statistics and controls. */
68 SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
/* Master on/off switch for lock profiling (debug.lock.prof.enable, RW). */
69 int lock_prof_enable = 0;
70 SYSCTL_INT(_debug_lock_prof, OID_AUTO, enable, CTLFLAG_RW,
71 &lock_prof_enable, 0, "Enable lock profiling");
74 * lprof_buf is a static pool of profiling records to avoid possible
75 * reentrance of the memory allocation functions.
77 * Note: NUM_LPROF_BUFFERS must be smaller than LPROF_HASH_SIZE.
/*
 * Open-addressed hash table of profiling records; probed and filled in
 * _lock_profile_release_lock(), dumped by dump_lock_prof_stats().
 */
79 struct lock_prof lprof_buf[LPROF_HASH_SIZE];
/* Count of lprof_buf slots handed out so far; bumped with atomic_fetchadd_int. */
80 static int allocated_lprof_buf;
/* Mutexes guarding the profiling state; how they partition it is not visible here. */
81 struct mtx lprof_locks[LPROF_LOCK_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks. */
/*
 * Parenthesized so expressions such as "LPROF_SBUF_SIZE * multiplier"
 * (see dump_lock_prof_stats()) group as a whole, per standard macro hygiene.
 */
#define LPROF_SBUF_SIZE		(256 * 400)
87 static int lock_prof_acquisitions;
88 SYSCTL_INT(_debug_lock_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
89 &lock_prof_acquisitions, 0, "Number of lock acquistions recorded");
/* Number of lprof_buf records currently in use (debug.lock.prof.records, RO). */
90 static int lock_prof_records;
91 SYSCTL_INT(_debug_lock_prof, OID_AUTO, records, CTLFLAG_RD,
92 &lock_prof_records, 0, "Number of profiling records");
/* Capacity of the record table; fixed at LPROF_HASH_SIZE. */
93 static int lock_prof_maxrecords = LPROF_HASH_SIZE;
94 SYSCTL_INT(_debug_lock_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
95 &lock_prof_maxrecords, 0, "Maximum number of profiling records");
/* Records dropped because the table was full (see _lock_profile_release_lock). */
96 static int lock_prof_rejected;
97 SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
98 &lock_prof_rejected, 0, "Number of rejected profiling records");
/* Hash table size, exported read-only for userland tools. */
99 static int lock_prof_hashsize = LPROF_HASH_SIZE;
100 SYSCTL_INT(_debug_lock_prof, OID_AUTO, hashsize, CTLFLAG_RD,
101 &lock_prof_hashsize, 0, "Hash size");
/* Times a record had to linear-probe past an occupied slot. */
102 static int lock_prof_collisions = 0;
103 SYSCTL_INT(_debug_lock_prof, OID_AUTO, collisions, CTLFLAG_RD,
104 &lock_prof_collisions, 0, "Number of hash collisions");
106 #ifndef USE_CPU_NANOSECONDS
113 return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
/*
 * sysctl handler for debug.lock.prof.stats: format every active lprof_buf
 * record into an sbuf as one table row and copy the result out to userland.
 * NOTE(review): several lines (local declarations, braces, the loop's
 * "continue", the overflow-retry branch) are elided from this view.
 */
118 dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
/*
 * Persists across calls; presumably grown when the fixed-length sbuf
 * overflows (that branch's body is not visible here) — TODO confirm.
 */
122 static int multiplier = 1;
/* Nothing recorded yet: return a plain message instead of an empty table. */
125 if (allocated_lprof_buf == 0)
126 return (SYSCTL_OUT(req, "No locking recorded",
127 sizeof("No locking recorded")));
/* Fixed-length buffer; sized by the SWAG above times the retry multiplier. */
130 sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
/* Column header row; must line up with the data row format below. */
131 sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n",
132 "max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
133 for (i = 0; i < LPROF_HASH_SIZE; ++i) {
/* Skip slots that were never allocated to a record. */
134 if (lprof_buf[i].name == NULL)
/* Strip any leading "../" path components from the recorded file name. */
136 for (p = lprof_buf[i].file;
137 p != NULL && strncmp(p, "../", 3) == 0; p += 3)
139 sbuf_printf(sb, "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (%s:%s)\n",
/* Times divided by 1000 — presumably ns -> us (see nanoseconds()); confirm units. */
140 lprof_buf[i].cnt_max / 1000,
141 lprof_buf[i].cnt_tot / 1000,
142 lprof_buf[i].cnt_wait / 1000,
143 lprof_buf[i].cnt_cur,
/* Guard the averages against division by zero when no acquisitions counted. */
144 lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
145 lprof_buf[i].cnt_tot / (lprof_buf[i].cnt_cur * 1000),
146 lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
147 lprof_buf[i].cnt_wait / (lprof_buf[i].cnt_cur * 1000),
148 lprof_buf[i].cnt_contest_holding,
149 lprof_buf[i].cnt_contest_locking,
150 p, lprof_buf[i].line,
/* Table didn't fit the fixed sbuf; recovery code elided from this view. */
153 if (sbuf_overflowed(sb)) {
/* +1 copies out the NUL terminator along with the formatted table. */
161 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
/*
 * sysctl handler for debug.lock.prof.reset: any write clears all recorded
 * profiling data.  NOTE(review): declarations, early returns, and locking
 * (if any) around the bzero are elided from this view — confirm whether the
 * clear can race concurrent _lock_profile_release_lock() updates.
 */
166 reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
/* Nothing to reset if no records were ever allocated. */
170 if (allocated_lprof_buf == 0)
/* Standard int-knob handshake with userland. */
174 error = sysctl_handle_int(oidp, &v, 0, req);
/* Read-only access (no new value supplied): do not reset. */
177 if (req->newptr == NULL)
/* Wipe every record and hand the pool back. */
182 bzero(lprof_buf, LPROF_HASH_SIZE*sizeof(*lprof_buf));
183 allocated_lprof_buf = 0;
/* debug.lock.prof.stats: read-only string handler, see dump_lock_prof_stats(). */
187 SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
188 NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
/* debug.lock.prof.reset: writable int handler, see reset_lock_prof_stats(). */
190 SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
191 NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
/*
 * Initialize a lock_object: record its class index in lo_flags, set its
 * name/type, merge caller flags, and mark it LO_INITIALIZED.  Called by
 * each lock class's own init routine.  (Braces and some statements are
 * elided from this view.)
 */
195 lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
196 const char *type, int flags)
200 /* Check for double-init and zero object. */
201 KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
204 /* Look up lock class to find its index. */
/*
 * NOTE(review): the scan covers indices 0..LOCK_CLASS_MAX-1 although
 * lock_classes[] holds LOCK_CLASS_MAX + 1 entries — confirm the last
 * slot is intentionally unreachable here.
 */
205 for (i = 0; i < LOCK_CLASS_MAX; i++)
206 if (lock_classes[i] == class) {
/* Store the class index in the dedicated lo_flags field. */
207 lock->lo_flags = i << LO_CLASSSHIFT;
/* Loop ran off the end without a match: class was never registered. */
210 KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
212 /* Initialize the lock object. */
213 lock->lo_name = name;
/* Type defaults to the name when the caller passes none. */
214 lock->lo_type = type != NULL ? type : name;
215 lock->lo_flags |= flags | LO_INITIALIZED;
216 LOCK_LOG_INIT(lock, 0);
/* Hook the object into the lock profiler (no-op unless profiling is built in). */
218 lock_profile_object_init(lock, class, name);
/*
 * Tear down a lock_object: detach it from the profiler and witness,
 * log the destruction, and clear LO_INITIALIZED so a later re-init
 * (or double destroy, via the KASSERT here) can be detected.
 */
222 lock_destroy(struct lock_object *lock)
/* Destroying a never-initialized (or already-destroyed) lock is a bug. */
225 KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
226 lock_profile_object_destroy(lock);
227 WITNESS_DESTROY(lock);
228 LOCK_LOG_DESTROY(lock, 0);
/* Mark uninitialized last, after all subsystems have let go of it. */
229 lock->lo_flags &= ~LO_INITIALIZED;
/*
 * DDB "show lock <addr>" command: print a lock_object's class, name and
 * type, then delegate to the class's own lc_ddb_show() for class-specific
 * state.  (The has_addr guard and early returns are elided from this view.)
 */
233 DB_SHOW_COMMAND(lock, db_show_lock)
235 struct lock_object *lock;
236 struct lock_class *class;
/* addr is the raw address typed at the ddb prompt. */
240 lock = (struct lock_object *)addr;
/* Reject objects whose class index field is out of range — likely not a lock. */
241 if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
242 db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
245 class = LOCK_CLASS(lock);
246 db_printf(" class: %s\n", class->lc_name);
247 db_printf(" name: %s\n", lock->lo_name);
/* Only print the type when it differs from the name (see lock_init()). */
248 if (lock->lo_type && lock->lo_type != lock->lo_name)
249 db_printf(" type: %s\n", lock->lo_type);
/* Class-specific details (owner, state bits, ...) come from the class itself. */
250 class->lc_ddb_show(lock);
254 #ifdef LOCK_PROFILING
/*
 * Record a successful lock acquisition for profiling: remember where it
 * was taken (file/line), timestamp it, and bank any time spent waiting.
 * "waittime" is the nanoseconds() reading taken when the wait began, or 0.
 * (Conditional guards around the contest updates are elided from this view.)
 */
255 void _lock_profile_obtain_lock_success(struct lock_object *lo, int contested, uint64_t waittime, const char *file, int line)
257 struct lock_profile_object *l = &lo->lo_profile_obj;
259 lo->lo_profile_obj.lpo_contest_holding = 0;
262 lo->lo_profile_obj.lpo_contest_locking++;
/* Record the acquisition site; consumed by _lock_profile_release_lock(). */
264 l->lpo_filename = file;
265 l->lpo_lineno = line;
/* Timestamp of the acquisition, also used as the "profiling active" flag. */
266 l->lpo_acqtime = nanoseconds();
/* waittime is an earlier timestamp; the sanity check rejects clock weirdness. */
267 if (waittime && (l->lpo_acqtime > waittime))
268 l->lpo_waittime = l->lpo_acqtime - waittime;
/*
 * On lock release, fold the just-finished hold into the per-site record in
 * lprof_buf: find (or allocate) the slot keyed by (name hash, file, line)
 * via linear probing, then accumulate hold time, wait time, and contention
 * counts.  NOTE(review): many lines (declarations, braces, "break"/"goto"
 * targets, the locking around the accumulation, and the function's tail)
 * are elided from this view.
 */
273 void _lock_profile_release_lock(struct lock_object *lo)
275 struct lock_profile_object *l = &lo->lo_profile_obj;
/* lpo_acqtime != 0 means the matching acquisition was recorded. */
277 if (l->lpo_acqtime) {
278 const char *unknown = "(unknown)";
279 u_int64_t acqtime, now, waittime;
280 struct lock_prof *mpp;
282 const char *p = l->lpo_filename;
/* Snapshot the per-object fields before they can be overwritten. */
286 acqtime = l->lpo_acqtime;
287 waittime = l->lpo_waittime;
/* No file recorded: fall back to the "(unknown)" placeholder (branch elided). */
290 if (p == NULL || *p == '\0')
/* Hash the site identity (name hash, filename pointer, line) into the table. */
292 hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 + l->lpo_lineno) & LPROF_HASH_MASK;
293 mpp = &lprof_buf[hash];
/* Linear-probe until we hit our record or an empty slot. */
294 while (mpp->name != NULL) {
295 if (mpp->line == l->lpo_lineno &&
297 mpp->namehash == l->lpo_namehash)
299 /* If the lprof_hash entry is allocated to someone
300 * else, try the next one
303 hash = (hash + 1) & LPROF_HASH_MASK;
304 mpp = &lprof_buf[hash];
/* Empty slot found: claim a fresh record for this site. */
306 if (mpp->name == NULL) {
/* Atomically reserve a pool slot; returns the pre-increment count. */
309 buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
310 /* Just exit if we cannot get a trace buffer */
311 if (buf >= LPROF_HASH_SIZE) {
312 ++lock_prof_rejected;
/* Populate the new record's identity fields. */
316 mpp->line = l->lpo_lineno;
317 mpp->namehash = l->lpo_namehash;
318 mpp->type = l->lpo_type;
319 mpp->name = lo->lo_name;
322 ++lock_prof_collisions;
325 * We might have raced someone else but who cares,
326 * they'll try again next time
332 * Record if the lock has been held longer now than ever
/* now - acqtime is this hold's duration in nanoseconds. */
335 if (now - acqtime > mpp->cnt_max)
336 mpp->cnt_max = now - acqtime;
337 mpp->cnt_tot += now - acqtime;
338 mpp->cnt_wait += waittime;
341 * There's a small race, really we should cmpxchg
342 * 0 with the current value, but that would bill
343 * the contention to the wrong lock instance if
344 * it followed this also.
346 mpp->cnt_contest_holding += l->lpo_contest_holding;
347 mpp->cnt_contest_locking += l->lpo_contest_locking;
/* Reset per-object contention counters for the next acquisition cycle. */
353 l->lpo_contest_locking = 0;
354 l->lpo_contest_holding = 0;