/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

SDT_PROVIDER_DEFINE(lock);
SDT_PROBE_DEFINE1(lock, , , starvation, "u_int");

CTASSERT(LOCK_CLASS_MAX == 15);

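/*
 * The index of a lock's class in this table is packed into the lock's
 * lo_flags (see the LO_CLASSSHIFT shift in lock_init() below), which is
 * why the table size is bounded by the compile-time assertion above.
 */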
struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rm_sleepable,
	&lock_class_rw,
	&lock_class_lockmgr,
};

void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(flags & LO_NEW || !lock_initialized(lock),
	    ("lock \"%s\" %p already initialized", name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}

void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
static SYSCTL_NODE(_debug_lock, OID_AUTO, delay, CTLFLAG_RD, NULL,
    "lock delay");

static u_int __read_mostly starvation_limit = 131072;
SYSCTL_INT(_debug_lock_delay, OID_AUTO, starvation_limit, CTLFLAG_RW,
    &starvation_limit, 0, "");

static u_int __read_mostly restrict_starvation = 0;
SYSCTL_INT(_debug_lock_delay, OID_AUTO, restrict_starvation, CTLFLAG_RW,
    &restrict_starvation, 0, "");

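/*
 * lock_delay() implements exponential backoff for adaptive spinning: the
 * delay window doubles on each call, capped at lc->max.  Once a thread
 * has accumulated more than starvation_limit iterations of spinning, the
 * SDT probe defined above fires; if restrict_starvation is set, the
 * window is also dropped back to lc->base so a long-spinning thread does
 * not keep growing its backoff.
 */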
void
lock_delay(struct lock_delay_arg *la)
{
	struct lock_delay_config *lc = la->config;
	u_int i;

	la->delay <<= 1;
	if (__predict_false(la->delay > lc->max))
		la->delay = lc->max;

	for (i = la->delay; i > 0; i--)
		cpu_spinwait();

	la->spin_cnt += la->delay;
	if (__predict_false(la->spin_cnt > starvation_limit)) {
		SDT_PROBE1(lock, , , starvation, la->delay);
		if (restrict_starvation)
			la->delay = lc->base;
	}
}

static u_int
lock_roundup_2(u_int val)
{
	u_int res;

	/* Smallest power of two strictly greater than val. */
	for (res = 1; res <= val; res <<= 1)
		continue;

	return (res);
}

void
lock_delay_default_init(struct lock_delay_config *lc)
{

	lc->base = lock_roundup_2(mp_ncpus) / 4;
	lc->max = lc->base * 1024;
}

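/*
 * Worked example (illustrative only): with mp_ncpus == 16,
 * lock_roundup_2(16) returns 32, giving base = 8 and a cap of
 * max = 8192 cpu_spinwait() iterations per lock_delay() call.
 */
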
#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	uint64_t	lpo_acqtime;
	uint64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);
LIST_HEAD(lpohead, lock_profile_object);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096

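/*
 * Both allocation pools below are fixed-size and carved out of the static
 * per-CPU arrays in lock_prof_type, so the profiling hot path never calls
 * into the allocator; when a pool is exhausted, new records are dropped
 * and lock_prof_rejected is incremented instead.
 */
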
/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
};

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int __read_mostly lock_prof_enable;
static volatile int lock_prof_resetting;

#define	LPROF_SBUF_SIZE	256

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
uint64_t
nanoseconds(void)
{
	struct bintime bt;
	uint64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (uint64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif

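/*
 * Worked example for the fixed-point conversion above: bt.frac is a
 * 64-bit binary fraction of a second, so for bt.frac = 2^63 (half a
 * second) the upper 32 bits are 2^31 and
 * (10^9 * 2^31) >> 32 == 500000000 ns, as expected.  The low 32 bits of
 * the fraction are deliberately discarded as below-nanosecond noise.
 */
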
static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

static void
lock_prof_init(void *arg)
{
	int cpu;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that quiesce_all_cpus may
	 * complete.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}

static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We not only race with acquiring and releasing locks but also
	 * thread exit.  To be certain that threads exit without valid head
	 * pointers they must see resetting set before enabled is cleared.
	 * Otherwise a thread could observe that profiling is disabled,
	 * leave a lock on its per-thread list, and exit without waiting
	 * for the reset below to unlink it.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	quiesce_all_cpus("profreset", 0);
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}

static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	/* Strip any leading "../" components from the file name. */
	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}

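/*
 * Note on units: hold and wait times accumulate in nanoseconds, so the
 * divisions by 1000 above report the max/wait_max/total/wait_total and
 * average columns in microseconds; cnt_cur is a plain acquisition count.
 */
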
static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		type = &lp_cpu[cpu]->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			/* Mark this record consumed for the current dump. */
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}

static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
		}
	}
}

static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg",
	    "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	quiesce_all_cpus("profstat", 0);
	t = ticks;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
	}
	lock_prof_enable = enabled;

	error = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (error == 0)
		error = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (error);
}

static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}

static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}

void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	if (SCHEDULER_STOPPED())
		return;

	/* don't reset the timer when/if recursing */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) != 0)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	if (spin && lock_prof_skipspin == 1)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	critical_exit();
}

void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (SCHEDULER_STOPPED())
		return;
	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	critical_exit();
}

static SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL,
    "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");

#endif /* LOCK_PROFILING */
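
/*
 * Typical usage from userland (illustrative), on a kernel built with
 * options LOCK_PROFILING:
 *
 *	sysctl debug.lock.prof.enable=1		# start collecting
 *	<run the workload of interest>
 *	sysctl debug.lock.prof.enable=0		# stop collecting
 *	sysctl debug.lock.prof.stats		# dump the per-site table
 *	sysctl debug.lock.prof.reset=1		# discard accumulated data
 */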