/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma_int.h>
int	vmem_startup_count(void);

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
    (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
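/*
 * Worked example (illustrative, assuming a 64-bit vmem_size_t and NBBY == 8):
 * VMEM_MAXORDER is 32 - 1 + 64 - 5 == 90.  Orders 0..31 index exact sizes of
 * 1..32 quanta, and the remaining orders cover power-of-two ranges up to the
 * top bit of vmem_size_t.
 */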
#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	VMEM_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | \
    M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16
/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16
/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;	/* link on segment list */
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};
/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;
	vmem_size_t		vm_limit;
	struct vmem_btag	vm_cursor;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};
#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_TYPE_CURSOR		5	/* Cursor for nextfit allocations. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;
/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);
#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)
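/*
 * Worked example (illustrative, hypothetical values): for a power-of-two
 * align, -(align) is a mask of the high bits, so
 * VMEM_ALIGNUP(0x1234, 0x100) == -(-0x1234 & -0x100) == 0x1300.
 * VMEM_CROSS_P(0x0ff0, 0x1010, 0x1000) is true because the two addresses
 * differ in a bit at or above the boundary bit, i.e. the range crosses a
 * 0x1000 boundary.
 */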
#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
    (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
    (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
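/*
 * Worked example (illustrative): with VMEM_OPTORDER == 5, exact sizes of
 * 1..32 quanta map linearly, so SIZE2ORDER(32) == 31, while larger sizes
 * map logarithmically: SIZE2ORDER(33) == flsl(33) + 25 == 31 as well
 * (order 31 holds sizes [32, 63]), and SIZE2ORDER(64) == 32.  Conversely,
 * ORDER2SIZE(32) == 1 << (32 - 26) == 64, the smallest size on that list.
 */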
/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)
/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* Boot-time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif
/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static __noinline int
_bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kernel arena and arenas derived from kernel arena to
	 * dip into reserve tags.  They are where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return (ENOMEM);

	return (0);
}

static inline int
bt_fill(vmem_t *vm, int flags)
{
	if (vm->vm_nfreetags >= BT_MAXALLOC)
		return (0);
	return (_bt_fill(vm, flags));
}
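/*
 * Usage sketch (illustrative; mirrors the pattern used by vmem_add() and
 * vmem_xalloc() below): callers pre-fill before making structural changes
 * so that subsequent bt_alloc() calls cannot fail while the lock is held:
 *
 *	VMEM_LOCK(vm);
 *	if (bt_fill(vm, flags) == 0) {
 *		... up to BT_MAXALLOC bt_alloc() calls are now safe ...
 *	}
 *	VMEM_UNLOCK(vm);
 */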
/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return (bt);
}
/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}
static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}
/*
 * Hide MAXALLOC tags before dropping the arena lock to ensure that a
 * concurrent allocation attempt does not grab them.
 */
static void
bt_save(vmem_t *vm)
{
	KASSERT(vm->vm_nfreetags >= BT_MAXALLOC,
	    ("%s: insufficient free tags %d", __func__, vm->vm_nfreetags));
	vm->vm_nfreetags -= BT_MAXALLOC;
}

static void
bt_restore(vmem_t *vm)
{
	vm->vm_nfreetags += BT_MAXALLOC;
}
/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */
static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list whose blocks are all guaranteed to be large
 * enough for the requested size.  Otherwise, return the list which may
 * contain blocks large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}
static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}
static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}
/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type != BT_TYPE_CURSOR);
	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm __unused, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}
/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 *
 * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
 * failure, so UMA can't be used to cache a resource with value 0.
 */
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));

	qc = arg;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
	}
	return (i);
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}
static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
	}
}

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}
#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KERNEL;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
		if (kmem_back_domain(domain, kernel_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			vm_wait_domain(domain);
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif
/*
 * How many pages do we need to startup_alloc.
 */
int
vmem_startup_count(void)
{

	return (howmany(BT_MAXALLOC,
	    UMA_SLAB_SPACE / sizeof(struct vmem_btag)));
}
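/*
 * Back-of-the-envelope check (illustrative; the exact sizes are
 * assumptions): with 4 KB slabs and a boundary tag of roughly 56 bytes,
 * one slab holds on the order of 70 tags, so howmany(BT_MAXALLOC == 4,
 * ~70) is a single startup page.
 */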
void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_zone = uma_zcreate("vmem",
	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.  vmem_bt_alloc() allocates from a per-domain
	 * arena, which may involve importing a range from the kernel arena,
	 * so we need to keep at least 2 * BT_MAXALLOC tags reserved.
	 */
	uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}
/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t i, oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0)
		free(oldhashlist, M_VMEM);

	return 0;
}
static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);
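		/*
		 * Illustrative arithmetic (hypothetical counts): 5000 busy
		 * tags give desired = 1 << flsl(5000) = 8192, so a
		 * 4096-bucket table is doubled, while a 16384-bucket table
		 * is left alone (a shrink requires desired * 4 <= current).
		 */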
		/*
		 * Periodically wake up threads waiting for resources,
		 * so that they can ask for reclamation again.
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, 1);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}
static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	uma_zfree(vmem_zone, vm);
}
static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return (EINVAL);

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);
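	/*
	 * Illustrative arithmetic (hypothetical values): a request for size
	 * 0x2000 at align 0x1000 from an arena with a smaller quantum first
	 * becomes 2 * 0x1000 + 0x2000 = 0x4000, which guarantees that some
	 * 0x1000-aligned 0x2000-byte range fits wherever the imported span
	 * happens to start.
	 */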
	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
		return (ENOMEM);

	bt_save(vm);
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	bt_restore(vm);
	if (error != 0)
		return (ENOMEM);

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return (0);
}
/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It's the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}
/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
}
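/*
 * Sketch of the effect (illustrative addresses): clipping [10, 30) out of
 * a free tag covering [0, 40) leaves three tags on the segment list:
 * FREE [0, 10), BUSY [10, 30), FREE [30, 40).  The busy tag is also
 * hashed, so vmem_xfree() can later find it by its start address alone.
 */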
static int
vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_size_t avail;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * XXX it is possible to fail to meet xalloc constraints with the
	 * imported region.  It is up to the user to specify the
	 * import quantum such that it can satisfy any allocation.
	 */
	if (vmem_import(vm, size, align, flags) == 0)
		return (1);

	/*
	 * Try to free some space from the quantum cache or reclaim
	 * functions if available.
	 */
	if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
		avail = vm->vm_size - vm->vm_inuse;
		bt_save(vm);
		VMEM_UNLOCK(vm);
		if (vm->vm_qcache_max != 0)
			qc_drain(vm);
		if (vm->vm_reclaimfn != NULL)
			vm->vm_reclaimfn(vm, flags);
		VMEM_LOCK(vm);
		bt_restore(vm);
		/* If we were successful retry even NOWAIT. */
		if (vm->vm_size - vm->vm_inuse > avail)
			return (1);
	}
	if ((flags & M_NOWAIT) != 0)
		return (0);
	bt_save(vm);
	VMEM_CONDVAR_WAIT(vm);
	bt_restore(vm);
	return (1);
}
static int
vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
{
	struct vmem_btag *prev;

	MPASS(bt->bt_type == BT_TYPE_FREE);

	if (vm->vm_releasefn == NULL)
		return (0);

	prev = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(prev != NULL);
	MPASS(prev->bt_type != BT_TYPE_FREE);

	if (prev->bt_type == BT_TYPE_SPAN && prev->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(prev->bt_start == bt->bt_start);
		spanaddr = prev->bt_start;
		spansize = prev->bt_size;
		if (remfree)
			bt_remfree(vm, bt);
		bt_remseg(vm, bt);
		bt_remseg(vm, prev);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
		return (1);
	}
	return (0);
}
static int
vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross, int flags,
    vmem_addr_t *addrp)
{
	struct vmem_btag *bt, *cursor, *next, *prev;
	int error;

	error = ENOMEM;
	VMEM_LOCK(vm);
retry:
	/*
	 * Make sure we have enough tags to complete the operation.
	 */
	if (bt_fill(vm, flags) != 0)
		goto out;

	/*
	 * Find the next free tag meeting our constraints.  If one is found,
	 * perform the allocation.
	 */
	for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
	    bt != cursor; bt = TAILQ_NEXT(bt, bt_seglist)) {
		if (bt == NULL)
			bt = TAILQ_FIRST(&vm->vm_seglist);
		if (bt->bt_type == BT_TYPE_FREE && bt->bt_size >= size &&
		    (error = vmem_fit(bt, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, bt, *addrp, size);
			break;
		}
	}

	/*
	 * Try to coalesce free segments around the cursor.  If we succeed,
	 * and have not yet satisfied the allocation request, try again with
	 * the newly coalesced segment.
	 */
	if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
	    (prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
	    next->bt_type == BT_TYPE_FREE && prev->bt_type == BT_TYPE_FREE &&
	    prev->bt_start + prev->bt_size == next->bt_start) {
		prev->bt_size += next->bt_size;
		bt_remfree(vm, next);
		bt_remseg(vm, next);

		/*
		 * The coalesced segment might be able to satisfy our request.
		 * If not, we might need to release it from the arena.
		 */
		if (error == ENOMEM && prev->bt_size >= size &&
		    (error = vmem_fit(prev, size, align, phase, nocross,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
			vmem_clip(vm, prev, *addrp, size);
			bt = prev;
		} else
			(void)vmem_try_release(vm, prev, true);
	}

	/*
	 * If the allocation was successful, advance the cursor.
	 */
	if (error == 0) {
		TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
		for (; bt != NULL && bt->bt_start < *addrp + size;
		    bt = TAILQ_NEXT(bt, bt_seglist))
			;
		if (bt != NULL)
			TAILQ_INSERT_BEFORE(bt, cursor, bt_seglist);
		else
			TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
	}

	/*
	 * Attempt to bring additional resources into the arena.  If that
	 * fails and M_WAITOK is specified, sleep waiting for resources to be
	 * freed.
	 */
	if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
		goto retry;

out:
	VMEM_UNLOCK(vm);
	return (error);
}
void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
    vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}
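/*
 * Usage sketch (hypothetical callbacks; none of these names exist in this
 * file): a sub-arena importing 64 KB slices from a parent arena might be
 * wired up as
 *
 *	vmem_set_import(sub, my_import, my_release, parent, 65536);
 *
 * where my_import()/my_release() call vmem_xalloc()/vmem_xfree() on the
 * parent arena passed through the opaque vm_arg pointer.
 */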
void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{

	VMEM_LOCK(vm);
	vm->vm_limit = limit;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}
/*
 * vmem_init: Initializes a vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_limit = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
	vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
	TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);

	for (i = 0; i < VMEM_MAXORDER; i++)
		LIST_INIT(&vm->vm_freelist[i]);

	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}
/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	vmem_t *vm;

	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}
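/*
 * Lifecycle sketch (illustrative, hypothetical arena): vmem manages
 * arbitrary integer spaces, not only kva, so a caller handing out small
 * numeric IDs might do
 *
 *	vmem_t *ids;
 *	vmem_addr_t id;
 *
 *	ids = vmem_create("myids", 1, 1024, 1, 0, M_WAITOK);
 *	if (vmem_alloc(ids, 1, M_BESTFIT | M_WAITOK, &id) == 0)
 *		vmem_free(ids, id, 1);
 *	vmem_destroy(ids);
 */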
vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
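/*
 * e.g. (illustrative): with a quantum of 0x1000, a request of 0x1234 bytes
 * rounds up to 0x2000: (0x1234 + 0xfff) & ~0xfff.
 */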
/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		/*
		 * Resource 0 cannot be cached, so avoid a blocking allocation
		 * in qc_import() and give the vmem_xalloc() call below a chance
		 * to return 0.
		 */
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
		    (flags & ~M_WAITOK) | M_NOWAIT);
		if (__predict_true(*addrp != 0))
			return (0);
	}

	return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp));
}
int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
	if (strat == M_NEXTFIT)
		MPASS(minaddr == VMEM_ADDR_MIN && maxaddr == VMEM_ADDR_MAX);

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	/*
	 * Next-fit allocations don't use the freelists.
	 */
	if (strat == M_NEXTFIT)
		return (vmem_xalloc_nextfit(vm, size0, align, phase, nocross,
		    flags, addrp));

	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * Choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the operation.
		 */
		error = bt_fill(vm, flags);
		if (error != 0)
			break;

		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align,
					    phase, nocross, minaddr, maxaddr,
					    addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp,
						    size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}

		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}

		/*
		 * Try a few measures to bring additional resources into the
		 * arena.  If all else fails, we will sleep waiting for
		 * resources to be freed.
		 */
		if (!vmem_try_fetch(vm, size, align, flags)) {
			error = ENOMEM;
			break;
		}
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}
/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;

	MPASS(size > 0);

	if (size <= vm->vm_qcache_max &&
	    __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}
void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size __unused)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	if (!vmem_try_release(vm, bt, false)) {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	flags &= VMEM_FLAGS;

	VMEM_LOCK(vm);
	error = bt_fill(vm, flags);
	if (error == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	VMEM_UNLOCK(vm);

	return (error);
}
/*
 * vmem_size: report information about an arena's size.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}
/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	case BT_TYPE_CURSOR:
		return "cursor";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */
#if defined(DDB)
#include <ddb/ddb.h>
static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}
void
vmem_printall(const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}

DB_SHOW_COMMAND(vmemdump, vmemdump)
{

	if (!have_addr) {
		db_printf("usage: show vmemdump <addr>\n");
		return;
	}

	vmem_dump((const vmem_t *)addr, db_printf);
}

DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_dump(vm, db_printf);
}
DB_SHOW_COMMAND(vmem, vmem_summ)
{
	const vmem_t *vm = (const void *)addr;
	const bt_t *bt;
	size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
	size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
	int ord;

	if (!have_addr) {
		db_printf("usage: show vmem <addr>\n");
		return;
	}

	db_printf("vmem %p '%s'\n", vm, vm->vm_name);
	db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
	db_printf("\tsize:\t%zu\n", vm->vm_size);
	db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
	db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
	db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
	db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);

	memset(&ft, 0, sizeof(ft));
	memset(&ut, 0, sizeof(ut));
	memset(&fs, 0, sizeof(fs));
	memset(&us, 0, sizeof(us));
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
		if (bt->bt_type == BT_TYPE_BUSY) {
			ut[ord]++;
			us[ord] += bt->bt_size;
		} else if (bt->bt_type == BT_TYPE_FREE) {
			ft[ord]++;
			fs[ord] += bt->bt_size;
		}
	}
	db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
	for (ord = 0; ord < VMEM_MAXORDER; ord++) {
		if (ut[ord] == 0 && ft[ord] == 0)
			continue;
		db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
		    ORDER2SIZE(ord) << vm->vm_quantum_shift,
		    ut[ord], us[ord], ft[ord], fs[ord]);
	}
}

DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_summ((db_expr_t)vm, TRUE, count, modif);
}
#endif /* defined(DDB) */

#define vmem_printf printf
#if defined(DIAGNOSTIC)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	MPASS(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_type == BT_TYPE_CURSOR) {
			if (bt->bt_start != 0 || bt->bt_size != 0) {
				printf("corrupted cursor\n");
				return false;
			}
			continue;
		}
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (bt2->bt_type == BT_TYPE_CURSOR) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */