2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
5 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6 * Copyright (c) 2004-2006 Robert N. M. Watson
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice unmodified, this list of conditions, and the following
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * uma_core.c Implementation of the Universal Memory allocator
34 * This allocator is intended to replace the multitude of similar object caches
35 * in the standard FreeBSD kernel. The intent is to be flexible as well as
36 * efficient. A primary design goal is to return unused memory to the rest of
37 * the system. This will make the system as a whole more flexible due to the
38 * ability to move memory to subsystems which most need it instead of leaving
39 * pools of reserved memory unused.
41 * The basic ideas stem from similar slab/zone based allocators whose algorithms
48 * - Improve memory usage for large allocations
49 * - Investigate cache size adjustments
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
56 #include "opt_param.h"
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/eventhandler.h>
63 #include <sys/kernel.h>
64 #include <sys/types.h>
65 #include <sys/limits.h>
66 #include <sys/queue.h>
67 #include <sys/malloc.h>
70 #include <sys/sysctl.h>
71 #include <sys/mutex.h>
73 #include <sys/random.h>
74 #include <sys/rwlock.h>
76 #include <sys/sched.h>
78 #include <sys/taskqueue.h>
79 #include <sys/vmmeter.h>
82 #include <vm/vm_object.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_pageout.h>
85 #include <vm/vm_param.h>
86 #include <vm/vm_phys.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_kern.h>
89 #include <vm/vm_extern.h>
91 #include <vm/uma_int.h>
92 #include <vm/uma_dbg.h>
97 #include <vm/memguard.h>
101 * This is the zone and keg from which all zones are spawned.
103 static uma_zone_t kegs;
104 static uma_zone_t zones;
106 /* This is the zone from which all offpage uma_slab_ts are allocated. */
107 static uma_zone_t slabzone;
110 * The initial hash tables come out of this zone so they can be allocated
111 * prior to malloc coming up.
113 static uma_zone_t hashzone;
115 /* The boot-time adjusted value for cache line alignment. */
116 int uma_align_cache = 64 - 1;
118 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
121 * Are we allowed to allocate buckets?
123 static int bucketdisable = 1;
125 /* Linked list of all kegs in the system */
126 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
128 /* Linked list of all cache-only zones in the system */
129 static LIST_HEAD(,uma_zone) uma_cachezones =
130 LIST_HEAD_INITIALIZER(uma_cachezones);
132 /* This RW lock protects the keg list */
133 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
136 * Pointer to, and counter of, the pool of pages preallocated at
137 * startup to bootstrap UMA.
139 static char *bootmem;
140 static int boot_pages;
142 static struct sx uma_drain_lock;
144 /* kmem soft limit. */
145 static unsigned long uma_kmem_limit = LONG_MAX;
146 static volatile unsigned long uma_kmem_total;
148 /* Is the VM done starting up? */
149 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
150 BOOT_RUNNING } booted = BOOT_COLD;
153 * This is the handle used to schedule events that need to happen
154 * outside of the allocation fast path.
156 static struct callout uma_callout;
157 #define UMA_TIMEOUT 20 /* Seconds for callout interval. */
160 * This structure is passed as the zone ctor arg so that I don't have to create
161 * a special allocation function just for zones.
163 struct uma_zctor_args {
178 struct uma_kctor_args {
187 struct uma_bucket_zone {
190 int ubz_entries; /* Number of items it can hold. */
191 int ubz_maxsize; /* Maximum allocation size per-item. */
195 * Compute the actual number of bucket entries to pack them in power
196 * of two sizes for more efficient space utilization.
198 #define BUCKET_SIZE(n) \
199 (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
201 #define BUCKET_MAX BUCKET_SIZE(256)
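/*
 * Illustrative arithmetic (not from the original source; the real
 * sizeof(struct uma_bucket) depends on uma_int.h): with 8-byte pointers and
 * a hypothetical 16-byte bucket header, BUCKET_SIZE(4) evaluates to
 * (8 * 4 - 16) / 8 = 2 item pointers, and bucket_init() below then sizes the
 * "4 Bucket" zone at 16 + 2 * 8 = 32 bytes, i.e. exactly the power-of-two
 * footprint described in the comment above.
 */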
203 struct uma_bucket_zone bucket_zones[] = {
204 { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
205 { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
206 { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
207 { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
208 { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
209 { NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
210 { NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
211 { NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
212 { NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
217 * Flags and enumerations to be passed to internal functions.
219 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
221 #define UMA_ANYDOMAIN -1 /* Special value for domain search. */
225 int uma_startup_count(int);
226 void uma_startup(void *, int);
227 void uma_startup1(void);
228 void uma_startup2(void);
230 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
231 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
232 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
233 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
234 static void page_free(void *, vm_size_t, uint8_t);
235 static void pcpu_page_free(void *, vm_size_t, uint8_t);
236 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int);
237 static void cache_drain(uma_zone_t);
238 static void bucket_drain(uma_zone_t, uma_bucket_t);
239 static void bucket_cache_drain(uma_zone_t zone);
240 static int keg_ctor(void *, int, void *, int);
241 static void keg_dtor(void *, int, void *);
242 static int zone_ctor(void *, int, void *, int);
243 static void zone_dtor(void *, int, void *);
244 static int zero_init(void *, int, int);
245 static void keg_small_init(uma_keg_t keg);
246 static void keg_large_init(uma_keg_t keg);
247 static void zone_foreach(void (*zfunc)(uma_zone_t));
248 static void zone_timeout(uma_zone_t zone);
249 static int hash_alloc(struct uma_hash *);
250 static int hash_expand(struct uma_hash *, struct uma_hash *);
251 static void hash_free(struct uma_hash *hash);
252 static void uma_timeout(void *);
253 static void uma_startup3(void);
254 static void *zone_alloc_item(uma_zone_t, void *, int, int);
255 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
256 static void bucket_enable(void);
257 static void bucket_init(void);
258 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
259 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
260 static void bucket_zone_drain(void);
261 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
262 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
263 static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int);
264 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
265 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
266 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
267 uma_fini fini, int align, uint32_t flags);
268 static int zone_import(uma_zone_t, void **, int, int, int);
269 static void zone_release(uma_zone_t, void **, int);
270 static void uma_zero_item(void *, uma_zone_t);
272 void uma_print_zone(uma_zone_t);
273 void uma_print_stats(void);
274 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
275 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
278 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
279 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
280 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
281 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
283 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
284 "Memory allocation debugging");
286 static u_int dbg_divisor = 1;
287 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
288 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
289 "Debug & thrash every this item in memory allocator");
291 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
292 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
293 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
294 &uma_dbg_cnt, "memory items debugged");
295 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
296 &uma_skip_cnt, "memory items skipped, not debugged");
299 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
301 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
302 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
304 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
305 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
307 static int zone_warnings = 1;
308 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
309 "Warn when UMA zones becomes full");
311 /* Adjust bytes under management by UMA. */
313 uma_total_dec(unsigned long size)
316 atomic_subtract_long(&uma_kmem_total, size);
320 uma_total_inc(unsigned long size)
323 if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
324 uma_reclaim_wakeup();
328 * This routine checks to see whether or not it's safe to enable buckets.
333 bucketdisable = vm_page_count_min();
337 * Initialize bucket_zones, the array of zones of buckets of various sizes.
339 * For each zone, calculate the memory required for each bucket, consisting
340 * of the header and an array of pointers.
345 struct uma_bucket_zone *ubz;
348 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
349 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
350 size += sizeof(void *) * ubz->ubz_entries;
351 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
352 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
353 UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
358 * Given a desired number of entries for a bucket, return the zone from which
359 * to allocate the bucket.
361 static struct uma_bucket_zone *
362 bucket_zone_lookup(int entries)
364 struct uma_bucket_zone *ubz;
366 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
367 if (ubz->ubz_entries >= entries)
374 bucket_select(int size)
376 struct uma_bucket_zone *ubz;
378 ubz = &bucket_zones[0];
379 if (size > ubz->ubz_maxsize)
380 return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
382 for (; ubz->ubz_entries != 0; ubz++)
383 if (ubz->ubz_maxsize < size)
386 return (ubz->ubz_entries);
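/*
 * Usage sketch (illustrative): bucket_zone_lookup(BUCKET_MAX) walks the
 * table and returns the final "256 Bucket" zone, the first entry whose
 * ubz_entries reaches BUCKET_MAX.  For items larger than the 4096-byte
 * ubz_maxsize of the smallest bucket zone, bucket_select() instead scales
 * the count down:
 *
 *	count = MAX((4096 * BUCKET_SIZE(4)) / size, 1);
 *
 * so very large items end up with one- or two-entry buckets.
 */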
390 bucket_alloc(uma_zone_t zone, void *udata, int flags)
392 struct uma_bucket_zone *ubz;
396 * This is to stop us from allocating per cpu buckets while we're
397 * running out of vm.boot_pages. Otherwise, we would exhaust the
398 * boot pages. This also prevents us from allocating buckets in
399 * low memory situations.
404 * To limit bucket recursion we store the original zone flags
405 * in a cookie passed via zalloc_arg/zfree_arg. This allows the
406 * NOVM flag to persist even through deep recursions. We also
407 * store ZFLAG_BUCKET once we have recursed attempting to allocate
408 * a bucket for a bucket zone so we do not allow infinite bucket
409 * recursion. This cookie will even persist to frees of unused
410 * buckets via the allocation path or bucket allocations in the free path.
413 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
414 udata = (void *)(uintptr_t)zone->uz_flags;
416 if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
418 udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
420 if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
422 ubz = bucket_zone_lookup(zone->uz_count);
423 if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
425 bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
428 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
431 bucket->ub_entries = ubz->ubz_entries;
438 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
440 struct uma_bucket_zone *ubz;
442 KASSERT(bucket->ub_cnt == 0,
443 ("bucket_free: Freeing a non free bucket."));
444 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
445 udata = (void *)(uintptr_t)zone->uz_flags;
446 ubz = bucket_zone_lookup(bucket->ub_entries);
447 uma_zfree_arg(ubz->ubz_zone, bucket, udata);
451 bucket_zone_drain(void)
453 struct uma_bucket_zone *ubz;
455 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
456 zone_drain(ubz->ubz_zone);
460 zone_log_warning(uma_zone_t zone)
462 static const struct timeval warninterval = { 300, 0 };
464 if (!zone_warnings || zone->uz_warning == NULL)
467 if (ratecheck(&zone->uz_ratecheck, &warninterval))
468 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
472 zone_maxaction(uma_zone_t zone)
475 if (zone->uz_maxaction.ta_func != NULL)
476 taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
480 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
484 LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
485 kegfn(klink->kl_keg);
489 * Routine called by timeout which is used to fire off some time interval
490 * based calculations. (stats, hash size, etc.)
499 uma_timeout(void *unused)
502 zone_foreach(zone_timeout);
504 /* Reschedule this event */
505 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
509 * Routine to perform timeout driven calculations. This expands the
510 * hashes and does per cpu statistics aggregation.
515 keg_timeout(uma_keg_t keg)
520 * Expand the keg hash table.
522 * This is done if the number of slabs is larger than the hash size.
523 * What I'm trying to do here is completely eliminate collisions. This
524 * may be a little aggressive. Should I allow for two collisions max?
526 if (keg->uk_flags & UMA_ZONE_HASH &&
527 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
528 struct uma_hash newhash;
529 struct uma_hash oldhash;
533 * This is so involved because allocating and freeing
534 * while the keg lock is held will lead to deadlock.
535 * I have to do everything in stages and check for
538 newhash = keg->uk_hash;
540 ret = hash_alloc(&newhash);
543 if (hash_expand(&keg->uk_hash, &newhash)) {
544 oldhash = keg->uk_hash;
545 keg->uk_hash = newhash;
558 zone_timeout(uma_zone_t zone)
561 zone_foreach_keg(zone, &keg_timeout);
565 * Allocate and zero fill the next sized hash table from the appropriate
569 * hash A new hash structure with the old hash size in uh_hashsize
572 * 1 on success and 0 on failure.
575 hash_alloc(struct uma_hash *hash)
580 oldsize = hash->uh_hashsize;
582 /* We're just going to go to a power of two greater */
584 hash->uh_hashsize = oldsize * 2;
585 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
586 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
587 M_UMAHASH, M_NOWAIT);
589 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
590 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
591 UMA_ANYDOMAIN, M_WAITOK);
592 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
594 if (hash->uh_slab_hash) {
595 bzero(hash->uh_slab_hash, alloc);
596 hash->uh_hashmask = hash->uh_hashsize - 1;
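/*
 * Example (illustrative): a table that grows from 64 to 128 buckets gets
 * uh_hashmask = 127, so the hash macro can reduce a slab address with a
 * cheap "& uh_hashmask" rather than a modulo; this is why the table sizes
 * are kept at powers of two.
 */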
604 * Expands the hash table for HASH zones. This is done from zone_timeout
605 * to reduce collisions. This must not be done in the regular allocation
606 * path, otherwise, we can recurse on the vm while allocating pages.
609 * oldhash The hash you want to expand
610 * newhash The hash structure for the new table
618 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
624 if (!newhash->uh_slab_hash)
627 if (oldhash->uh_hashsize >= newhash->uh_hashsize)
631 * I need to investigate hash algorithms for resizing without a full rehash.
635 for (i = 0; i < oldhash->uh_hashsize; i++)
636 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
637 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
638 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
639 hval = UMA_HASH(newhash, slab->us_data);
640 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
648 * Free the hash bucket to the appropriate backing store.
651 * slab_hash The hash bucket we're freeing
652 * hashsize The number of entries in that hash bucket
658 hash_free(struct uma_hash *hash)
660 if (hash->uh_slab_hash == NULL)
662 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
663 zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
665 free(hash->uh_slab_hash, M_UMAHASH);
669 * Frees all outstanding items in a bucket
672 * zone The zone to free to, must be unlocked.
673 * bucket The free/alloc bucket with items, cpu queue must be locked.
680 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
688 for (i = 0; i < bucket->ub_cnt; i++)
689 zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
690 zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
695 * Drains the per cpu caches for a zone.
697 * NOTE: This may only be called while the zone is being torn down, and not
698 * during normal operation. This is necessary in order that we do not have
699 * to migrate CPUs to drain the per-CPU caches.
702 * zone The zone to drain, must be unlocked.
708 cache_drain(uma_zone_t zone)
714 * XXX: It is safe to not lock the per-CPU caches, because we're
715 * tearing down the zone anyway. I.e., there will be no further use
716 * of the caches at this point.
718 * XXX: It would be good to be able to assert that the zone is being
719 * torn down to prevent improper use of cache_drain().
721 * XXX: We lock the zone before passing into bucket_cache_drain() as
722 * it is used elsewhere. Should the tear-down path be made special
723 * there in some form?
726 cache = &zone->uz_cpu[cpu];
727 bucket_drain(zone, cache->uc_allocbucket);
728 bucket_drain(zone, cache->uc_freebucket);
729 if (cache->uc_allocbucket != NULL)
730 bucket_free(zone, cache->uc_allocbucket, NULL);
731 if (cache->uc_freebucket != NULL)
732 bucket_free(zone, cache->uc_freebucket, NULL);
733 cache->uc_allocbucket = cache->uc_freebucket = NULL;
736 bucket_cache_drain(zone);
741 cache_shrink(uma_zone_t zone)
744 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
748 zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
753 cache_drain_safe_cpu(uma_zone_t zone)
759 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
765 if (zone->uz_flags & UMA_ZONE_NUMA)
766 domain = PCPU_GET(domain);
769 cache = &zone->uz_cpu[curcpu];
770 if (cache->uc_allocbucket) {
771 if (cache->uc_allocbucket->ub_cnt != 0)
772 LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
773 cache->uc_allocbucket, ub_link);
775 b1 = cache->uc_allocbucket;
776 cache->uc_allocbucket = NULL;
778 if (cache->uc_freebucket) {
779 if (cache->uc_freebucket->ub_cnt != 0)
780 LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
781 cache->uc_freebucket, ub_link);
783 b2 = cache->uc_freebucket;
784 cache->uc_freebucket = NULL;
789 bucket_free(zone, b1, NULL);
791 bucket_free(zone, b2, NULL);
795 * Safely drain per-CPU caches of a zone(s) to alloc bucket.
796 * This is an expensive call because it needs to bind to all CPUs
797 * one by one and enter a critical section on each of them in order
798 * to safely access their cache buckets.
799 * The zone lock must not be held when calling this function.
802 cache_drain_safe(uma_zone_t zone)
807 * Polite bucket size shrinking was not enough; shrink aggressively.
812 zone_foreach(cache_shrink);
815 thread_lock(curthread);
816 sched_bind(curthread, cpu);
817 thread_unlock(curthread);
820 cache_drain_safe_cpu(zone);
822 zone_foreach(cache_drain_safe_cpu);
824 thread_lock(curthread);
825 sched_unbind(curthread);
826 thread_unlock(curthread);
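/*
 * A minimal sketch of the pattern above (the CPU iteration itself is elided
 * in this excerpt; assumptions marked):
 *
 *	CPU_FOREACH(cpu) {
 *		thread_lock(curthread);
 *		sched_bind(curthread, cpu);
 *		thread_unlock(curthread);
 *		zone_foreach(cache_drain_safe_cpu);
 *	}
 *
 * Binding guarantees that the critical section in cache_drain_safe_cpu()
 * really runs on 'cpu', so only that CPU's uz_cpu[] slot is touched.
 */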
830 * Drain the cached buckets from a zone. Expects a locked zone on entry.
833 bucket_cache_drain(uma_zone_t zone)
835 uma_zone_domain_t zdom;
840 * Drain the bucket queues and free the buckets.
842 for (i = 0; i < vm_ndomains; i++) {
843 zdom = &zone->uz_domain[i];
844 while ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
845 LIST_REMOVE(bucket, ub_link);
847 bucket_drain(zone, bucket);
848 bucket_free(zone, bucket, NULL);
854 * Shrink further bucket sizes. The price of a single zone lock collision
855 * is probably lower than the price of a global cache drain.
857 if (zone->uz_count > zone->uz_count_min)
862 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
868 CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
869 keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
872 flags = slab->us_flags;
874 if (keg->uk_fini != NULL) {
875 for (i--; i > -1; i--)
878 * trash_fini implies that dtor was trash_dtor. trash_fini
879 * would check that memory hasn't been modified since free,
880 * which executed trash_dtor.
881 * That's why we need to run the uma_dbg_kskip() check here,
882 * although we don't make the skip check for other init/fini
885 if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
886 keg->uk_fini != trash_fini)
888 keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
891 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
892 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
893 keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
894 uma_total_dec(PAGE_SIZE * keg->uk_ppera);
898 * Frees pages from a keg back to the system. This is done on demand from
899 * the pageout daemon.
904 keg_drain(uma_keg_t keg)
906 struct slabhead freeslabs = { 0 };
908 uma_slab_t slab, tmp;
912 * We don't want to take pages from statically allocated kegs at this
915 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
918 CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
919 keg->uk_name, keg, keg->uk_free);
921 if (keg->uk_free == 0)
924 for (i = 0; i < vm_ndomains; i++) {
925 dom = &keg->uk_domain[i];
926 LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
927 /* We have nowhere to free these to. */
928 if (slab->us_flags & UMA_SLAB_BOOT)
931 LIST_REMOVE(slab, us_link);
932 keg->uk_pages -= keg->uk_ppera;
933 keg->uk_free -= keg->uk_ipers;
935 if (keg->uk_flags & UMA_ZONE_HASH)
936 UMA_HASH_REMOVE(&keg->uk_hash, slab,
939 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
946 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
947 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
948 keg_free_slab(keg, slab, keg->uk_ipers);
953 zone_drain_wait(uma_zone_t zone, int waitok)
957 * Set draining to interlock with zone_dtor() so we can release our
958 * locks as we go. Only dtor() should do a WAITOK call since it
959 * is the only call that knows the structure will still be available
963 while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
964 if (waitok == M_NOWAIT)
966 msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
968 zone->uz_flags |= UMA_ZFLAG_DRAINING;
969 bucket_cache_drain(zone);
972 * The DRAINING flag protects us from being freed while
973 * we're running. Normally the uma_rwlock would protect us but we
974 * must be able to release and acquire the right lock for each keg.
976 zone_foreach_keg(zone, &keg_drain);
978 zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
985 zone_drain(uma_zone_t zone)
988 zone_drain_wait(zone, M_NOWAIT);
992 * Allocate a new slab for a keg. This does not insert the slab onto a list.
995 * wait Shall we wait?
998 * The slab that was allocated or NULL if there is no memory and the
999 * caller specified M_NOWAIT.
1002 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)
1011 KASSERT(domain >= 0 && domain < vm_ndomains,
1012 ("keg_alloc_slab: domain %d out of range", domain));
1013 mtx_assert(&keg->uk_lock, MA_OWNED);
1017 allocf = keg->uk_allocf;
1019 size = keg->uk_ppera * PAGE_SIZE;
1021 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1022 slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait);
1028 * This reproduces the old vm_zone behavior of zero filling pages the
1029 * first time they are added to a zone.
1031 * Malloced items are zeroed in uma_zalloc.
1034 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1039 if (keg->uk_flags & UMA_ZONE_NODUMP)
1042 /* zone is passed for legacy reasons. */
1043 mem = allocf(zone, size, domain, &flags, wait);
1045 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1046 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1050 uma_total_inc(size);
1052 /* Point the slab into the allocated memory */
1053 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1054 slab = (uma_slab_t )(mem + keg->uk_pgoff);
1056 if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1057 for (i = 0; i < keg->uk_ppera; i++)
1058 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1061 slab->us_data = mem;
1062 slab->us_freecount = keg->uk_ipers;
1063 slab->us_flags = flags;
1064 slab->us_domain = domain;
1065 BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1067 BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1070 if (keg->uk_init != NULL) {
1071 for (i = 0; i < keg->uk_ipers; i++)
1072 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1073 keg->uk_size, wait) != 0)
1075 if (i != keg->uk_ipers) {
1076 keg_free_slab(keg, slab, i);
1084 CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1085 slab, keg->uk_name, keg);
1088 if (keg->uk_flags & UMA_ZONE_HASH)
1089 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1091 keg->uk_pages += keg->uk_ppera;
1092 keg->uk_free += keg->uk_ipers;
1099 * This function is intended to be used early on in place of page_alloc() so
1100 * that we may use the boot time page cache to satisfy allocations before the VM is ready.
1104 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1111 keg = zone_first_keg(zone);
1114 * If we are in BOOT_BUCKETS or higher, then switch to the real
1115 * allocator. Zones with page sized slabs switch at BOOT_PAGEALLOC.
1121 case BOOT_PAGEALLOC:
1122 if (keg->uk_ppera > 1)
1126 #ifdef UMA_MD_SMALL_ALLOC
1127 keg->uk_allocf = (keg->uk_ppera > 1) ?
1128 page_alloc : uma_small_alloc;
1130 keg->uk_allocf = page_alloc;
1132 return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1136 * Check our small startup cache to see if it has pages remaining.
1138 pages = howmany(bytes, PAGE_SIZE);
1139 KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1140 if (pages > boot_pages)
1141 panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1143 printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1147 boot_pages -= pages;
1148 bootmem += pages * PAGE_SIZE;
1149 *pflag = UMA_SLAB_BOOT;
1155 * Allocates a number of pages from the system
1158 * bytes The number of bytes requested
1159 * wait Shall we wait?
1162 * A pointer to the alloced memory or possibly
1163 * NULL if M_NOWAIT is set.
1166 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1169 void *p; /* Returned page */
1171 *pflag = UMA_SLAB_KERNEL;
1172 p = (void *) kmem_malloc_domain(kernel_arena, domain, bytes, wait);
1178 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1181 struct pglist alloctail;
1182 vm_offset_t addr, zkva;
1184 vm_page_t p, p_next;
1189 MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1191 TAILQ_INIT(&alloctail);
1192 flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1193 malloc2vm_flags(wait);
1194 *pflag = UMA_SLAB_KERNEL;
1195 for (cpu = 0; cpu <= mp_maxid; cpu++) {
1196 if (CPU_ABSENT(cpu)) {
1197 p = vm_page_alloc(NULL, 0, flags);
1200 p = vm_page_alloc(NULL, 0, flags);
1202 pc = pcpu_find(cpu);
1203 p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1204 if (__predict_false(p == NULL))
1205 p = vm_page_alloc(NULL, 0, flags);
1208 if (__predict_false(p == NULL))
1210 TAILQ_INSERT_TAIL(&alloctail, p, listq);
1212 if ((addr = kva_alloc(bytes)) == 0)
1215 TAILQ_FOREACH(p, &alloctail, listq) {
1216 pmap_qenter(zkva, &p, 1);
1219 return ((void*)addr);
1221 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1222 vm_page_unwire(p, PQ_NONE);
1229 * Allocates a number of pages that do not belong to a VM object
1232 * bytes The number of bytes requested
1233 * wait Shall we wait?
1236 * A pointer to the alloced memory or possibly
1237 * NULL if M_NOWAIT is set.
1240 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1243 TAILQ_HEAD(, vm_page) alloctail;
1245 vm_offset_t retkva, zkva;
1246 vm_page_t p, p_next;
1249 TAILQ_INIT(&alloctail);
1250 keg = zone_first_keg(zone);
1252 npages = howmany(bytes, PAGE_SIZE);
1253 while (npages > 0) {
1254 p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1255 VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1256 ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1260 * Since the page does not belong to an object, its
1263 TAILQ_INSERT_TAIL(&alloctail, p, listq);
1268 * Page allocation failed, free intermediate pages and
1271 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1272 vm_page_unwire(p, PQ_NONE);
1277 *flags = UMA_SLAB_PRIV;
1278 zkva = keg->uk_kva +
1279 atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1281 TAILQ_FOREACH(p, &alloctail, listq) {
1282 pmap_qenter(zkva, &p, 1);
1286 return ((void *)retkva);
1290 * Frees a number of pages to the system
1293 * mem A pointer to the memory to be freed
1294 * size The size of the memory being freed
1295 * flags The original p->us_flags field
1301 page_free(void *mem, vm_size_t size, uint8_t flags)
1305 if (flags & UMA_SLAB_KERNEL)
1306 vmem = kernel_arena;
1308 panic("UMA: page_free used with invalid flags %x", flags);
1310 kmem_free(vmem, (vm_offset_t)mem, size);
1314 * Frees pcpu zone allocations
1317 * mem A pointer to the memory to be freed
1318 * size The size of the memory being freed
1319 * flags The original p->us_flags field
1325 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1327 vm_offset_t sva, curva;
1331 MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1332 sva = (vm_offset_t)mem;
1333 for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1334 paddr = pmap_kextract(curva);
1335 m = PHYS_TO_VM_PAGE(paddr);
1336 vm_page_unwire(m, PQ_NONE);
1339 pmap_qremove(sva, size >> PAGE_SHIFT);
1340 kva_free(sva, size);
1345 * Zero fill initializer
1347 * Arguments/Returns follow uma_init specifications
1350 zero_init(void *mem, int size, int flags)
1357 * Finish creating a small uma keg. This calculates ipers, and the keg size.
1360 * keg The keg we should initialize
1366 keg_small_init(uma_keg_t keg)
1374 if (keg->uk_flags & UMA_ZONE_PCPU) {
1375 u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1377 slabsize = UMA_PCPU_ALLOC_SIZE;
1378 keg->uk_ppera = ncpus;
1380 slabsize = UMA_SLAB_SIZE;
1385 * Calculate the size of each allocation (rsize) according to
1386 * alignment. If the requested size is smaller than the smallest unit our
1387 * free-item bitmap can track, we round it up.
1389 rsize = keg->uk_size;
1390 if (rsize < slabsize / SLAB_SETSIZE)
1391 rsize = slabsize / SLAB_SETSIZE;
1392 if (rsize & keg->uk_align)
1393 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1394 keg->uk_rsize = rsize;
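/*
 * Worked example (illustrative): with uk_align == 7 (8-byte alignment) and
 * a 20-byte request, the round-up above yields (20 & ~7) + 8 = 24, so every
 * item starts on an 8-byte boundary.
 */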
1396 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1397 keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1398 ("%s: size %u too large", __func__, keg->uk_rsize));
1400 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1403 shsize = sizeof(struct uma_slab);
1405 if (rsize <= slabsize - shsize)
1406 keg->uk_ipers = (slabsize - shsize) / rsize;
1408 /* Handle special case when we have 1 item per slab, so
1409 * alignment requirement can be relaxed. */
1410 KASSERT(keg->uk_size <= slabsize - shsize,
1411 ("%s: size %u greater than slab", __func__, keg->uk_size));
1414 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1415 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1417 memused = keg->uk_ipers * rsize + shsize;
1418 wastedspace = slabsize - memused;
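/*
 * Worked example (illustrative, assuming a 4096-byte slab and a hypothetical
 * 96-byte in-page slab header): rsize = 512 gives
 * ipers = (4096 - 96) / 512 = 7 and wastedspace = 4096 - (7 * 512 + 96) =
 * 416 bytes, which is then weighed against slabsize / UMA_MAX_WASTE below to
 * decide whether an OFFPAGE header would let an eighth item fit.
 */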
1421 * We can't do OFFPAGE if we're internal or if we've been
1422 * asked to not go to the VM for buckets. If we do this we
1423 * may end up going to the VM for slabs which we do not
1424 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1425 * of UMA_ZONE_VM, which clearly forbids it.
1427 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1428 (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1432 * See if using an OFFPAGE slab will limit our waste. Only do
1433 * this if it permits more items per-slab.
1435 * XXX We could try growing slabsize to limit max waste as well.
1436 * Historically this was not done because the VM could not
1437 * efficiently handle contiguous allocations.
1439 if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1440 (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1441 keg->uk_ipers = slabsize / keg->uk_rsize;
1442 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1443 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1444 CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1445 "keg: %s(%p), calculated wastedspace = %d, "
1446 "maximum wasted space allowed = %d, "
1447 "calculated ipers = %d, "
1448 "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1449 slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1450 slabsize - keg->uk_ipers * keg->uk_rsize);
1451 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1454 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1455 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1456 keg->uk_flags |= UMA_ZONE_HASH;
1460 * Finish creating a large (> UMA_SLAB_SIZE) uma keg. Just give in and do
1461 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
1465 * keg The keg we should initialize
1471 keg_large_init(uma_keg_t keg)
1475 KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1476 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1477 ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1478 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1479 ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1481 keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1483 keg->uk_rsize = keg->uk_size;
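/*
 * Example (illustrative, 4 KB pages): a 9000-byte item yields
 * uk_ppera = howmany(9000, 4096) = 3 pages per slab holding a single item;
 * whether the slab header still fits in the tail of that allocation is what
 * the OFFPAGE check below decides.
 */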
1485 /* Check whether we have enough space to not do OFFPAGE. */
1486 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1487 shsize = sizeof(struct uma_slab);
1488 if (shsize & UMA_ALIGN_PTR)
1489 shsize = (shsize & ~UMA_ALIGN_PTR) +
1490 (UMA_ALIGN_PTR + 1);
1492 if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
1494 * We can't do OFFPAGE if we're internal, in which case
1495 * we need an extra page per allocation to contain the
1498 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1499 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1505 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1506 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1507 keg->uk_flags |= UMA_ZONE_HASH;
1511 keg_cachespread_init(uma_keg_t keg)
1518 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1519 ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1521 alignsize = keg->uk_align + 1;
1522 rsize = keg->uk_size;
1524 * We want one item to start on every align boundary in a page. To
1525 * do this we will span pages. We will also extend the item by the
1526 * size of align if it is an even multiple of align. Otherwise, it
1527 * would fall on the same boundary every time.
1529 if (rsize & keg->uk_align)
1530 rsize = (rsize & ~keg->uk_align) + alignsize;
1531 if ((rsize & alignsize) == 0)
1533 trailer = rsize - keg->uk_size;
1534 pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1535 pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1536 keg->uk_rsize = rsize;
1537 keg->uk_ppera = pages;
1538 keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1539 keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
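/*
 * Worked example (illustrative, assuming 4 KB pages): a 128-byte item with
 * 64-byte alignment is an even multiple of the alignment, so rsize is
 * extended to 192 (trailer = 64); then pages = (192 * 64) / 4096 = 3 and
 * ipers = (3 * 4096 + 64) / 192 = 64, spreading consecutive items across
 * different cache-line-aligned offsets instead of the same one.
 */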
1540 KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1541 ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1546 * Keg header ctor. This initializes all fields, locks, etc. And inserts
1547 * the keg onto the global keg list.
1549 * Arguments/Returns follow uma_ctor specifications
1550 * udata Actually uma_kctor_args
1553 keg_ctor(void *mem, int size, void *udata, int flags)
1555 struct uma_kctor_args *arg = udata;
1556 uma_keg_t keg = mem;
1560 keg->uk_size = arg->size;
1561 keg->uk_init = arg->uminit;
1562 keg->uk_fini = arg->fini;
1563 keg->uk_align = arg->align;
1566 keg->uk_reserve = 0;
1568 keg->uk_flags = arg->flags;
1569 keg->uk_slabzone = NULL;
1572 * The master zone is passed to us at keg-creation time.
1575 keg->uk_name = zone->uz_name;
1577 if (arg->flags & UMA_ZONE_VM)
1578 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1580 if (arg->flags & UMA_ZONE_ZINIT)
1581 keg->uk_init = zero_init;
1583 if (arg->flags & UMA_ZONE_MALLOC)
1584 keg->uk_flags |= UMA_ZONE_VTOSLAB;
1586 if (arg->flags & UMA_ZONE_PCPU)
1588 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1590 keg->uk_flags &= ~UMA_ZONE_PCPU;
1593 if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1594 keg_cachespread_init(keg);
1596 if (keg->uk_size > UMA_SLAB_SPACE)
1597 keg_large_init(keg);
1599 keg_small_init(keg);
1602 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1603 keg->uk_slabzone = slabzone;
1606 * If we haven't booted yet we need allocations to go through the
1607 * startup cache until the vm is ready.
1609 if (booted < BOOT_PAGEALLOC)
1610 keg->uk_allocf = startup_alloc;
1611 #ifdef UMA_MD_SMALL_ALLOC
1612 else if (keg->uk_ppera == 1)
1613 keg->uk_allocf = uma_small_alloc;
1615 else if (keg->uk_flags & UMA_ZONE_PCPU)
1616 keg->uk_allocf = pcpu_page_alloc;
1618 keg->uk_allocf = page_alloc;
1619 #ifdef UMA_MD_SMALL_ALLOC
1620 if (keg->uk_ppera == 1)
1621 keg->uk_freef = uma_small_free;
1624 if (keg->uk_flags & UMA_ZONE_PCPU)
1625 keg->uk_freef = pcpu_page_free;
1627 keg->uk_freef = page_free;
1630 * Initialize keg's lock
1632 KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1635 * If we're putting the slab header in the actual page we need to
1636 * figure out where in each page it goes. This calculates a right
1637 * justified offset into the memory on an ALIGN_PTR boundary.
1639 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1642 /* Size of the slab struct and free list */
1643 totsize = sizeof(struct uma_slab);
1645 if (totsize & UMA_ALIGN_PTR)
1646 totsize = (totsize & ~UMA_ALIGN_PTR) +
1647 (UMA_ALIGN_PTR + 1);
1648 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
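/*
 * Example (illustrative, assuming a 4 KB single-page slab and a hypothetical
 * 96-byte, already pointer-aligned struct uma_slab): uk_pgoff becomes
 * 4096 - 96 = 4000, i.e. the slab header sits right-justified at the end of
 * the page with the items laid out in front of it.
 */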
1651 * The only way the following is possible is if with our
1652 * UMA_ALIGN_PTR adjustments we are now bigger than
1653 * UMA_SLAB_SIZE. I haven't checked whether this is
1654 * mathematically possible for all cases, so we make
1657 totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1658 if (totsize > PAGE_SIZE * keg->uk_ppera) {
1659 printf("zone %s ipers %d rsize %d size %d\n",
1660 zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1662 panic("UMA slab won't fit.");
1666 if (keg->uk_flags & UMA_ZONE_HASH)
1667 hash_alloc(&keg->uk_hash);
1669 CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1670 keg, zone->uz_name, zone,
1671 (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1674 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1676 rw_wlock(&uma_rwlock);
1677 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1678 rw_wunlock(&uma_rwlock);
1683 * Zone header ctor. This initializes all fields, locks, etc.
1685 * Arguments/Returns follow uma_ctor specifications
1686 * udata Actually uma_zctor_args
1689 zone_ctor(void *mem, int size, void *udata, int flags)
1691 struct uma_zctor_args *arg = udata;
1692 uma_zone_t zone = mem;
1697 zone->uz_name = arg->name;
1698 zone->uz_ctor = arg->ctor;
1699 zone->uz_dtor = arg->dtor;
1700 zone->uz_slab = zone_fetch_slab;
1701 zone->uz_init = NULL;
1702 zone->uz_fini = NULL;
1703 zone->uz_allocs = 0;
1706 zone->uz_sleeps = 0;
1708 zone->uz_count_min = 0;
1710 zone->uz_warning = NULL;
1711 /* The domain structures follow the cpu structures. */
1712 zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
1713 timevalclear(&zone->uz_ratecheck);
1716 ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1719 * This is a pure cache zone, no kegs.
1722 if (arg->flags & UMA_ZONE_VM)
1723 arg->flags |= UMA_ZFLAG_CACHEONLY;
1724 zone->uz_flags = arg->flags;
1725 zone->uz_size = arg->size;
1726 zone->uz_import = arg->import;
1727 zone->uz_release = arg->release;
1728 zone->uz_arg = arg->arg;
1729 zone->uz_lockptr = &zone->uz_lock;
1730 rw_wlock(&uma_rwlock);
1731 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1732 rw_wunlock(&uma_rwlock);
1737 * Use the regular zone/keg/slab allocator.
1739 zone->uz_import = (uma_import)zone_import;
1740 zone->uz_release = (uma_release)zone_release;
1741 zone->uz_arg = zone;
1743 if (arg->flags & UMA_ZONE_SECONDARY) {
1744 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1745 zone->uz_init = arg->uminit;
1746 zone->uz_fini = arg->fini;
1747 zone->uz_lockptr = &keg->uk_lock;
1748 zone->uz_flags |= UMA_ZONE_SECONDARY;
1749 rw_wlock(&uma_rwlock);
1751 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1752 if (LIST_NEXT(z, uz_link) == NULL) {
1753 LIST_INSERT_AFTER(z, zone, uz_link);
1758 rw_wunlock(&uma_rwlock);
1759 } else if (keg == NULL) {
1760 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1761 arg->align, arg->flags)) == NULL)
1764 struct uma_kctor_args karg;
1767 /* We should only be here from uma_startup() */
1768 karg.size = arg->size;
1769 karg.uminit = arg->uminit;
1770 karg.fini = arg->fini;
1771 karg.align = arg->align;
1772 karg.flags = arg->flags;
1774 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1781 * Link in the first keg.
1783 zone->uz_klink.kl_keg = keg;
1784 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1785 zone->uz_lockptr = &keg->uk_lock;
1786 zone->uz_size = keg->uk_size;
1787 zone->uz_flags |= (keg->uk_flags &
1788 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1791 * Some internal zones don't have room allocated for the per cpu
1792 * caches. If we're internal, bail out here.
1794 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1795 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1796 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1801 KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
1802 (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
1803 ("Invalid zone flag combination"));
1804 if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
1805 zone->uz_count = BUCKET_MAX;
1806 else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
1809 zone->uz_count = bucket_select(zone->uz_size);
1810 zone->uz_count_min = zone->uz_count;
1816 * Keg header dtor. This frees all data, destroys locks, frees the hash
1817 * table and removes the keg from the global list.
1819 * Arguments/Returns follow uma_dtor specifications
1823 keg_dtor(void *arg, int size, void *udata)
1827 keg = (uma_keg_t)arg;
1829 if (keg->uk_free != 0) {
1830 printf("Freed UMA keg (%s) was not empty (%d items). "
1831 " Lost %d pages of memory.\n",
1832 keg->uk_name ? keg->uk_name : "",
1833 keg->uk_free, keg->uk_pages);
1837 hash_free(&keg->uk_hash);
1845 * Arguments/Returns follow uma_dtor specifications
1849 zone_dtor(void *arg, int size, void *udata)
1855 zone = (uma_zone_t)arg;
1856 keg = zone_first_keg(zone);
1858 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1861 rw_wlock(&uma_rwlock);
1862 LIST_REMOVE(zone, uz_link);
1863 rw_wunlock(&uma_rwlock);
1865 * XXX there are some races here where
1866 * the zone can be drained but zone lock
1867 * released and then refilled before we
1868 * remove it... we don't care for now
1870 zone_drain_wait(zone, M_WAITOK);
1872 * Unlink all of our kegs.
1874 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1875 klink->kl_keg = NULL;
1876 LIST_REMOVE(klink, kl_link);
1877 if (klink == &zone->uz_klink)
1879 free(klink, M_TEMP);
1882 * We only destroy kegs from non secondary zones.
1884 if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1885 rw_wlock(&uma_rwlock);
1886 LIST_REMOVE(keg, uk_link);
1887 rw_wunlock(&uma_rwlock);
1888 zone_free_item(kegs, keg, NULL, SKIP_NONE);
1890 ZONE_LOCK_FINI(zone);
1894 * Traverses every zone in the system and calls a callback
1897 * zfunc A pointer to a function which accepts a zone
1904 zone_foreach(void (*zfunc)(uma_zone_t))
1909 rw_rlock(&uma_rwlock);
1910 LIST_FOREACH(keg, &uma_kegs, uk_link) {
1911 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1914 rw_runlock(&uma_rwlock);
1918 * Count how many pages we need to bootstrap. The VM supplies the needs of
1919 * its early zones in the argument; we add up our own zones,
1920 * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The
1921 * zone of zones and zone of kegs are accounted separately.
1923 #define UMA_BOOT_ZONES 11
1924 /* Zone of zones and zone of kegs have arbitrary alignment. */
1925 #define UMA_BOOT_ALIGN 32
1926 static int zsize, ksize;
1928 uma_startup_count(int vm_zones)
1932 ksize = sizeof(struct uma_keg) +
1933 (sizeof(struct uma_domain) * vm_ndomains);
1934 zsize = sizeof(struct uma_zone) +
1935 (sizeof(struct uma_cache) * (mp_maxid + 1)) +
1936 (sizeof(struct uma_zone_domain) * vm_ndomains);
1939 * Memory for the zone of kegs and its keg,
1940 * and for zone of zones.
1942 pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
1943 roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
1945 #ifdef UMA_MD_SMALL_ALLOC
1946 zones = UMA_BOOT_ZONES;
1948 zones = UMA_BOOT_ZONES + vm_zones;
1952 /* Memory for the rest of startup zones, UMA and VM, ... */
1953 if (zsize > UMA_SLAB_SPACE)
1954 pages += (zones + vm_zones) *
1955 howmany(roundup2(zsize, UMA_BOOT_ALIGN), UMA_SLAB_SIZE);
1956 else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
1959 pages += howmany(zones,
1960 UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
1962 /* ... and their kegs. Note that zone of zones allocates a keg! */
1963 pages += howmany(zones + 1,
1964 UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
1967 * Most of the startup zones are not going to be offpage; that's
1968 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all
1969 * calculations. Some large bucket zones will be offpage, and
1970 * thus will allocate hashes. We take a conservative approach
1971 * and assume that all zones may allocate a hash. This may give
1972 * us some positive inaccuracy, usually an extra single page.
1974 pages += howmany(zones, UMA_SLAB_SPACE /
1975 (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
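/*
 * Illustrative walk-through (the counts are assumptions, not measurements):
 * without UMA_MD_SMALL_ALLOC and with, say, vm_zones = 4, the accounting
 * above covers UMA_BOOT_ZONES + 4 = 15 zones, one keg more than that (the
 * zone of zones allocates its own keg), and a conservatively assumed hash
 * allocation for every zone, in addition to the page(s) set aside for the
 * zone of zones, the zone of kegs and the master keg themselves.
 */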
1981 uma_startup(void *mem, int npages)
1983 struct uma_zctor_args args;
1984 uma_keg_t masterkeg;
1988 printf("Entering %s with %d boot pages configured\n", __func__, npages);
1991 rw_init(&uma_rwlock, "UMA lock");
1993 /* Use bootpages memory for the zone of zones and zone of kegs. */
1995 zones = (uma_zone_t)m;
1996 m += roundup(zsize, CACHE_LINE_SIZE);
1997 kegs = (uma_zone_t)m;
1998 m += roundup(zsize, CACHE_LINE_SIZE);
1999 masterkeg = (uma_keg_t)m;
2000 m += roundup(ksize, CACHE_LINE_SIZE);
2001 m = roundup(m, PAGE_SIZE);
2002 npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
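/*
 * Example (illustrative numbers only): if zsize rounds up to 768 bytes and
 * ksize to 512, the three objects above consume 768 + 768 + 512 = 2048
 * bytes of bootmem; the cursor is then rounded up to the next page
 * boundary, so a single page is subtracted from npages and the remainder is
 * left for the boot-page cache consumed by startup_alloc().
 */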
2005 /* "manually" create the initial zone */
2006 memset(&args, 0, sizeof(args));
2007 args.name = "UMA Kegs";
2009 args.ctor = keg_ctor;
2010 args.dtor = keg_dtor;
2011 args.uminit = zero_init;
2013 args.keg = masterkeg;
2014 args.align = UMA_BOOT_ALIGN - 1;
2015 args.flags = UMA_ZFLAG_INTERNAL;
2016 zone_ctor(kegs, zsize, &args, M_WAITOK);
2019 boot_pages = npages;
2021 args.name = "UMA Zones";
2023 args.ctor = zone_ctor;
2024 args.dtor = zone_dtor;
2025 args.uminit = zero_init;
2028 args.align = UMA_BOOT_ALIGN - 1;
2029 args.flags = UMA_ZFLAG_INTERNAL;
2030 zone_ctor(zones, zsize, &args, M_WAITOK);
2032 /* Now make a zone for slab headers */
2033 slabzone = uma_zcreate("UMA Slabs",
2034 sizeof(struct uma_slab),
2035 NULL, NULL, NULL, NULL,
2036 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2038 hashzone = uma_zcreate("UMA Hash",
2039 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2040 NULL, NULL, NULL, NULL,
2041 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2045 booted = BOOT_STRAPPED;
2053 printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2055 booted = BOOT_PAGEALLOC;
2063 printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2065 booted = BOOT_BUCKETS;
2066 sx_init(&uma_drain_lock, "umadrain");
2071 * Initialize our callout handle
2079 TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2080 uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2081 uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2083 callout_init(&uma_callout, 1);
2084 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2085 booted = BOOT_RUNNING;
2089 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2090 int align, uint32_t flags)
2092 struct uma_kctor_args args;
2095 args.uminit = uminit;
2097 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2100 return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2103 /* Public functions */
2106 uma_set_align(int align)
2109 if (align != UMA_ALIGN_CACHE)
2110 uma_align_cache = align;
2115 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2116 uma_init uminit, uma_fini fini, int align, uint32_t flags)
2119 struct uma_zctor_args args;
2123 KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2126 /* This stuff is essential for the zone ctor */
2127 memset(&args, 0, sizeof(args));
2132 args.uminit = uminit;
2136 * If a zone is being created with an empty constructor and
2137 * destructor, pass UMA constructor/destructor which checks for
2138 * memory use after free.
2140 if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2141 ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
2142 args.ctor = trash_ctor;
2143 args.dtor = trash_dtor;
2144 args.uminit = trash_init;
2145 args.fini = trash_fini;
2152 if (booted < BOOT_BUCKETS) {
2155 sx_slock(&uma_drain_lock);
2158 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2160 sx_sunlock(&uma_drain_lock);
2166 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2167 uma_init zinit, uma_fini zfini, uma_zone_t master)
2169 struct uma_zctor_args args;
2174 keg = zone_first_keg(master);
2175 memset(&args, 0, sizeof(args));
2177 args.size = keg->uk_size;
2180 args.uminit = zinit;
2182 args.align = keg->uk_align;
2183 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2186 if (booted < BOOT_BUCKETS) {
2189 sx_slock(&uma_drain_lock);
2192 /* XXX Attaches only one keg of potentially many. */
2193 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2195 sx_sunlock(&uma_drain_lock);
2201 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2202 uma_init zinit, uma_fini zfini, uma_import zimport,
2203 uma_release zrelease, void *arg, int flags)
2205 struct uma_zctor_args args;
2207 memset(&args, 0, sizeof(args));
2212 args.uminit = zinit;
2214 args.import = zimport;
2215 args.release = zrelease;
2220 return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2224 zone_lock_pair(uma_zone_t a, uma_zone_t b)
2228 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
2231 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
2236 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2244 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2251 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2253 zone_lock_pair(zone, master);
2255 * zone must use vtoslab() to resolve objects and must already be
2258 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2259 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2264 * The new master must also use vtoslab().
2266 if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2272 * The underlying object must be the same size. rsize
2275 if (master->uz_size != zone->uz_size) {
2280 * Put it at the end of the list.
2282 klink->kl_keg = zone_first_keg(master);
2283 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2284 if (LIST_NEXT(kl, kl_link) == NULL) {
2285 LIST_INSERT_AFTER(kl, klink, kl_link);
2290 zone->uz_flags |= UMA_ZFLAG_MULTI;
2291 zone->uz_slab = zone_fetch_slab_multi;
2294 zone_unlock_pair(zone, master);
2296 free(klink, M_TEMP);
2304 uma_zdestroy(uma_zone_t zone)
2307 sx_slock(&uma_drain_lock);
2308 zone_free_item(zones, zone, NULL, SKIP_NONE);
2309 sx_sunlock(&uma_drain_lock);
2313 uma_zwait(uma_zone_t zone)
2317 item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2318 uma_zfree(zone, item);
2322 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2328 MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2330 item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2331 if (item != NULL && (flags & M_ZERO)) {
2333 for (i = 0; i <= mp_maxid; i++)
2334 bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2336 bzero(item, zone->uz_size);
2343 * A stub while both regular and pcpu cases are identical.
2346 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2350 MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2352 uma_zfree_arg(zone, item, udata);
2357 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2359 uma_zone_domain_t zdom;
2360 uma_bucket_t bucket;
2363 int cpu, domain, lockfail;
2368 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2369 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2371 /* This is the fast path allocation */
2372 CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2373 curthread, zone->uz_name, zone, flags);
2375 if (flags & M_WAITOK) {
2376 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2377 "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2379 KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2380 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2381 ("uma_zalloc_arg: called with spinlock or critical section held"));
2382 if (zone->uz_flags & UMA_ZONE_PCPU)
2383 KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2384 "with M_ZERO passed"));
2386 #ifdef DEBUG_MEMGUARD
2387 if (memguard_cmp_zone(zone)) {
2388 item = memguard_alloc(zone->uz_size, flags);
2390 if (zone->uz_init != NULL &&
2391 zone->uz_init(item, zone->uz_size, flags) != 0)
2393 if (zone->uz_ctor != NULL &&
2394 zone->uz_ctor(item, zone->uz_size, udata,
2396 zone->uz_fini(item, zone->uz_size);
2401 /* This is unfortunate but should not be fatal. */
2405 * If possible, allocate from the per-CPU cache. There are two
2406 * requirements for safe access to the per-CPU cache: (1) the thread
2407 * accessing the cache must not be preempted or yield during access,
2408 * and (2) the thread must not migrate CPUs without switching which
2409 * cache it accesses. We rely on a critical section to prevent
2410 * preemption and migration. We release the critical section in
2411 * order to acquire the zone mutex if we are unable to allocate from
2412 * the current cache; when we re-acquire the critical section, we
2413 * must detect and handle migration if it has occurred.
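/*
 * A minimal sketch of the fast path described above (the critical_enter()/
 * critical_exit() calls themselves are elided in this excerpt):
 *
 *	critical_enter();
 *	cpu = curcpu;
 *	cache = &zone->uz_cpu[cpu];
 *	... pop bucket->ub_bucket[--bucket->ub_cnt] ...
 *	critical_exit();
 *
 * Any path that needs the zone lock leaves the critical section first and
 * must re-read curcpu afterwards, since the thread may have migrated.
 */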
2417 cache = &zone->uz_cpu[cpu];
2420 bucket = cache->uc_allocbucket;
2421 if (bucket != NULL && bucket->ub_cnt > 0) {
2423 item = bucket->ub_bucket[bucket->ub_cnt];
2425 bucket->ub_bucket[bucket->ub_cnt] = NULL;
2427 KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2431 skipdbg = uma_dbg_zskip(zone, item);
2433 if (zone->uz_ctor != NULL &&
2435 (!skipdbg || zone->uz_ctor != trash_ctor ||
2436 zone->uz_dtor != trash_dtor) &&
2438 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2439 atomic_add_long(&zone->uz_fails, 1);
2440 zone_free_item(zone, item, udata, SKIP_DTOR);
2445 uma_dbg_alloc(zone, NULL, item);
2448 uma_zero_item(item, zone);
2453 * We have run out of items in our alloc bucket.
2454 * See if we can switch with our free bucket.
2456 bucket = cache->uc_freebucket;
2457 if (bucket != NULL && bucket->ub_cnt > 0) {
2459 "uma_zalloc: zone %s(%p) swapping empty with alloc",
2460 zone->uz_name, zone);
2461 cache->uc_freebucket = cache->uc_allocbucket;
2462 cache->uc_allocbucket = bucket;
2467 * Discard any empty allocation bucket while we hold no locks.
2469 bucket = cache->uc_allocbucket;
2470 cache->uc_allocbucket = NULL;
2473 bucket_free(zone, bucket, udata);
2475 if (zone->uz_flags & UMA_ZONE_NUMA)
2476 domain = PCPU_GET(domain);
2478 domain = UMA_ANYDOMAIN;
2480 /* Short-circuit for zones without buckets and low memory. */
2481 if (zone->uz_count == 0 || bucketdisable)
2485 * The attempt to retrieve the item from the per-CPU cache has failed, so
2486 * we must go back to the zone. This requires the zone lock, so we
2487 * must drop the critical section, then re-acquire it when we go back
2488 * to the cache. Since the critical section is released, we may be
2489 * preempted or migrate. As such, make sure not to maintain any
2490 * thread-local state specific to the cache from prior to releasing
2491 * the critical section.
2494 if (ZONE_TRYLOCK(zone) == 0) {
2495 /* Record contention to size the buckets. */
2501 cache = &zone->uz_cpu[cpu];
2503 /* See if we lost the race to fill the cache. */
2504 if (cache->uc_allocbucket != NULL) {
2510 * Check the zone's cache of buckets.
2512 if (domain == UMA_ANYDOMAIN)
2513 zdom = &zone->uz_domain[0];
2515 zdom = &zone->uz_domain[domain];
2516 if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
2517 KASSERT(bucket->ub_cnt != 0,
2518 ("uma_zalloc_arg: Returning an empty bucket."));
2520 LIST_REMOVE(bucket, ub_link);
2521 cache->uc_allocbucket = bucket;
2525 /* We are no longer associated with this CPU. */
2529 * We bump the uz count when the cache size is insufficient to
2530 * handle the working set.
2532 if (lockfail && zone->uz_count < BUCKET_MAX)
2537 	 * Now let's just fill a bucket and put it on the free list.  If that
2538 * works we'll restart the allocation from the beginning and it
2539 * will use the just filled bucket.
2541 bucket = zone_alloc_bucket(zone, udata, domain, flags);
2542 CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2543 zone->uz_name, zone, bucket);
2544 if (bucket != NULL) {
2548 cache = &zone->uz_cpu[cpu];
2550 * See if we lost the race or were migrated. Cache the
2551 * initialized bucket to make this less likely or claim
2552 * the memory directly.
2554 if (cache->uc_allocbucket != NULL ||
2555 (zone->uz_flags & UMA_ZONE_NUMA &&
2556 domain != PCPU_GET(domain)))
2557 LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
2559 cache->uc_allocbucket = bucket;
2565 * We may not be able to get a bucket so return an actual item.
2568 item = zone_alloc_item(zone, udata, domain, flags);
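/*
 * Allocate an item from an explicit NUMA domain.  This bypasses the per-CPU
 * caches entirely and imports a single item straight from the keg layer via
 * zone_alloc_item().
 */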
2574 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2577 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2578 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2580 /* This is the fast path allocation */
2582 "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2583 curthread, zone->uz_name, zone, domain, flags);
2585 if (flags & M_WAITOK) {
2586 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2587 "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2589 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2590 ("uma_zalloc_domain: called with spinlock or critical section held"));
2592 return (zone_alloc_item(zone, udata, domain, flags));
2596 * Find a slab with some space.  Prefer slabs that are partially used over those
2597 * that are completely free.  This helps to reduce fragmentation.
2599 * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check only 'domain'.
2603 keg_first_slab(uma_keg_t keg, int domain, int rr)
2609 KASSERT(domain >= 0 && domain < vm_ndomains,
2610 ("keg_first_slab: domain %d out of range", domain));
2615 dom = &keg->uk_domain[domain];
2616 if (!LIST_EMPTY(&dom->ud_part_slab))
2617 return (LIST_FIRST(&dom->ud_part_slab));
2618 if (!LIST_EMPTY(&dom->ud_free_slab)) {
2619 slab = LIST_FIRST(&dom->ud_free_slab);
2620 LIST_REMOVE(slab, us_link);
2621 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2625 domain = (domain + 1) % vm_ndomains;
2626 } while (domain != start);
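/*
 * Fetch a slab with at least one free item from the keg, allocating a fresh
 * slab if none is cached and the keg's page limit permits.  Domains are
 * scanned round-robin unless a specific domain was requested.  Called and
 * returns with the keg lock held; may sleep when M_WAITOK is set and the keg
 * is at its limit.
 */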
2632 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags)
2636 int allocflags, domain, reserve, rr, start;
2638 mtx_assert(&keg->uk_lock, MA_OWNED);
2642 if ((flags & M_USE_RESERVE) == 0)
2643 reserve = keg->uk_reserve;
2646 	 * Round-robin for non-first-touch zones when there is more than one domain.
2649 if (vm_ndomains == 1)
2651 rr = rdomain == UMA_ANYDOMAIN;
2653 keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains;
2654 domain = start = keg->uk_cursor;
2655 /* Only block on the second pass. */
2656 if ((flags & (M_WAITOK | M_NOVM)) == M_WAITOK)
2657 allocflags = (allocflags & ~M_WAITOK) | M_NOWAIT;
2659 domain = start = rdomain;
2663 if (keg->uk_free > reserve &&
2664 (slab = keg_first_slab(keg, domain, rr)) != NULL) {
2665 MPASS(slab->us_keg == keg);
2670 * M_NOVM means don't ask at all!
2675 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2676 keg->uk_flags |= UMA_ZFLAG_FULL;
2678 * If this is not a multi-zone, set the FULL bit.
2679 		 * Otherwise zone_fetch_slab_multi() takes care of it.
2681 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2682 zone->uz_flags |= UMA_ZFLAG_FULL;
2683 zone_log_warning(zone);
2684 zone_maxaction(zone);
2686 if (flags & M_NOWAIT)
2689 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2692 slab = keg_alloc_slab(keg, zone, domain, allocflags);
2694 * If we got a slab here it's safe to mark it partially used
2695 * and return. We assume that the caller is going to remove
2696 * at least one item.
2699 MPASS(slab->us_keg == keg);
2700 dom = &keg->uk_domain[slab->us_domain];
2701 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2705 keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains;
2706 domain = keg->uk_cursor;
2708 } while (domain != start);
2710 /* Retry domain scan with blocking. */
2711 if (allocflags != flags) {
2717 	 * We might not have been able to get a slab but another CPU
2718 	 * could have while we were unlocked.  Check again before we fail.
2721 if (keg->uk_free > reserve &&
2722 (slab = keg_first_slab(keg, domain, rr)) != NULL) {
2723 MPASS(slab->us_keg == keg);
2730 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
2735 keg = zone_first_keg(zone);
2740 slab = keg_fetch_slab(keg, zone, domain, flags);
2743 if (flags & (M_NOWAIT | M_NOVM))
2751 * zone_fetch_slab_multi: Fetches a slab from one available keg.  Returns
2752 * with the keg locked. On NULL no lock is held.
2754 * The last pointer is used to seed the search. It is not required.
2757 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags)
2767 	 * Don't wait on the first pass.  This will skip limit tests
2768 	 * as well.  We don't want to block if we can find a provider without blocking.
2771 flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2773 	 * Use the last slab allocated as a hint for where to start the search.
2777 slab = keg_fetch_slab(last, zone, domain, flags);
2783 	 * Loop until we have a slab in case of transient failures
2784 * while M_WAITOK is specified. I'm not sure this is 100%
2785 * required but we've done it for so long now.
2791 * Search the available kegs for slabs. Be careful to hold the
2792 * correct lock while calling into the keg layer.
2794 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2795 keg = klink->kl_keg;
2797 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2798 slab = keg_fetch_slab(keg, zone, domain, flags);
2802 if (keg->uk_flags & UMA_ZFLAG_FULL)
2808 if (rflags & (M_NOWAIT | M_NOVM))
2812 * All kegs are full. XXX We can't atomically check all kegs
2813 * and sleep so just sleep for a short period and retry.
2815 if (full && !empty) {
2817 zone->uz_flags |= UMA_ZFLAG_FULL;
2819 zone_log_warning(zone);
2820 zone_maxaction(zone);
2821 msleep(zone, zone->uz_lockptr, PVM,
2822 "zonelimit", hz/100);
2823 zone->uz_flags &= ~UMA_ZFLAG_FULL;
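/*
 * Carve one item out of 'slab': find the lowest set bit in the free bitset,
 * clear it, and move the slab to the keg's full-slab list once its last item
 * has been handed out.  The keg lock must be held.
 */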
2832 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2838 MPASS(keg == slab->us_keg);
2839 mtx_assert(&keg->uk_lock, MA_OWNED);
2841 freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2842 BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2843 item = slab->us_data + (keg->uk_rsize * freei);
2844 slab->us_freecount--;
2847 /* Move this slab to the full list */
2848 if (slab->us_freecount == 0) {
2849 LIST_REMOVE(slab, us_link);
2850 dom = &keg->uk_domain[slab->us_domain];
2851 LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
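/*
 * Import up to 'max' items from the keg layer into the caller's array,
 * draining slabs one at a time.  This serves as the uz_import method for
 * keg-backed zones.
 */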
2858 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
2869 /* Try to keep the buckets totally full */
2870 for (i = 0; i < max; ) {
2871 if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL)
2875 stripe = howmany(max, vm_ndomains);
2877 while (slab->us_freecount && i < max) {
2878 bucket[i++] = slab_alloc_item(keg, slab);
2879 if (keg->uk_free <= keg->uk_reserve)
2883 * If the zone is striped we pick a new slab for every
2884 * N allocations. Eliminating this conditional will
2885 * instead pick a new domain for each bucket rather
2886 * than stripe within each bucket. The current option
2887 * produces more fragmentation and requires more cpu
2888 * time but yields better distribution.
2890 if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
2891 vm_ndomains > 1 && --stripe == 0)
2895 /* Don't block if we allocated any successfully. */
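/*
 * Allocate and fill a new bucket for the zone.  The bucket itself is always
 * allocated M_NOWAIT, so a failure here simply makes the caller fall back to
 * allocating a single item instead of sleeping for bucket memory.
 */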
2906 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
2908 uma_bucket_t bucket;
2911 /* Don't wait for buckets, preserve caller's NOVM setting. */
2912 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2916 max = MIN(bucket->ub_entries, zone->uz_count);
2917 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2918 max, domain, flags);
2921 * Initialize the memory if necessary.
2923 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2926 for (i = 0; i < bucket->ub_cnt; i++)
2927 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2931 * If we couldn't initialize the whole bucket, put the
2932 * rest back onto the freelist.
2934 if (i != bucket->ub_cnt) {
2935 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2936 bucket->ub_cnt - i);
2938 bzero(&bucket->ub_bucket[i],
2939 sizeof(void *) * (bucket->ub_cnt - i));
2945 if (bucket->ub_cnt == 0) {
2946 bucket_free(zone, bucket, udata);
2947 atomic_add_long(&zone->uz_fails, 1);
2955 * Allocates a single item from a zone.
2958 * zone The zone to alloc for.
2959 * udata The data to be passed to the constructor.
2960 * domain The domain to allocate from or UMA_ANYDOMAIN.
2961 * flags M_WAITOK, M_NOWAIT, M_ZERO.
2964 * NULL if there is no memory and M_NOWAIT is set
2965 * An item if successful
2969 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
2978 if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
2980 atomic_add_long(&zone->uz_allocs, 1);
2983 skipdbg = uma_dbg_zskip(zone, item);
2986 * We have to call both the zone's init (not the keg's init)
2987 * and the zone's ctor. This is because the item is going from
2988 * a keg slab directly to the user, and the user is expecting it
2989 * to be both zone-init'd as well as zone-ctor'd.
2991 if (zone->uz_init != NULL) {
2992 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2993 zone_free_item(zone, item, udata, SKIP_FINI);
2997 if (zone->uz_ctor != NULL &&
2999 (!skipdbg || zone->uz_ctor != trash_ctor ||
3000 zone->uz_dtor != trash_dtor) &&
3002 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
3003 zone_free_item(zone, item, udata, SKIP_DTOR);
3008 uma_dbg_alloc(zone, NULL, item);
3011 uma_zero_item(item, zone);
3013 CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3014 zone->uz_name, zone);
3019 CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3020 zone->uz_name, zone);
3021 atomic_add_long(&zone->uz_fails, 1);
3027 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3030 uma_bucket_t bucket;
3031 uma_zone_domain_t zdom;
3032 int cpu, domain, lockfail;
3037 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3038 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
3040 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3043 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3044 ("uma_zfree_arg: called with spinlock or critical section held"));
3046 /* uma_zfree(..., NULL) does nothing, to match free(9). */
3049 #ifdef DEBUG_MEMGUARD
3050 if (is_memguard_addr(item)) {
3051 if (zone->uz_dtor != NULL)
3052 zone->uz_dtor(item, zone->uz_size, udata);
3053 if (zone->uz_fini != NULL)
3054 zone->uz_fini(item, zone->uz_size);
3055 memguard_free(item);
3060 skipdbg = uma_dbg_zskip(zone, item);
3061 if (skipdbg == false) {
3062 if (zone->uz_flags & UMA_ZONE_MALLOC)
3063 uma_dbg_free(zone, udata, item);
3065 uma_dbg_free(zone, NULL, item);
3067 if (zone->uz_dtor != NULL && (!skipdbg ||
3068 zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
3070 if (zone->uz_dtor != NULL)
3072 zone->uz_dtor(item, zone->uz_size, udata);
3075 * The race here is acceptable. If we miss it we'll just have to wait
3076 * a little longer for the limits to be reset.
3078 if (zone->uz_flags & UMA_ZFLAG_FULL)
3082 * If possible, free to the per-CPU cache. There are two
3083 * requirements for safe access to the per-CPU cache: (1) the thread
3084 * accessing the cache must not be preempted or yield during access,
3085 * and (2) the thread must not migrate CPUs without switching which
3086 * cache it accesses. We rely on a critical section to prevent
3087 * preemption and migration. We release the critical section in
3088 * order to acquire the zone mutex if we are unable to free to the
3089 * current cache; when we re-acquire the critical section, we must
3090 * detect and handle migration if it has occurred.
3095 cache = &zone->uz_cpu[cpu];
3099 * Try to free into the allocbucket first to give LIFO ordering
3100 	 * for cache-hot data structures.  Spill over into the freebucket
3101 * if necessary. Alloc will swap them if one runs dry.
3103 bucket = cache->uc_allocbucket;
3104 if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3105 bucket = cache->uc_freebucket;
3106 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3107 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
3108 ("uma_zfree: Freeing to non free bucket index."));
3109 bucket->ub_bucket[bucket->ub_cnt] = item;
3117 	 * We must go back to the zone, which requires acquiring the zone lock,
3118 * which in turn means we must release and re-acquire the critical
3119 * section. Since the critical section is released, we may be
3120 * preempted or migrate. As such, make sure not to maintain any
3121 * thread-local state specific to the cache from prior to releasing
3122 * the critical section.
3125 if (zone->uz_count == 0 || bucketdisable)
3129 if (ZONE_TRYLOCK(zone) == 0) {
3130 /* Record contention to size the buckets. */
3136 cache = &zone->uz_cpu[cpu];
3138 bucket = cache->uc_freebucket;
3139 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3143 cache->uc_freebucket = NULL;
3144 /* We are no longer associated with this CPU. */
3147 if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3148 domain = PCPU_GET(domain);
3151 zdom = &zone->uz_domain[0];
3153 /* Can we throw this on the zone full list? */
3154 if (bucket != NULL) {
3156 "uma_zfree: zone %s(%p) putting bucket %p on free list",
3157 zone->uz_name, zone, bucket);
3158 /* ub_cnt is pointing to the last free item */
3159 KASSERT(bucket->ub_cnt != 0,
3160 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
3161 if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
3163 bucket_drain(zone, bucket);
3164 bucket_free(zone, bucket, udata);
3167 LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
3171 * We bump the uz count when the cache size is insufficient to
3172 * handle the working set.
3174 if (lockfail && zone->uz_count < BUCKET_MAX)
3178 bucket = bucket_alloc(zone, udata, M_NOWAIT);
3179 CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3180 zone->uz_name, zone, bucket);
3184 cache = &zone->uz_cpu[cpu];
3185 if (cache->uc_freebucket == NULL &&
3186 ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
3187 domain == PCPU_GET(domain))) {
3188 cache->uc_freebucket = bucket;
3192 * We lost the race, start over. We have to drop our
3193 * critical section to free the bucket.
3196 bucket_free(zone, bucket, udata);
3201 * If nothing else caught this, we'll just do an internal free.
3204 zone_free_item(zone, item, udata, SKIP_DTOR);
3210 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3213 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3214 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
3216 CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3219 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3220 ("uma_zfree_domain: called with spinlock or critical section held"));
3222 /* uma_zfree(..., NULL) does nothing, to match free(9). */
3225 zone_free_item(zone, item, udata, SKIP_NONE);
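/*
 * Return one item to its slab: set its bit in the free bitset and move the
 * slab between the keg's full, partial and free lists as its free count
 * changes.  The keg lock must be held.
 */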
3229 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
3234 mtx_assert(&keg->uk_lock, MA_OWNED);
3235 MPASS(keg == slab->us_keg);
3237 dom = &keg->uk_domain[slab->us_domain];
3239 /* Do we need to remove from any lists? */
3240 if (slab->us_freecount+1 == keg->uk_ipers) {
3241 LIST_REMOVE(slab, us_link);
3242 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3243 } else if (slab->us_freecount == 0) {
3244 LIST_REMOVE(slab, us_link);
3245 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3248 /* Slab management. */
3249 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3250 BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
3251 slab->us_freecount++;
3253 /* Keg statistics. */
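/*
 * Release a batch of items back to the keg layer, locating each item's slab
 * by address arithmetic, the keg hash, or vtoslab() as dictated by the keg's
 * flags.  This is the uz_release counterpart to zone_import() and also clears
 * the keg/zone FULL state once space becomes available again.
 */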
3258 zone_release(uma_zone_t zone, void **bucket, int cnt)
3268 keg = zone_first_keg(zone);
3270 for (i = 0; i < cnt; i++) {
3272 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3273 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3274 if (zone->uz_flags & UMA_ZONE_HASH) {
3275 slab = hash_sfind(&keg->uk_hash, mem);
3277 mem += keg->uk_pgoff;
3278 slab = (uma_slab_t)mem;
3281 slab = vtoslab((vm_offset_t)item);
3282 if (slab->us_keg != keg) {
3288 slab_free_item(keg, slab, item);
3289 if (keg->uk_flags & UMA_ZFLAG_FULL) {
3290 if (keg->uk_pages < keg->uk_maxpages) {
3291 keg->uk_flags &= ~UMA_ZFLAG_FULL;
3296 * We can handle one more allocation. Since we're
3297 * clearing ZFLAG_FULL, wake up all procs blocked
3298 * on pages. This should be uncommon, so keeping this
3299 			 * simple for now (rather than adding a count of blocked threads, etc.).
3308 zone->uz_flags &= ~UMA_ZFLAG_FULL;
3316 * Frees a single item to any zone.
3319 * zone The zone to free to
3320 * item The item we're freeing
3321 * udata User supplied data for the dtor
3322 * skip Skip dtors and finis
3325 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3330 skipdbg = uma_dbg_zskip(zone, item);
3331 if (skip == SKIP_NONE && !skipdbg) {
3332 if (zone->uz_flags & UMA_ZONE_MALLOC)
3333 uma_dbg_free(zone, udata, item);
3335 uma_dbg_free(zone, NULL, item);
3338 if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
3339 (!skipdbg || zone->uz_dtor != trash_dtor ||
3340 zone->uz_ctor != trash_ctor))
3342 if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
3344 zone->uz_dtor(item, zone->uz_size, udata);
3346 if (skip < SKIP_FINI && zone->uz_fini)
3347 zone->uz_fini(item, zone->uz_size);
3349 atomic_add_long(&zone->uz_frees, 1);
3350 zone->uz_release(zone->uz_arg, &item, 1);
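/*
 * Set an upper bound on the number of items in the zone.  The limit is
 * rounded up to whole slabs, so the effective limit returned may exceed the
 * value requested; e.g. asking for 1010 items from a keg that fits 50 items
 * in a single-page slab yields an effective limit of 1050.
 */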
3355 uma_zone_set_max(uma_zone_t zone, int nitems)
3359 keg = zone_first_keg(zone);
3363 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
3364 if (keg->uk_maxpages * keg->uk_ipers < nitems)
3365 keg->uk_maxpages += keg->uk_ppera;
3366 nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
3374 uma_zone_get_max(uma_zone_t zone)
3379 keg = zone_first_keg(zone);
3383 nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
3391 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3395 zone->uz_warning = warning;
3401 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3405 TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3411 uma_zone_get_cur(uma_zone_t zone)
3417 nitems = zone->uz_allocs - zone->uz_frees;
3420 * See the comment in sysctl_vm_zone_stats() regarding the
3421 * safety of accessing the per-cpu caches. With the zone lock
3422 * held, it is safe, but can potentially result in stale data.
3424 nitems += zone->uz_cpu[i].uc_allocs -
3425 zone->uz_cpu[i].uc_frees;
3429 return (nitems < 0 ? 0 : nitems);
3434 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3438 keg = zone_first_keg(zone);
3439 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
3441 KASSERT(keg->uk_pages == 0,
3442 ("uma_zone_set_init on non-empty keg"));
3443 keg->uk_init = uminit;
3449 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3453 keg = zone_first_keg(zone);
3454 KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3456 KASSERT(keg->uk_pages == 0,
3457 ("uma_zone_set_fini on non-empty keg"));
3458 keg->uk_fini = fini;
3464 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3468 KASSERT(zone_first_keg(zone)->uk_pages == 0,
3469 ("uma_zone_set_zinit on non-empty keg"));
3470 zone->uz_init = zinit;
3476 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3480 KASSERT(zone_first_keg(zone)->uk_pages == 0,
3481 ("uma_zone_set_zfini on non-empty keg"));
3482 zone->uz_fini = zfini;
3487 /* XXX uk_freef is not actually used with the zone locked */
3489 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3493 keg = zone_first_keg(zone);
3494 KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3496 keg->uk_freef = freef;
3501 /* XXX uk_allocf is not actually used with the zone locked */
3503 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3507 keg = zone_first_keg(zone);
3509 keg->uk_allocf = allocf;
3515 uma_zone_reserve(uma_zone_t zone, int items)
3519 keg = zone_first_keg(zone);
3523 keg->uk_reserve = items;
3531 uma_zone_reserve_kva(uma_zone_t zone, int count)
3537 keg = zone_first_keg(zone);
3540 pages = count / keg->uk_ipers;
3542 if (pages * keg->uk_ipers < count)
3544 pages *= keg->uk_ppera;
3546 #ifdef UMA_MD_SMALL_ALLOC
3547 if (keg->uk_ppera > 1) {
3551 kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3559 keg->uk_maxpages = pages;
3560 #ifdef UMA_MD_SMALL_ALLOC
3561 keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3563 keg->uk_allocf = noobj_alloc;
3565 keg->uk_flags |= UMA_ZONE_NOFREE;
3573 uma_prealloc(uma_zone_t zone, int items)
3580 keg = zone_first_keg(zone);
3584 slabs = items / keg->uk_ipers;
3586 if (slabs * keg->uk_ipers < items)
3589 slab = keg_alloc_slab(keg, zone, domain, M_WAITOK);
3592 MPASS(slab->us_keg == keg);
3593 dom = &keg->uk_domain[slab->us_domain];
3594 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3596 domain = (domain + 1) % vm_ndomains;
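/*
 * Reclaim cached memory from every zone.  When the free page count is
 * critically low, or the caller signals kmem pressure, the per-CPU caches
 * are flushed as well before the zones are drained a second time.
 */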
3603 uma_reclaim_locked(bool kmem_danger)
3606 CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3607 sx_assert(&uma_drain_lock, SA_XLOCKED);
3609 zone_foreach(zone_drain);
3610 if (vm_page_count_min() || kmem_danger) {
3611 cache_drain_safe(NULL);
3612 zone_foreach(zone_drain);
3615 	 * Some slabs may have been freed, but this zone is visited early in the
3616 	 * pass above; visit it again so that we can free pages that become empty
3617 	 * only once the other zones have been drained.  We have to do the same for buckets.
3619 zone_drain(slabzone);
3620 bucket_zone_drain();
3627 sx_xlock(&uma_drain_lock);
3628 uma_reclaim_locked(false);
3629 sx_xunlock(&uma_drain_lock);
3632 static volatile int uma_reclaim_needed;
3635 uma_reclaim_wakeup(void)
3638 if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
3639 wakeup(uma_reclaim);
3643 uma_reclaim_worker(void *arg __unused)
3647 sx_xlock(&uma_drain_lock);
3648 while (atomic_load_int(&uma_reclaim_needed) == 0)
3649 sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
3651 sx_xunlock(&uma_drain_lock);
3652 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3653 sx_xlock(&uma_drain_lock);
3654 uma_reclaim_locked(true);
3655 atomic_store_int(&uma_reclaim_needed, 0);
3656 sx_xunlock(&uma_drain_lock);
3657 		/* Don't fire more than once per second. */
3658 pause("umarclslp", hz);
3664 uma_zone_exhausted(uma_zone_t zone)
3669 full = (zone->uz_flags & UMA_ZFLAG_FULL);
3675 uma_zone_exhausted_nolock(uma_zone_t zone)
3677 return (zone->uz_flags & UMA_ZFLAG_FULL);
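/*
 * Allocate a buffer directly from the kernel arena and track it with an
 * offpage slab taken from slabzone, so that uma_large_free() can later find
 * and release the mapping.  Used by malloc(9) for requests too large for a
 * regular zone.
 */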
3681 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
3687 #if VM_NRESERVLEVEL > 0
3688 if (__predict_true((wait & M_EXEC) == 0))
3689 arena = kernel_arena;
3691 arena = kernel_rwx_arena;
3693 arena = kernel_arena;
3696 slab = zone_alloc_item(slabzone, NULL, domain, wait);
3699 if (domain == UMA_ANYDOMAIN)
3700 addr = kmem_malloc(arena, size, wait);
3702 addr = kmem_malloc_domain(arena, domain, size, wait);
3704 vsetslab(addr, slab);
3705 slab->us_data = (void *)addr;
3706 slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
3707 #if VM_NRESERVLEVEL > 0
3708 if (__predict_false(arena == kernel_rwx_arena))
3709 slab->us_flags |= UMA_SLAB_KRWX;
3711 slab->us_size = size;
3712 slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
3713 pmap_kextract(addr)));
3714 uma_total_inc(size);
3716 zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3719 return ((void *)addr);
3723 uma_large_malloc(vm_size_t size, int wait)
3726 return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait);
3730 uma_large_free(uma_slab_t slab)
3734 KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
3735 ("uma_large_free: Memory not allocated with uma_large_malloc."));
3736 #if VM_NRESERVLEVEL > 0
3737 if (__predict_true((slab->us_flags & UMA_SLAB_KRWX) == 0))
3738 arena = kernel_arena;
3740 arena = kernel_rwx_arena;
3742 arena = kernel_arena;
3744 kmem_free(arena, (vm_offset_t)slab->us_data, slab->us_size);
3745 uma_total_dec(slab->us_size);
3746 zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3750 uma_zero_item(void *item, uma_zone_t zone)
3753 bzero(item, zone->uz_size);
3760 return (uma_kmem_limit);
3764 uma_set_limit(unsigned long limit)
3767 uma_kmem_limit = limit;
3774 return (uma_kmem_total);
3781 return (uma_kmem_limit - uma_kmem_total);
3785 uma_print_stats(void)
3787 zone_foreach(uma_print_zone);
3791 slab_print(uma_slab_t slab)
3793 printf("slab: keg %p, data %p, freecount %d\n",
3794 slab->us_keg, slab->us_data, slab->us_freecount);
3798 cache_print(uma_cache_t cache)
3800 printf("alloc: %p(%d), free: %p(%d)\n",
3801 cache->uc_allocbucket,
3802 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3803 cache->uc_freebucket,
3804 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3808 uma_print_keg(uma_keg_t keg)
3814 printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3815 "out %d free %d limit %d\n",
3816 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3817 keg->uk_ipers, keg->uk_ppera,
3818 (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3819 keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3820 for (i = 0; i < vm_ndomains; i++) {
3821 dom = &keg->uk_domain[i];
3822 printf("Part slabs:\n");
3823 LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
3825 printf("Free slabs:\n");
3826 LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
3828 printf("Full slabs:\n");
3829 LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
3835 uma_print_zone(uma_zone_t zone)
3841 printf("zone: %s(%p) size %d flags %#x\n",
3842 zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3843 LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3844 uma_print_keg(kl->kl_keg);
3846 cache = &zone->uz_cpu[i];
3847 printf("CPU %d Cache:\n", i);
3854 * Generate statistics across both the zone and its per-CPU caches.  Return
3855 * the desired statistic if the corresponding pointer is non-NULL.
3857 * Note: does not update the zone statistics, as it can't safely clear the
3858 * per-CPU cache statistic.
3860 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3861 * safe from off-CPU; we should modify the caches to track this information
3862 * directly so that we don't have to.
3865 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3866 uint64_t *freesp, uint64_t *sleepsp)
3869 uint64_t allocs, frees, sleeps;
3872 allocs = frees = sleeps = 0;
3875 cache = &z->uz_cpu[cpu];
3876 if (cache->uc_allocbucket != NULL)
3877 cachefree += cache->uc_allocbucket->ub_cnt;
3878 if (cache->uc_freebucket != NULL)
3879 cachefree += cache->uc_freebucket->ub_cnt;
3880 allocs += cache->uc_allocs;
3881 frees += cache->uc_frees;
3883 allocs += z->uz_allocs;
3884 frees += z->uz_frees;
3885 sleeps += z->uz_sleeps;
3886 if (cachefreep != NULL)
3887 *cachefreep = cachefree;
3888 if (allocsp != NULL)
3892 if (sleepsp != NULL)
3898 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3905 rw_rlock(&uma_rwlock);
3906 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3907 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3910 rw_runlock(&uma_rwlock);
3911 return (sysctl_handle_int(oidp, &count, 0, req));
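/*
 * Export zone statistics as a binary stream: a uma_stream_header followed,
 * for each zone, by a uma_type_header and one uma_percpu_stat record per
 * possible CPU.
 */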
3915 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3917 struct uma_stream_header ush;
3918 struct uma_type_header uth;
3919 struct uma_percpu_stat *ups;
3920 uma_bucket_t bucket;
3921 uma_zone_domain_t zdom;
3928 int count, error, i;
3930 error = sysctl_wire_old_buffer(req, 0);
3933 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3934 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
3935 ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
3938 rw_rlock(&uma_rwlock);
3939 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3940 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3945 * Insert stream header.
3947 bzero(&ush, sizeof(ush));
3948 ush.ush_version = UMA_STREAM_VERSION;
3949 ush.ush_maxcpus = (mp_maxid + 1);
3950 ush.ush_count = count;
3951 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3953 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3954 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3955 bzero(&uth, sizeof(uth));
3957 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3958 uth.uth_align = kz->uk_align;
3959 uth.uth_size = kz->uk_size;
3960 uth.uth_rsize = kz->uk_rsize;
3961 LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3963 uth.uth_maxpages += k->uk_maxpages;
3964 uth.uth_pages += k->uk_pages;
3965 uth.uth_keg_free += k->uk_free;
3966 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3971 			 * A zone is secondary if it is not the first entry
3972 * on the keg's zone list.
3974 if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3975 (LIST_FIRST(&kz->uk_zones) != z))
3976 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3978 for (i = 0; i < vm_ndomains; i++) {
3979 zdom = &z->uz_domain[i];
3980 LIST_FOREACH(bucket, &zdom->uzd_buckets,
3982 uth.uth_zone_free += bucket->ub_cnt;
3984 uth.uth_allocs = z->uz_allocs;
3985 uth.uth_frees = z->uz_frees;
3986 uth.uth_fails = z->uz_fails;
3987 uth.uth_sleeps = z->uz_sleeps;
3989 * While it is not normally safe to access the cache
3990 * bucket pointers while not on the CPU that owns the
3991 * cache, we only allow the pointers to be exchanged
3992 * without the zone lock held, not invalidated, so
3993 * accept the possible race associated with bucket
3994 * exchange during monitoring.
3996 for (i = 0; i < mp_maxid + 1; i++) {
3997 bzero(&ups[i], sizeof(*ups));
3998 if (kz->uk_flags & UMA_ZFLAG_INTERNAL ||
4001 cache = &z->uz_cpu[i];
4002 if (cache->uc_allocbucket != NULL)
4003 ups[i].ups_cache_free +=
4004 cache->uc_allocbucket->ub_cnt;
4005 if (cache->uc_freebucket != NULL)
4006 ups[i].ups_cache_free +=
4007 cache->uc_freebucket->ub_cnt;
4008 ups[i].ups_allocs = cache->uc_allocs;
4009 ups[i].ups_frees = cache->uc_frees;
4012 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4013 for (i = 0; i < mp_maxid + 1; i++)
4014 (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4017 rw_runlock(&uma_rwlock);
4018 error = sbuf_finish(&sbuf);
4025 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4027 uma_zone_t zone = *(uma_zone_t *)arg1;
4030 max = uma_zone_get_max(zone);
4031 error = sysctl_handle_int(oidp, &max, 0, req);
4032 if (error || !req->newptr)
4035 uma_zone_set_max(zone, max);
4041 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4043 uma_zone_t zone = *(uma_zone_t *)arg1;
4046 cur = uma_zone_get_cur(zone);
4047 return (sysctl_handle_int(oidp, &cur, 0, req));
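/*
 * Resolve the slab backing 'item' for the INVARIANTS debugging checks, using
 * vtoslab(), the keg's hash table, or the fixed in-slab offset depending on
 * how the keg stores its slab headers.
 */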
4052 uma_dbg_getslab(uma_zone_t zone, void *item)
4058 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4059 if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4060 slab = vtoslab((vm_offset_t)mem);
4063 * It is safe to return the slab here even though the
4064 * zone is unlocked because the item's allocation state
4065 * essentially holds a reference.
4068 keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
4069 if (keg->uk_flags & UMA_ZONE_HASH)
4070 slab = hash_sfind(&keg->uk_hash, mem);
4072 slab = (uma_slab_t)(mem + keg->uk_pgoff);
4080 uma_dbg_zskip(uma_zone_t zone, void *mem)
4084 if ((keg = zone_first_keg(zone)) == NULL)
4087 return (uma_dbg_kskip(keg, mem));
4091 uma_dbg_kskip(uma_keg_t keg, void *mem)
4095 if (dbg_divisor == 0)
4098 if (dbg_divisor == 1)
4101 idx = (uintptr_t)mem >> PAGE_SHIFT;
4102 if (keg->uk_ipers > 1) {
4103 idx *= keg->uk_ipers;
4104 idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4107 if ((idx / dbg_divisor) * dbg_divisor != idx) {
4108 counter_u64_add(uma_skip_cnt, 1);
4111 counter_u64_add(uma_dbg_cnt, 1);
4117 * Set up the slab's freei data such that uma_dbg_free can function.
4121 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4127 slab = uma_dbg_getslab(zone, item);
4129 panic("uma: item %p did not belong to zone %s\n",
4130 item, zone->uz_name);
4133 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4135 if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4136 panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4137 item, zone, zone->uz_name, slab, freei);
4138 BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4144 * Verifies freed addresses. Checks for alignment, valid slab membership
4145 * and duplicate frees.
4149 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4155 slab = uma_dbg_getslab(zone, item);
4157 panic("uma: Freed item %p did not belong to zone %s\n",
4158 item, zone->uz_name);
4161 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4163 if (freei >= keg->uk_ipers)
4164 panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4165 item, zone, zone->uz_name, slab, freei);
4167 if (((freei * keg->uk_rsize) + slab->us_data) != item)
4168 panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4169 item, zone, zone->uz_name, slab, freei);
4171 if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4172 panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4173 item, zone, zone->uz_name, slab, freei);
4175 BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4177 #endif /* INVARIANTS */
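/*
 * DDB "show uma" command: print a one-line summary for every zone, including
 * items sitting in per-CPU and per-domain buckets.
 */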
4180 DB_SHOW_COMMAND(uma, db_show_uma)
4182 uma_bucket_t bucket;
4185 uma_zone_domain_t zdom;
4186 uint64_t allocs, frees, sleeps;
4189 db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
4190 "Free", "Requests", "Sleeps", "Bucket");
4191 LIST_FOREACH(kz, &uma_kegs, uk_link) {
4192 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4193 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4194 allocs = z->uz_allocs;
4195 frees = z->uz_frees;
4196 sleeps = z->uz_sleeps;
4199 uma_zone_sumstat(z, &cachefree, &allocs,
4201 if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4202 (LIST_FIRST(&kz->uk_zones) != z)))
4203 cachefree += kz->uk_free;
4204 for (i = 0; i < vm_ndomains; i++) {
4205 zdom = &z->uz_domain[i];
4206 LIST_FOREACH(bucket, &zdom->uzd_buckets,
4208 cachefree += bucket->ub_cnt;
4210 db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
4211 z->uz_name, (uintmax_t)kz->uk_size,
4212 (intmax_t)(allocs - frees), cachefree,
4213 (uintmax_t)allocs, sleeps, z->uz_count);
4220 DB_SHOW_COMMAND(umacache, db_show_umacache)
4222 uma_bucket_t bucket;
4224 uma_zone_domain_t zdom;
4225 uint64_t allocs, frees;
4228 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4229 "Requests", "Bucket");
4230 LIST_FOREACH(z, &uma_cachezones, uz_link) {
4231 uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
4232 for (i = 0; i < vm_ndomains; i++) {
4233 zdom = &z->uz_domain[i];
4234 LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link)
4235 cachefree += bucket->ub_cnt;
4237 db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
4238 z->uz_name, (uintmax_t)z->uz_size,
4239 (intmax_t)(allocs - frees), cachefree,
4240 (uintmax_t)allocs, z->uz_count);