/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/counter.h>
#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>
/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */
/*
 * The brief summary:  Zones describe unique allocation types.  Zones are
 * organized into per-CPU caches which are filled by buckets.  Buckets are
 * organized according to memory domains.  Buckets are filled from kegs which
 * are also organized according to memory domains.  Kegs describe a unique
 * allocation type, backend memory provider, and layout.  Kegs are associated
 * with one or more zones and zones reference one or more kegs.  Kegs provide
 * slabs which are virtually contiguous collections of pages.  Each slab is
 * broken down into one or more items that will satisfy an individual
 * allocation.
 *
 * Allocation is satisfied in the following order:
 * 1) Per-cpu cache
 * 2) Per-domain cache of buckets
 * 3) Slab from any of N kegs
 * 4) Backend page provider
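 *
 * As an informal illustration only (a sketch, not part of UMA itself), a
 * consumer drives this whole stack through the public uma.h API; the
 * "mything" zone below is hypothetical:
 *
 *	zone = uma_zcreate("mything", sizeof(struct mything),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	obj = uma_zalloc(zone, M_WAITOK);
 *	...
 *	uma_zfree(zone, obj);
 *	uma_zdestroy(zone);
 *
 * Each uma_zalloc() call is satisfied from the layers listed above before
 * falling back to the keg and its backend page provider.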
 *
 * More detail on individual objects is contained below:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also
 * contain the user-supplied size of each item, which is adjusted for
 * alignment purposes; rsize is the result of that adjustment.  The keg also
 * stores information for managing a hash of page addresses that maps pages
 * to uma_slab_t structures for pages that don't have embedded uma_slab_t's.
 *
 * Keg slab lists are organized by memory domain to support NUMA allocation
 * policies.  By default allocations are spread across domains to reduce the
 * potential for hotspots.  Special keg creation flags may be specified to
 * prefer local allocation.  However there is no strict enforcement as frees
 * may happen on any CPU and these are returned to the CPU-local cache
 * regardless of the originating domain.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
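 *
 * As a rough worked example (illustrative numbers only, assuming 4 KB pages
 * and ignoring the header itself): a 720-byte item fits 5 times in a page,
 * leaving ~496 bytes (~12%) unused, which exceeds UMA_MAX_WASTE; but because
 * an off-page header would still yield only 5 items (6 * 720 > 4096), the
 * slab header stays embedded.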
 *
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are done in the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
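/*
 * As an informal illustration (a sketch, not the actual mbuf code), secondary
 * Zones sharing a single Keg are created with uma_zsecond_create() from
 * uma.h; the zone names below are hypothetical:
 *
 *	master = uma_zcreate("item", size, NULL, NULL, NULL, NULL,
 *	    UMA_ALIGN_PTR, 0);
 *	second = uma_zsecond_create("item_variant", ctor2, dtor2,
 *	    NULL, NULL, master);
 *
 * Both Zones then allocate from the Master Zone's Keg while keeping their own
 * ctor/dtor pairs and per-CPU caches.
 */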
/*
 * This is the representation of a normal (non-OFFPAGE) slab:
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *	| _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *	||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *	||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *	|___________________________________________________________|
 *
 * This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *	| _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 *	||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 *	||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 *	|___________________________________________________________|
 */
#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

/* Max waste percentage before going to off-page slab management */
#define UMA_MAX_WASTE	10

/* Max size of a CACHESPREAD slab. */
#define UMA_CACHESPREAD_MAX_SIZE	(128 * 1024)
/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_OFFPAGE	0x00200000	/*
						 * Force the slab structure
						 * allocation off of the real
						 * memory.
						 */
#define UMA_ZFLAG_HASH		0x00400000	/*
						 * Use a hash table instead of
						 * caching information in the
						 * vm_page.
						 */
#define UMA_ZFLAG_VTOSLAB	0x00800000	/*
						 * Zone uses vtoslab for
						 * lookups.
						 */
#define UMA_ZFLAG_CTORDTOR	0x01000000	/* Zone has ctor/dtor set. */
#define UMA_ZFLAG_LIMIT		0x02000000	/* Zone has limit set. */
#define UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
#define UMA_ZFLAG_RECLAIMING	0x08000000	/* Running zone_reclaim(). */
#define UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */
#define UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_OFFPAGE | UMA_ZFLAG_HASH | UMA_ZFLAG_VTOSLAB |		\
     UMA_ZFLAG_BUCKET | UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)
#define PRINT_UMA_ZFLAGS	"\20"	\
/*
 * Hash table for freed address -> slab translation.
 *
 * Only zones with memory not touchable by the allocator use the
 * hash table.  Otherwise slabs are found with vtoslab().
 */
#define UMA_HASH_SIZE_INIT	32

#define UMA_HASH(h, s)	((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)
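/*
 * A worked example of UMA_HASH() (illustrative only, assuming 4 KB pages and
 * a 32-bucket table, i.e. uh_hashmask == 0x1f): for a slab whose data page
 * is at 0x12345000, the page frame number is 0x12345000 >> 12 == 0x12345,
 * and 0x12345 & 0x1f == 5, so the slab hangs off uh_slab_hash[5].
 */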
#define UMA_HASH_INSERT(h, s, mem)					\
	LIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], (uma_hash_slab_t)(s), uhs_hlink)

#define UMA_HASH_REMOVE(h, s)						\
	LIST_REMOVE((uma_hash_slab_t)(s), uhs_hlink)
LIST_HEAD(slabhashhead, uma_hash_slab);

struct uma_hash {
	struct slabhashhead	*uh_slab_hash;	/* Hash table for slabs */
	u_int		uh_hashsize;	/* Current size of the hash table */
	u_int		uh_hashmask;	/* Mask used during hashing */
};
/*
 * Align field or structure to cache 'sector' in Intel terminology.  This
 * is more efficient with adjacent line prefetch.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define UMA_SUPER_ALIGN	(CACHE_LINE_SIZE * 2)
#else
#define UMA_SUPER_ALIGN	CACHE_LINE_SIZE
#endif

#define UMA_ALIGN	__aligned(UMA_SUPER_ALIGN)
/*
 * The uma_bucket structure is used to queue and manage buckets divorced
 * from per-cpu caches.  They are loaded into uma_cache_bucket structures
 * for use.
 */
struct uma_bucket {
	TAILQ_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t		ub_cnt;			/* Count of items in bucket. */
	int16_t		ub_entries;		/* Max items. */
	void		*ub_bucket[];		/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;
/*
 * The uma_cache_bucket structure is statically allocated on each per-cpu
 * cache.  Its use reduces branches and cache misses in the fast path.
 */
struct uma_cache_bucket {
	uma_bucket_t	ucb_bucket;
	int16_t		ucb_cnt;
	int16_t		ucb_entries;
	uint32_t	ucb_spare;
} __aligned(sizeof(void *));

typedef struct uma_cache_bucket * uma_cache_bucket_t;
/*
 * The uma_cache structure is allocated for each cpu for every zone
 * type.  This optimizes synchronization out of the allocator fast path.
 */
struct uma_cache {
	struct uma_cache_bucket	uc_freebucket;	/* Bucket we're freeing to */
	struct uma_cache_bucket	uc_allocbucket;	/* Bucket to allocate from */
	struct uma_cache_bucket	uc_crossbucket;	/* cross domain bucket */
	uint64_t		uc_allocs;	/* Count of allocations */
	uint64_t		uc_frees;	/* Count of frees */
} __aligned(UMA_SUPER_ALIGN);

typedef struct uma_cache * uma_cache_t;
LIST_HEAD(slabhead, uma_slab);

/*
 * The cache structure pads perfectly into 64 bytes so we use spare
 * bits from the embedded cache buckets to store information from the zone
 * and keep all fast-path allocations accessing a single per-cpu line.
 */
static inline void
cache_set_uz_flags(uma_cache_t cache, uint32_t flags)
{
	cache->uc_freebucket.ucb_spare = flags;
}

static inline void
cache_set_uz_size(uma_cache_t cache, uint32_t size)
{
	cache->uc_allocbucket.ucb_spare = size;
}

static inline uint32_t
cache_uz_flags(uma_cache_t cache)
{
	return (cache->uc_freebucket.ucb_spare);
}

static inline uint32_t
cache_uz_size(uma_cache_t cache)
{
	return (cache->uc_allocbucket.ucb_spare);
}
/*
 * Per-domain slab lists.  Embedded in the kegs.
 */
struct uma_domain {
	struct mtx_padalign ud_lock;	/* Lock for the domain lists. */
	struct slabhead	ud_part_slab;	/* partially allocated slabs */
	struct slabhead	ud_free_slab;	/* completely unallocated slabs */
	struct slabhead	ud_full_slab;	/* fully allocated slabs */
	uint32_t	ud_pages;	/* Total page count */
	uint32_t	ud_free;	/* Count of items free in slabs */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_domain * uma_domain_t;
/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 */
struct uma_keg {
	struct uma_hash	uk_hash;
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */

	struct domainset_ref uk_dr;	/* Domain selection policy. */
	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;	/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	/* Must be last, variable sized. */
	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
};
typedef struct uma_keg * uma_keg_t;
#define KEG_ASSERT_COLD(k)						\
	KASSERT(uma_keg_get_allocs((k)) == 0,				\
	    ("keg %s initialization after use.", (k)->uk_name))
/*
 * Free bits per-slab.
 */
#define SLAB_MAX_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
#define SLAB_MIN_SETSIZE	_BITSET_BITS
BITSET_DEFINE(slabbits, SLAB_MAX_SETSIZE);
BITSET_DEFINE(noslabbits, 0);
/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_domain;		/* Backing NUMA domain. */
	struct noslabbits us_free;		/* Free bitmask, flexible. */
};
_Static_assert(sizeof(struct uma_slab) == offsetof(struct uma_slab, us_free),
    "us_free field must be last");
#if MAXMEMDOM >= 255
#error "Slab domain type insufficient"
#endif

typedef struct uma_slab * uma_slab_t;
/*
 * On INVARIANTS builds, the slab contains a second bitset of the same size,
 * "dbg_bits", which is laid out immediately after us_free.
 */
#ifdef INVARIANTS
#define SLAB_BITSETS	2
#else
#define SLAB_BITSETS	1
#endif
/* These three functions are for embedded (!OFFPAGE) use only. */
size_t slab_sizeof(int nitems);
size_t slab_space(int nitems);
int slab_ipers(size_t size, int align);
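/*
 * Roughly speaking (a sketch of the idea, not the authoritative
 * implementation), slab_sizeof(nitems) amounts to the embedded header plus
 * the free bitset(s) sized for nitems:
 *
 *	sizeof(struct uma_slab) + SLAB_BITSETS * BITSET_SIZE(nitems)
 *
 * rounded up for alignment; slab_space() and slab_ipers() then derive how
 * many bytes and items remain available in a UMA_SLAB_SIZE chunk.
 */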
/*
 * Slab structure with a full sized bitset and hash link for both
 * HASH and OFFPAGE zones.
 */
struct uma_hash_slab {
	struct uma_slab		uhs_slab;	/* Must be first. */
	struct slabbits		uhs_bits1;	/* Must be second. */
#ifdef INVARIANTS
	struct slabbits		uhs_bits2;	/* Must be third. */
#endif
	LIST_ENTRY(uma_hash_slab) uhs_hlink;	/* Link for hash table */
	uint8_t			*uhs_data;	/* First item */
};

typedef struct uma_hash_slab * uma_hash_slab_t;
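/*
 * Return the base address of a slab's item storage: for embedded slabs the
 * items live at the start of the page run, uk_pgoff bytes before the header;
 * for HASH/OFFPAGE slabs the address is recorded separately in uhs_data.
 */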
static inline void *
slab_data(uma_slab_t slab, uma_keg_t keg)
{
	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0)
		return ((void *)((uintptr_t)slab - keg->uk_pgoff));
	else
		return (((uma_hash_slab_t)slab)->uhs_data);
}
static inline void *
slab_item(uma_slab_t slab, uma_keg_t keg, int index)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return ((void *)(data + keg->uk_rsize * index));
}
static inline int
slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return (((uintptr_t)item - data) / keg->uk_rsize);
}
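/*
 * Note that slab_item() and slab_item_index() are inverses over a slab's
 * item array: items are laid out at uk_rsize-byte strides from slab_data(),
 * so slab_item_index(slab, keg, slab_item(slab, keg, i)) == i for any valid
 * index i.
 */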
TAILQ_HEAD(uma_bucketlist, uma_bucket);

struct uma_zone_domain {
	struct uma_bucketlist uzd_buckets;	/* full buckets */
	uma_bucket_t	uzd_cross;	/* Fills from cross buckets. */
	long		uzd_nitems;	/* total item count */
	long		uzd_imax;	/* maximum item count this period */
	long		uzd_imin;	/* minimum item count this period */
	long		uzd_wss;	/* working set size estimate */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_zone_domain * uma_zone_domain_t;
/*
 * Zone structure - per memory type.
 */
struct uma_zone {
	/* Offset 0, used in alloc/free fast/medium fast path and const. */
	uma_keg_t	uz_keg;		/* This zone's keg if !CACHE */
	struct uma_zone_domain	*uz_domain;	/* per-domain buckets */
	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */

	uint64_t	uz_max_items;	/* Maximum number of items to alloc */
	uint32_t	uz_sleepers;	/* Threads sleeping on limit */
	uint16_t	uz_bucket_size;	/* Number of items in full bucket */
	uint16_t	uz_bucket_size_max; /* Maximum number of bucket items */
	/* Offset 64, used in bucket replenish. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */

	uint64_t	uz_bkt_count;	/* Items in bucket cache */
	uint64_t	uz_bkt_max;	/* Maximum bucket cache size */
	/* Offset 128 Rare. */
	/*
	 * The lock is placed here to avoid adjacent line prefetcher
	 * in fast paths and to take up space near infrequently accessed
	 * members to reduce alignment overhead.
	 */
	struct mtx	uz_lock;	/* Lock for the zone */
	LIST_ENTRY(uma_zone) uz_link;	/* List of all zones in keg */
	const char	*uz_name;	/* Text name of the zone */
	/* The next two fields are used to print rate-limited warnings. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
	struct task	uz_maxaction;	/* Task to run when at limit */
	uint16_t	uz_bucket_size_min; /* Min number of items in bucket */

	struct mtx_padalign uz_cross_lock;	/* Cross domain free lock */
	/* Offset 256+, stats and misc. */
	counter_u64_t	uz_allocs;	/* Total number of allocations */
	counter_u64_t	uz_frees;	/* Total number of frees */
	counter_u64_t	uz_fails;	/* Total number of alloc failures */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint64_t	uz_xdomain;	/* Total number of cross-domain frees */
	volatile uint64_t uz_items;	/* Total items count & sleepers */

	char		*uz_ctlname;	/* sysctl safe name string. */
	struct sysctl_oid *uz_oid;	/* sysctl oid pointer. */
	int		uz_namecnt;	/* duplicate name count. */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[];	/* Per cpu caches */

	/* uz_domain follows here. */
};
/*
 * Macros for interpreting the uz_items field.  20 bits of sleeper count
 * and 44 bits of item count.
 */
#define UZ_ITEMS_SLEEPER_SHIFT	44LL
#define UZ_ITEMS_SLEEPERS_MAX	((1 << (64 - UZ_ITEMS_SLEEPER_SHIFT)) - 1)
#define UZ_ITEMS_COUNT_MASK	((1LL << UZ_ITEMS_SLEEPER_SHIFT) - 1)
#define UZ_ITEMS_COUNT(x)	((x) & UZ_ITEMS_COUNT_MASK)
#define UZ_ITEMS_SLEEPERS(x)	((x) >> UZ_ITEMS_SLEEPER_SHIFT)
#define UZ_ITEMS_SLEEPER	(1LL << UZ_ITEMS_SLEEPER_SHIFT)
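/*
 * For example (illustrative only), an uz_items value of
 * (2LL << UZ_ITEMS_SLEEPER_SHIFT) | 1000 encodes 1000 outstanding items with
 * two threads sleeping on the limit: UZ_ITEMS_COUNT() recovers 1000 and
 * UZ_ITEMS_SLEEPERS() recovers 2.
 */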
#define ZONE_ASSERT_COLD(z)						\
	KASSERT(uma_zone_get_allocs((z)) == 0,				\
	    ("zone %s initialization after use.", (z)->uz_name))
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
#define KEG_LOCKPTR(k, d)	(struct mtx *)&(k)->uk_domain[(d)].ud_lock
#define KEG_LOCK_INIT(k, d, lc)						\
	do {								\
		if ((lc))						\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)

#define KEG_LOCK_FINI(k, d)	mtx_destroy(KEG_LOCKPTR(k, d))
#define KEG_LOCK(k, d)							\
	({ mtx_lock(KEG_LOCKPTR(k, d)); KEG_LOCKPTR(k, d); })
#define KEG_UNLOCK(k, d)	mtx_unlock(KEG_LOCKPTR(k, d))
#define KEG_LOCK_ASSERT(k, d)	mtx_assert(KEG_LOCKPTR(k, d), MA_OWNED)
#define KEG_GET(zone, keg) do {						\
	(keg) = (zone)->uz_keg;						\
	KASSERT((void *)(keg) != (void *)&(zone)->uz_lock,		\
	    ("%s: Invalid zone %p type", __func__, (zone)));		\
	} while (0)
#define ZONE_LOCK_INIT(z, lc)						\
	do {								\
		if ((lc))						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,		\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);		\
		else							\
			mtx_init(&(z)->uz_lock, (z)->uz_name,		\
			    "UMA zone", MTX_DEF | MTX_DUPOK);		\
	} while (0)

#define ZONE_LOCK(z)	mtx_lock(&(z)->uz_lock)
#define ZONE_TRYLOCK(z)	mtx_trylock(&(z)->uz_lock)
#define ZONE_UNLOCK(z)	mtx_unlock(&(z)->uz_lock)
#define ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
#define ZONE_LOCK_ASSERT(z)	mtx_assert(&(z)->uz_lock, MA_OWNED)
#define ZONE_CROSS_LOCK_INIT(z)						\
	mtx_init(&(z)->uz_cross_lock, "UMA Cross", NULL, MTX_DEF)
#define ZONE_CROSS_LOCK(z)	mtx_lock(&(z)->uz_cross_lock)
#define ZONE_CROSS_UNLOCK(z)	mtx_unlock(&(z)->uz_cross_lock)
#define ZONE_CROSS_LOCK_FINI(z)	mtx_destroy(&(z)->uz_cross_lock)
/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to lookup
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_hash_slab_t slab;
	u_int hval;

	hval = UMA_HASH(hash, data);
	LIST_FOREACH(slab, &hash->uh_slab_hash[hval], uhs_hlink) {
		if ((uint8_t *)slab->uhs_data == data)
			return (&slab->uhs_slab);
	}
	return (NULL);
}
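/*
 * The helpers below store and retrieve the zone/slab back-pointers kept in
 * the vm_page for VTOSLAB zones, keyed by an item's kernel virtual address.
 */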
static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return (p->plinks.uma.slab);
}

static __inline void
vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	*slab = p->plinks.uma.slab;
	*zone = p->plinks.uma.zone;
}

static __inline void
vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.uma.slab = slab;
	p->plinks.uma.zone = zone;
}
extern unsigned long uma_kmem_limit;
extern unsigned long uma_kmem_total;

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{
	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{
	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}
/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
/* Set a global soft limit on UMA managed memory. */
void uma_set_limit(unsigned long limit);

#endif /* VM_UMA_INT_H */