/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#include <sys/counter.h>
#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>
/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */
/*
 * The brief summary:  Zones describe unique allocation types.  Zones are
 * organized into per-CPU caches which are filled by buckets.  Buckets are
 * organized according to memory domains.  Buckets are filled from kegs which
 * are also organized according to memory domains.  Kegs describe a unique
 * allocation type, backend memory provider, and layout.  Kegs are associated
 * with one or more zones and zones reference one or more kegs.  Kegs provide
 * slabs which are virtually contiguous collections of pages.  Each slab is
 * broken down into one or more items that will satisfy an individual
 * allocation.
 *
 * Allocation is satisfied in the following order:
 *	1) Per-CPU cache
 *	2) Per-domain cache of buckets
 *	3) Slab from any of N kegs
 *	4) Backend page provider
 *
 * More detail on individual objects is contained below:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment purposes,
 * with rsize holding the result of that adjustment.  The keg also stores
 * information for managing a hash of page addresses that maps pages to
 * uma_slab_t structures for pages that don't have embedded uma_slab_t's.
 *
 * Keg slab lists are organized by memory domain to support NUMA allocation
 * policies.  By default allocations are spread across domains to reduce the
 * potential for hotspots.  Special keg creation flags may be specified to
 * prefer local allocation.  However, there is no strict enforcement, as frees
 * may happen on any CPU and these are returned to the CPU-local cache
 * regardless of the originating domain.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
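 *
 * (Illustrative arithmetic, assuming 4 KB pages: a hypothetical 2080-byte
 * item fits only once per page, leaving 2016 bytes, roughly 49% of the
 * page, unused.  This is the just-over-half-page case described above.)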
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone, but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are accounted to the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
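/*
 * As a hedged illustration of the consumer-visible model (the entry points
 * below are declared in uma.h, not in this file; "struct myobj" is a
 * hypothetical item type):
 *
 *	zone = uma_zcreate("myobj", sizeof(struct myobj),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	obj = uma_zalloc(zone, M_WAITOK);
 *	...
 *	uma_zfree(zone, obj);
 *	uma_zdestroy(zone);
 */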
/*
 *	This is the representation for normal (non-OFFPAGE) slabs:
 *
 *	i == item
 *
 *	<----------------  Page (UMA_SLAB_SIZE)  ------------------>
 *	___________________________________________________________
 *	| _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  ___________ |
 *	||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||slab header||
 *	||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||___________||
 *	|___________________________________________________________|
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *	| _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _ |
 *	||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||
 *	||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||
 *	|___________________________________________________________|
 */
#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10
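/*
 * Illustrative example with hypothetical sizes: on a 4 KB page, 1 KB items
 * fit only three times alongside an embedded slab header, leaving most of
 * the remaining quarter of the page unused.  Since that exceeds
 * UMA_MAX_WASTE, UMA would consider moving the slab header off the page so
 * that a fourth item fits.
 */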
/*
 * Hash table for freed address -> slab translation.
 *
 * Only zones with memory not touchable by the allocator use the
 * hash table.  Otherwise slabs are found with vtoslab().
 */
#define UMA_HASH_SIZE_INIT	32

#define UMA_HASH(h, s)	((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)
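/*
 * For illustration, with hypothetical values UMA_SLAB_SHIFT == 12 and
 * uh_hashmask == 0x1f, an item at 0xdeadb123 hashes to
 * (0xdeadb123 >> 12) & 0x1f == 0xdeadb & 0x1f == 27.
 */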
#define UMA_HASH_INSERT(h, s, mem)					\
	LIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], (uma_hash_slab_t)(s), uhs_hlink)

#define UMA_HASH_REMOVE(h, s)						\
	LIST_REMOVE((uma_hash_slab_t)(s), uhs_hlink)
LIST_HEAD(slabhashhead, uma_hash_slab);

struct uma_hash {
	struct slabhashhead	*uh_slab_hash;	/* Hash table for slabs */
	u_int		uh_hashsize;	/* Current size of the hash table */
	u_int		uh_hashmask;	/* Mask used during hashing */
};
/*
 * Align field or structure to cache line.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define UMA_ALIGN	__aligned(128)
#else
#define UMA_ALIGN	__aligned(CACHE_LINE_SIZE)
#endif
/*
 * Structures for per-CPU queues.
 */
struct uma_bucket {
	TAILQ_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t		ub_cnt;			/* Count of items in bucket. */
	int16_t		ub_entries;		/* Max items. */
	void		*ub_bucket[];		/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;
struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	uma_bucket_t	uc_crossbucket;	/* cross domain bucket */
	uint64_t	uc_allocs;	/* Count of allocations */
	uint64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;
LIST_HEAD(slabhead, uma_slab);

/*
 * Per-domain memory list.  Embedded in the kegs.
 */
struct uma_domain {
	struct slabhead	ud_part_slab;	/* partially allocated slabs */
	struct slabhead	ud_free_slab;	/* completely unallocated slabs */
	struct slabhead	ud_full_slab;	/* fully allocated slabs */
};

typedef struct uma_domain * uma_domain_t;
/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 */
struct uma_keg {
	struct mtx	uk_lock;	/* Lock for the keg; must be first.
					 * See the shared uz_keg/uz_lockptr
					 * member of struct uma_zone. */
	struct uma_hash	uk_hash;
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */

	struct domainset_ref uk_dr;	/* Domain selection policy. */
	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_pages;	/* Total page count */
	uint32_t	uk_free;	/* Count of items free in slabs */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;	/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	/* Must be last, variable sized. */
	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
};
typedef struct uma_keg	* uma_keg_t;
/*
 * Free bits per-slab.
 */
#define SLAB_MAX_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
#define SLAB_MIN_SETSIZE	_BITSET_BITS
BITSET_DEFINE(slabbits, SLAB_MAX_SETSIZE);
BITSET_DEFINE(noslabbits, 0);
/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_domain;		/* Backing NUMA domain. */
	struct noslabbits us_free;		/* Free bitmask, flexible. */
};
_Static_assert(sizeof(struct uma_slab) == offsetof(struct uma_slab, us_free),
    "us_free field must be last");
#if MAXMEMDOM >= 255
#error "Slab domain type insufficient"
#endif

typedef struct uma_slab * uma_slab_t;
/*
 * On INVARIANTS builds, the slab contains a second bitset of the same size,
 * "dbg_bits", which is laid out immediately after us_free.
 */
#ifdef INVARIANTS
#define SLAB_BITSETS	2
#else
#define SLAB_BITSETS	1
#endif
/* These three functions are for embedded (!OFFPAGE) use only. */
size_t slab_sizeof(int nitems);
size_t slab_space(int nitems);
int slab_ipers(size_t size, int align);
/*
 * Slab structure with a full sized bitset and hash link for both
 * HASH and OFFPAGE zones.
 */
struct uma_hash_slab {
	struct uma_slab		uhs_slab;	/* Must be first. */
	struct slabbits		uhs_bits1;	/* Must be second. */
#ifdef INVARIANTS
	struct slabbits		uhs_bits2;	/* Must be third. */
#endif
	LIST_ENTRY(uma_hash_slab) uhs_hlink;	/* Link for hash table */
	uint8_t			*uhs_data;	/* First item */
};

typedef struct uma_hash_slab * uma_hash_slab_t;
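/*
 * Note (illustrative): because uhs_slab must be the first member, a
 * uma_slab_t that is known to be hash-linked can be cast to uma_hash_slab_t
 * and back, which is what the UMA_HASH_INSERT() and UMA_HASH_REMOVE()
 * macros above rely on.
 */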
static inline void *
slab_data(uma_slab_t slab, uma_keg_t keg)
{

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
		return ((void *)((uintptr_t)slab - keg->uk_pgoff));
	else
		return (((uma_hash_slab_t)slab)->uhs_data);
}

static inline void *
slab_item(uma_slab_t slab, uma_keg_t keg, int index)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return ((void *)(data + keg->uk_rsize * index));
}

static inline int
slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return (((uintptr_t)item - data) / keg->uk_rsize);
}
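/*
 * Note (illustrative): items are laid out at fixed uk_rsize strides, so
 * slab_item() and slab_item_index() are inverses:
 * slab_item_index(slab, keg, slab_item(slab, keg, i)) == i for any valid i.
 */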
TAILQ_HEAD(uma_bucketlist, uma_bucket);

struct uma_zone_domain {
	struct uma_bucketlist uzd_buckets;	/* full buckets */
	long		uzd_nitems;	/* total item count */
	long		uzd_imax;	/* maximum item count this period */
	long		uzd_imin;	/* minimum item count this period */
	long		uzd_wss;	/* working set size estimate */
};

typedef struct uma_zone_domain * uma_zone_domain_t;
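/*
 * Note (illustrative): the working set size estimate is derived from the
 * item counts sampled over a period, essentially
 * uzd_wss = uzd_imax - uzd_imin.
 */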
/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 */
struct uma_zone {
	/* Offset 0, used in alloc/free fast/medium fast path and const. */
	union {
		uma_keg_t	uz_keg;		/* This zone's keg */
		struct mtx	*uz_lockptr;	/* To keg or to self */
	};
	struct uma_zone_domain	*uz_domain;	/* per-domain buckets */
	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uint64_t	uz_items;	/* Total items count */
	uint64_t	uz_max_items;	/* Maximum number of items to alloc */
	uint32_t	uz_sleepers;	/* Number of sleepers on memory */
	uint16_t	uz_bucket_size;	/* Number of items in full bucket */
	uint16_t	uz_bucket_size_max; /* Maximum number of bucket items */

	/* Offset 64, used in bucket replenish. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	uint64_t	uz_bkt_count;	/* Items in bucket cache */
	uint64_t	uz_bkt_max;	/* Maximum bucket cache size */

	/* Offset 128 Rare. */
	/*
	 * The lock is placed here to avoid adjacent line prefetcher
	 * in fast paths and to take up space near infrequently accessed
	 * members to reduce alignment overhead.
	 */
	struct mtx	uz_lock;	/* Lock for the zone */
	LIST_ENTRY(uma_zone) uz_link;	/* List of all zones in keg */
	const char	*uz_name;	/* Text name of the zone */
	/* The next two fields are used to print rate-limited warnings. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
	struct task	uz_maxaction;	/* Task to run when at limit */
	uint16_t	uz_bucket_size_min; /* Min number of items in bucket */

	/* Offset 256+, stats and misc. */
	counter_u64_t	uz_allocs;	/* Total number of allocations */
	counter_u64_t	uz_frees;	/* Total number of frees */
	counter_u64_t	uz_fails;	/* Total number of alloc failures */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint64_t	uz_xdomain;	/* Total number of cross-domain frees */
	char		*uz_ctlname;	/* sysctl safe name string. */
	struct sysctl_oid *uz_oid;	/* sysctl oid pointer. */
	int		uz_namecnt;	/* duplicate name count. */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[]; /* Per cpu caches */

	/* uz_domain follows here. */
};
/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
#define UMA_ZFLAG_RECLAIMING	0x08000000	/* Running zone_reclaim(). */
#define UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */
#define UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)

#define PRINT_UMA_ZFLAGS	"\20" \
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
#define KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define KEG_LOCK(k)		mtx_lock(&(k)->uk_lock)
#define KEG_UNLOCK(k)		mtx_unlock(&(k)->uk_lock)
#define KEG_LOCK_ASSERT(k)	mtx_assert(&(k)->uk_lock, MA_OWNED)
#define KEG_GET(zone, keg) do {					\
	(keg) = (zone)->uz_keg;					\
	KASSERT((void *)(keg) != (void *)&(zone)->uz_lock,	\
	    ("%s: Invalid zone %p type", __func__, (zone)));	\
	} while (0)
#define ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define ZONE_LOCK(z)		mtx_lock((z)->uz_lockptr)
#define ZONE_TRYLOCK(z)		mtx_trylock((z)->uz_lockptr)
#define ZONE_UNLOCK(z)		mtx_unlock((z)->uz_lockptr)
#define ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
#define ZONE_LOCK_ASSERT(z)	mtx_assert((z)->uz_lockptr, MA_OWNED)
/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_hash_slab_t slab;
	u_int hval;

	hval = UMA_HASH(hash, data);

	LIST_FOREACH(slab, &hash->uh_slab_hash[hval], uhs_hlink) {
		if ((uint8_t *)slab->uhs_data == data)
			return (&slab->uhs_slab);
	}
	return (NULL);
}
static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return (p->plinks.uma.slab);
}

static __inline void
vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	*slab = p->plinks.uma.slab;
	*zone = p->plinks.uma.zone;
}

static __inline void
vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.uma.slab = slab;
	p->plinks.uma.zone = zone;
}
extern unsigned long uma_kmem_limit;
extern unsigned long uma_kmem_total;

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}
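/*
 * Note (illustrative): atomic_fetchadd_long() returns the value of
 * uma_kmem_total prior to the addition, so the reclaim wakeup fires only
 * once the running total has already exceeded uma_kmem_limit.
 */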
/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);

/* Set a global soft limit on UMA managed memory. */
void uma_set_limit(unsigned long limit);

#endif /* VM_UMA_INT_H */