/*-
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */
/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user-supplied value for size; rsize is that value adjusted for
 * alignment.  The Keg also stores information for managing a hash of page
 * addresses that maps pages to uma_slab_t structures for pages that don't
 * have embedded uma_slab_t's.
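 *
 * For illustration, the alignment adjustment above amounts to rounding the
 * requested size up to the next multiple of the alignment (uk_align below is
 * stored as a mask).  A rough sketch, not necessarily the exact code in
 * uma_core.c:
 *
 *	rsize = size;
 *	if (rsize & align)
 *		rsize = (rsize & ~align) + (align + 1);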
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a linked list of indexes, which are 8-bit values.  If
 * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16-bit
 * values.  Currently on alpha you can get 250 or so 32-byte items and on x86
 * you can get 250 or so 16-byte items.  For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if this
 * will improve the number of items per slab that will fit.
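 *
 * A sketch of the items-per-slab count and the off-page decision described
 * above, assuming an embedded slab header (the real computation in
 * uma_core.c also charges each item for its free list entry):
 *
 *	ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
 *	waste = UMA_SLAB_SIZE - sizeof(struct uma_slab) - (ipers * rsize);
 *	if (waste > UMA_MAX_WASTE && (UMA_SLAB_SIZE / rsize) > ipers)
 *		manage the slab header off page (OFFPAGE);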
 *
 * Another potential space optimization is to store the 8-bit free list
 * indexes in the space wasted between items due to alignment problems.  This
 * may yield a much better memory footprint for certain sizes of objects.
 * Another alternative is to increase the UMA_SLAB_SIZE, or allow for dynamic
 * slab sizes.  I prefer dynamic slab sizes because we could stick with 8-bit
 * indexes and only use large slab sizes for zones with a lot of waste per
 * slab.  This may create inefficiencies in the vm subsystem due to
 * fragmentation in the address space.
 *
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * which puts you back at the memory footprint of the power-of-two allocator.
 * I have looked at memory allocation sizes on many of the machines available
 * to me, and there does not seem to be an abundance of allocations at this
 * range so at this time it may not make sense to optimize for it.  This can,
 * of course, be solved with dynamic slab sizes.
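 *
 * As a concrete instance of the near-50% case above: with 4 KB pages, an
 * item of 2100 bytes fits only once per slab, wasting 4096 - 2100 = 1996
 * bytes, or nearly 49% of the page.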
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
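 *
 * The allocation fast path implied by this layering, as a rough sketch
 * (see uma_zalloc_arg() in uma_core.c for the real implementation, which
 * also handles locking, ctor calls, and bucket refills):
 *
 *	cache = &zone->uz_cpu[curcpu];
 *	bucket = cache->uc_allocbucket;
 *	if (bucket != NULL && bucket->ub_cnt > 0)
 *		item = bucket->ub_bucket[--bucket->ub_cnt];
 *	else
 *		swap in a full Bucket from uz_full_bucket, or fall
 *		through to the Keg's slab lists;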
 *
 * The PCPU caches are protected by their own locks, while the Zones backed
 * by the same Keg all share a common Keg lock (to coalesce contention on
 * the backing slabs).  The backing Keg typically only serves one Zone but
 * in the case of multiple Zones, one of the Zones is considered the
 * Master Zone and all Zone-related stats from the Keg are accounted to
 * the Master Zone.  For an example of a Multi-Zone setup, refer to the
 * Mbuf allocation code.
 */
/*
 * This is the representation for normal (Non OFFPAGE slab)
 *
 * <----------------  Page (UMA_SLAB_SIZE) ------------------>
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 * |___________________________________________________________|
 */
/*
 * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE.
 *
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 * |___________________________________________________________|
 */
#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES	40		/* Pages allocated for startup */
/* Max waste before going to off page slab management */
#define UMA_MAX_WASTE	(UMA_SLAB_SIZE / 10)
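/* With 4 KB pages the threshold above works out to 409 bytes per slab. */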
/*
 * I doubt there will be many cases where this is exceeded. This is the
 * initial size of the hash table for uma_slabs that are managed off page.
 * This hash does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32
/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * I'm shifting and masking instead of % because it should be faster.
 */
#define UMA_HASH(h, s)	((((unsigned long)s) >> UMA_SLAB_SHIFT) &	\
	    (h)->uh_hashmask)
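/*
 * For example, with 4 KB pages (UMA_SLAB_SHIFT == 12) and the initial
 * 32-entry table (uh_hashmask == 31), a slab at address 0x805000 hashes
 * to (0x805000 >> 12) & 31 == 2053 & 31 == 5.
 */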
#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink);
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink);
/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};
/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;
struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	u_int64_t	uc_allocs;	/* Count of allocations */
};

typedef struct uma_cache * uma_cache_t;
/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	struct mtx	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	u_int32_t	uk_recurse;	/* Allocation recursion count */
	u_int32_t	uk_align;	/* Alignment mask */
	u_int32_t	uk_pages;	/* Total page count */
	u_int32_t	uk_free;	/* Count of items free in slabs */
	u_int32_t	uk_size;	/* Requested size of each item */
	u_int32_t	uk_rsize;	/* Real size of each item */
	u_int32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	struct vm_object	*uk_obj;	/* Zone specific object */
	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	u_int16_t	uk_pgoff;	/* Offset to uma_slab struct */
	u_int16_t	uk_ppera;	/* pages per allocation from backend */
	u_int16_t	uk_ipers;	/* Items per slab */
	u_int16_t	uk_flags;	/* Internal flags */
};
/* Simpler reference to uma_keg for internal use. */
typedef struct uma_keg * uma_keg_t;
/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab_head {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	u_int8_t	*us_data;		/* First item */
	u_int8_t	us_flags;		/* Page flags see uma.h */
	u_int8_t	us_freecount;		/* How many are free? */
	u_int8_t	us_firstfree;		/* First free item index */
};
/* The standard slab structure */
struct uma_slab {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
	} us_freelist[1];			/* actual number bigger */
};
/*
 * The slab structure for UMA_ZONE_REFCNT zones, whose items we
 * maintain reference counters for in the slab itself.
 */
struct uma_slab_refcnt {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
		u_int32_t	us_refcnt;
	} us_freelist[1];			/* actual number bigger */
};
#define us_keg		us_head.us_keg
#define us_link		us_head.us_type._us_link
#define us_size		us_head.us_type._us_size
#define us_hlink	us_head.us_hlink
#define us_data		us_head.us_data
#define us_flags	us_head.us_flags
#define us_freecount	us_head.us_freecount
#define us_firstfree	us_head.us_firstfree
typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
/*
 * These give us the size of one free item reference within our corresponding
 * uma_slab structures, so that our calculations during zone setup are correct
 * regardless of what the compiler decides to do with padding the structure
 * arrays within uma_slab.
 */
#define UMA_FRITM_SZ	(sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
#define UMA_FRITMREF_SZ	(sizeof(struct uma_slab_refcnt) -	\
	    sizeof(struct uma_slab_head))
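/*
 * For example, although the refcnt free list entry carries only 5 bytes of
 * payload (a u_int8_t index plus a u_int32_t counter), a typical compiler
 * pads the array element to 8 bytes; it is that padded size, not 5, that
 * UMA_FRITMREF_SZ reports, keeping the items-per-slab arithmetic honest.
 */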
/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	char		*uz_name;	/* Text name of the zone */
	struct mtx	*uz_lock;	/* Lock for the zone (keg's lock) */
	uma_keg_t	uz_keg;		/* Our underlying Keg */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */

	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */

	u_int64_t	uz_allocs;	/* Total number of allocations */
	uint16_t	uz_fills;	/* Outstanding bucket fills */
	uint16_t	uz_count;	/* Highest value ub_cnt can have */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};
/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_PRIVALLOC	0x1000		/* Use uz_allocf. */
#define UMA_ZFLAG_INTERNAL	0x2000		/* No offpage no PCPU. */
#define UMA_ZFLAG_FULL		0x4000		/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x8000		/* Don't ask VM for buckets. */
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);
#define ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init((z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init((z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define ZONE_LOCK_FINI(z)	mtx_destroy((z)->uz_lock)
#define ZONE_LOCK(z)	mtx_lock((z)->uz_lock)
#define ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lock)
#define CPU_LOCK_INIT(cpu)					\
	mtx_init(&uma_pcpu_mtx[(cpu)], "UMA pcpu", "UMA pcpu",	\
	    MTX_DEF | MTX_DUPOK)

#define CPU_LOCK(cpu)						\
	mtx_lock(&uma_pcpu_mtx[(cpu)])

#define CPU_UNLOCK(cpu)						\
	mtx_unlock(&uma_pcpu_mtx[(cpu)])
/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to lookup
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((u_int8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}
static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;
	uma_slab_t slab;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	slab = (uma_slab_t)p->object;

	if (p->flags & PG_SLAB)
		return (slab);
	else
		return (NULL);
}
static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = (vm_object_t)slab;
	p->flags |= PG_SLAB;
}
static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = obj;
	p->flags &= ~PG_SLAB;
}
/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
void uma_small_free(void *mem, int size, u_int8_t flags);
#endif /* VM_UMA_INT_H */