/*-
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Zones contain lists of slabs, which are kept on either the full, empty,
 * or partially allocated list to reduce fragmentation.  Zones also record
 * the user-supplied item size, which is adjusted for alignment purposes;
 * rsize is the result of that adjustment.  The zone also stores information
 * for managing a hash of page addresses that maps pages to uma_slab_t
 * structures for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a linked list of indexes, which are 8-bit values.  If
 * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16-bit
 * values.  Currently on alpha you can get 250 or so 32-byte items and on x86
 * you can get 250 or so 16-byte items.  For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if this
 * will improve the number of items per slab that will fit.
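 *
 * As a rough sketch (not the literal allocator code), pulling an item off a
 * slab's index free list works along these lines:
 *
 *	freei = slab->us_firstfree;
 *	slab->us_firstfree = slab->us_freelist[freei];    advance to next index
 *	slab->us_freecount--;
 *	item = slab->us_data + (zone->uz_rsize * freei);
 *
 * Freeing an item simply pushes its index back onto the head of the chain.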
 *
 * Another potential space optimization is to store the 8 bits of linkage in
 * the space wasted between items due to alignment; this may yield a much
 * better memory footprint for certain sizes of objects.  Another alternative
 * is to increase UMA_SLAB_SIZE, or to allow for dynamic slab sizes.  I prefer
 * dynamic slab sizes because we could stick with 8-bit indexes and only use
 * large slab sizes for zones with a lot of waste per slab.  This may create
 * inefficiencies in the vm subsystem due to fragmentation in the address
 * space.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power-of-two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations in this range,
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
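 *
 * For illustration (assuming a 4 KB UMA_SLAB_SIZE and a hypothetical
 * 2100-byte item): only one item fits per slab, leaving 4096 - 2100 = 1996
 * bytes, or roughly 49% of the page, unused.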
 *
 * This is the representation of a normal (non-OFFPAGE) slab:
 *
 * <----------------  Page (UMA_SLAB_SIZE) ------------------>
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 * |___________________________________________________________|
 *
 * This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 * |___________________________________________________________|
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in the page mask */

#define UMA_BOOT_PAGES	30		/* Pages allocated for startup */

/* Max waste before going to off page slab management */
#define UMA_MAX_WASTE	(UMA_SLAB_SIZE / 10)
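
/*
 * For example (illustrative numbers only): with 4 KB pages UMA_MAX_WASTE is
 * 409 bytes, so a zone whose items would strand more than ~409 bytes per
 * slab is a candidate for a separate, off-page uma_slab_t.
 */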

/*
 * This is the initial size of the hash table for uma_slabs that are managed
 * off page.  I doubt there will be many cases where it is exceeded.  This
 * hash does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * I'm shifting and masking instead of % because it should be faster.
 */
#define UMA_HASH(h, s)	((((unsigned long)s) >> UMA_SLAB_SHIFT) &	\
    (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
	SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], (s), us_hlink);
#define UMA_HASH_REMOVE(h, s, mem)					\
	SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),			\
	    (mem))], (s), uma_slab, us_hlink);
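
/*
 * Illustrative usage only (the real callers live in uma_core.c): when an
 * OFFPAGE slab is set up for the memory at "mem" it is recorded with
 * something like
 *
 *	UMA_HASH_INSERT(&zone->uz_hash, slab, mem);
 *
 * and a later free of an item inside that page recovers the slab via
 *
 *	mem = (u_int8_t *)((vm_offset_t)item & ~UMA_SLAB_MASK);
 *	slab = hash_sfind(&zone->uz_hash, mem);
 */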

/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab {
	uma_zone_t	us_zone;		/* Zone we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long		_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	u_int8_t	*us_data;		/* First item */
	u_int8_t	us_flags;		/* Page flags, see uma.h */
	u_int8_t	us_freecount;		/* How many are free? */
	u_int8_t	us_firstfree;		/* First free item index */
	u_int8_t	us_freelist[1];		/* Free list (actually larger) */
};

#define us_link	us_type._us_link
#define us_size	us_type._us_size

typedef struct uma_slab * uma_slab_t;

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t		ub_cnt;			/* Count of free items. */
	int16_t		ub_entries;		/* Max items. */
	void		*ub_bucket[];		/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	u_int64_t	uc_allocs;	/* Count of allocations */
};

typedef struct uma_cache * uma_cache_t;
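
/*
 * Rough sketch of the per-CPU allocation fast path (illustrative only; the
 * real code lives in uma_core.c): with the per-CPU lock held, an item comes
 * straight out of the alloc bucket without touching the zone lock:
 *
 *	cache = &zone->uz_cpu[cpu];
 *	bucket = cache->uc_allocbucket;
 *	if (bucket != NULL && bucket->ub_cnt > 0) {
 *		bucket->ub_cnt--;
 *		item = bucket->ub_bucket[bucket->ub_cnt];
 *		cache->uc_allocs++;
 *	}
 *
 * When the alloc bucket runs dry the free bucket is swapped in, and only
 * when both are empty is the zone lock taken to refill a bucket.
 */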

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 */
struct uma_zone {
	char		*uz_name;	/* Text name of the zone */
	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones */
	u_int32_t	uz_align;	/* Alignment mask */
	u_int32_t	uz_pages;	/* Total page count */

/* Used during alloc / free */
	struct mtx	uz_lock;	/* Lock for the zone */
	u_int32_t	uz_free;	/* Count of items free in slabs */
	u_int16_t	uz_ipers;	/* Items per slab */
	u_int16_t	uz_flags;	/* Internal flags */

	LIST_HEAD(,uma_slab)	uz_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uz_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uz_full_slab;	/* full slabs */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */
	u_int32_t	uz_size;	/* Requested size of each item */
	u_int32_t	uz_rsize;	/* Real size of each item */

	struct uma_hash	uz_hash;	/* Hash for off-page slab lookups */
	u_int16_t	uz_pgoff;	/* Offset to uma_slab struct */
	u_int16_t	uz_ppera;	/* Pages per allocation from backend */

	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	u_int64_t	uz_allocs;	/* Total number of allocations */

	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */
	uma_alloc	uz_allocf;	/* Allocation function */
	uma_free	uz_freef;	/* Free routine */
	struct vm_object	*uz_obj;	/* Zone specific object */
	vm_offset_t	uz_kva;		/* Base kva for zones with objs */
	u_int32_t	uz_maxpages;	/* Maximum number of pages to alloc */
	int		uz_recurse;	/* Allocation recursion count */
	u_int16_t	uz_fills;	/* Outstanding bucket fills */
	u_int16_t	uz_count;	/* Highest value ub_cnt can have */
	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};
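
/*
 * Illustrative note (the actual computation happens at startup in
 * uma_core.c): because uz_cpu[] is declared with a single element, the real
 * per-zone allocation is made roughly
 *
 *	sizeof(struct uma_zone) + (ncpus - 1) * sizeof(struct uma_cache)
 *
 * bytes long (where ncpus is the number of CPUs the kernel supports), so
 * that every CPU gets its own uma_cache slot.
 */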

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_PRIVALLOC	0x1000	/* Use uz_allocf. */
#define UMA_ZFLAG_INTERNAL	0x2000	/* No offpage, no PCPU. */
#define UMA_ZFLAG_FULL		0x4000	/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x8000	/* Don't ask VM for buckets. */

/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

#define ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
#define ZONE_LOCK(z)		mtx_lock(&(z)->uz_lock)
#define ZONE_UNLOCK(z)		mtx_unlock(&(z)->uz_lock)

#define CPU_LOCK_INIT(cpu)					\
	mtx_init(&uma_pcpu_mtx[(cpu)], "UMA pcpu", "UMA pcpu",	\
	    MTX_DEF | MTX_DUPOK)

#define CPU_LOCK(cpu)						\
	mtx_lock(&uma_pcpu_mtx[(cpu)])

#define CPU_UNLOCK(cpu)						\
	mtx_unlock(&uma_pcpu_mtx[(cpu)])

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to lookup
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((u_int8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

/* Look up the slab stashed in the vm_page backing a kernel virtual address. */
static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	if (p->flags & PG_SLAB)
		return ((uma_slab_t)p->object);
	return (NULL);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = (vm_object_t)slab;
	p->flags |= PG_SLAB;
}

static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = obj;
	p->flags &= ~PG_SLAB;
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
void uma_small_free(void *mem, int size, u_int8_t flags);
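
/*
 * Purely illustrative sketch (not any architecture's actual implementation;
 * error and wait-flag handling is omitted): on a platform with a direct map
 * of physical memory, uma_small_alloc() can simply hand back the
 * direct-mapped address of a newly allocated, wired physical page:
 *
 *	void *
 *	uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 *	{
 *		vm_page_t m;
 *
 *		m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
 *		if (m == NULL)
 *			return (NULL);
 *		*pflag = UMA_SLAB_PRIV;
 *		return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
 *	}
 *
 * uma_small_free() would then unwire and free the page looked up from the
 * direct-mapped address.
 */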

#endif /* VM_UMA_INT_H */