/*
 * Copyright (c) 1997, 1998 John S. Dyson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZENTRY_FREE	(void *)0x12342378

#define	ZONE_ROUNDING	32

/*
 * This file comprises a very simple zone allocator.  It is used in lieu
 * of the malloc allocator where a zone is either required or more
 * efficient.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually, a performance degradation)
 * resulted.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure may be changed by
 * the allocator between allocations.  Any data that must remain stable
 * between allocations must reside after the first two longwords.
 *
 * zinitna, zinit and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
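
/*
 * Example usage (an illustrative sketch only -- "struct foo", "FOO" and
 * the sizes below are hypothetical, not part of this file).  Note how
 * the first two longwords of the item are left to the allocator for its
 * free-list linkage, per the type-stability rule described above.
 */
#if 0
struct foo {
	void	*f_link[2];	/* may be clobbered between allocations */
	int	f_data;		/* stable even across free/alloc cycles */
};

static vm_zone_t foo_zone;

static void
foo_example(void)
{
	struct foo *fp;

	/* Unlimited zone that grows four pages at a time. */
	foo_zone = zinit("FOO", sizeof(struct foo), 0, 0, 4);
	fp = zalloc(foo_zone);
	fp->f_data = 42;
	zfree(foo_zone, fp);
}
#endif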

/*
 * Subsystem lock.  Never grab it while holding a zone lock.
 */
static struct mtx zone_mtx;

/*
 * Singly-linked list of zones, for book-keeping purposes
 */
static SLIST_HEAD(vm_zone_list, vm_zone) zlist;

static int zone_kmem_pages;	/* Pages allocated from the interrupt-safe kmem_map */
static int zone_kern_pages;	/* Pages allocated from kernel_map (not interrupt safe) */
static int zone_kmem_kvaspace;	/* Bytes of KVA reserved for interrupt-safe zones */

/*
 * Subsystem initialization, called from vm_mem_init()
 */
	mtx_init(&zone_mtx, "zone subsystem", MTX_DEF);

	/*
	 * LATER: traverse zlist looking for partially initialized zones
	 * and finish initializing them.
	 */

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), allocate the kernel virtual address space a priori, and
 * allocate only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *	zalloc		number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the total size of the zone is
 * limited by the nentries argument.  If ZONE_INTERRUPT is not set, the
 * amount of allocatable memory is unlimited.
 */
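
/*
 * Example (hypothetical -- "PCB", "maxproc" and the sizes here are
 * illustrative only): an interrupt-safe zone backed by a statically
 * allocated zone header.  Its KVA is reserved up front for "maxproc"
 * entries, so items can later be handed out at interrupt time:
 *
 *	static struct vm_zone pcb_zone_store;
 *
 *	zinitna(&pcb_zone_store, NULL, "PCB", sizeof(struct pcb),
 *	    maxproc, ZONE_INTERRUPT, 1);
 */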
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
    int nentries, int flags, int zalloc)
{
	int totsize, oldzflags;

	oldzflags = z->zflags;
	if ((z->zflags & ZONE_BOOT) == 0) {
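		/*
		 * The entry size is rounded up to a multiple of
		 * ZONE_ROUNDING: e.g. with ZONE_ROUNDING == 32, a 40-byte
		 * entry becomes 64 bytes.  This keeps items aligned and
		 * guarantees room for the two free-list longwords in every
		 * item.
		 */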
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
	}

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		atomic_add_int(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(kernel_map, totsize);
		if (z->zkva == 0)
			return 0;

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_INTERRUPT;
		z->zmax = nentries;
	} else {
		z->zallocflag = VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;
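
	/*
	 * zfreemin is the zone's low-water mark: zalloc() grows the zone
	 * as soon as the free count is at or below this many items
	 * (roughly one page's worth).
	 */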

	/* our zone is good and ready, add it to the list */
	if ((z->zflags & ZONE_BOOT) == 0) {
		mtx_init(&(z)->zmtx, "zone", MTX_DEF);
		mtx_lock(&zone_mtx);
		SLIST_INSERT_HEAD(&zlist, z, zent);
		mtx_unlock(&zone_mtx);
	}

	return 1;
}

/*
 * Same as zinitna, except the zone data structure is allocated
 * automatically by malloc.  This routine should normally be used,
 * except in certain tricky startup conditions in the VM system --
 * there, zbootinit and zinitna can be used instead.  zinit is the
 * standard zone initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE,
	    M_NOWAIT | M_ZERO);
	if (z == NULL)
		return NULL;

	if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
		free(z, M_ZONE);
		z = NULL;
	}
	return z;
}

/*
 * Initialize a zone before the system is fully up.
 *
 * We can't rely on being able to allocate items dynamically, so we
 * kickstart the zone with a number of static items provided by the
 * caller.
 *
 * This routine should only be called before full VM startup.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zflags = ZONE_BOOT;

	mtx_init(&(z)->zmtx, "zone", MTX_DEF);

	bzero(item, nitems * z->zsize);
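
	/*
	 * Thread the static items onto the zone's free list: longword 0 of
	 * each free item points to the next free item, and longword 1 holds
	 * the ZENTRY_FREE magic used to catch double frees.  This is why the
	 * first two longwords of an item are not type stable.
	 */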
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **) item)[0] = z->zitems;
		((void **) item)[1] = ZENTRY_FREE;
		z->zitems = item;
		item = (char *) item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->ztotal = nitems;

	mtx_lock(&zone_mtx);
	SLIST_INSERT_HEAD(&zlist, z, zent);
	mtx_unlock(&zone_mtx);
}

/*
 * Destroy a zone, freeing the allocated memory.
 * This does not do any locking for the zone; make sure it is not used
 * any more before calling.  All zalloc()'ated memory in the zone must
 * have been freed before calling zdestroy().
 *
 * zdestroy() may not be used with zbootinit()'ed zones.
 */
void
zdestroy(vm_zone_t z)
{
	int i, nitems, nbytes;
	void *item, *min, **itp;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	KASSERT(z != NULL, ("invalid zone"));
	/*
	 * This is needed, or the algorithm used for non-interrupt zones
	 * below would end up freeing memory that is still in use.
	 */
	KASSERT(z->ztotal == z->zfreecnt,
	    ("zdestroy() used with an active zone"));
	KASSERT((z->zflags & ZONE_BOOT) == 0,
	    ("zdestroy() used with a zbootinit()'ed zone"));

	if (z->zflags & ZONE_INTERRUPT) {
		kmem_free(kernel_map, z->zkva, z->zpagemax * PAGE_SIZE);
		vm_object_deallocate(z->zobj);
		atomic_subtract_int(&zone_kmem_kvaspace,
		    z->zpagemax * PAGE_SIZE);
		atomic_subtract_int(&zone_kmem_pages,
		    z->zpagecount);
		cnt.v_wire_count -= z->zpagecount;
	} else {
		/*
		 * This is evil h0h0 magic:
		 * The items may be in z->zitems in a random order; we have to
		 * free the start of an allocated area, but do not want to
		 * save extra information.  Additionally, we may not access
		 * items that were in a freed area.
		 * This is achieved in the following way: the smallest address
		 * is selected, and, after removing all items that are in a
		 * range of z->zalloc * PAGE_SIZE (one allocation unit) from
		 * it, kmem_free is called on it (since it is the smallest
		 * one, it must be the start of an area).  This is repeated
		 * until all the items are freed.
		 */
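		/*
		 * Example: with zalloc == 1 and 4KB pages, each pass of the
		 * outer loop below picks the lowest remaining item address,
		 * unlinks every free item inside [min, min + PAGE_SIZE), and
		 * hands that one page back with kmem_free(); the loop runs
		 * once per page ever allocated to the zone.
		 */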
		nbytes = z->zalloc * PAGE_SIZE;
		nitems = nbytes / z->zsize;
		while (z->zitems != NULL) {
			/* Find minimal element. */
			item = min = z->zitems;
			while (item != NULL) {
				if (item < min)
					min = item;
				item = ((void **)item)[0];
			}

			i = 0;
			itp = &z->zitems;
			while (*itp != NULL && i < nitems) {
				if ((char *)*itp >= (char *)min &&
				    (char *)*itp < (char *)min + nbytes) {
					*itp = ((void **)*itp)[0];
					i++;
				} else
					itp = &((void **)*itp)[0];
			}
			KASSERT(i == nitems, ("zdestroy(): corrupt zone"));

			/*
			 * We can allocate from kmem_map (kmem_malloc) or
			 * kernel_map (kmem_alloc).
			 * kmem_map is a submap of kernel_map, so we can use
			 * vm_map_lookup to retrieve the map we need to use.
			 */
			if (vm_map_lookup(&map, (vm_offset_t)min, VM_PROT_NONE,
			    &entry, &obj, &pindex, &prot, &wired) !=
			    KERN_SUCCESS)
				panic("zalloc mapping lost");
			/* Need to unlock. */
			vm_map_lookup_done(map, entry);
			if (map == kmem_map) {
				atomic_subtract_int(&zone_kmem_pages,
				    z->zalloc);
			} else if (map == kernel_map) {
				atomic_subtract_int(&zone_kern_pages,
				    z->zalloc);
			} else
				panic("zdestroy(): bad map");
			kmem_free(map, (vm_offset_t)min, nbytes);
		}
	}

	mtx_lock(&zone_mtx);
	SLIST_REMOVE(&zlist, z, vm_zone, zent);
	mtx_unlock(&zone_mtx);

	mtx_destroy(&z->zmtx);
}

/*
 * Grow the specified zone to accommodate more items.  Called with the
 * zone mutex held.
 */
static void *
zget(vm_zone_t z)
{
	int i, nitems, nbytes;
	vm_offset_t zkva;
	vm_page_t m;
	void *item;
	KASSERT(z != NULL, ("invalid zone"));

	if (z->zflags & ZONE_INTERRUPT) {
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *) z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		    i++) {
			m = vm_page_alloc(z->zobj, z->zpagecount,
			    z->zallocflag);
			if (m == NULL)
				break;

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_qenter(zkva, &m, 1);
			bzero((caddr_t) zkva, PAGE_SIZE);
			z->zpagecount++;
			atomic_add_int(&zone_kmem_pages, 1);
			cnt.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
	} else {
		/* Please check zdestroy() when changing this! */
		nbytes = z->zalloc * PAGE_SIZE;

		/*
		 * Check to see if the kernel map is already locked.  We
		 * could allow for recursive locks, but that eliminates a
		 * valuable debugging mechanism, and opens up the kernel map
		 * for potential corruption by inconsistent data structure
		 * manipulation.  We could also use the interrupt allocation
		 * mechanism, but that has size limitations.  Luckily, we
		 * have kmem_map, a submap of kernel_map, available for
		 * memory allocation, and manipulation of that map doesn't
		 * affect the kernel map structures themselves.
		 *
		 * We can wait, so just do normal map allocation in the
		 * appropriate map.
		 */
		mtx_unlock(&z->zmtx);
		if (lockstatus(&kernel_map->lock, NULL)) {
			item = (void *) kmem_malloc(kmem_map, nbytes,
			    M_WAITOK);
			if (item != NULL)
				atomic_add_int(&zone_kmem_pages, z->zalloc);
		} else {
			item = (void *) kmem_alloc(kernel_map, nbytes);
			if (item != NULL)
				atomic_add_int(&zone_kern_pages, z->zalloc);
		}
		if (item == NULL)
			nbytes = 0;
		mtx_lock(&z->zmtx);
		nitems = nbytes / z->zsize;
	}

	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **) item)[0] = z->zitems;
			((void **) item)[1] = ZENTRY_FREE;
			z->zitems = item;
			item = (char *) item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
		KASSERT(((void **) item)[1] == ZENTRY_FREE,
		    ("item is not free"));
		((void **) item)[1] = 0;
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}

	mtx_assert(&z->zmtx, MA_OWNED);
	return item;
}

/*
 * Allocates an item from the specified zone.
 */
void *
zalloc(vm_zone_t z)
{
	void *item;

	KASSERT(z != NULL, ("invalid zone"));
	mtx_lock(&z->zmtx);

	if (z->zfreecnt <= z->zfreemin) {
		item = zget(z);
		goto out;
	}

	item = z->zitems;
	z->zitems = ((void **) item)[0];
	KASSERT(((void **) item)[1] == ZENTRY_FREE,
	    ("item is not free"));
	((void **) item)[1] = 0;

	z->zfreecnt--;
	z->znalloc++;

out:
	mtx_unlock(&z->zmtx);
	return item;
}

/*
 * Frees an item back to the specified zone.
 */
void
zfree(vm_zone_t z, void *item)
{
	KASSERT(z != NULL, ("invalid zone"));
	KASSERT(item != NULL, ("invalid item"));
	mtx_lock(&z->zmtx);

	((void **) item)[0] = z->zitems;
	KASSERT(((void **) item)[1] != ZENTRY_FREE,
	    ("item is already free"));
	((void **) item)[1] = (void *) ZENTRY_FREE;
	z->zitems = item;
	z->zfreecnt++;

	mtx_unlock(&z->zmtx);
}

/*
 * Sysctl handler for vm.zone
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 128;	/* conservative */
	char *tmpbuf, *offset;
	vm_zone_t z;
	char *p;
	int cnt, len, error;

	cnt = 0;
	mtx_lock(&zone_mtx);
	SLIST_FOREACH(z, &zlist, zent)
		cnt++;
	mtx_unlock(&zone_mtx);

	MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
	    M_TEMP, M_WAITOK);
	len = snprintf(tmpbuf, linesize,
	    "\nITEM            SIZE     LIMIT     USED    FREE  REQUESTS\n\n");
	if (cnt == 0)
		tmpbuf[len - 1] = '\0';
	error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len - 1 : len);
	if (error || cnt == 0)
		goto out;

	offset = tmpbuf;
	mtx_lock(&zone_mtx);
	SLIST_FOREACH(z, &zlist, zent) {
		if (cnt == 0)	/* list may have changed size */
			break;
		mtx_lock(&z->zmtx);
		len = snprintf(offset, linesize,
		    "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
		    z->zname, z->zsize, z->zmax, (z->ztotal - z->zfreecnt),
		    z->zfreecnt, z->znalloc);
		mtx_unlock(&z->zmtx);
		for (p = offset + 12; p > offset && *p == ' '; --p)
			;
		p[1] = ':';
		offset += len;
		cnt--;
	}
	mtx_unlock(&zone_mtx);

	error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);

out:
	FREE(tmpbuf, M_TEMP);
	return (error);
}

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_vm_zone, "A", "Zone Info");
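
/*
 * Reading the node above with "sysctl vm.zone" prints one line per zone;
 * the output below is illustrative only (note the zero padding produced
 * by the "%6.6u"-style formats used by the handler):
 *
 *	ITEM            SIZE     LIMIT     USED    FREE  REQUESTS
 *
 *	PIPE:        000320, 00000000, 000010, 000030, 00000945
 */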

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages, CTLFLAG_RD, &zone_kmem_pages, 0,
    "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace, CTLFLAG_RD,
    &zone_kmem_kvaspace, 0, "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages, CTLFLAG_RD, &zone_kern_pages, 0,
    "Number of non-interrupt safe pages allocated by zone");