/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *	Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0
/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)					\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
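/*
 * Worked example of VM_RESERV_INDEX() (illustrative values, not specific
 * to any one platform): with VM_LEVEL_0_NPAGES == 512, an object whose
 * pg_color is 7, and pindex == 1030, the macro yields
 * (7 + 1030) & 511 == 13.  The pg_color bias keeps an object's pages at
 * the same offsets within a reservation as their intended physical
 * alignment.
 */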
/*
 * The size of a population map entry
 */
typedef	u_long		popmap_t;

/*
 * The number of bits in a population map entry
 */
#define	NBPOPMAP	(NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define	NPOPMAP		howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
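/*
 * Concrete values, assuming a platform where a level 0 reservation is a
 * 2MB superpage built from 4KB pages (VM_LEVEL_0_ORDER == 9, as on
 * amd64): VM_LEVEL_0_NPAGES is 512, NBPOPMAP is 8 * 8 == 64 with 64-bit
 * u_long, and NPOPMAP is howmany(512, 64) == 8 population map entries.
 */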
/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}
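/*
 * Example of the index arithmetic used by the four helpers above, with
 * NBPOPMAP == 64: page index 130 maps to entry popmap[130 / 64], that is
 * popmap[2], at bit position 130 % 64 == 2, so popmap_set(popmap, 130)
 * performs popmap[2] |= 1UL << 2.
 */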
/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * r - vm_reserv_lock
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * c - constant after boot
 */
struct vm_reserv {
	struct mtx	lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t	object;			/* (o, r) containing object */
	vm_pindex_t	pindex;			/* (o, r) offset in object */
	vm_page_t	pages;			/* (c) first page */
	uint16_t	domain;			/* (c) NUMA domain. */
	uint16_t	popcnt;			/* (r) # of pages in use */
	char		inpartpopq;		/* (d) */
	popmap_t	popmap[NPOPMAP];	/* (r) bit vector, used pages */
};
#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	    mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))

static struct mtx_padalign vm_reserv_domain_locks[MAXMEMDOM];

#define	vm_reserv_domain_lockptr(d)	&vm_reserv_domain_locks[(d)]
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))
/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of useful reservation
 * structures than there are (physical) superpages.  These "invalid"
 * reservation structures exist to trade-off space for time in the
 * implementation of vm_reserv_from_page().  Invalid reservation structures are
 * distinguishable from "valid" reservation structures by inspecting the
 * reservation's "pages" field.  Invalid reservation structures have a NULL
 * "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;
/*
 * The partially populated reservation queue
 *
 * This queue enables the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of this queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop[MAXMEMDOM];
static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static counter_u64_t vm_reserv_broken = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static counter_u64_t vm_reserv_freed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");

static counter_u64_t vm_reserv_reclaimed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");
/*
 * The object lock pool is used to synchronize the rvq.  We can not use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define	vm_reserv_object_lock_idx(object)			\
	    (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define	vm_reserv_object_lock_ptr(object)			\
	    &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)				\
	    mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)				\
	    mtx_unlock(vm_reserv_object_lock_ptr((object)))
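/*
 * Illustration of the "hash" above (the address and the object size are
 * made up; sizeof(*object) depends on the kernel configuration): an
 * object at 0xfffff80001234600 with a 256-byte vm_object maps to lock
 * index (0xfffff80001234600 / 256) % VM_RESERV_OBJ_LOCK_COUNT.  Dividing
 * by the object size first spreads consecutively allocated objects across
 * consecutive locks instead of hashing them all onto the same slot.
 */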
static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);
/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring the
 * free page queue lock, the returned value may be inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	struct vm_phys_seg *seg;
	vm_paddr_t paddr;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}
/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}
/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
	int i;

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	for (i = 0; i < NPOPMAP; i++)
		KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}
/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(popmap_is_set(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == 1,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = 0;
	}
	popmap_clear(rv->popmap, index);
	rv->popcnt--;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt != 0) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
	}
	vm_reserv_domain_unlock(rv->domain);
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}
/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}
/*
 * Returns an existing reservation or NULL and initialized successor pointer.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
	vm_reserv_t rv;
	vm_page_t msucc;

	msucc = NULL;
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	rv = NULL;

found:
	*msuccp = msucc;

	return (rv);
}
/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
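/*
 * For example, with VM_LEVEL_0_NPAGES == 512 and rv->pindex == 1024, any
 * pindex in [1024, 1536) leaves (pindex - rv->pindex) below 512, so the
 * mask ~(VM_LEVEL_0_NPAGES - 1) clears every remaining bit and the test
 * returns TRUE; pindex == 1536 leaves bit 9 set and the test returns
 * FALSE.
 */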
/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(popmap_is_clear(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind == 0,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	popmap_set(rv->popmap, index);
	rv->popcnt++;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == 0,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = 1;
	}
	vm_reserv_domain_unlock(rv->domain);
}
/*
 * Attempts to allocate a contiguous set of physical pages from existing
 * reservations.  See vm_reserv_alloc_contig() for a description of the
 * function's parameters.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend_contig(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int i, index;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);
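	/*
	 * A worked example of the boundary test above (illustrative
	 * numbers): pa = 0x6000, size = 0x4000, and boundary = 0x8000.
	 * Then pa ^ (pa + size - 1) == 0x6000 ^ 0x9fff == 0xffff, and
	 * 0xffff & ~0x7fff is nonzero, so the run [0x6000, 0xa000) would
	 * cross a 32KB boundary and NULL is returned.
	 */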
	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);
	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend_contig: Domain mismatch from reservation."));
	index = VM_RESERV_INDEX(object, pindex);
	/* Does the allocation fit within the reservation? */
	if (index + npages > VM_LEVEL_0_NPAGES)
		return (NULL);

	vmd = VM_DOMAIN(domain);
	vm_reserv_lock(rv);
	if (rv->object != object)
		goto out;
	m = &rv->pages[index];
	pa = VM_PAGE_TO_PHYS(m);
	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		goto out;
	/* Handle vm_page_rename(m, new_object, ...). */
	for (i = 0; i < npages; i++) {
		if (popmap_is_set(rv->popmap, index + i))
			goto out;
	}
	if (!vm_domain_allocate(vmd, req, npages))
		goto out;
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv, index + i);
	vm_reserv_unlock(rv);
	return (m);

out:
	vm_reserv_unlock(rv);
	return (NULL);
}
/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * Callers should first invoke vm_reserv_extend_contig() to attempt an
 * allocation from existing reservations.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex, int domain,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
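	/*
	 * Example of this sizing arithmetic, assuming VM_LEVEL_0_NPAGES ==
	 * 512: for a pindex whose VM_RESERV_INDEX() is 6 and npages == 600,
	 * "first" is pindex - 6, "minpages" is 6 + 600 == 606, and
	 * "maxpages" is roundup2(606, 512) == 1024, i.e., two full
	 * reservations would be needed to cover the request.
	 */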
	allocpages = maxpages;
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES) {
				vm_reserv_object_unlock(object);
				return (NULL);
			}

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 */
	if (first + maxpages > object->size) {
		/*
		 * Don't allocate the last new reservation if the object is a
		 * vnode or backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE)) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_phys_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}
/*
 * Attempts to extend an existing reservation and allocate the page to the
 * object.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend(int req, vm_object_t object, vm_pindex_t pindex, int domain,
    vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Could a reservation currently exist?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);
	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend: Domain mismatch from reservation."));

	vmd = VM_DOMAIN(domain);
	index = VM_RESERV_INDEX(object, pindex);
	m = &rv->pages[index];
	vm_reserv_lock(rv);
	/* Handle reclaim race. */
	if (rv->object != object ||
	    /* Handle vm_page_rename(m, new_object, ...). */
	    popmap_is_set(rv->popmap, index)) {
		m = NULL;
		goto out;
	}
	if (vm_domain_allocate(vmd, req, 1) == 0)
		m = NULL;
	else
		vm_reserv_populate(rv, index);
out:
	vm_reserv_unlock(rv);

	return (m);
}
/*
 * Attempts to allocate a new reservation for the object, and allocates a
 * page from that reservation.  Callers should first invoke vm_reserv_extend()
 * to attempt an allocation from an existing reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex, int domain,
    vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would a new reservation extend past the end of the object?
	 */
	if (first + VM_LEVEL_0_NPAGES > object->size) {
		/*
		 * Don't allocate a new reservation if the object is a vnode or
		 * backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE))
			return (NULL);
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservation.
	 */
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}
/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.  The free page queue lock must be held.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
	int begin_zeroes, hi, i, lo;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_remove(rv);
	rv->pages->psind = 0;
	i = hi = 0;
	do {
		/* Find the next 0 bit.  Any previous 0 bits are < "hi". */
		lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
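		/*
		 * Illustration of the mask trick above: OR-ing in
		 * ((1UL << hi) - 1) forces every bit below "hi" to 1 so
		 * that ffsl() of the complement cannot report an already
		 * processed bit.  E.g., with hi == 3 and popmap[i] ==
		 * 0b0101, ~(0b0111 | 0b0101) has bits 0-3 clear, so ffsl()
		 * returns 4, which the conversion below turns into
		 * ordinary bit number 3, the first free page at or above
		 * "hi".
		 */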
		if (lo == 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] = 0;
			rv->popcnt -= NBPOPMAP - hi;
			while (++i < NPOPMAP) {
				lo = ffsl(~rv->popmap[i]);
				if (lo == 0) {
					rv->popmap[i] = 0;
					rv->popcnt -= NBPOPMAP;
				} else
					break;
			}
			if (i == NPOPMAP)
				break;
			hi = 0;
		}
		KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
		/* Convert from ffsl() to ordinary bit numbering. */
		lo--;
		if (lo > 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] &= ~((1UL << lo) - 1);
			rv->popcnt -= lo - hi;
		}
		begin_zeroes = NBPOPMAP * i + lo;
		/* Find the next 1 bit. */
		do
			hi = ffsl(rv->popmap[i]);
		while (hi == 0 && ++i < NPOPMAP);
		if (i != NPOPMAP)
			/* Convert from ffsl() to ordinary bit numbering. */
			hi--;
		vm_domain_free_lock(VM_DOMAIN(rv->domain));
		vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
		    hi - begin_zeroes);
		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
	} while (i < NPOPMAP);
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
	counter_u64_add(vm_reserv_broken, 1);
}
/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	/*
	 * This access of object->rvq is unsynchronized so that the
	 * object rvq lock can nest after the domain_free lock.  We
	 * must check for races in the results.  However, the object
	 * lock prevents new additions, so we are guaranteed that when
	 * it returns NULL the object is properly empty.
	 */
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		vm_reserv_lock(rv);
		/* Reclaim race. */
		if (rv->object != object) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_domain_unlock(rv->domain);
		vm_reserv_break(rv);
		vm_reserv_unlock(rv);
	}
}
/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;
	boolean_t ret;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_lock(rv);
	/* Re-validate after lock. */
	if (rv->object != NULL) {
		vm_reserv_depopulate(rv, m - rv->pages);
		ret = TRUE;
	} else
		ret = FALSE;
	vm_reserv_unlock(rv);

	return (ret);
}
/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	struct vm_reserv *rv;
	int i, segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			rv->pages = PHYS_TO_VM_PAGE(paddr);
			rv->domain = seg->domain;
			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	for (i = 0; i < MAXMEMDOM; i++) {
		mtx_init(&vm_reserv_domain_locks[i], "VM reserv domain", NULL,
		    MTX_DEF);
		TAILQ_INIT(&vm_rvq_partpop[i]);
	}

	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
		    MTX_DEF);
}
/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (popmap_is_clear(rv->popmap, m - rv->pages));
}
/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->object != NULL ? 0 : -1);
}
/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}
/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_domain_lock(rv->domain);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_reclaim: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
	rv->inpartpopq = FALSE;
	vm_reserv_domain_unlock(rv->domain);
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}
/*
 * Breaks the reservation at the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	while ((rv = TAILQ_FIRST(&vm_rvq_partpop[domain])) != NULL) {
		vm_reserv_lock(rv);
		if (rv != TAILQ_FIRST(&vm_rvq_partpop[domain])) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (TRUE);
	}
	return (FALSE);
}
/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_reserv_t rv, rvn;
	int hi, i, lo, low_index, next_free;

	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (FALSE);
	size = npages << PAGE_SHIFT;
	vm_reserv_domain_lock(domain);
again:
	for (rv = TAILQ_FIRST(&vm_rvq_partpop[domain]); rv != NULL; rv = rvn) {
		rvn = TAILQ_NEXT(rv, partpopq);
		pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
		if (pa + PAGE_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (vm_reserv_trylock(rv) == 0) {
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (!rv->inpartpopq) {
				vm_reserv_unlock(rv);
				vm_reserv_domain_lock(domain);
				if (!rvn->inpartpopq)
					goto again;
				continue;
			}
		} else
			vm_reserv_domain_unlock(domain);
		if (pa < low) {
			/* Start the search for free pages at "low". */
			low_index = (low + PAGE_MASK - pa) >> PAGE_SHIFT;
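			/*
			 * Example of the rounding above, assuming 4KB
			 * pages: for a reservation starting at pa ==
			 * 0x10000000 and low == 0x10001001, low_index ==
			 * (0x1001 + 0xfff) >> 12 == 2, the first page of
			 * the reservation that starts at or above "low".
			 */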
			i = low_index / NBPOPMAP;
			hi = low_index % NBPOPMAP;
		} else
			i = hi = 0;
		do {
			/* Find the next free page. */
			lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
			while (lo == 0 && ++i < NPOPMAP)
				lo = ffsl(~rv->popmap[i]);
			if (i == NPOPMAP)
				break;
			/* Convert from ffsl() to ordinary bit numbering. */
			lo--;
			next_free = NBPOPMAP * i + lo;
			pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
			KASSERT(pa >= low,
			    ("vm_reserv_reclaim_contig: pa is too low"));
			if (pa + size > high) {
				/* The rest of this reservation is too high. */
				break;
			} else if ((pa & (alignment - 1)) != 0 ||
			    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
				/*
				 * The current page doesn't meet the alignment
				 * and/or boundary requirements.  Continue
				 * searching this reservation until the rest
				 * of its free pages are either excluded or
				 * exhausted.
				 */
				hi = lo + 1;
				if (hi >= NBPOPMAP) {
					hi = 0;
					i++;
				}
				continue;
			}
			/* Find the next used page. */
			hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
			while (hi == 0 && ++i < NPOPMAP) {
				if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
				    size) {
					vm_reserv_reclaim(rv);
					vm_reserv_unlock(rv);
					return (TRUE);
				}
				hi = ffsl(rv->popmap[i]);
			}
			/* Convert from ffsl() to ordinary bit numbering. */
			if (i != NPOPMAP)
				hi--;
			if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
			    size) {
				vm_reserv_reclaim(rv);
				vm_reserv_unlock(rv);
				return (TRUE);
			}
		} while (i < NPOPMAP);
		vm_reserv_unlock(rv);
		vm_reserv_domain_lock(domain);
		if (rvn != NULL && !rvn->inpartpopq)
			goto again;
	}
	vm_reserv_domain_unlock(domain);
	return (FALSE);
}
/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}
/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}
/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
	vm_paddr_t new_end;
	size_t size;

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * from "high_water" because every small page is mapped to an element
	 * in the reservation array based on its physical address.  Thus, the
	 * number of elements in the reservation array can be greater than the
	 * number of superpages.
	 */
	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
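	/*
	 * Example of this computation, assuming 2MB reservations: with
	 * high_water == 4GB, the array holds howmany(4GB, 2MB) == 2048
	 * vm_reserv structures, independent of how much of that physical
	 * range is actually usable memory.
	 */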

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}
/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation counters.
 */
static void
vm_reserv_counter_init(void *unused)
{

	vm_reserv_freed = counter_u64_alloc(M_WAITOK);
	vm_reserv_broken = counter_u64_alloc(M_WAITOK);
	vm_reserv_reclaimed = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vm_reserv_counter_init, SI_SUB_CPU, SI_ORDER_ANY,
    vm_reserv_counter_init, NULL);
/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
		m = rv->pages;
	else
		m = NULL;

	return (m);
}

#endif	/* VM_NRESERVLEVEL > 0 */