/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *	Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */
#if VM_NRESERVLEVEL > 0

#ifndef VM_LEVEL_0_ORDER_MAX
#define	VM_LEVEL_0_ORDER_MAX	VM_LEVEL_0_ORDER
#endif
/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
#define	VM_LEVEL_0_NPAGES_MAX	(1 << VM_LEVEL_0_ORDER_MAX)
/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)
/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
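/*
 * For example (illustrative values only): on amd64, where VM_LEVEL_0_ORDER
 * is 9 and thus VM_LEVEL_0_NPAGES is 512, an object with pg_color 3 maps
 * pindex 700 to index (3 + 700) & 511 == 191.  Including "pg_color" in the
 * sum keeps the object's pages aligned with the underlying physical
 * superpage, so index 0 always corresponds to the superpage's first small
 * page.
 */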
/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define	PARTPOPSLOP	1
/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * c - constant after boot
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * r - vm_reserv_lock
 * s - vm_reserv_domain_scan_lock
 */
struct vm_reserv {
	struct mtx	lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d, r) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t	object;			/* (o, r) containing object */
	vm_pindex_t	pindex;			/* (o, r) offset in object */
	vm_page_t	pages;			/* (c) first page */
	uint16_t	popcnt;			/* (r) # of pages in use */
	uint8_t		domain;			/* (c) NUMA domain. */
	char		inpartpopq;		/* (d, r) */
	int		lasttick;		/* (r) last pop update tick. */
	bitstr_t	bit_decl(popmap, VM_LEVEL_0_NPAGES_MAX);
						/* (r) bit vector, used pages */
};
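/*
 * A sketch of how the population map is manipulated, using the
 * bitstring(3) primitives that appear throughout this file:
 *
 *	bit_set(rv->popmap, index);		mark a small page in use
 *	bit_clear(rv->popmap, index);		mark a small page free
 *	bit_test(rv->popmap, index);		is the small page in use?
 *
 * The "popcnt" field caches the number of set bits so that checks for a
 * full or empty reservation are O(1).
 */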
TAILQ_HEAD(vm_reserv_queue, vm_reserv);

#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	    mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))
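/*
 * Typical locking pattern (a sketch of the idiom used below, e.g. in
 * vm_reserv_free_page()): because a reservation can be reclaimed between
 * an unlocked check and the lock acquisition, its identity must be
 * re-validated after locking:
 *
 *	rv = vm_reserv_from_page(m);
 *	if (rv->object == NULL)
 *		return (FALSE);
 *	vm_reserv_lock(rv);
 *	if (rv->object != NULL)
 *		... operate on the still-valid reservation ...
 *	vm_reserv_unlock(rv);
 */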
/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of useful reservation
 * structures than there are (physical) superpages.  These "invalid"
 * reservation structures exist to trade-off space for time in the
 * implementation of vm_reserv_from_page().  Invalid reservation structures are
 * distinguishable from "valid" reservation structures by inspecting the
 * reservation's "pages" field.  Invalid reservation structures have a NULL
 * "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;
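/*
 * For example (a sketch, assuming the dense !VM_PHYSSEG_SPARSE layout and
 * 2MB reservations, i.e. VM_LEVEL_0_SHIFT == 21): the small page at
 * physical address 0x40201000 belongs to the reservation structure
 *
 *	&vm_reserv_array[0x40201000 >> 21]	== &vm_reserv_array[0x201]
 *
 * which is exactly the computation that vm_reserv_from_page() performs.
 */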
/*
 * The per-domain partially populated reservation queues
 *
 * These queues enable the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of a queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock.
 * Threads reclaiming free pages from the queue must hold the per-domain scan
 * lock.
 */
struct vm_reserv_domain {
	struct mtx		lock;
	struct vm_reserv_queue	partpop;	/* (d) */
	struct vm_reserv	marker;		/* (d, s) scan marker/lock */
} __aligned(CACHE_LINE_SIZE);

static struct vm_reserv_domain vm_rvd[MAXMEMDOM];
#define	vm_reserv_domain_lockptr(d)	(&vm_rvd[(d)].lock)
#define	vm_reserv_domain_assert_locked(d)	\
	mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED)
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))

#define	vm_reserv_domain_scan_lock(d)	mtx_lock(&vm_rvd[(d)].marker.lock)
#define	vm_reserv_domain_scan_unlock(d)	mtx_unlock(&vm_rvd[(d)].marker.lock)
static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Reservation Info");
static COUNTER_U64_DEFINE_EARLY(vm_reserv_broken);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_freed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_MPSAFE |
    CTLFLAG_RD, NULL, 0, sysctl_vm_reserv_fullpop, "I",
    "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_reserv_partpopq, "A",
    "Partially populated reservation queues");

static COUNTER_U64_DEFINE_EARLY(vm_reserv_reclaimed);
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");
/*
 * The object lock pool is used to synchronize the rvq.  We can not use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];
#define	vm_reserv_object_lock_idx(object)			\
	    (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define	vm_reserv_object_lock_ptr(object)			\
	    &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)				\
	    mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)				\
	    mtx_unlock(vm_reserv_object_lock_ptr((object)))
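/*
 * For example (illustrative arithmetic only): dividing the object's
 * address by sizeof(*object) before the modulo spreads consecutively
 * allocated vm_objects, which lie sizeof(*object) bytes apart, across
 * different mutexes in the pool.
 */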
static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);
/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring any
 * locks, the returned value is inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
#ifdef VM_PHYSSEG_SPARSE
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
#else
		rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
#endif
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}
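/*
 * The handler above is exported read-only as "vm.reserv.fullpop"; for
 * example (sample value for illustration only):
 *
 *	# sysctl vm.reserv.fullpop
 *	vm.reserv.fullpop: 973
 */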
/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
				if (rv == &vm_rvd[domain].marker)
					continue;
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
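/*
 * Example of the output produced by "sysctl vm.reserv.partpopq" (the
 * values shown are illustrative only):
 *
 *	DOMAIN    LEVEL     SIZE  NUMBER
 *
 *	     0,      -1,   1536K,      3
 *
 * SIZE totals the unused small pages in the queued reservations and
 * NUMBER is the queue length.
 */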
/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}
/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	KASSERT(bit_ntest(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1, 0),
	    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	rv->lasttick = ticks;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}
/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(bit_test(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == 1,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = 0;
	}
	bit_clear(rv->popmap, index);
	rv->popcnt--;
	if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
	    rv->popcnt == 0) {
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		if (rv->popcnt != 0) {
			rv->inpartpopq = TRUE;
			TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv,
			    partpopq);
		}
		vm_reserv_domain_unlock(rv->domain);
		rv->lasttick = ticks;
	}
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}
/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{
#ifdef VM_PHYSSEG_SPARSE
	struct vm_phys_seg *seg;

	seg = &vm_phys_segs[m->segind];
	return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) -
	    (seg->start >> VM_LEVEL_0_SHIFT));
#else
	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
#endif
}
/*
 * Returns an existing reservation or NULL and initialized successor pointer.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
	vm_reserv_t rv;
	vm_page_t msucc;

	msucc = NULL;
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	rv = NULL;

found:
	*msuccp = msucc;

	return (rv);
}
/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
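/*
 * The mask test behaves like an unsigned range check: it returns TRUE if
 * and only if 0 <= pindex - rv->pindex < VM_LEVEL_0_NPAGES, since a pindex
 * below rv->pindex wraps around to a huge unsigned value.  For example
 * (illustrative values), with VM_LEVEL_0_NPAGES == 512 and
 * rv->pindex == 1024, pindex 1300 yields (276 & ~511) == 0 (contained),
 * while pindex 1600 yields (576 & ~511) == 512 (not contained).
 */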
/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(!bit_test(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind == 0,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	bit_set(rv->popmap, index);
	rv->popcnt++;
	if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
	    rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
		return;
	rv->lasttick = ticks;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == 0,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = 1;
	}
	vm_reserv_domain_unlock(rv->domain);
}
/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	size = npages << PAGE_SHIFT;
	if (!vm_addr_ok(pa, size, alignment, boundary))
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_contig: domain mismatch"));
		index = VM_RESERV_INDEX(object, pindex);
		/* Does the allocation fit within the reservation? */
		if (index + npages > VM_LEVEL_0_NPAGES)
			return (NULL);
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object)
			goto out;
		m = &rv->pages[index];
		pa = VM_PAGE_TO_PHYS(m);
		if (pa < low || pa + size > high ||
		    !vm_addr_ok(pa, size, alignment, boundary))
			goto out;
		/* Handle vm_page_rename(m, new_object, ...). */
		if (!bit_ntest(rv->popmap, index, index + npages - 1, 0))
			goto out;
		if (!vm_domain_allocate(vmd, req, npages))
			goto out;
		for (i = 0; i < npages; i++)
			vm_reserv_populate(rv, index + i);
		vm_reserv_unlock(rv);
		return (m);
out:
		vm_reserv_unlock(rv);
		return (NULL);
	}

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES) {
				vm_reserv_object_unlock(object);
				return (NULL);
			}

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 *
	 * If the object is unlikely to grow don't allocate a reservation for
	 * the tail.
	 */
	if ((object->flags & OBJ_ANON) == 0 &&
	    first + maxpages > object->size) {
		if (maxpages == VM_LEVEL_0_NPAGES)
			return (NULL);
		allocpages = minpages;
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_page_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}
/*
 * Allocate a physical page from an existing or newly created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL) {
		KASSERT(object != kernel_object || rv->domain == domain,
		    ("vm_reserv_alloc_page: domain mismatch"));
		domain = rv->domain;
		vmd = VM_DOMAIN(domain);
		index = VM_RESERV_INDEX(object, pindex);
		m = &rv->pages[index];
		vm_reserv_lock(rv);
		/* Handle reclaim race. */
		if (rv->object != object ||
		    /* Handle vm_page_rename(m, new_object, ...). */
		    bit_test(rv->popmap, index)) {
			m = NULL;
			goto out;
		}
		if (vm_domain_allocate(vmd, req, 1) == 0)
			m = NULL;
		else
			vm_reserv_populate(rv, index);
out:
		vm_reserv_unlock(rv);
		return (m);
	}

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 *
	 * If the object is unlikely to grow don't allocate a reservation for
	 * the tail.
	 */
	if ((object->flags & OBJ_ANON) == 0 &&
	    first + VM_LEVEL_0_NPAGES > object->size)
		return (NULL);

	/*
	 * Allocate and populate the new reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}
/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
	int hi, lo, pos;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_remove(rv);
	rv->pages->psind = 0;
	hi = lo = -1;
	pos = 0;
	for (;;) {
		bit_ff_at(rv->popmap, pos, VM_LEVEL_0_NPAGES, lo != hi, &pos);
		if (lo == hi) {
			if (pos == -1)
				break;
			lo = pos;
			continue;
		}
		if (pos == -1)
			pos = VM_LEVEL_0_NPAGES;
		hi = pos;
		vm_domain_free_lock(VM_DOMAIN(rv->domain));
		vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
		lo = hi;
	}
	bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
	rv->popcnt = 0;
	counter_u64_add(vm_reserv_broken, 1);
}
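/*
 * For example (illustrative): if only popmap bits 0 through 9 are set,
 * the loop above finds the single free run [10, VM_LEVEL_0_NPAGES) and
 * passes it to vm_phys_enqueue_contig() as one contiguous chunk instead
 * of VM_LEVEL_0_NPAGES - 10 separate single-page frees.
 */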
/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	/*
	 * This access of object->rvq is unsynchronized so that the
	 * object rvq lock can nest after the domain_free lock.  We
	 * must check for races in the results.  However, the object
	 * lock prevents new additions, so we are guaranteed that when
	 * it returns NULL the object is properly empty.
	 */
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		vm_reserv_lock(rv);
		/* Reclaim race. */
		if (rv->object != object) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_domain_unlock(rv->domain);
		vm_reserv_break(rv);
		vm_reserv_unlock(rv);
	}
}
/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;
	boolean_t ret;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_lock(rv);
	/* Re-validate after lock. */
	if (rv->object != NULL) {
		vm_reserv_depopulate(rv, m - rv->pages);
		ret = TRUE;
	} else
		ret = FALSE;
	vm_reserv_unlock(rv);

	return (ret);
}
/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	struct vm_reserv *rv;
	struct vm_reserv_domain *rvd;
#ifdef VM_PHYSSEG_SPARSE
	vm_pindex_t used;
#endif
	int i, segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
#ifdef VM_PHYSSEG_SPARSE
	used = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_reserv = &vm_reserv_array[used];
		used += howmany(seg->end, VM_LEVEL_0_SIZE) -
		    seg->start / VM_LEVEL_0_SIZE;
#else
		seg->first_reserv =
		    &vm_reserv_array[seg->start >> VM_LEVEL_0_SHIFT];
#endif
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
		    (seg->start >> VM_LEVEL_0_SHIFT);
		while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
		    VM_LEVEL_0_SIZE <= seg->end) {
			rv->pages = PHYS_TO_VM_PAGE(paddr);
			rv->domain = seg->domain;
			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
			paddr += VM_LEVEL_0_SIZE;
			rv++;
		}
	}
	for (i = 0; i < MAXMEMDOM; i++) {
		rvd = &vm_rvd[i];
		mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF);
		TAILQ_INIT(&rvd->partpop);
		mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF);

		/*
		 * Fully populated reservations should never be present in the
		 * partially populated reservation queues.
		 */
		rvd->marker.popcnt = VM_LEVEL_0_NPAGES;
		bit_nset(rvd->marker.popmap, 0, VM_LEVEL_0_NPAGES - 1);
	}

	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
		    MTX_DEF);
}
/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (!bit_test(rv->popmap, m - rv->pages));
}
/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->object != NULL ? 0 : -1);
}
/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}
/*
 * Remove a partially populated reservation from the queue.
 */
static void
vm_reserv_dequeue(vm_reserv_t rv)
{

	vm_reserv_domain_assert_locked(rv->domain);
	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));

	TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
}
/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	if (rv->inpartpopq) {
		vm_reserv_domain_lock(rv->domain);
		vm_reserv_dequeue(rv);
		vm_reserv_domain_unlock(rv->domain);
	}
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}
/*
 * Breaks a reservation near the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 */
bool
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
		/*
		 * A locked reservation is likely being updated or reclaimed,
		 * so just skip ahead.
		 */
		if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) {
			vm_reserv_dequeue(rv);
			break;
		}
	}
	vm_reserv_domain_unlock(domain);
	if (rv != NULL) {
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (true);
	}
	return (false);
}
/*
 * Determine whether this reservation has free pages that satisfy the given
 * request for contiguous physical memory.  Start searching from the lower
 * bound, defined by lo, and stop at the upper bound, hi.  Return the index
 * of the first satisfactory free page, or -1 if none is found.
 */
static int
vm_reserv_find_contig(vm_reserv_t rv, int npages, int lo,
    int hi, int ppn_align, int ppn_bound)
{

	vm_reserv_assert_locked(rv);
	KASSERT(npages <= VM_LEVEL_0_NPAGES - 1,
	    ("%s: Too many pages", __func__));
	KASSERT(ppn_bound <= VM_LEVEL_0_NPAGES,
	    ("%s: Too big a boundary for reservation size", __func__));
	KASSERT(npages <= ppn_bound,
	    ("%s: Too many pages for given boundary", __func__));
	KASSERT(ppn_align != 0 && powerof2(ppn_align),
	    ("ppn_align is not a positive power of 2"));
	KASSERT(ppn_bound != 0 && powerof2(ppn_bound),
	    ("ppn_bound is not a positive power of 2"));
	while (bit_ffc_area_at(rv->popmap, lo, hi, npages, &lo), lo != -1) {
		if (lo < roundup2(lo, ppn_align)) {
			/* Skip to next aligned page. */
			lo = roundup2(lo, ppn_align);
		} else if (roundup2(lo + 1, ppn_bound) >= lo + npages)
			return (lo);
		if (roundup2(lo + 1, ppn_bound) < lo + npages) {
			/* Skip to next boundary-matching page. */
			lo = roundup2(lo + 1, ppn_bound);
		}
	}
	return (-1);
}
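/*
 * For example (illustrative values, assuming the relevant popmap ranges
 * are free): with npages == 16, ppn_align == 16, and ppn_bound == 64, a
 * free run found at lo == 20 is first advanced to the aligned index 32;
 * since [32, 48) does not cross a 64-page boundary, roundup2(33, 64) ==
 * 64 >= 48 holds and 32 is returned.
 */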
/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns the first page of the reclaimed free range if a
 * reservation is broken and NULL otherwise.
 */
vm_page_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_reserv_queue *queue;
	vm_paddr_t pa, size;
	vm_page_t m_ret;
	vm_reserv_t marker, rv, rvn;
	int hi, lo, posn, ppn_align, ppn_bound;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (NULL);
	size = npages << PAGE_SHIFT;
	/*
	 * Ensure that a free range starting at a boundary-multiple
	 * doesn't include a boundary-multiple within it.  Otherwise,
	 * no boundary-constrained allocation is possible.
	 */
	if (!vm_addr_bound_ok(0, size, boundary))
		return (NULL);
	marker = &vm_rvd[domain].marker;
	queue = &vm_rvd[domain].partpop;
	/*
	 * Compute shifted alignment, boundary values for page-based
	 * calculations.  Constrain to range [1, VM_LEVEL_0_NPAGES] to
	 * avoid overflow.
	 */
	ppn_align = (int)(ulmin(ulmax(PAGE_SIZE, alignment),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);
	ppn_bound = boundary == 0 ? VM_LEVEL_0_NPAGES :
	    (int)(MIN(MAX(PAGE_SIZE, boundary),
	    VM_LEVEL_0_SIZE) >> PAGE_SHIFT);

	vm_reserv_domain_scan_lock(domain);
	vm_reserv_domain_lock(domain);
	TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + VM_LEVEL_0_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (!vm_addr_align_ok(pa, alignment)) {
			/* This entire reservation is unaligned; go to next. */
			continue;
		}

		if (vm_reserv_trylock(rv) == 0) {
			TAILQ_INSERT_AFTER(queue, rv, marker, partpopq);
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (TAILQ_PREV(marker, vm_reserv_queue, partpopq) !=
			    rv) {
				vm_reserv_unlock(rv);
				vm_reserv_domain_lock(domain);
				rvn = TAILQ_NEXT(marker, partpopq);
				TAILQ_REMOVE(queue, marker, partpopq);
				continue;
			}
			vm_reserv_domain_lock(domain);
			TAILQ_REMOVE(queue, marker, partpopq);
		}
		vm_reserv_domain_unlock(domain);
		lo = (pa >= low) ? 0 :
		    (int)((low + PAGE_MASK - pa) >> PAGE_SHIFT);
		hi = (pa + VM_LEVEL_0_SIZE <= high) ? VM_LEVEL_0_NPAGES :
		    (int)((high - pa) >> PAGE_SHIFT);
		posn = vm_reserv_find_contig(rv, (int)npages, lo, hi,
		    ppn_align, ppn_bound);
		if (posn >= 0) {
			vm_reserv_domain_scan_unlock(domain);
			/* Allocate requested space */
			rv->popcnt += npages;
			bit_nset(rv->popmap, posn, posn + npages - 1);
			vm_reserv_reclaim(rv);
			vm_reserv_unlock(rv);
			m_ret = &rv->pages[posn];
			pa = VM_PAGE_TO_PHYS(m_ret);
			KASSERT(vm_addr_ok(pa, size, alignment, boundary),
			    ("%s: adjusted address not aligned/bounded to "
			    "%lx/%jx",
			    __func__, alignment, (uintmax_t)boundary));
			return (m_ret);
		}
		vm_reserv_domain_lock(domain);
		rvn = TAILQ_NEXT(rv, partpopq);
		vm_reserv_unlock(rv);
	}
	vm_reserv_domain_unlock(domain);
	vm_reserv_domain_scan_unlock(domain);
	return (NULL);
}
/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}
/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}
/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
{
	vm_paddr_t new_end;
	vm_pindex_t count;
	size_t size;
	int i;

	count = 0;
	for (i = 0; i < vm_phys_nsegs; i++) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) -
		    vm_phys_segs[i].start / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE));
#endif
	}

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef VM_PHYSSEG_SPARSE
		count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) -
		    phys_avail[i] / VM_LEVEL_0_SIZE;
#else
		count = MAX(count,
		    howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE));
#endif
	}

	/*
	 * Calculate the size (in bytes) of the reservation array.  Rounding up
	 * for partial superpages at boundaries, as every small page is mapped
	 * to an element in the reservation array based on its physical address.
	 * Thus, the number of elements in the reservation array can be greater
	 * than the number of superpages.
	 */
	size = count * sizeof(struct vm_reserv);

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}
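/*
 * For example (illustrative): with 2MB reservations and a dense 4GB
 * physical address range, "count" is 2048, so the array occupies
 * 2048 * sizeof(struct vm_reserv) bytes, rounded up to a whole page by
 * round_page() before being carved out of the end of physical memory.
 */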
/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
		m = rv->pages;
	else
		m = NULL;

	return (m);
}

#endif	/* VM_NRESERVLEVEL > 0 */