/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *	Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully-automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0
/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)
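/*
 * Illustrative values (an assumption about the platform, not part of this
 * file): on amd64, VM_LEVEL_0_ORDER is 9 and PAGE_SHIFT is 12, so a level 0
 * reservation holds 1 << 9 == 512 small pages, VM_LEVEL_0_SHIFT is
 * 9 + 12 == 21, and VM_LEVEL_0_SIZE is 1 << 21 == 2 MB, the amd64
 * superpage size.
 */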
/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
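/*
 * Worked example (hypothetical values): with VM_LEVEL_0_NPAGES == 512, an
 * object whose pg_color is 96, and pindex 10, the index is
 * (96 + 10) & 511 == 106.  Adding pg_color keeps a page's position within
 * the reservation consistent with the object's alignment ("color"), so that
 * a fully populated reservation can be mapped by a single superpage.
 */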
/*
 * The size of a population map entry
 */
typedef	u_long		popmap_t;
/*
 * The number of bits in a population map entry
 */
#define	NBPOPMAP	(NBBY * sizeof(popmap_t))
/*
 * The number of population map entries in a reservation
 */
#define	NPOPMAP		howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
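/*
 * Arithmetic sketch (assuming an LP64 platform and the amd64 values above):
 * popmap_t is a 64-bit u_long, so NBPOPMAP == 8 * 8 == 64, and a 512-page
 * reservation needs NPOPMAP == howmany(512, 64) == 8 map entries.
 */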
/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially-
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially-populated reservation can be broken and reclaimed at any time.
 */
struct vm_reserv {
	TAILQ_ENTRY(vm_reserv) partpopq;
	LIST_ENTRY(vm_reserv) objq;
	vm_object_t	object;			/* containing object */
	vm_pindex_t	pindex;			/* offset within object */
	vm_page_t	pages;			/* first page of a superpage */
	int		popcnt;			/* # of pages in use */
	char		inpartpopq;		/* on the partpop queue? */
	popmap_t	popmap[NPOPMAP];	/* bit vector of used pages */
};
/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of reservation structures
 * than there are (physical) superpages.  These "invalid" reservation
 * structures exist to trade off space for time in the implementation of
 * vm_reserv_from_page().  Invalid reservation structures are distinguishable
 * from "valid" reservation structures by inspecting the reservation's
 * "pages" field.  Invalid reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;
/*
 * The partially-populated reservation queue
 *
 * This queue enables the fast recovery of an unused cached or free small page
 * from a partially-populated reservation.  The reservation at the head of
 * this queue is the least-recently-changed, partially-populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
    TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);
static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static long vm_reserv_broken;
SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, 0, "Cumulative number of broken reservations");

static long vm_reserv_freed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, 0, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");

static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");
static void		vm_reserv_break(vm_reserv_t rv, vm_page_t m);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);
/*
 * Describes the current state of the partially-populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nLEVEL     SIZE  NUMBER\n\n");
	for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
		counter = 0;
		unused_pages = 0;
		mtx_lock(&vm_page_queue_free_mtx);
		TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
			counter++;
			unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
		sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
		    unused_pages * ((int)PAGE_SIZE / 1024), counter);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially-populated reservation queue if the
 * population count is non-zero.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(isset(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	clrbit(rv->popmap, index);
	rv->popcnt--;
	if (rv->popcnt == 0) {
		LIST_REMOVE(rv, objq);
		rv->object = NULL;
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_reserv_freed++;
	} else {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
	}
}
/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}
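/*
 * Illustrative trace (using the assumed amd64 values above): a page at
 * physical address 0x12345000 maps to reservation number
 * 0x12345000 >> 21 == 145.  Because every physical address below the high
 * water mark has a slot in vm_reserv_array, this lookup is a single shift
 * and index, never a search.
 */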
/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
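/*
 * How the test above works (a sketch, assuming VM_LEVEL_0_NPAGES == 512):
 * the reservation covers [rv->pindex, rv->pindex + 512).  Because
 * vm_pindex_t is unsigned, pindex - rv->pindex underflows to a huge value
 * whenever pindex < rv->pindex, setting bits above bit 8; the difference
 * has all such bits clear iff 0 <= pindex - rv->pindex < 512.
 */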
/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially-populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(isclr(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	setbit(rv->popmap, index);
	rv->popcnt++;
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
	}
}
/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from an existing or newly-created reservation.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, mpred, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
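	/*
	 * The XOR test below detects boundary crossings: a bit at or above
	 * log2(boundary) differs between the first and last byte addresses
	 * iff [pa, pa + size) crosses a multiple of "boundary".  Worked
	 * example (illustrative values): with boundary 0x10000, pa 0xff00,
	 * and size 0x200, pa ^ (pa + size - 1) == 0xff00 ^ 0x100ff ==
	 * 0x1ffff, and 0x1ffff & ~0xffff != 0, so the range crosses 0x10000.
	 */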
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	if (mpred != NULL) {
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_alloc_contig: pindex already allocated"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_alloc_contig: pindex already allocated"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used and the first index to the right that cannot
	 * be used?
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first)
			return (NULL);
	}
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
	}

	/*
	 * Would the last new reservation extend past the end of the object?
	 */
	if (first + maxpages > object->size) {
		/*
		 * Don't allocate the last new reservation if the object is a
		 * vnode or backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE)) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
		/* Speculate that the object may grow. */
	}
	/*
	 * Allocate and populate the new reservations.  The alignment and
	 * boundary specified for this allocation may be different from the
	 * alignment and boundary specified for the requested pages.  For
	 * instance, the specified index may not be the first page within the
	 * first new reservation.
	 */
	m = vm_phys_alloc_contig(allocpages, low, high, ulmax(alignment,
	    VM_LEVEL_0_SIZE), boundary > VM_LEVEL_0_SIZE ? boundary : 0);
	if (m == NULL)
		return (NULL);
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		KASSERT(rv->object == NULL,
		    ("vm_reserv_alloc_contig: reserv %p isn't free", rv));
		LIST_INSERT_HEAD(&object->rvq, rv, objq);
		rv->object = object;
		rv->pindex = first;
		KASSERT(rv->popcnt == 0,
		    ("vm_reserv_alloc_contig: reserv %p's popcnt is corrupted",
		    rv));
		KASSERT(!rv->inpartpopq,
		    ("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
		    rv));
		for (i = 0; i < NPOPMAP; i++)
			KASSERT(rv->popmap[i] == 0,
			    ("vm_reserv_alloc_contig: reserv %p's popmap is corrupted",
			    rv));
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages > 0);
	return (m_ret);

	/*
	 * Found a matching reservation.
	 */
found:
	index = VM_RESERV_INDEX(object, pindex);
	/* Does the allocation fit within the reservation? */
	if (index + npages > VM_LEVEL_0_NPAGES)
		return (NULL);
	m = &rv->pages[index];
	pa = VM_PAGE_TO_PHYS(m);
	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);
	/* Handle vm_page_rename(m, new_object, ...). */
	for (i = 0; i < npages; i++)
		if (isset(rv->popmap, index + i))
			return (NULL);
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv, index + i);
	return (m);
}
/*
 * Allocates a page from an existing or newly-created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
{
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int i, index;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_alloc_page: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_alloc_page: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_alloc_page: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 */
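	/*
	 * Worked example (hypothetical values, VM_LEVEL_0_NPAGES == 512,
	 * pg_color 0): for pindex 1000, a new reservation would cover
	 * indices [512, 1024).  An mpred at pindex 300 inside a reservation
	 * covering [0, 512) yields leftcap 512, which does not intrude.  An
	 * msucc that is an ordinary page at pindex 1010 yields rightcap
	 * 1010, and since 512 + 512 > 1010 the reservation cannot fit.
	 */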
	first = pindex - VM_RESERV_INDEX(object, pindex);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first)
			return (NULL);
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap)
			return (NULL);
	}

	/*
	 * Would a new reservation extend past the end of the object?
	 */
	if (first + VM_LEVEL_0_NPAGES > object->size) {
		/*
		 * Don't allocate a new reservation if the object is a vnode or
		 * backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE))
			return (NULL);
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservation.
	 */
	m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER);
	if (m == NULL)
		return (NULL);
	rv = vm_reserv_from_page(m);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	KASSERT(rv->object == NULL,
	    ("vm_reserv_alloc_page: reserv %p isn't free", rv));
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	rv->object = object;
	rv->pindex = first;
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv));
	for (i = 0; i < NPOPMAP; i++)
		KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_alloc_page: reserv %p's popmap is corrupted",
		    rv));
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	return (&rv->pages[index]);

	/*
	 * Found a matching reservation.
	 */
found:
	index = VM_RESERV_INDEX(object, pindex);
	m = &rv->pages[index];
	/* Handle vm_page_rename(m, new_object, ...). */
	if (isset(rv->popmap, index))
		return (NULL);
	vm_reserv_populate(rv, index);
	return (m);
}
/*
 * Breaks the given reservation.  Except for the specified cached or free
 * page, all cached and free pages in the reservation are returned to the
 * physical memory allocator.  The reservation's population count and map are
 * reset to their initial state.
 *
 * The given reservation must not be in the partially-populated reservation
 * queue.  The free page queue lock must be held.
 */
static void
vm_reserv_break(vm_reserv_t rv, vm_page_t m)
{
	int begin_zeroes, hi, i, lo;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_break: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_break: reserv %p's inpartpopq is TRUE", rv));
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	if (m != NULL) {
		/*
		 * Since the reservation is being broken, there is no harm in
		 * abusing the population map to stop "m" from being returned
		 * to the physical memory allocator.
		 */
		i = m - rv->pages;
		KASSERT(isclr(rv->popmap, i),
		    ("vm_reserv_break: reserv %p's popmap is corrupted", rv));
		setbit(rv->popmap, i);
		rv->popcnt++;
	}
	i = hi = 0;
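	/*
	 * The loop below scans the population map for maximal runs of clear
	 * bits, i.e., free pages, and hands each run to vm_phys_free_contig()
	 * in a single call.  An illustrative trace (hypothetical values,
	 * NBPOPMAP == 64, NPOPMAP == 8): if popmap[0] == 0x1 and the
	 * remaining entries are zero, the lone set bit is skipped and the run
	 * of clear bits from bit 1 through the end of the map is freed as one
	 * 511-page chunk.
	 */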
	do {
		/* Find the next 0 bit.  Any previous 0 bits are < "hi". */
		lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
		if (lo == 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] = 0;
			rv->popcnt -= NBPOPMAP - hi;
			while (++i < NPOPMAP) {
				lo = ffsl(~rv->popmap[i]);
				if (lo == 0) {
					rv->popmap[i] = 0;
					rv->popcnt -= NBPOPMAP;
				} else
					break;
			}
			if (i == NPOPMAP)
				break;
			hi = 0;
		}
		KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
		/* Convert from ffsl() to ordinary bit numbering. */
		lo--;
		if (lo > 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] &= ~((1UL << lo) - 1);
			rv->popcnt -= lo - hi;
		}
		begin_zeroes = NBPOPMAP * i + lo;
		/* Find the next 1 bit. */
		do
			hi = ffsl(rv->popmap[i]);
		while (hi == 0 && ++i < NPOPMAP);
		if (i != NPOPMAP)
			/* Convert from ffsl() to ordinary bit numbering. */
			hi--;
		vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
		    hi - begin_zeroes);
	} while (i < NPOPMAP);
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
	vm_reserv_broken++;
}
/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	mtx_lock(&vm_page_queue_free_mtx);
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		KASSERT(rv->object == object,
		    ("vm_reserv_break_all: reserv %p is corrupted", rv));
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_break(rv, NULL);
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}
/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	if ((m->flags & PG_CACHED) != 0 && m->pool != VM_FREEPOOL_CACHE)
		vm_phys_set_pool(VM_FREEPOOL_CACHE, rv->pages,
		    VM_LEVEL_0_ORDER);
	vm_reserv_depopulate(rv, m - rv->pages);
	return (TRUE);
}
/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	int i;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		paddr = roundup2(phys_avail[i], VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= phys_avail[i + 1]) {
			vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
			    PHYS_TO_VM_PAGE(paddr);
			paddr += VM_LEVEL_0_SIZE;
		}
	}
}
/*
 * Returns a reservation level if the given page belongs to a fully-populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}
/*
 * Prepare for the reactivation of a cached page.
 *
 * First, suppose that the given page "m" was allocated individually, i.e., not
 * as part of a reservation, and cached.  Then, suppose a reservation
 * containing "m" is allocated by the same object.  Although "m" and the
 * reservation belong to the same object, "m"'s pindex may not match the
 * reservation's.
 *
 * The free page queue must be locked.
 */
boolean_t
vm_reserv_reactivate_page(vm_page_t m)
{
	vm_reserv_t rv;
	int index;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_reserv_reactivate_page: page %p is not cached", m));
	if (m->object == rv->object &&
	    m->pindex - rv->pindex == (index = VM_RESERV_INDEX(m->object,
	    m->pindex)))
		vm_reserv_populate(rv, index);
	else {
		KASSERT(rv->inpartpopq,
		    ("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
		    rv));
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
		/* Don't release "m" to the physical memory allocator. */
		vm_reserv_break(rv, m);
		return (FALSE);
	}
	return (TRUE);
}
/*
 * Breaks the given partially-populated reservation, releasing its cached and
 * free pages to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
	TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
	vm_reserv_break(rv, NULL);
	vm_reserv_reclaimed++;
}
/*
 * Breaks the reservation at the head of the partially-populated reservation
 * queue, releasing its cached and free pages to the physical memory
 * allocator.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(void)
{
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
		vm_reserv_reclaim(rv);
		return (TRUE);
	}
	return (FALSE);
}
/*
 * Searches the partially-populated reservation queue for the least recently
 * active reservation with unused pages, i.e., cached or free, that satisfy the
 * given request for contiguous physical memory.  If a satisfactory reservation
 * is found, it is broken.  Returns TRUE if a reservation is broken and FALSE
 * otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_reserv_t rv;
	int hi, i, lo, next_free;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (FALSE);
	size = npages << PAGE_SHIFT;
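	/*
	 * Walk the partially-populated queue in least-recently-changed order,
	 * scanning each reservation's population map for a run of clear bits,
	 * i.e., free pages, that is long enough to satisfy "npages" and that
	 * also satisfies "low", "high", "alignment", and "boundary".  The
	 * first reservation found to contain such a run is reclaimed.
	 */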
	TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
		if (pa + PAGE_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (pa < low) {
			/* Start the search for free pages at "low". */
			i = ((low - pa) >> PAGE_SHIFT) / NBPOPMAP;
			hi = ((low - pa) >> PAGE_SHIFT) % NBPOPMAP;
		} else
			i = hi = 0;
		do {
			/* Find the next free page. */
			lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
			while (lo == 0 && ++i < NPOPMAP)
				lo = ffsl(~rv->popmap[i]);
			if (i == NPOPMAP)
				break;
			/* Convert from ffsl() to ordinary bit numbering. */
			lo--;
			next_free = NBPOPMAP * i + lo;
			pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
			KASSERT(pa >= low,
			    ("vm_reserv_reclaim_contig: pa is too low"));
			if (pa + size > high) {
				/* The rest of this reservation is too high. */
				break;
			} else if ((pa & (alignment - 1)) != 0 ||
			    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
				/* Continue with this reservation. */
				hi = lo + 1;
				if (hi == NBPOPMAP) {
					/* Wrap to the next popmap entry. */
					hi = 0;
					if (++i == NPOPMAP)
						break;
				}
				continue;
			}
			/* Find the next used page. */
			hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
			while (hi == 0 && ++i < NPOPMAP) {
				if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
				    size) {
					vm_reserv_reclaim(rv);
					return (TRUE);
				}
				hi = ffsl(rv->popmap[i]);
			}
			/* Convert from ffsl() to ordinary bit numbering. */
			if (i != NPOPMAP)
				hi--;
			if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
			    size) {
				vm_reserv_reclaim(rv);
				return (TRUE);
			}
		} while (i < NPOPMAP);
	}
	return (FALSE);
}
/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		mtx_lock(&vm_page_queue_free_mtx);
		if (rv->object == old_object) {
			LIST_REMOVE(rv, objq);
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
	}
}
/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
	vm_paddr_t new_end;
	size_t size;

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * from "high_water" because every small page is mapped to an element
	 * in the reservation array based on its physical address.  Thus, the
	 * number of elements in the reservation array can be greater than the
	 * number of superpages.
	 */
	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
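	/*
	 * Scale sketch (hypothetical values): with 2 MB level 0 reservations
	 * and a 4 GB high_water, the array has howmany(4 GB, 2 MB) == 2048
	 * elements; at roughly 128 bytes per struct vm_reserv (an estimate
	 * for LP64 with NPOPMAP == 8), that is about 256 KB for the whole
	 * array.
	 */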
	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}

#endif	/* VM_NRESERVLEVEL > 0 */