/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)
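
/*
 * For illustration, on amd64, where VM_LEVEL_0_ORDER is 9 and PAGE_SHIFT is
 * 12, these constants work out to VM_LEVEL_0_NPAGES = 512, VM_LEVEL_0_SHIFT =
 * 21, and VM_LEVEL_0_SIZE = 2MB, matching the MMU's large page size.
 */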

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
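
/*
 * For example, with VM_LEVEL_0_NPAGES = 512, an object whose pg_color is 3
 * maps pindex 514 to (3 + 514) & 511 = 5, i.e., the page backing that pindex
 * is expected at slot 5 of its reservation's array of small pages.
 */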

/*
 * The size of a population map entry
 */
typedef	u_long		popmap_t;

/*
 * The number of bits in a population map entry
 */
#define	NBPOPMAP	(NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define	NPOPMAP		howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
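
/*
 * For example, on an LP64 platform with VM_LEVEL_0_NPAGES = 512, NBPOPMAP is
 * 8 * 8 = 64 bits, so NPOPMAP = howmany(512, 64) = 8 entries per reservation.
 */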

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}
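
/*
 * For instance, on an LP64 platform, popmap_set(popmap, 70) sets bit 70 % 64 =
 * 6 of popmap[70 / 64] = popmap[1], after which popmap_is_set(popmap, 70)
 * returns TRUE until popmap_clear(popmap, 70) is called.
 */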

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially-
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially-populated reservation can be broken and reclaimed at any time.
 */
struct vm_reserv {
	TAILQ_ENTRY(vm_reserv) partpopq;
	LIST_ENTRY(vm_reserv) objq;
	vm_object_t	object;			/* containing object */
	vm_pindex_t	pindex;			/* offset within object */
	vm_page_t	pages;			/* first page of a superpage */
	int		popcnt;			/* # of pages in use */
	char		inpartpopq;		/* on the partpop queue? */
	popmap_t	popmap[NPOPMAP];	/* bit vector of used pages */
};

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of reservation structures
 * than there are (physical) superpages.  These "invalid" reservation
 * structures exist to trade off space for time in the implementation of
 * vm_reserv_from_page().  Invalid reservation structures are distinguishable
 * from "valid" reservation structures by inspecting the reservation's "pages"
 * field.  Invalid reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The partially-populated reservation queue
 *
 * This queue enables the fast recovery of an unused cached or free small page
 * from a partially-populated reservation.  The reservation at the head of
 * this queue is the least-recently-changed, partially-populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
    TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static long vm_reserv_broken;
SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, 0, "Cumulative number of broken reservations");

static long vm_reserv_freed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, 0, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");

static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");

static void		vm_reserv_break(vm_reserv_t rv, vm_page_t m);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Describes the current state of the partially-populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nLEVEL     SIZE  NUMBER\n\n");
	for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
		counter = 0;
		unused_pages = 0;
		mtx_lock(&vm_page_queue_free_mtx);
		TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
			counter++;
			unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
		sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
		    unused_pages * ((int)PAGE_SIZE / 1024), counter);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially-populated reservation queue if the
 * population count is non-zero.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(popmap_is_set(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	} else {
		KASSERT(rv->pages->psind == 1,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = 0;
	}
	popmap_clear(rv->popmap, index);
	rv->popcnt--;
	if (rv->popcnt == 0) {
		LIST_REMOVE(rv, objq);
		rv->object = NULL;
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_reserv_freed++;
	} else {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
	}
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}
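
/*
 * For example, with VM_LEVEL_0_SHIFT = 21 (e.g., amd64), a page at physical
 * address 0x40403000 maps to vm_reserv_array[0x40403000 >> 21], i.e., element
 * 514, which covers the 2MB-aligned region [0x40400000, 0x40600000).
 */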

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
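
/*
 * For example, with VM_LEVEL_0_NPAGES = 512 and rv->pindex = 512: pindex 700
 * gives (700 - 512) & ~511 == 0, so the reservation covers it, while pindex
 * 1024 gives (1024 - 512) & ~511 == 512, so it does not.
 */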

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially-populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(popmap_is_clear(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind == 0,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	popmap_set(rv->popmap, index);
	rv->popcnt++;
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
	} else
		rv->pages->psind = 1;
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, mpred, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);
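
	/*
	 * An illustrative example of the boundary check above: with 4KB
	 * pages, pa = 0x1f000 and size = 0x4000 describe the run [0x1f000,
	 * 0x23000).  For boundary = 0x20000, (pa ^ (pa + size - 1)) has bit
	 * 17 set, so the run would cross a 128KB boundary and NULL is
	 * returned.
	 */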

	/*
	 * Look for an existing reservation.
	 */
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	if (mpred != NULL) {
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_alloc_contig: pindex already allocated"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_alloc_contig: pindex already allocated"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first)
			return (NULL);
	}
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
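
	/*
	 * For illustration, with VM_LEVEL_0_NPAGES = 512: a request starting
	 * at reservation index 100 with npages = 600 yields minpages = 700
	 * and maxpages = roundup2(700, 512) = 1024, i.e., two full
	 * reservations would be needed to cover the request.
	 */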
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}

	/*
	 * Would the last new reservation extend past the end of the object?
	 */
	if (first + maxpages > object->size) {
		/*
		 * Don't allocate the last new reservation if the object is a
		 * vnode or backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE)) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = vm_phys_alloc_contig(allocpages, low, high, ulmax(alignment,
	    VM_LEVEL_0_SIZE), boundary > VM_LEVEL_0_SIZE ? boundary : 0);
	if (m == NULL)
		return (NULL);

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		KASSERT(rv->object == NULL,
		    ("vm_reserv_alloc_contig: reserv %p isn't free", rv));
		LIST_INSERT_HEAD(&object->rvq, rv, objq);
		rv->object = object;
		rv->pindex = first;
		KASSERT(rv->popcnt == 0,
		    ("vm_reserv_alloc_contig: reserv %p's popcnt is corrupted",
		    rv));
		KASSERT(!rv->inpartpopq,
		    ("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
		    rv));
		for (i = 0; i < NPOPMAP; i++)
			KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_alloc_contig: reserv %p's popmap is corrupted",
			    rv));
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);

	/*
	 * Found a matching reservation.
	 */
found:
	index = VM_RESERV_INDEX(object, pindex);
	/* Does the allocation fit within the reservation? */
	if (index + npages > VM_LEVEL_0_NPAGES)
		return (NULL);
	m = &rv->pages[index];
	pa = VM_PAGE_TO_PHYS(m);
	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);
	/* Handle vm_page_rename(m, new_object, ...). */
	for (i = 0; i < npages; i++)
		if (popmap_is_set(rv->popmap, index + i))
			return (NULL);
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv, index + i);
	return (m);
}

/*
 * Allocates a page from an existing or newly-created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
{
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int i, index;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_alloc_page: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_alloc_page: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_alloc_page: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first)
			return (NULL);
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap)
			return (NULL);
	}

	/*
	 * Would a new reservation extend past the end of the object?
	 */
	if (first + VM_LEVEL_0_NPAGES > object->size) {
		/*
		 * Don't allocate a new reservation if the object is a vnode or
		 * backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE))
			return (NULL);
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservation.
	 */
	m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER);
	if (m == NULL)
		return (NULL);
	rv = vm_reserv_from_page(m);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	KASSERT(rv->object == NULL,
	    ("vm_reserv_alloc_page: reserv %p isn't free", rv));
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	rv->object = object;
	rv->pindex = first;
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv));
	for (i = 0; i < NPOPMAP; i++)
		KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_alloc_page: reserv %p's popmap is corrupted",
		    rv));
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	return (&rv->pages[index]);

	/*
	 * Found a matching reservation.
	 */
found:
	index = VM_RESERV_INDEX(object, pindex);
	m = &rv->pages[index];
	/* Handle vm_page_rename(m, new_object, ...). */
	if (popmap_is_set(rv->popmap, index))
		return (NULL);
	vm_reserv_populate(rv, index);
	return (m);
}

/*
 * Breaks the given reservation.  Except for the specified cached or free
 * page, all cached and free pages in the reservation are returned to the
 * physical memory allocator.  The reservation's population count and map are
 * reset to their initial state.
 *
 * The given reservation must not be in the partially-populated reservation
 * queue.  The free page queue lock must be held.
 */
static void
vm_reserv_break(vm_reserv_t rv, vm_page_t m)
{
	int begin_zeroes, hi, i, lo;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_break: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_break: reserv %p's inpartpopq is TRUE", rv));
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	if (m != NULL) {
		/*
		 * Since the reservation is being broken, there is no harm in
		 * abusing the population map to stop "m" from being returned
		 * to the physical memory allocator.
		 */
		i = m - rv->pages;
		KASSERT(popmap_is_clear(rv->popmap, i),
		    ("vm_reserv_break: reserv %p's popmap is corrupted", rv));
		popmap_set(rv->popmap, i);
		rv->popcnt++;
	}
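
	/*
	 * The loop below scans the population map for maximal runs of 0 bits
	 * (unused pages) and returns each run to the physical memory
	 * allocator with a single call.  For example, a popmap entry of
	 * 0xff000000000000ff on an LP64 platform describes a run of 48 free
	 * pages, which is freed as one contiguous chunk rather than page by
	 * page.
	 */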
	i = hi = 0;
	do {
		/* Find the next 0 bit.  Any previous 0 bits are < "hi". */
		lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
		if (lo == 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] = 0;
			rv->popcnt -= NBPOPMAP - hi;
			while (++i < NPOPMAP) {
				lo = ffsl(~rv->popmap[i]);
				if (lo == 0) {
					rv->popmap[i] = 0;
					rv->popcnt -= NBPOPMAP;
				} else
					break;
			}
			if (i == NPOPMAP)
				break;
			hi = 0;
		}
		KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
		/* Convert from ffsl() to ordinary bit numbering. */
		lo--;
		if (lo > 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] &= ~((1UL << lo) - 1);
			rv->popcnt -= lo - hi;
		}
		begin_zeroes = NBPOPMAP * i + lo;
		/* Find the next 1 bit. */
		do
			hi = ffsl(rv->popmap[i]);
		while (hi == 0 && ++i < NPOPMAP);
		if (i != NPOPMAP)
			/* Convert from ffsl() to ordinary bit numbering. */
			hi--;
		vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
		    hi - begin_zeroes);
	} while (i < NPOPMAP);
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
	vm_reserv_broken++;
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	mtx_lock(&vm_page_queue_free_mtx);
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		KASSERT(rv->object == object,
		    ("vm_reserv_break_all: reserv %p is corrupted", rv));
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_break(rv, NULL);
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	if ((m->flags & PG_CACHED) != 0 && m->pool != VM_FREEPOOL_CACHE)
		vm_phys_set_pool(VM_FREEPOOL_CACHE, rv->pages,
		    VM_LEVEL_0_ORDER);
	vm_reserv_depopulate(rv, m - rv->pages);
	return (TRUE);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	int segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
			    PHYS_TO_VM_PAGE(paddr);
			paddr += VM_LEVEL_0_SIZE;
		}
	}
}

/*
 * Returns a reservation level if the given page belongs to a fully-populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Prepare for the reactivation of a cached page.
 *
 * First, suppose that the given page "m" was allocated individually, i.e., not
 * as part of a reservation, and cached.  Then, suppose a reservation
 * containing "m" is allocated by the same object.  Although "m" and the
 * reservation belong to the same object, "m"'s pindex may not match the
 * reservation's.
 *
 * The free page queue must be locked.
 */
boolean_t
vm_reserv_reactivate_page(vm_page_t m)
{
	vm_reserv_t rv;
	int index;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_reserv_reactivate_page: page %p is not cached", m));
	if (m->object == rv->object &&
	    m->pindex - rv->pindex == (index = VM_RESERV_INDEX(m->object,
	    m->pindex)))
		vm_reserv_populate(rv, index);
	else {
		KASSERT(rv->inpartpopq,
		    ("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
		    rv));
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
		/* Don't release "m" to the physical memory allocator. */
		vm_reserv_break(rv, m);
	}
	return (TRUE);
}

/*
 * Breaks the given partially-populated reservation, releasing its cached and
 * free pages to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
	TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
	vm_reserv_break(rv, NULL);
	vm_reserv_reclaimed++;
}

/*
 * Breaks the reservation at the head of the partially-populated reservation
 * queue, releasing its cached and free pages to the physical memory
 * allocator.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(void)
{
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
		vm_reserv_reclaim(rv);
		return (TRUE);
	}
	return (FALSE);
}

/*
 * Searches the partially-populated reservation queue for the least recently
 * active reservation with unused pages, i.e., cached or free, that satisfy the
 * given request for contiguous physical memory.  If a satisfactory reservation
 * is found, it is broken.  Returns TRUE if a reservation is broken and FALSE
 * otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_reserv_t rv;
	int hi, i, lo, next_free;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (FALSE);
	size = npages << PAGE_SHIFT;
	TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
		if (pa + PAGE_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (pa < low) {
			/* Start the search for free pages at "low". */
			i = (low - pa) / PAGE_SIZE / NBPOPMAP;
			hi = (low - pa) / PAGE_SIZE % NBPOPMAP;
		} else
			i = hi = 0;
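
		/*
		 * For example, with 4KB pages on an LP64 platform, a "low"
		 * that is 256KB above the reservation's start corresponds to
		 * page index 64, so the search begins at popmap[1], bit 0.
		 */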
		do {
			/* Find the next free page. */
			lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
			while (lo == 0 && ++i < NPOPMAP)
				lo = ffsl(~rv->popmap[i]);
			if (i == NPOPMAP)
				break;
			/* Convert from ffsl() to ordinary bit numbering. */
			lo--;
			next_free = NBPOPMAP * i + lo;
			pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
			KASSERT(pa >= low,
			    ("vm_reserv_reclaim_contig: pa is too low"));
			if (pa + size > high) {
				/* The rest of this reservation is too high. */
				break;
			} else if ((pa & (alignment - 1)) != 0 ||
			    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
				/* Continue with this reservation. */
				if ((hi = lo + 1) == NBPOPMAP) {
					hi = 0;
					i++;
				}
				continue;
			}
			/* Find the next used page. */
			hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
			while (hi == 0 && ++i < NPOPMAP) {
				if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
				    size) {
					vm_reserv_reclaim(rv);
					return (TRUE);
				}
				hi = ffsl(rv->popmap[i]);
			}
			/* Convert from ffsl() to ordinary bit numbering. */
			if (hi != 0)
				hi--;
			if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
			    size) {
				vm_reserv_reclaim(rv);
				return (TRUE);
			}
		} while (i < NPOPMAP);
	}
	return (FALSE);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		mtx_lock(&vm_page_queue_free_mtx);
		if (rv->object == old_object) {
			LIST_REMOVE(rv, objq);
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
	vm_paddr_t new_end;
	size_t size;

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * from "high_water" because every small page is mapped to an element
	 * in the reservation array based on its physical address.  Thus, the
	 * number of elements in the reservation array can be greater than the
	 * number of superpages.
	 */
	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
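
	/*
	 * For example, with a "high_water" of 4GB and 2MB reservations, the
	 * array has howmany(4GB, 2MB) = 2048 elements; at roughly 128 bytes
	 * per structure on LP64 (NPOPMAP = 8), that is about 256KB of
	 * permanently allocated memory.
	 */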

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}

#endif	/* VM_NRESERVLEVEL > 0 */