/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

#ifndef VM_LEVEL_0_ORDER_MAX
#define	VM_LEVEL_0_ORDER_MAX	VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)
#define	VM_LEVEL_0_NPAGES_MAX	(1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)
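
/*
 * As a worked example (an illustration only; the values are
 * machine-dependent): on amd64, where VM_LEVEL_0_ORDER is 9 and
 * PAGE_SHIFT is 12, a level 0 reservation contains 1 << 9 == 512 small
 * pages, VM_LEVEL_0_SHIFT is 21, and VM_LEVEL_0_SIZE is 1 << 21 bytes,
 * i.e., one 2MB superpage.
 */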

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
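
/*
 * For example, assuming 512-page reservations, an object with pg_color 5
 * and a pindex of 100 yield (5 + 100) & 511 == 105: the page backs the
 * 106th slot of its reservation.  The pg_color term biases the index so
 * that a page's offset within the reservation stays congruent with its
 * intended offset within a superpage mapping.
 */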

/*
 * The size of a population map entry
 */
typedef	u_long		popmap_t;

/*
 * The number of bits in a population map entry
 */
#define	NBPOPMAP	(NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define	NPOPMAP		howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
#define	NPOPMAP_MAX	howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP)
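
/*
 * For instance, on an LP64 machine, where a popmap_t is 64 bits wide,
 * a 512-page reservation gives NBPOPMAP == 64 and NPOPMAP == 512 / 64
 * == 8, so each reservation's population map occupies eight 64-bit
 * words.
 */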

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define	PARTPOPSLOP	1
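
/*
 * For example, with the common setting hz = 1000, a tick is 1 ms, so a
 * PARTPOPSLOP of 1 means that a reservation's position in the LRU queue
 * is refreshed at most once per millisecond rather than on every
 * populate or depopulate operation.
 */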

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}
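
/*
 * In each of the helpers above, bit "i" lives in word popmap[i / NBPOPMAP]
 * at bit position i % NBPOPMAP.  For example, with 64-bit population map
 * entries, bit 70 is bit 6 of popmap[1], so popmap_is_set(popmap, 70)
 * tests popmap[1] & (1UL << 6).
 */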

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * r - vm_reserv_lock
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * c - constant after boot
 */
struct vm_reserv {
	struct mtx	lock;			/* reservation lock. */
	TAILQ_ENTRY(vm_reserv) partpopq;	/* (d) per-domain queue. */
	LIST_ENTRY(vm_reserv) objq;		/* (o, r) object queue */
	vm_object_t	object;			/* (o, r) containing object */
	vm_pindex_t	pindex;			/* (o, r) offset in object */
	vm_page_t	pages;			/* (c) first page  */
	uint16_t	domain;			/* (c) NUMA domain. */
	uint16_t	popcnt;			/* (r) # of pages in use */
	int		lasttick;		/* (r) last pop update tick. */
	char		inpartpopq;		/* (d) */
	popmap_t	popmap[NPOPMAP_MAX];	/* (r) bit vector, used pages */
};

#define	vm_reserv_lockptr(rv)		(&(rv)->lock)
#define	vm_reserv_assert_locked(rv)					\
	    mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define	vm_reserv_lock(rv)		mtx_lock(vm_reserv_lockptr(rv))
#define	vm_reserv_trylock(rv)		mtx_trylock(vm_reserv_lockptr(rv))
#define	vm_reserv_unlock(rv)		mtx_unlock(vm_reserv_lockptr(rv))

static struct mtx_padalign vm_reserv_domain_locks[MAXMEMDOM];

#define	vm_reserv_domain_lockptr(d)	&vm_reserv_domain_locks[(d)]
#define	vm_reserv_domain_lock(d)	mtx_lock(vm_reserv_domain_lockptr(d))
#define	vm_reserv_domain_unlock(d)	mtx_unlock(vm_reserv_domain_lockptr(d))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of reservation structures
 * than there are (physical) superpages.  These "invalid" reservation
 * structures exist to trade-off space for time in the implementation of
 * vm_reserv_from_page().  Invalid reservation structures are distinguishable
 * from "valid" reservation structures by inspecting the reservation's "pages"
 * field.  Invalid reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The partially populated reservation queue
 *
 * This queue enables the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of this queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain
 * vm_reserv_domain_lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop[MAXMEMDOM];

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static counter_u64_t vm_reserv_broken = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static counter_u64_t vm_reserv_freed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");

static counter_u64_t vm_reserv_reclaimed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because the pool mutex is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define	VM_RESERV_OBJ_LOCK_COUNT	MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define	vm_reserv_object_lock_idx(object)			\
	    (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
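
/*
 * A minimal sketch of the divide-free alternative alluded to above,
 * assuming VM_RESERV_OBJ_LOCK_COUNT were a power of two: discard the
 * pointer's low-order bits and mask off an index, e.g.
 *
 *	(((uintptr_t)(object) >> 8) & (VM_RESERV_OBJ_LOCK_COUNT - 1))
 *
 * The shift count (8 here) is only an illustration.
 */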
#define	vm_reserv_object_lock_ptr(object)			\
	    &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define	vm_reserv_object_lock(object)				\
	    mtx_lock(vm_reserv_object_lock_ptr((object)))
#define	vm_reserv_object_unlock(object)				\
	    mtx_unlock(vm_reserv_object_lock_ptr((object)))

static void		vm_reserv_break(vm_reserv_t rv);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring the
 * free page queue lock, the returned value may be inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	struct vm_phys_seg *seg;
	vm_paddr_t paddr;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE > paddr &&
		    paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, domain, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
	for (domain = 0; domain < vm_ndomains; domain++) {
		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
			counter = 0;
			unused_pages = 0;
			vm_reserv_domain_lock(domain);
			TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
				counter++;
				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
			}
			vm_reserv_domain_unlock(domain);
			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
			    domain, level,
			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
	vm_object_t object;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_remove: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
	object = rv->object;
	vm_reserv_object_lock(object);
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
	int i;

	vm_reserv_assert_locked(rv);
	CTR6(KTR_VM,
	    "%s: rv %p(%p) object %p new %p popcnt %d",
	    __FUNCTION__, rv, rv->pages, rv->object, object,
	    rv->popcnt);
	KASSERT(rv->object == NULL,
	    ("vm_reserv_insert: reserv %p isn't free", rv));
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
	for (i = 0; i < NPOPMAP; i++)
		KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
	vm_reserv_object_lock(object);
	rv->pindex = pindex;
	rv->object = object;
	rv->lasttick = ticks;
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	vm_reserv_object_unlock(object);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
	struct vm_domain *vmd;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(popmap_is_set(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	if (rv->popcnt == VM_LEVEL_0_NPAGES) {
		KASSERT(rv->pages->psind == 1,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = 0;
	}
	popmap_clear(rv->popmap, index);
	rv->popcnt--;
	if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
	    rv->popcnt == 0) {
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		if (rv->popcnt != 0) {
			rv->inpartpopq = TRUE;
			TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv,
			    partpopq);
		}
		vm_reserv_domain_unlock(rv->domain);
		rv->lasttick = ticks;
	}
	vmd = VM_DOMAIN(rv->domain);
	if (rv->popcnt == 0) {
		vm_reserv_remove(rv);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		counter_u64_add(vm_reserv_freed, 1);
	}
	vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}
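
/*
 * For example, with 2MB reservations (VM_LEVEL_0_SHIFT == 21), a page at
 * physical address 0x40312000 maps to vm_reserv_array[0x40312000 >> 21],
 * i.e., entry 513.  Every small page within the same 2MB-aligned region
 * maps to that same entry.
 */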

/*
 * Returns an existing reservation or NULL and initialized successor pointer.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
	vm_page_t msucc;
	vm_reserv_t rv;

	msucc = NULL;
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_from_object: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_from_object: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}
	return (NULL);

found:
	*msuccp = msucc;

	return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
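
/*
 * A page index is contained exactly when it lies within VM_LEVEL_0_NPAGES
 * slots above rv->pindex.  For example, with 512-page reservations and
 * rv->pindex == 1024, pindex 1100 gives (1100 - 1024) & ~511 == 0
 * (contained), whereas pindex 1600 gives (1600 - 1024) & ~511 == 512
 * (not contained).  An underflow for pindex < rv->pindex likewise leaves
 * high bits set.
 */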

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(popmap_is_clear(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind == 0,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_populate: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	popmap_set(rv->popmap, index);
	rv->popcnt++;
	if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
	    rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
		return;
	rv->lasttick = ticks;
	vm_reserv_domain_lock(rv->domain);
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
	} else {
		KASSERT(rv->pages->psind == 0,
		    ("vm_reserv_populate: reserv %p is already promoted",
		    rv));
		rv->pages->psind = 1;
	}
	vm_reserv_domain_unlock(rv->domain);
}

/*
 * Attempts to allocate a contiguous set of physical pages from existing
 * reservations.  See vm_reserv_alloc_contig() for a description of the
 * function's parameters.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend_contig(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int i, index;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_extend_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);
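
	/*
	 * A note on the boundary test above: pa ^ (pa + size - 1) has a bit
	 * set wherever the first and last byte addresses differ, so masking
	 * with ~(boundary - 1) is nonzero exactly when the run crosses a
	 * multiple of "boundary".  For example (illustrative values only),
	 * pa = 0x1f000, size = 0x2000, and boundary = 0x10000 give
	 * (0x1f000 ^ 0x20fff) & ~0xffff == 0x30000, rejecting a run that
	 * would straddle 0x20000.
	 */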

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);
	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend_contig: Domain mismatch from reservation."));
	index = VM_RESERV_INDEX(object, pindex);
	/* Does the allocation fit within the reservation? */
	if (index + npages > VM_LEVEL_0_NPAGES)
		return (NULL);
	vmd = VM_DOMAIN(domain);
	vm_reserv_lock(rv);
	/* Handle reclaim race. */
	if (rv->object != object)
		goto out;
	m = &rv->pages[index];
	pa = VM_PAGE_TO_PHYS(m);
	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		goto out;
	/* Handle vm_page_rename(m, new_object, ...). */
	for (i = 0; i < npages; i++) {
		if (popmap_is_set(rv->popmap, index + i))
			goto out;
	}
	if (!vm_domain_allocate(vmd, req, npages))
		goto out;
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv, index + i);
	vm_reserv_unlock(rv);
	return (m);

out:
	vm_reserv_unlock(rv);
	return (NULL);
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from newly created reservations.  All of the physical pages must be at
 * or above the given physical address "low" and below the given physical
 * address "high".  The given value "alignment" determines the alignment of
 * the first physical page in the set.  If the given value "boundary" is
 * non-zero, then the set of physical pages cannot cross any physical
 * address boundary that is a multiple of that value.  Both "alignment"
 * and "boundary" must be a power of two.
 *
 * Callers should first invoke vm_reserv_extend_contig() to attempt an
 * allocation from existing reservations.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservations against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES) {
				vm_reserv_object_unlock(object);
				return (NULL);
			}

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would the last new reservation extend past the end of the object?
	 */
	if (first + maxpages > object->size) {
		/*
		 * Don't allocate the last new reservation if the object is a
		 * vnode or backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE)) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, npages)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_contig(domain, allocpages, low, high,
		    ulmax(alignment, VM_LEVEL_0_SIZE),
		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, npages);
			return (NULL);
		}
	} else
		return (NULL);
	KASSERT(vm_phys_domain(m) == domain,
	    ("vm_reserv_alloc_contig: Page domain does not match requested."));

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		vm_reserv_lock(rv);
		vm_reserv_insert(rv, object, first);
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		vm_reserv_unlock(rv);
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);
}

/*
 * Attempts to extend an existing reservation and allocate the page to the
 * object.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_extend(int req, vm_object_t object, vm_pindex_t pindex, int domain,
    vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Could a reservation currently exist?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size || object->resident_page_count == 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv == NULL)
		return (NULL);

	KASSERT(object != kernel_object || rv->domain == domain,
	    ("vm_reserv_extend: Domain mismatch from reservation."));
	domain = rv->domain;
	vmd = VM_DOMAIN(domain);
	index = VM_RESERV_INDEX(object, pindex);
	m = &rv->pages[index];
	vm_reserv_lock(rv);
	/* Handle reclaim race. */
	if (rv->object != object ||
	    /* Handle vm_page_rename(m, new_object, ...). */
	    popmap_is_set(rv->popmap, index)) {
		m = NULL;
		goto out;
	}
	if (vm_domain_allocate(vmd, req, 1) == 0)
		m = NULL;
	else
		vm_reserv_populate(rv, index);
out:
	vm_reserv_unlock(rv);

	return (m);
}

/*
 * Attempts to allocate a new reservation for the object, and allocates a
 * page from that reservation.  Callers should first invoke vm_reserv_extend()
 * to attempt an allocation from an existing reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex,
    int domain, vm_page_t mpred)
{
	struct vm_domain *vmd;
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int index;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Callers should've extended an existing reservation prior to
	 * calling this function.  If a reservation exists it is
	 * incompatible with the allocation.
	 */
	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
	if (rv != NULL)
		return (NULL);

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 *
	 * We must synchronize with the reserv object lock to protect the
	 * pindex/object of the resulting reservation against rename while
	 * we are inspecting.
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	vm_reserv_object_lock(object);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap) {
			vm_reserv_object_unlock(object);
			return (NULL);
		}
	}
	vm_reserv_object_unlock(object);

	/*
	 * Would a new reservation extend past the end of the object?
	 */
	if (first + VM_LEVEL_0_NPAGES > object->size) {
		/*
		 * Don't allocate a new reservation if the object is a vnode or
		 * backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE))
			return (NULL);
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservation.
	 */
	m = NULL;
	vmd = VM_DOMAIN(domain);
	if (vm_domain_allocate(vmd, req, 1)) {
		vm_domain_free_lock(vmd);
		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
		    VM_LEVEL_0_ORDER);
		vm_domain_free_unlock(vmd);
		if (m == NULL) {
			vm_domain_freecnt_inc(vmd, 1);
			return (NULL);
		}
	} else
		return (NULL);
	rv = vm_reserv_from_page(m);
	vm_reserv_lock(rv);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	vm_reserv_insert(rv, object, first);
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	vm_reserv_unlock(rv);

	return (&rv->pages[index]);
}

/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.  The free page queue lock must be held.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
	int begin_zeroes, hi, i, lo;

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_remove(rv);
	rv->pages->psind = 0;
	i = hi = 0;
	do {
		/* Find the next 0 bit.  Any previous 0 bits are < "hi". */
		lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
		if (lo == 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] = 0;
			rv->popcnt -= NBPOPMAP - hi;
			while (++i < NPOPMAP) {
				lo = ffsl(~rv->popmap[i]);
				if (lo != 0)
					break;
				rv->popcnt -= NBPOPMAP;
				rv->popmap[i] = 0;
			}
			if (i == NPOPMAP)
				break;
			hi = 0;
		}
		KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
		/* Convert from ffsl() to ordinary bit numbering. */
		lo--;
		if (lo > 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] &= ~((1UL << lo) - 1);
			rv->popcnt -= lo - hi;
		}
		begin_zeroes = NBPOPMAP * i + lo;
		/* Find the next 1 bit. */
		do
			hi = ffsl(rv->popmap[i]);
		while (hi == 0 && ++i < NPOPMAP);
		if (i != NPOPMAP)
			/* Convert from ffsl() to ordinary bit numbering. */
			hi--;
		vm_domain_free_lock(VM_DOMAIN(rv->domain));
		vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
		    hi - begin_zeroes);
		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
	} while (i < NPOPMAP);
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
	counter_u64_add(vm_reserv_broken, 1);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	/*
	 * This access of object->rvq is unsynchronized so that the
	 * object rvq lock can nest after the domain_free lock.  We
	 * must check for races in the results.  However, the object
	 * lock prevents new additions, so we are guaranteed that when
	 * it returns NULL the object is properly empty.
	 */
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		vm_reserv_lock(rv);
		/* Reclaim race. */
		if (rv->object != object) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_domain_lock(rv->domain);
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_domain_unlock(rv->domain);
		vm_reserv_break(rv);
		vm_reserv_unlock(rv);
	}
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;
	boolean_t ret;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_lock(rv);
	/* Re-validate after lock. */
	if (rv->object != NULL) {
		vm_reserv_depopulate(rv, m - rv->pages);
		ret = TRUE;
	} else
		ret = FALSE;
	vm_reserv_unlock(rv);

	return (ret);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	struct vm_phys_seg *seg;
	struct vm_reserv *rv;
	vm_paddr_t paddr;
	int i, segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE > paddr &&
		    paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			rv->pages = PHYS_TO_VM_PAGE(paddr);
			rv->domain = seg->domain;
			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	for (i = 0; i < MAXMEMDOM; i++) {
		mtx_init(&vm_reserv_domain_locks[i], "VM reserv domain", NULL,
		    MTX_DEF);
		TAILQ_INIT(&vm_rvq_partpop[i]);
	}

	for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
		mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
		    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	vm_reserv_assert_locked(rv);
	CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
	    __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
	vm_reserv_domain_lock(rv->domain);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
	KASSERT(rv->domain < vm_ndomains,
	    ("vm_reserv_reclaim: reserv %p's domain is corrupted %d",
	    rv, rv->domain));
	TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
	rv->inpartpopq = FALSE;
	vm_reserv_domain_unlock(rv->domain);
	vm_reserv_break(rv);
	counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks the reservation at the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(int domain)
{
	vm_reserv_t rv;

	while ((rv = TAILQ_FIRST(&vm_rvq_partpop[domain])) != NULL) {
		vm_reserv_lock(rv);
		if (rv != TAILQ_FIRST(&vm_rvq_partpop[domain])) {
			vm_reserv_unlock(rv);
			continue;
		}
		vm_reserv_reclaim(rv);
		vm_reserv_unlock(rv);
		return (TRUE);
	}
	return (FALSE);
}

/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_reserv_t rv, rvn;
	int hi, i, lo, low_index, next_free;

	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (FALSE);
	size = npages << PAGE_SHIFT;
	vm_reserv_domain_lock(domain);
again:
	for (rv = TAILQ_FIRST(&vm_rvq_partpop[domain]); rv != NULL; rv = rvn) {
		rvn = TAILQ_NEXT(rv, partpopq);
		pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
		if (pa + PAGE_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (vm_reserv_trylock(rv) == 0) {
			vm_reserv_domain_unlock(domain);
			vm_reserv_lock(rv);
			if (!rv->inpartpopq) {
				vm_reserv_domain_lock(domain);
				if (!rvn->inpartpopq)
					goto again;
				continue;
			}
		} else
			vm_reserv_domain_unlock(domain);
		if (pa < low) {
			/* Start the search for free pages at "low". */
			low_index = (low + PAGE_MASK - pa) >> PAGE_SHIFT;
			i = low_index / NBPOPMAP;
			hi = low_index % NBPOPMAP;
		} else
			i = hi = 0;
		do {
			/* Find the next free page. */
			lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
			while (lo == 0 && ++i < NPOPMAP)
				lo = ffsl(~rv->popmap[i]);
			if (i == NPOPMAP)
				break;
			/* Convert from ffsl() to ordinary bit numbering. */
			lo--;
			next_free = NBPOPMAP * i + lo;
			pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
			KASSERT(pa >= low,
			    ("vm_reserv_reclaim_contig: pa is too low"));
			if (pa + size > high) {
				/* The rest of this reservation is too high. */
				break;
			} else if ((pa & (alignment - 1)) != 0 ||
			    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
				/*
				 * The current page doesn't meet the alignment
				 * and/or boundary requirements.  Continue
				 * searching this reservation until the rest
				 * of its free pages are either excluded or
				 * exhausted.
				 */
				hi = lo + 1;
				if (hi >= NBPOPMAP) {
					hi = 0;
					i++;
				}
				continue;
			}
			/* Find the next used page. */
			hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
			while (hi == 0 && ++i < NPOPMAP) {
				if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
				    size) {
					vm_reserv_reclaim(rv);
					vm_reserv_unlock(rv);
					return (TRUE);
				}
				hi = ffsl(rv->popmap[i]);
			}
			/* Convert from ffsl() to ordinary bit numbering. */
			if (hi != 0)
				hi--;
			if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
			    size) {
				vm_reserv_reclaim(rv);
				vm_reserv_unlock(rv);
				return (TRUE);
			}
		} while (i < NPOPMAP);
		vm_reserv_unlock(rv);
		vm_reserv_domain_lock(domain);
		if (rvn != NULL && !rvn->inpartpopq)
			goto again;
	}
	vm_reserv_domain_unlock(domain);
	return (FALSE);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		vm_reserv_lock(rv);
		CTR6(KTR_VM,
		    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
		    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
		    rv->inpartpopq);
		if (rv->object == old_object) {
			vm_reserv_object_lock(old_object);
			rv->object = NULL;
			LIST_REMOVE(rv, objq);
			vm_reserv_object_unlock(old_object);
			vm_reserv_object_lock(new_object);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			vm_reserv_object_unlock(new_object);
		}
		vm_reserv_unlock(rv);
	}
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
vm_paddr_t
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
	vm_paddr_t new_end;
	size_t size;

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * from "high_water" because every small page is mapped to an element
	 * in the reservation array based on its physical address.  Thus, the
	 * number of elements in the reservation array can be greater than the
	 * number of superpages.
	 */
	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
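
	/*
	 * For instance (illustrative numbers only), with 2MB reservations
	 * and a high_water of 4GB, the array needs howmany(4GB, 2MB) == 2048
	 * elements; at roughly a hundred or so bytes per struct vm_reserv,
	 * that is on the order of a few hundred KB of boot-time memory.
	 */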

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation counters.
 */
static void
vm_reserv_counter_init(void *unused)
{

	vm_reserv_freed = counter_u64_alloc(M_WAITOK);
	vm_reserv_broken = counter_u64_alloc(M_WAITOK);
	vm_reserv_reclaimed = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vm_reserv_counter_init, SI_SUB_CPU, SI_ORDER_ANY,
    vm_reserv_counter_init, NULL);

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_LOCKED(m->object);
	rv = vm_reserv_from_page(m);
	if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
		m = rv->pages;
	else
		m = NULL;

	return (m);
}

#endif	/* VM_NRESERVLEVEL > 0 */