/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *      Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

#ifndef VM_LEVEL_0_ORDER_MAX
#define VM_LEVEL_0_ORDER_MAX    VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define VM_LEVEL_0_NPAGES       (1 << VM_LEVEL_0_ORDER)
#define VM_LEVEL_0_NPAGES_MAX   (1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define VM_LEVEL_0_SHIFT        (VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define VM_LEVEL_0_SIZE         (1 << VM_LEVEL_0_SHIFT)

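/*
 * For example, on amd64, where VM_LEVEL_0_ORDER is 9 and PAGE_SHIFT is 12,
 * a level 0 reservation spans VM_LEVEL_0_NPAGES = 512 small pages,
 * VM_LEVEL_0_SHIFT is 21, and VM_LEVEL_0_SIZE is 2MB, the amd64 superpage
 * size.  Other architectures define their own VM_LEVEL_0_ORDER.
 */
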
/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define VM_RESERV_INDEX(object, pindex) \
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))

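/*
 * Worked example, assuming VM_LEVEL_0_NPAGES is 512: an object with
 * pg_color 3 and pindex 514 yields (3 + 514) & 511 = 5, so the page
 * occupies the sixth slot of its reservation.  Including pg_color keeps a
 * page's slot within the reservation consistent with the physical
 * alignment intended for the object's mapping.
 */
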
/*
 * The size of a population map entry
 */
typedef u_long          popmap_t;

/*
 * The number of bits in a population map entry
 */
#define NBPOPMAP        (NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define NPOPMAP         howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
#define NPOPMAP_MAX     howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP)

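/*
 * For example, with VM_LEVEL_0_NPAGES equal to 512 and a 64-bit popmap_t,
 * NBPOPMAP is 64 and NPOPMAP is howmany(512, 64) = 8 entries.
 */
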
/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define PARTPOPSLOP     1

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}

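/*
 * These helpers locate bit "i" in entry i / NBPOPMAP at bit position
 * i % NBPOPMAP.  For example, with 64-bit entries, page index 70 maps to
 * popmap[1], bit 6: popmap_set(popmap, 70) ORs in 1UL << 6 there, and
 * popmap_clear(popmap, 70) masks the same bit back out.
 */
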
/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * r - vm_reserv_lock
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * c - constant after boot
 */
struct vm_reserv {
        struct mtx      lock;                   /* reservation lock. */
        TAILQ_ENTRY(vm_reserv) partpopq;        /* (d) per-domain queue. */
        LIST_ENTRY(vm_reserv) objq;             /* (o, r) object queue */
        vm_object_t     object;                 /* (o, r) containing object */
        vm_pindex_t     pindex;                 /* (o, r) offset in object */
        vm_page_t       pages;                  /* (c) first page */
        uint16_t        domain;                 /* (c) NUMA domain. */
        uint16_t        popcnt;                 /* (r) # of pages in use */
        int             lasttick;               /* (r) last pop update tick. */
        char            inpartpopq;             /* (d) */
        popmap_t        popmap[NPOPMAP_MAX];    /* (r) bit vector, used pages */
};

#define vm_reserv_lockptr(rv)           (&(rv)->lock)
#define vm_reserv_assert_locked(rv)                                     \
            mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define vm_reserv_lock(rv)              mtx_lock(vm_reserv_lockptr(rv))
#define vm_reserv_trylock(rv)           mtx_trylock(vm_reserv_lockptr(rv))
#define vm_reserv_unlock(rv)            mtx_unlock(vm_reserv_lockptr(rv))

static struct mtx_padalign vm_reserv_domain_locks[MAXMEMDOM];

#define vm_reserv_domain_lockptr(d)     &vm_reserv_domain_locks[(d)]
#define vm_reserv_domain_lock(d)        mtx_lock(vm_reserv_domain_lockptr(d))
#define vm_reserv_domain_unlock(d)      mtx_unlock(vm_reserv_domain_lockptr(d))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of useful reservation
 * structures than there are (physical) superpages.  These "invalid"
 * reservation structures exist to trade-off space for time in the
 * implementation of vm_reserv_from_page().  Invalid reservation structures are
 * distinguishable from "valid" reservation structures by inspecting the
 * reservation's "pages" field.  Invalid reservation structures have a NULL
 * "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The partially populated reservation queue
 *
 * This queue enables the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of this queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock
 * (vm_reserv_domain_lock).
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop[MAXMEMDOM];

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static counter_u64_t vm_reserv_broken = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static counter_u64_t vm_reserv_freed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");

static counter_u64_t vm_reserv_reclaimed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We cannot use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define VM_RESERV_OBJ_LOCK_COUNT        MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define vm_reserv_object_lock_idx(object)                       \
            (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define vm_reserv_object_lock_ptr(object)                       \
            &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define vm_reserv_object_lock(object)                           \
            mtx_lock(vm_reserv_object_lock_ptr((object)))
#define vm_reserv_object_unlock(object)                         \
            mtx_unlock(vm_reserv_object_lock_ptr((object)))

static void             vm_reserv_break(vm_reserv_t rv);
static void             vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t      vm_reserv_from_page(vm_page_t m);
static boolean_t        vm_reserv_has_pindex(vm_reserv_t rv,
                            vm_pindex_t pindex);
static void             vm_reserv_populate(vm_reserv_t rv, int index);
static void             vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring the
 * free page queue lock, the returned value may be inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        vm_reserv_t rv;
        int fullpop, segind;

        fullpop = 0;
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
                    VM_LEVEL_0_SIZE <= seg->end) {
                        rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
                        fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
        return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        vm_reserv_t rv;
        int counter, error, domain, level, unused_pages;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
        for (domain = 0; domain < vm_ndomains; domain++) {
                for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
                        counter = 0;
                        unused_pages = 0;
                        vm_reserv_domain_lock(domain);
                        TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
                                counter++;
                                unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
                        }
                        vm_reserv_domain_unlock(domain);
                        sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
                            domain, level,
                            unused_pages * ((int)PAGE_SIZE / 1024), counter);
                }
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}

/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
        vm_object_t object;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_remove: reserv %p is free", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
        object = rv->object;
        vm_reserv_object_lock(object);
        LIST_REMOVE(rv, objq);
        rv->object = NULL;
        vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
        int i;

        vm_reserv_assert_locked(rv);
        CTR6(KTR_VM,
            "%s: rv %p(%p) object %p new %p popcnt %d",
            __FUNCTION__, rv, rv->pages, rv->object, object,
            rv->popcnt);
        KASSERT(rv->object == NULL,
            ("vm_reserv_insert: reserv %p isn't free", rv));
        KASSERT(rv->popcnt == 0,
            ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
        for (i = 0; i < NPOPMAP; i++)
                KASSERT(rv->popmap[i] == 0,
                    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
        vm_reserv_object_lock(object);
        rv->pindex = pindex;
        rv->object = object;
        rv->lasttick = ticks;
        LIST_INSERT_HEAD(&object->rvq, rv, objq);
        vm_reserv_object_unlock(object);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
        struct vm_domain *vmd;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_depopulate: reserv %p is free", rv));
        KASSERT(popmap_is_set(rv->popmap, index),
            ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
            index));
        KASSERT(rv->popcnt > 0,
            ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        if (rv->popcnt == VM_LEVEL_0_NPAGES) {
                KASSERT(rv->pages->psind == 1,
                    ("vm_reserv_depopulate: reserv %p is already demoted",
                    rv));
                rv->pages->psind = 0;
        }
        popmap_clear(rv->popmap, index);
        rv->popcnt--;
        if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
            rv->popcnt == 0) {
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                if (rv->popcnt != 0) {
                        rv->inpartpopq = TRUE;
                        TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv,
                            partpopq);
                }
                vm_reserv_domain_unlock(rv->domain);
                rv->lasttick = ticks;
        }
        vmd = VM_DOMAIN(rv->domain);
        if (rv->popcnt == 0) {
                vm_reserv_remove(rv);
                vm_domain_free_lock(vmd);
                vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
                vm_domain_free_unlock(vmd);
                counter_u64_add(vm_reserv_freed, 1);
        }
        vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

        return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}

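/*
 * For example, assuming VM_LEVEL_0_SHIFT is 21 (2MB reservations), a page
 * at physical address 0x40365000 maps to vm_reserv_array[0x40365000 >> 21],
 * that is, element 513.  Every small page within the same 2MB-aligned
 * region shares that element, which is why the array may contain "invalid"
 * entries for regions that hold no complete superpage.
 */
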
/*
 * Returns an existing reservation or NULL, and initializes the successor
 * pointer.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
        vm_reserv_t rv;
        vm_page_t msucc;

        msucc = NULL;
        if (mpred != NULL) {
                KASSERT(mpred->object == object,
                    ("vm_reserv_from_object: object doesn't contain mpred"));
                KASSERT(mpred->pindex < pindex,
                    ("vm_reserv_from_object: mpred doesn't precede pindex"));
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
                msucc = TAILQ_NEXT(mpred, listq);
        } else
                msucc = TAILQ_FIRST(&object->memq);
        if (msucc != NULL) {
                KASSERT(msucc->pindex > pindex,
                    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
                rv = vm_reserv_from_page(msucc);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
        }
        rv = NULL;

found:
        *msuccp = msucc;

        return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

        return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}

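/*
 * Worked example, assuming VM_LEVEL_0_NPAGES is 512: for a reservation
 * with rv->pindex 1024, pindex 1300 gives (1300 - 1024) & ~511 == 0, so
 * the index is contained; pindex 1600 gives 576 & ~511 == 512, so it is
 * not.
 */
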
/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 *
 * The reservation must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_populate: reserv %p is free", rv));
        KASSERT(popmap_is_clear(rv->popmap, index),
            ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
            index));
        KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
            ("vm_reserv_populate: reserv %p is already full", rv));
        KASSERT(rv->pages->psind == 0,
            ("vm_reserv_populate: reserv %p is already promoted", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_populate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        popmap_set(rv->popmap, index);
        rv->popcnt++;
        if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
            rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
                return;
        rv->lasttick = ticks;
        vm_reserv_domain_lock(rv->domain);
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
                rv->inpartpopq = FALSE;
        }
        if (rv->popcnt < VM_LEVEL_0_NPAGES) {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
        } else {
                KASSERT(rv->pages->psind == 0,
                    ("vm_reserv_populate: reserv %p is already promoted",
                    rv));
                rv->pages->psind = 1;
        }
        vm_reserv_domain_unlock(rv->domain);
}


/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        struct vm_domain *vmd;
        vm_paddr_t pa, size;
        vm_page_t m, m_ret, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        u_long allocpages, maxpages, minpages;
        int i, index, n;

        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex + npages > object->size)
                return (NULL);

        /*
         * All reservations of a particular size have the same alignment.
         * Assuming that the first page is allocated from a reservation, the
         * least significant bits of its physical address can be determined
         * from its offset from the beginning of the reservation and the size
         * of the reservation.
         *
         * Could the specified index within a reservation of the smallest
         * possible size satisfy the alignment and boundary requirements?
         */
        pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
        if ((pa & (alignment - 1)) != 0)
                return (NULL);
        size = npages << PAGE_SHIFT;
        if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                return (NULL);

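        /*
         * Illustration of the preceding test (hypothetical numbers): a
         * request for npages = 16 (size = 0x10000) at pa = 0x9000 with a
         * 64KB boundary gives pa + size - 1 = 0x18fff; (0x9000 ^ 0x18fff)
         * has bit 16 set, which survives ~(boundary - 1), so the run would
         * cross a 64KB boundary and the test above returns NULL.
         */
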
        /*
         * Look for an existing reservation.
         */
        rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
        if (rv != NULL) {
                KASSERT(object != kernel_object || rv->domain == domain,
                    ("vm_reserv_alloc_contig: domain mismatch"));
                index = VM_RESERV_INDEX(object, pindex);
                /* Does the allocation fit within the reservation? */
                if (index + npages > VM_LEVEL_0_NPAGES)
                        return (NULL);
                domain = rv->domain;
                vmd = VM_DOMAIN(domain);
                vm_reserv_lock(rv);
                /* Handle reclaim race. */
                if (rv->object != object)
                        goto out;
                m = &rv->pages[index];
                pa = VM_PAGE_TO_PHYS(m);
                if (pa < low || pa + size > high ||
                    (pa & (alignment - 1)) != 0 ||
                    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                        goto out;
                /* Handle vm_page_rename(m, new_object, ...). */
                for (i = 0; i < npages; i++)
                        if (popmap_is_set(rv->popmap, index + i))
                                goto out;
                if (!vm_domain_allocate(vmd, req, npages))
                        goto out;
                for (i = 0; i < npages; i++)
                        vm_reserv_populate(rv, index + i);
                vm_reserv_unlock(rv);
                return (m);
out:
                vm_reserv_unlock(rv);
                return (NULL);
        }

        /*
         * Could at least one reservation fit between the first index to the
         * left that can be used ("leftcap") and the first index to the right
         * that cannot be used ("rightcap")?
         *
         * We must synchronize with the reserv object lock to protect the
         * pindex/object of the resulting reservations against rename while
         * we are inspecting.
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        minpages = VM_RESERV_INDEX(object, pindex) + npages;
        maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
        allocpages = maxpages;
        vm_reserv_object_lock(object);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + maxpages > rightcap) {
                        if (maxpages == VM_LEVEL_0_NPAGES) {
                                vm_reserv_object_unlock(object);
                                return (NULL);
                        }

                        /*
                         * At least one reservation will fit between "leftcap"
                         * and "rightcap".  However, a reservation for the
                         * last of the requested pages will not fit.  Reduce
                         * the size of the upcoming allocation accordingly.
                         */
                        allocpages = minpages;
                }
        }
        vm_reserv_object_unlock(object);

        /*
         * Would the last new reservation extend past the end of the object?
         */
        if (first + maxpages > object->size) {
                /*
                 * Don't allocate the last new reservation if the object is a
                 * vnode or backed by another object that is a vnode.
                 */
                if (object->type == OBJT_VNODE ||
                    (object->backing_object != NULL &&
                    object->backing_object->type == OBJT_VNODE)) {
                        if (maxpages == VM_LEVEL_0_NPAGES)
                                return (NULL);
                        allocpages = minpages;
                }
                /* Speculate that the object may grow. */
        }

        /*
         * Allocate the physical pages.  The alignment and boundary specified
         * for this allocation may be different from the alignment and
         * boundary specified for the requested pages.  For instance, the
         * specified index may not be the first page within the first new
         * reservation.
         */
        m = NULL;
        vmd = VM_DOMAIN(domain);
        if (vm_domain_allocate(vmd, req, npages)) {
                vm_domain_free_lock(vmd);
                m = vm_phys_alloc_contig(domain, allocpages, low, high,
                    ulmax(alignment, VM_LEVEL_0_SIZE),
                    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
                vm_domain_free_unlock(vmd);
                if (m == NULL) {
                        vm_domain_freecnt_inc(vmd, npages);
                        return (NULL);
                }
        } else
                return (NULL);
        KASSERT(vm_phys_domain(m) == domain,
            ("vm_reserv_alloc_contig: Page domain does not match requested."));

        /*
         * The allocated physical pages always begin at a reservation
         * boundary, but they do not always end at a reservation boundary.
         * Initialize every reservation that is completely covered by the
         * allocated physical pages.
         */
        m_ret = NULL;
        index = VM_RESERV_INDEX(object, pindex);
        do {
                rv = vm_reserv_from_page(m);
                KASSERT(rv->pages == m,
                    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
                    rv));
                vm_reserv_lock(rv);
                vm_reserv_insert(rv, object, first);
                n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
                for (i = 0; i < n; i++)
                        vm_reserv_populate(rv, index + i);
                npages -= n;
                if (m_ret == NULL) {
                        m_ret = &rv->pages[index];
                        index = 0;
                }
                vm_reserv_unlock(rv);
                m += VM_LEVEL_0_NPAGES;
                first += VM_LEVEL_0_NPAGES;
                allocpages -= VM_LEVEL_0_NPAGES;
        } while (allocpages >= VM_LEVEL_0_NPAGES);
        return (m_ret);
}

/*
 * Allocate a physical page from an existing or newly created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{
        struct vm_domain *vmd;
        vm_page_t m, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        int index;

        VM_OBJECT_ASSERT_WLOCKED(object);

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex >= object->size)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
        if (rv != NULL) {
                KASSERT(object != kernel_object || rv->domain == domain,
                    ("vm_reserv_alloc_page: domain mismatch"));
                domain = rv->domain;
                vmd = VM_DOMAIN(domain);
                index = VM_RESERV_INDEX(object, pindex);
                m = &rv->pages[index];
                vm_reserv_lock(rv);
                /* Handle reclaim race. */
                if (rv->object != object ||
                    /* Handle vm_page_rename(m, new_object, ...). */
                    popmap_is_set(rv->popmap, index)) {
                        m = NULL;
                        goto out;
                }
                if (vm_domain_allocate(vmd, req, 1) == 0)
                        m = NULL;
                else
                        vm_reserv_populate(rv, index);
out:
                vm_reserv_unlock(rv);
                return (m);
        }

        /*
         * Could a reservation fit between the first index to the left that
         * can be used and the first index to the right that cannot be used?
         *
         * We must synchronize with the reserv object lock to protect the
         * pindex/object of the resulting reservations against rename while
         * we are inspecting.
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        vm_reserv_object_lock(object);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + VM_LEVEL_0_NPAGES > rightcap) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        vm_reserv_object_unlock(object);

        /*
         * Would a new reservation extend past the end of the object?
         */
        if (first + VM_LEVEL_0_NPAGES > object->size) {
                /*
                 * Don't allocate a new reservation if the object is a vnode or
                 * backed by another object that is a vnode.
                 */
                if (object->type == OBJT_VNODE ||
                    (object->backing_object != NULL &&
                    object->backing_object->type == OBJT_VNODE))
                        return (NULL);
                /* Speculate that the object may grow. */
        }

        /*
         * Allocate and populate the new reservation.
         */
        m = NULL;
        vmd = VM_DOMAIN(domain);
        if (vm_domain_allocate(vmd, req, 1)) {
                vm_domain_free_lock(vmd);
                m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
                    VM_LEVEL_0_ORDER);
                vm_domain_free_unlock(vmd);
                if (m == NULL) {
                        vm_domain_freecnt_inc(vmd, 1);
                        return (NULL);
                }
        } else
                return (NULL);
        rv = vm_reserv_from_page(m);
        vm_reserv_lock(rv);
        KASSERT(rv->pages == m,
            ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
        vm_reserv_insert(rv, object, first);
        index = VM_RESERV_INDEX(object, pindex);
        vm_reserv_populate(rv, index);
        vm_reserv_unlock(rv);

        return (&rv->pages[index]);
}

/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.  The reservation must be locked.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
        u_long changes;
        int bitpos, hi, i, lo;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        vm_reserv_remove(rv);
        rv->pages->psind = 0;
        hi = lo = -1;
        for (i = 0; i <= NPOPMAP; i++) {
                /*
                 * "changes" is a bitmask that marks where a new sequence of
                 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1]
                 * considered to be 1 if and only if lo == hi.  The bits of
                 * popmap[-1] and popmap[NPOPMAP] are considered all 1s.
                 */
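                /*
                 * Worked example: if popmap[i] is ...0011 1100 and lo == hi,
                 * then changes = popmap[i] ^ ((popmap[i] << 1) | 1) =
                 * ...0100 0101.  Bit 0 starts a run of 0s (lo = 0), bit 2
                 * starts a run of 1s (hi = 2, so pages 0-1 are freed), and
                 * bit 6 starts the next run of 0s (lo = 6).
                 */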
                if (i == NPOPMAP)
                        changes = lo != hi;
                else {
                        changes = rv->popmap[i];
                        changes ^= (changes << 1) | (lo == hi);
                        rv->popmap[i] = 0;
                }
                while (changes != 0) {
                        /*
                         * If the next change marked begins a run of 0s, set
                         * lo to mark that position.  Otherwise set hi and
                         * free pages from lo up to hi.
                         */
                        bitpos = ffsl(changes) - 1;
                        changes ^= 1UL << bitpos;
                        if (lo == hi)
                                lo = NBPOPMAP * i + bitpos;
                        else {
                                hi = NBPOPMAP * i + bitpos;
                                vm_domain_free_lock(VM_DOMAIN(rv->domain));
                                vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
                                vm_domain_free_unlock(VM_DOMAIN(rv->domain));
                                lo = hi;
                        }
                }
        }
        rv->popcnt = 0;
        counter_u64_add(vm_reserv_broken, 1);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
        vm_reserv_t rv;

        /*
         * This access of object->rvq is unsynchronized so that the
         * object rvq lock can nest after the domain_free lock.  We
         * must check for races in the results.  However, the object
         * lock prevents new additions, so we are guaranteed that when
         * it returns NULL the object is properly empty.
         */
        while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
                vm_reserv_lock(rv);
                /* Reclaim race. */
                if (rv->object != object) {
                        vm_reserv_unlock(rv);
                        continue;
                }
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                vm_reserv_domain_unlock(rv->domain);
                vm_reserv_break(rv);
                vm_reserv_unlock(rv);
        }
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The page's reservation, if any, is locked and unlocked internally.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
        vm_reserv_t rv;
        boolean_t ret;

        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        vm_reserv_lock(rv);
        /* Re-validate after lock. */
        if (rv->object != NULL) {
                vm_reserv_depopulate(rv, m - rv->pages);
                ret = TRUE;
        } else
                ret = FALSE;
        vm_reserv_unlock(rv);

        return (ret);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        struct vm_reserv *rv;
        int i, segind;

        /*
         * Initialize the reservation array.  Specifically, initialize the
         * "pages" field for every element that has an underlying superpage.
         */
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
                    VM_LEVEL_0_SIZE <= seg->end) {
                        rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
                        rv->pages = PHYS_TO_VM_PAGE(paddr);
                        rv->domain = seg->domain;
                        mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
        for (i = 0; i < MAXMEMDOM; i++) {
                mtx_init(&vm_reserv_domain_locks[i], "VM reserv domain", NULL,
                    MTX_DEF);
                TAILQ_INIT(&vm_rvq_partpop[i]);
        }

        for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
                mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
                    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (false);
        return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 *
 * The reservation must be locked.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        vm_reserv_domain_lock(rv->domain);
        KASSERT(rv->inpartpopq,
            ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_reclaim: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
        rv->inpartpopq = FALSE;
        vm_reserv_domain_unlock(rv->domain);
        vm_reserv_break(rv);
        counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks the reservation at the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 *
 * The necessary locks are acquired internally.
 */
boolean_t
vm_reserv_reclaim_inactive(int domain)
{
        vm_reserv_t rv;

        while ((rv = TAILQ_FIRST(&vm_rvq_partpop[domain])) != NULL) {
                vm_reserv_lock(rv);
                if (rv != TAILQ_FIRST(&vm_rvq_partpop[domain])) {
                        vm_reserv_unlock(rv);
                        continue;
                }
                vm_reserv_reclaim(rv);
                vm_reserv_unlock(rv);
                return (TRUE);
        }
        return (FALSE);
}

/*
 * Determine whether this reservation has free pages that satisfy the given
 * request for contiguous physical memory.  The search is confined to pages
 * whose physical addresses fall in the range [low, high).
 *
 * The reservation must be locked.
 */
static bool
vm_reserv_test_contig(vm_reserv_t rv, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa, size;
        u_long changes;
        int bitpos, bits_left, i, hi, lo, n;

        vm_reserv_assert_locked(rv);
        size = npages << PAGE_SHIFT;
        pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
        lo = (pa < low) ?
            ((low + PAGE_MASK - pa) >> PAGE_SHIFT) : 0;
        i = lo / NBPOPMAP;
        changes = rv->popmap[i] | ((1UL << (lo % NBPOPMAP)) - 1);
        hi = (pa + VM_LEVEL_0_SIZE > high) ?
            ((high + PAGE_MASK - pa) >> PAGE_SHIFT) : VM_LEVEL_0_NPAGES;
        n = hi / NBPOPMAP;
        bits_left = hi % NBPOPMAP;
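        /*
         * The computations above clamp the scan to pages whose physical
         * addresses can fall in [low, high): "lo" is the first eligible
         * page index and "hi" is the first ineligible one.  For example, if
         * the reservation starts 1MB below "low" (with 4KB pages), lo is
         * 256, so scanning begins at popmap[256 / NBPOPMAP] with any
         * lower-order bits of that entry preset to "allocated".
         */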
        hi = lo = -1;
        for (;;) {
                /*
                 * "changes" is a bitmask that marks where a new sequence of
                 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1]
                 * considered to be 1 if and only if lo == hi.  The bits of
                 * popmap[-1] and popmap[NPOPMAP] are considered all 1s.
                 */
                changes ^= (changes << 1) | (lo == hi);
                while (changes != 0) {
                        /*
                         * If the next change marked begins a run of 0s, set
                         * lo to mark that position.  Otherwise set hi and
                         * look for a satisfactory first page from lo up to hi.
                         */
                        bitpos = ffsl(changes) - 1;
                        changes ^= 1UL << bitpos;
                        if (lo == hi) {
                                lo = NBPOPMAP * i + bitpos;
                                continue;
                        }
                        hi = NBPOPMAP * i + bitpos;
                        pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
                        if ((pa & (alignment - 1)) != 0) {
                                /* Skip to next aligned page. */
                                lo += (((pa - 1) | (alignment - 1)) + 1 -
                                    pa) >> PAGE_SHIFT;
                                if (lo >= VM_LEVEL_0_NPAGES)
                                        return (false);
                                pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
                        }
                        if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
                                /* Skip to next boundary-matching page. */
                                lo += (((pa - 1) | (boundary - 1)) + 1 -
                                    pa) >> PAGE_SHIFT;
                                if (lo >= VM_LEVEL_0_NPAGES)
                                        return (false);
                                pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
                        }
                        if (lo * PAGE_SIZE + size <= hi * PAGE_SIZE)
                                return (true);
                        lo = hi;
                }
                if (++i < n)
                        changes = rv->popmap[i];
                else if (i == n)
                        changes = bits_left == 0 ? -1UL :
                            (rv->popmap[n] | (-1UL << bits_left));
                else
                        return (false);
        }
}

/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns true if a reservation is broken and false otherwise.
 *
 * The necessary locks are acquired internally.
 */
boolean_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa, size;
        vm_reserv_t rv, rvn;

        if (npages > VM_LEVEL_0_NPAGES - 1)
                return (false);
        size = npages << PAGE_SHIFT;
        vm_reserv_domain_lock(domain);
again:
        for (rv = TAILQ_FIRST(&vm_rvq_partpop[domain]); rv != NULL; rv = rvn) {
                rvn = TAILQ_NEXT(rv, partpopq);
                pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
                if (pa + VM_LEVEL_0_SIZE - size < low) {
                        /* This entire reservation is too low; go to next. */
                        continue;
                }
                if (pa + size > high) {
                        /* This entire reservation is too high; go to next. */
                        continue;
                }
                if (vm_reserv_trylock(rv) == 0) {
                        vm_reserv_domain_unlock(domain);
                        vm_reserv_lock(rv);
                        if (!rv->inpartpopq) {
                                /*
                                 * The reservation was dequeued while we
                                 * slept; drop its lock before rescanning.
                                 */
                                vm_reserv_unlock(rv);
                                vm_reserv_domain_lock(domain);
                                if (rvn == NULL || !rvn->inpartpopq)
                                        goto again;
                                continue;
                        }
                } else
                        vm_reserv_domain_unlock(domain);
                if (vm_reserv_test_contig(rv, npages, low, high,
                    alignment, boundary)) {
                        vm_reserv_reclaim(rv);
                        vm_reserv_unlock(rv);
                        return (true);
                }
                vm_reserv_unlock(rv);
                vm_reserv_domain_lock(domain);
                if (rvn != NULL && !rvn->inpartpopq)
                        goto again;
        }
        vm_reserv_domain_unlock(domain);
        return (false);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The new object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
        vm_reserv_t rv;

        VM_OBJECT_ASSERT_WLOCKED(new_object);
        rv = vm_reserv_from_page(m);
        if (rv->object == old_object) {
                vm_reserv_lock(rv);
                CTR6(KTR_VM,
                    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
                    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
                    rv->inpartpopq);
                if (rv->object == old_object) {
                        vm_reserv_object_lock(old_object);
                        rv->object = NULL;
                        LIST_REMOVE(rv, objq);
                        vm_reserv_object_unlock(old_object);
                        vm_reserv_object_lock(new_object);
                        rv->object = new_object;
                        rv->pindex -= old_object_offset;
                        LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
                        vm_reserv_object_unlock(new_object);
                }
                vm_reserv_unlock(rv);
        }
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

        switch (level) {
        case 0:
                return (VM_LEVEL_0_SIZE);
        case -1:
                return (PAGE_SIZE);
        default:
                return (0);
        }
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
        vm_paddr_t new_end;
        size_t size;

        /*
         * Calculate the size (in bytes) of the reservation array.  Round up
         * from "high_water" because every small page is mapped to an element
         * in the reservation array based on its physical address.  Thus, the
         * number of elements in the reservation array can be greater than the
         * number of superpages.
         */
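        /*
         * Illustrative sizing: with a 16GB high_water and 2MB reservations,
         * howmany() yields 8192 elements; at roughly 100-200 bytes per
         * struct vm_reserv on LP64 (the popmap alone is 64 bytes when
         * NPOPMAP_MAX is 8), the array occupies on the order of 1-2MB.
         */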
        size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);

        /*
         * Allocate and map the physical memory for the reservation array.  The
         * next available virtual address is returned by reference.
         */
        new_end = end - round_page(size);
        vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
            VM_PROT_READ | VM_PROT_WRITE);
        bzero(vm_reserv_array, size);

        /*
         * Return the next available physical address.
         */
        return (new_end);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation counters.
 */
static void
vm_reserv_counter_init(void *unused)
{

        vm_reserv_freed = counter_u64_alloc(M_WAITOK);
        vm_reserv_broken = counter_u64_alloc(M_WAITOK);
        vm_reserv_reclaimed = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vm_reserv_counter_init, SI_SUB_CPU, SI_ORDER_ANY,
    vm_reserv_counter_init, NULL);

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
        vm_reserv_t rv;

        VM_OBJECT_ASSERT_LOCKED(m->object);
        rv = vm_reserv_from_page(m);
        if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
                m = rv->pages;
        else
                m = NULL;

        return (m);
}

#endif  /* VM_NRESERVLEVEL > 0 */