/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

#ifndef VM_LEVEL_0_ORDER_MAX
#define VM_LEVEL_0_ORDER_MAX    VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define VM_LEVEL_0_NPAGES       (1 << VM_LEVEL_0_ORDER)
#define VM_LEVEL_0_NPAGES_MAX   (1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define VM_LEVEL_0_SHIFT        (VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define VM_LEVEL_0_SIZE         (1 << VM_LEVEL_0_SHIFT)

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define VM_RESERV_INDEX(object, pindex) \
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))

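/*
 * As a worked example (a sketch assuming amd64, where VM_LEVEL_0_ORDER is 9
 * and PAGE_SHIFT is 12): VM_LEVEL_0_NPAGES is 1 << 9 == 512, VM_LEVEL_0_SHIFT
 * is 9 + 12 == 21, and VM_LEVEL_0_SIZE is 1 << 21, i.e., a 2MB superpage.
 * For an object with pg_color 3, VM_RESERV_INDEX(object, 10) is
 * (3 + 10) & 511 == 13.
 */
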
/*
 * The size of a population map entry
 */
typedef u_long          popmap_t;

/*
 * The number of bits in a population map entry
 */
#define NBPOPMAP        (NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define NPOPMAP         howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
#define NPOPMAP_MAX     howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP)

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define PARTPOPSLOP     1

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}

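/*
 * For example (assuming an LP64 platform, where NBPOPMAP is 64, so a 512-page
 * reservation needs NPOPMAP == 8 words): bit 70 of the population map lives
 * in popmap[70 / 64] == popmap[1] at position 70 % 64 == 6, so
 * popmap_set(popmap, 70) ORs 1UL << 6 into popmap[1].
 */
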
/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * r - vm_reserv_lock
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * c - constant after boot
 */
struct vm_reserv {
        struct mtx      lock;                   /* reservation lock. */
        TAILQ_ENTRY(vm_reserv) partpopq;        /* (d) per-domain queue. */
        LIST_ENTRY(vm_reserv) objq;             /* (o, r) object queue */
        vm_object_t     object;                 /* (o, r) containing object */
        vm_pindex_t     pindex;                 /* (o, r) offset in object */
        vm_page_t       pages;                  /* (c) first page */
        uint16_t        domain;                 /* (c) NUMA domain. */
        uint16_t        popcnt;                 /* (r) # of pages in use */
        int             lasttick;               /* (r) last pop update tick. */
        char            inpartpopq;             /* (d) */
        popmap_t        popmap[NPOPMAP_MAX];    /* (r) bit vector, used pages */
};

#define vm_reserv_lockptr(rv)           (&(rv)->lock)
#define vm_reserv_assert_locked(rv)                                     \
            mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define vm_reserv_lock(rv)              mtx_lock(vm_reserv_lockptr(rv))
#define vm_reserv_trylock(rv)           mtx_trylock(vm_reserv_lockptr(rv))
#define vm_reserv_unlock(rv)            mtx_unlock(vm_reserv_lockptr(rv))

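/*
 * A reservation is typically looked up without synchronization and then
 * revalidated under its lock, e.g. (the pattern used throughout this file):
 *
 *	rv = vm_reserv_from_page(m);
 *	vm_reserv_lock(rv);
 *	if (rv->object != object)
 *		...		/+ lost a race with reclaim; bail out +/
 */
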
static struct mtx_padalign vm_reserv_domain_locks[MAXMEMDOM];

#define vm_reserv_domain_lockptr(d)     &vm_reserv_domain_locks[(d)]
#define vm_reserv_domain_lock(d)        mtx_lock(vm_reserv_domain_lockptr(d))
#define vm_reserv_domain_unlock(d)      mtx_unlock(vm_reserv_domain_lockptr(d))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of useful reservation
 * structures than there are (physical) superpages.  These "invalid"
 * reservation structures exist to trade off space for time in the
 * implementation of vm_reserv_from_page().  Invalid reservation structures are
 * distinguishable from "valid" reservation structures by inspecting the
 * reservation's "pages" field.  Invalid reservation structures have a NULL
 * "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

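/*
 * For instance (again assuming a 2MB, 21-bit-shift reservation): a page at
 * physical address 0x40321000 maps to vm_reserv_array[0x40321000 >> 21] ==
 * vm_reserv_array[513], the same element as every other small page in the
 * 2MB frame that starts at 0x40200000.
 */
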
/*
 * The partially populated reservation queue
 *
 * This queue enables the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of this queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop[MAXMEMDOM];

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static counter_u64_t vm_reserv_broken = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static counter_u64_t vm_reserv_freed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");

static counter_u64_t vm_reserv_reclaimed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We can not use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define VM_RESERV_OBJ_LOCK_COUNT        MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define vm_reserv_object_lock_idx(object)                       \
            (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define vm_reserv_object_lock_ptr(object)                       \
            &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define vm_reserv_object_lock(object)                           \
            mtx_lock(vm_reserv_object_lock_ptr((object)))
#define vm_reserv_object_unlock(object)                         \
            mtx_unlock(vm_reserv_object_lock_ptr((object)))

static void             vm_reserv_break(vm_reserv_t rv);
static void             vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t      vm_reserv_from_page(vm_page_t m);
static boolean_t        vm_reserv_has_pindex(vm_reserv_t rv,
                            vm_pindex_t pindex);
static void             vm_reserv_populate(vm_reserv_t rv, int index);
static void             vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring the
 * free page queue lock, the returned value may be inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        vm_reserv_t rv;
        int fullpop, segind;

        fullpop = 0;
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
                        rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
                        fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
        return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        vm_reserv_t rv;
        int counter, error, domain, level, unused_pages;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
        for (domain = 0; domain < vm_ndomains; domain++) {
                for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
                        counter = 0;
                        unused_pages = 0;
                        vm_reserv_domain_lock(domain);
                        TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
                                counter++;
                                unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
                        }
                        vm_reserv_domain_unlock(domain);
                        sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
                            domain, level,
                            unused_pages * ((int)PAGE_SIZE / 1024), counter);
                }
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}

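/*
 * An illustrative line of the resulting sysctl output (the values shown are
 * made up; one row is printed per (domain, level) pair):
 *
 *	DOMAIN    LEVEL     SIZE  NUMBER
 *
 *	     0,      -1,    512K,      1
 */
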
/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
        vm_object_t object;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_remove: reserv %p is free", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
        object = rv->object;
        vm_reserv_object_lock(object);
        LIST_REMOVE(rv, objq);
        rv->object = NULL;
        vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
        int i;

        vm_reserv_assert_locked(rv);
        CTR6(KTR_VM,
            "%s: rv %p(%p) object %p new %p popcnt %d",
            __FUNCTION__, rv, rv->pages, rv->object, object,
            rv->popcnt);
        KASSERT(rv->object == NULL,
            ("vm_reserv_insert: reserv %p isn't free", rv));
        KASSERT(rv->popcnt == 0,
            ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
        for (i = 0; i < NPOPMAP; i++)
                KASSERT(rv->popmap[i] == 0,
                    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
        vm_reserv_object_lock(object);
        rv->pindex = pindex;
        rv->object = object;
        rv->lasttick = ticks;
        LIST_INSERT_HEAD(&object->rvq, rv, objq);
        vm_reserv_object_unlock(object);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
        struct vm_domain *vmd;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_depopulate: reserv %p is free", rv));
        KASSERT(popmap_is_set(rv->popmap, index),
            ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
            index));
        KASSERT(rv->popcnt > 0,
            ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        if (rv->popcnt == VM_LEVEL_0_NPAGES) {
                KASSERT(rv->pages->psind == 1,
                    ("vm_reserv_depopulate: reserv %p is already demoted",
                    rv));
                rv->pages->psind = 0;
        }
        popmap_clear(rv->popmap, index);
        rv->popcnt--;
        if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
            rv->popcnt == 0) {
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                if (rv->popcnt != 0) {
                        rv->inpartpopq = TRUE;
                        TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv,
                            partpopq);
                }
                vm_reserv_domain_unlock(rv->domain);
                rv->lasttick = ticks;
        }
        vmd = VM_DOMAIN(rv->domain);
        if (rv->popcnt == 0) {
                vm_reserv_remove(rv);
                vm_domain_free_lock(vmd);
                vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
                vm_domain_free_unlock(vmd);
                counter_u64_add(vm_reserv_freed, 1);
        }
        vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

        return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}

/*
 * Returns an existing reservation or NULL, and initializes the successor
 * pointer.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
        vm_reserv_t rv;
        vm_page_t msucc;

        msucc = NULL;
        if (mpred != NULL) {
                KASSERT(mpred->object == object,
                    ("vm_reserv_from_object: object doesn't contain mpred"));
                KASSERT(mpred->pindex < pindex,
                    ("vm_reserv_from_object: mpred doesn't precede pindex"));
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
                msucc = TAILQ_NEXT(mpred, listq);
        } else
                msucc = TAILQ_FIRST(&object->memq);
        if (msucc != NULL) {
                KASSERT(msucc->pindex > pindex,
                    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
                rv = vm_reserv_from_page(msucc);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
        }
        rv = NULL;

found:
        *msuccp = msucc;

        return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

        return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}

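/*
 * E.g., with 512-page reservations: for a reservation whose pindex is 1024,
 * pindex 1300 gives (1300 - 1024) & ~511 == 0 (contained), while pindex 1600
 * gives (1600 - 1024) & ~511 == 512 (not contained).
 */
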
/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_populate: reserv %p is free", rv));
        KASSERT(popmap_is_clear(rv->popmap, index),
            ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
            index));
        KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
            ("vm_reserv_populate: reserv %p is already full", rv));
        KASSERT(rv->pages->psind == 0,
            ("vm_reserv_populate: reserv %p is already promoted", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_populate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        popmap_set(rv->popmap, index);
        rv->popcnt++;
        if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
            rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
                return;
        rv->lasttick = ticks;
        vm_reserv_domain_lock(rv->domain);
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
                rv->inpartpopq = FALSE;
        }
        if (rv->popcnt < VM_LEVEL_0_NPAGES) {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvq_partpop[rv->domain], rv, partpopq);
        } else {
                KASSERT(rv->pages->psind == 0,
                    ("vm_reserv_populate: reserv %p is already promoted",
                    rv));
                rv->pages->psind = 1;
        }
        vm_reserv_domain_unlock(rv->domain);
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        struct vm_domain *vmd;
        vm_paddr_t pa, size;
        vm_page_t m, m_ret, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        u_long allocpages, maxpages, minpages;
        int i, index, n;

        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex + npages > object->size)
                return (NULL);

        /*
         * All reservations of a particular size have the same alignment.
         * Assuming that the first page is allocated from a reservation, the
         * least significant bits of its physical address can be determined
         * from its offset from the beginning of the reservation and the size
         * of the reservation.
         *
         * Could the specified index within a reservation of the smallest
         * possible size satisfy the alignment and boundary requirements?
         */
        pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
        if ((pa & (alignment - 1)) != 0)
                return (NULL);
        size = npages << PAGE_SHIFT;
        if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                return (NULL);

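        /*
         * A concrete instance of the boundary test above: with pa 0x1f000,
         * npages 4 (size 0x4000), and boundary 0x10000, (pa ^ (pa + size - 1))
         * is 0x1f000 ^ 0x22fff == 0x3dfff, which has bits set above the
         * boundary mask 0xffff, so the run would cross a 64KB boundary and
         * the check fails.
         */
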
        /*
         * Look for an existing reservation.
         */
        rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
        if (rv != NULL) {
                KASSERT(object != kernel_object || rv->domain == domain,
                    ("vm_reserv_alloc_contig: domain mismatch"));
                index = VM_RESERV_INDEX(object, pindex);
                /* Does the allocation fit within the reservation? */
                if (index + npages > VM_LEVEL_0_NPAGES)
                        return (NULL);
                domain = rv->domain;
                vmd = VM_DOMAIN(domain);
                vm_reserv_lock(rv);
                /* Handle reclaim race. */
                if (rv->object != object)
                        goto out;
                m = &rv->pages[index];
                pa = VM_PAGE_TO_PHYS(m);
                if (pa < low || pa + size > high ||
                    (pa & (alignment - 1)) != 0 ||
                    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                        goto out;
                /* Handle vm_page_rename(m, new_object, ...). */
                for (i = 0; i < npages; i++)
                        if (popmap_is_set(rv->popmap, index + i))
                                goto out;
                if (!vm_domain_allocate(vmd, req, npages))
                        goto out;
                for (i = 0; i < npages; i++)
                        vm_reserv_populate(rv, index + i);
                vm_reserv_unlock(rv);
                return (m);
out:
                vm_reserv_unlock(rv);
                return (NULL);
        }

        /*
         * Could at least one reservation fit between the first index to the
         * left that can be used ("leftcap") and the first index to the right
         * that cannot be used ("rightcap")?
         *
         * We must synchronize with the reserv object lock to protect the
         * pindex/object of the resulting reservations against rename while
         * we are inspecting.
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        minpages = VM_RESERV_INDEX(object, pindex) + npages;
        maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
        allocpages = maxpages;
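        /*
         * Worked example (512-page reservations): for a request whose
         * VM_RESERV_INDEX is 500 and whose npages is 30, minpages is 530 and
         * maxpages is roundup2(530, 512) == 1024, i.e., the run would span
         * two reservations.
         */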
        vm_reserv_object_lock(object);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + maxpages > rightcap) {
                        if (maxpages == VM_LEVEL_0_NPAGES) {
                                vm_reserv_object_unlock(object);
                                return (NULL);
                        }

                        /*
                         * At least one reservation will fit between "leftcap"
                         * and "rightcap".  However, a reservation for the
                         * last of the requested pages will not fit.  Reduce
                         * the size of the upcoming allocation accordingly.
                         */
                        allocpages = minpages;
                }
        }
        vm_reserv_object_unlock(object);

        /*
         * Would the last new reservation extend past the end of the object?
         */
        if (first + maxpages > object->size) {
                /*
                 * Don't allocate the last new reservation if the object is a
                 * vnode or backed by another object that is a vnode.
                 */
                if (object->type == OBJT_VNODE ||
                    (object->backing_object != NULL &&
                    object->backing_object->type == OBJT_VNODE)) {
                        if (maxpages == VM_LEVEL_0_NPAGES)
                                return (NULL);
                        allocpages = minpages;
                }
                /* Speculate that the object may grow. */
        }

        /*
         * Allocate the physical pages.  The alignment and boundary specified
         * for this allocation may be different from the alignment and
         * boundary specified for the requested pages.  For instance, the
         * specified index may not be the first page within the first new
         * reservation.
         */
        m = NULL;
        vmd = VM_DOMAIN(domain);
        if (vm_domain_allocate(vmd, req, npages)) {
                vm_domain_free_lock(vmd);
                m = vm_phys_alloc_contig(domain, allocpages, low, high,
                    ulmax(alignment, VM_LEVEL_0_SIZE),
                    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
                vm_domain_free_unlock(vmd);
                if (m == NULL) {
                        vm_domain_freecnt_inc(vmd, npages);
                        return (NULL);
                }
        } else
                return (NULL);
        KASSERT(vm_phys_domain(m) == domain,
            ("vm_reserv_alloc_contig: Page domain does not match requested."));

        /*
         * The allocated physical pages always begin at a reservation
         * boundary, but they do not always end at a reservation boundary.
         * Initialize every reservation that is completely covered by the
         * allocated physical pages.
         */
        m_ret = NULL;
        index = VM_RESERV_INDEX(object, pindex);
        do {
                rv = vm_reserv_from_page(m);
                KASSERT(rv->pages == m,
                    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
                    rv));
                vm_reserv_lock(rv);
                vm_reserv_insert(rv, object, first);
                n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
                for (i = 0; i < n; i++)
                        vm_reserv_populate(rv, index + i);
                npages -= n;
                if (m_ret == NULL) {
                        m_ret = &rv->pages[index];
                        index = 0;
                }
                vm_reserv_unlock(rv);
                m += VM_LEVEL_0_NPAGES;
                first += VM_LEVEL_0_NPAGES;
                allocpages -= VM_LEVEL_0_NPAGES;
        } while (allocpages >= VM_LEVEL_0_NPAGES);
        return (m_ret);
}

/*
 * Allocate a physical page from an existing or newly created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{
        struct vm_domain *vmd;
        vm_page_t m, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        int index;

        VM_OBJECT_ASSERT_WLOCKED(object);

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex >= object->size)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
        if (rv != NULL) {
                KASSERT(object != kernel_object || rv->domain == domain,
                    ("vm_reserv_alloc_page: domain mismatch"));
                domain = rv->domain;
                vmd = VM_DOMAIN(domain);
                index = VM_RESERV_INDEX(object, pindex);
                m = &rv->pages[index];
                vm_reserv_lock(rv);
                /* Handle reclaim race. */
                if (rv->object != object ||
                    /* Handle vm_page_rename(m, new_object, ...). */
                    popmap_is_set(rv->popmap, index)) {
                        m = NULL;
                        goto out;
                }
                if (vm_domain_allocate(vmd, req, 1) == 0)
                        m = NULL;
                else
                        vm_reserv_populate(rv, index);
out:
                vm_reserv_unlock(rv);
                return (m);
        }

        /*
         * Could a reservation fit between the first index to the left that
         * can be used and the first index to the right that cannot be used?
         *
         * We must synchronize with the reserv object lock to protect the
         * pindex/object of the resulting reservations against rename while
         * we are inspecting.
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        vm_reserv_object_lock(object);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + VM_LEVEL_0_NPAGES > rightcap) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        vm_reserv_object_unlock(object);

        /*
         * Would a new reservation extend past the end of the object?
         */
        if (first + VM_LEVEL_0_NPAGES > object->size) {
                /*
                 * Don't allocate a new reservation if the object is a vnode or
                 * backed by another object that is a vnode.
                 */
                if (object->type == OBJT_VNODE ||
                    (object->backing_object != NULL &&
                    object->backing_object->type == OBJT_VNODE))
                        return (NULL);
                /* Speculate that the object may grow. */
        }

        /*
         * Allocate and populate the new reservation.
         */
        m = NULL;
        vmd = VM_DOMAIN(domain);
        if (vm_domain_allocate(vmd, req, 1)) {
                vm_domain_free_lock(vmd);
                m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
                    VM_LEVEL_0_ORDER);
                vm_domain_free_unlock(vmd);
                if (m == NULL) {
                        vm_domain_freecnt_inc(vmd, 1);
                        return (NULL);
                }
        } else
                return (NULL);
        rv = vm_reserv_from_page(m);
        vm_reserv_lock(rv);
        KASSERT(rv->pages == m,
            ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
        vm_reserv_insert(rv, object, first);
        index = VM_RESERV_INDEX(object, pindex);
        vm_reserv_populate(rv, index);
        vm_reserv_unlock(rv);

        return (&rv->pages[index]);
}

/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.  The free page queue lock must be held.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
        u_long changes;
        int bitpos, hi, i, lo;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        vm_reserv_remove(rv);
        rv->pages->psind = 0;
        hi = lo = -1;
        for (i = 0; i <= NPOPMAP; i++) {
                /*
                 * "changes" is a bitmask that marks where a new sequence of
                 * 0s or 1s begins in popmap[i], with the last bit in
                 * popmap[i-1] considered to be 1 if and only if lo == hi.
                 * The bits of popmap[-1] and popmap[NPOPMAP] are considered
                 * all 1s.
                 */
                if (i == NPOPMAP)
                        changes = lo != hi;
                else {
                        changes = rv->popmap[i];
                        changes ^= (changes << 1) | (lo == hi);
                        rv->popmap[i] = 0;
                }
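                /*
                 * E.g., for a single word x = ...0011100 with carry-in 1
                 * (lo == hi), x ^ ((x << 1) | 1) = ...0100101: transitions
                 * are marked at bit 0 (virtual 1 -> 0), bit 2 (0 -> 1), and
                 * bit 5 (1 -> 0), i.e., at every run boundary.
                 */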
                while (changes != 0) {
                        /*
                         * If the next change marked begins a run of 0s, set
                         * lo to mark that position.  Otherwise set hi and
                         * free pages from lo up to hi.
                         */
                        bitpos = ffsl(changes) - 1;
                        changes ^= 1UL << bitpos;
                        if (lo == hi)
                                lo = NBPOPMAP * i + bitpos;
                        else {
                                hi = NBPOPMAP * i + bitpos;
                                vm_domain_free_lock(VM_DOMAIN(rv->domain));
                                vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
                                vm_domain_free_unlock(VM_DOMAIN(rv->domain));
                                lo = hi;
                        }
                }
        }
        rv->popcnt = 0;
        counter_u64_add(vm_reserv_broken, 1);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
        vm_reserv_t rv;

        /*
         * This access of object->rvq is unsynchronized so that the
         * object rvq lock can nest after the domain_free lock.  We
         * must check for races in the results.  However, the object
         * lock prevents new additions, so we are guaranteed that when
         * it returns NULL the object is properly empty.
         */
        while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
                vm_reserv_lock(rv);
                /* Reclaim race. */
                if (rv->object != object) {
                        vm_reserv_unlock(rv);
                        continue;
                }
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                vm_reserv_domain_unlock(rv->domain);
                vm_reserv_break(rv);
                vm_reserv_unlock(rv);
        }
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
        vm_reserv_t rv;
        boolean_t ret;

        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        vm_reserv_lock(rv);
        /* Re-validate after lock. */
        if (rv->object != NULL) {
                vm_reserv_depopulate(rv, m - rv->pages);
                ret = TRUE;
        } else
                ret = FALSE;
        vm_reserv_unlock(rv);

        return (ret);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        struct vm_reserv *rv;
        int i, segind;

        /*
         * Initialize the reservation array.  Specifically, initialize the
         * "pages" field for every element that has an underlying superpage.
         */
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
                        rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
                        rv->pages = PHYS_TO_VM_PAGE(paddr);
                        rv->domain = seg->domain;
                        mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
        for (i = 0; i < MAXMEMDOM; i++) {
                mtx_init(&vm_reserv_domain_locks[i], "VM reserv domain", NULL,
                    MTX_DEF);
                TAILQ_INIT(&vm_rvq_partpop[i]);
        }

        for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
                mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
                    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (false);
        return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        vm_reserv_domain_lock(rv->domain);
        KASSERT(rv->inpartpopq,
            ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_reclaim: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        TAILQ_REMOVE(&vm_rvq_partpop[rv->domain], rv, partpopq);
        rv->inpartpopq = FALSE;
        vm_reserv_domain_unlock(rv->domain);
        vm_reserv_break(rv);
        counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks the reservation at the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(int domain)
{
        vm_reserv_t rv;

        while ((rv = TAILQ_FIRST(&vm_rvq_partpop[domain])) != NULL) {
                vm_reserv_lock(rv);
                if (rv != TAILQ_FIRST(&vm_rvq_partpop[domain])) {
                        vm_reserv_unlock(rv);
                        continue;
                }
                vm_reserv_reclaim(rv);
                vm_reserv_unlock(rv);
                return (TRUE);
        }
        return (FALSE);
}

/*
 * Determine whether this reservation has free pages that satisfy the given
 * request for contiguous physical memory.  Start searching from the lower
 * bound, defined by low_index.
 *
 * The free page queue lock must be held.
 */
static bool
vm_reserv_test_contig(vm_reserv_t rv, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa, size;
        u_long changes;
        int bitpos, bits_left, i, hi, lo, n;

        vm_reserv_assert_locked(rv);
        size = npages << PAGE_SHIFT;
        pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
        lo = (pa < low) ?
            ((low + PAGE_MASK - pa) >> PAGE_SHIFT) : 0;
        i = lo / NBPOPMAP;
        changes = rv->popmap[i] | ((1UL << (lo % NBPOPMAP)) - 1);
        hi = (pa + VM_LEVEL_0_SIZE > high) ?
            ((high + PAGE_MASK - pa) >> PAGE_SHIFT) : VM_LEVEL_0_NPAGES;
        n = hi / NBPOPMAP;
        bits_left = hi % NBPOPMAP;
        hi = lo = -1;
        for (;;) {
                /*
                 * "changes" is a bitmask that marks where a new sequence of
                 * 0s or 1s begins in popmap[i], with the last bit in
                 * popmap[i-1] considered to be 1 if and only if lo == hi.
                 * The bits of popmap[-1] and popmap[NPOPMAP] are considered
                 * all 1s.
                 */
                changes ^= (changes << 1) | (lo == hi);
                while (changes != 0) {
                        /*
                         * If the next change marked begins a run of 0s, set
                         * lo to mark that position.  Otherwise set hi and
                         * look for a satisfactory first page from lo up to
                         * hi.
                         */
                        bitpos = ffsl(changes) - 1;
                        changes ^= 1UL << bitpos;
                        if (lo == hi) {
                                lo = NBPOPMAP * i + bitpos;
                                continue;
                        }
                        hi = NBPOPMAP * i + bitpos;
                        pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
                        if ((pa & (alignment - 1)) != 0) {
                                /* Skip to next aligned page. */
                                lo += (((pa - 1) | (alignment - 1)) + 1) >>
                                    PAGE_SHIFT;
                                if (lo >= VM_LEVEL_0_NPAGES)
                                        return (false);
                                pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
                        }
                        if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
                                /* Skip to next boundary-matching page. */
                                lo += (((pa - 1) | (boundary - 1)) + 1) >>
                                    PAGE_SHIFT;
                                if (lo >= VM_LEVEL_0_NPAGES)
                                        return (false);
                                pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
                        }
                        if (lo * PAGE_SIZE + size <= hi * PAGE_SIZE)
                                return (true);
                        lo = hi;
                }
                if (++i < n)
                        changes = rv->popmap[i];
                else if (i == n)
                        changes = bits_left == 0 ? -1UL :
                            (rv->popmap[n] | (-1UL << bits_left));
                else
                        return (false);
        }
}

/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns true if a reservation is broken and false otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa, size;
        vm_reserv_t rv, rvn;

        if (npages > VM_LEVEL_0_NPAGES - 1)
                return (false);
        size = npages << PAGE_SHIFT;
        vm_reserv_domain_lock(domain);
again:
        for (rv = TAILQ_FIRST(&vm_rvq_partpop[domain]); rv != NULL; rv = rvn) {
                rvn = TAILQ_NEXT(rv, partpopq);
                pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
                if (pa + VM_LEVEL_0_SIZE - size < low) {
                        /* This entire reservation is too low; go to next. */
                        continue;
                }
                if (pa + size > high) {
                        /* This entire reservation is too high; go to next. */
                        continue;
                }
                if (vm_reserv_trylock(rv) == 0) {
                        vm_reserv_domain_unlock(domain);
                        vm_reserv_lock(rv);
                        if (!rv->inpartpopq) {
                                /*
                                 * The reservation left the queue while we
                                 * waited for its lock; drop it and restart
                                 * from the saved successor, or from the head
                                 * if the successor is stale or absent.
                                 */
                                vm_reserv_unlock(rv);
                                vm_reserv_domain_lock(domain);
                                if (rvn == NULL || !rvn->inpartpopq)
                                        goto again;
                                continue;
                        }
                } else
                        vm_reserv_domain_unlock(domain);
                if (vm_reserv_test_contig(rv, npages, low, high,
                    alignment, boundary)) {
                        vm_reserv_reclaim(rv);
                        vm_reserv_unlock(rv);
                        return (true);
                }
                vm_reserv_unlock(rv);
                vm_reserv_domain_lock(domain);
                if (rvn != NULL && !rvn->inpartpopq)
                        goto again;
        }
        vm_reserv_domain_unlock(domain);
        return (false);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
        vm_reserv_t rv;

        VM_OBJECT_ASSERT_WLOCKED(new_object);
        rv = vm_reserv_from_page(m);
        if (rv->object == old_object) {
                vm_reserv_lock(rv);
                CTR6(KTR_VM,
                    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
                    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
                    rv->inpartpopq);
                if (rv->object == old_object) {
                        vm_reserv_object_lock(old_object);
                        rv->object = NULL;
                        LIST_REMOVE(rv, objq);
                        vm_reserv_object_unlock(old_object);
                        vm_reserv_object_lock(new_object);
                        rv->object = new_object;
                        rv->pindex -= old_object_offset;
                        LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
                        vm_reserv_object_unlock(new_object);
                }
                vm_reserv_unlock(rv);
        }
}

1340  * Returns the size (in bytes) of a reservation of the specified level.
1341  */
1342 int
1343 vm_reserv_size(int level)
1344 {
1345
1346         switch (level) {
1347         case 0:
1348                 return (VM_LEVEL_0_SIZE);
1349         case -1:
1350                 return (PAGE_SIZE);
1351         default:
1352                 return (0);
1353         }
1354 }
1355
/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
        vm_paddr_t new_end;
        size_t size;

        /*
         * Calculate the size (in bytes) of the reservation array.  Round up
         * from "high_water" because every small page is mapped to an element
         * in the reservation array based on its physical address.  Thus, the
         * number of elements in the reservation array can be greater than the
         * number of superpages.
         */
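        /*
         * E.g. (assuming 2MB reservations): a 16GB high_water yields
         * howmany(16GB, 2MB) == 8192 array elements, one for every possible
         * 2MB frame below high_water.
         */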
        size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);

        /*
         * Allocate and map the physical memory for the reservation array.  The
         * next available virtual address is returned by reference.
         */
        new_end = end - round_page(size);
        vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
            VM_PROT_READ | VM_PROT_WRITE);
        bzero(vm_reserv_array, size);

        /*
         * Return the next available physical address.
         */
        return (new_end);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation counters.
 */
static void
vm_reserv_counter_init(void *unused)
{

        vm_reserv_freed = counter_u64_alloc(M_WAITOK);
        vm_reserv_broken = counter_u64_alloc(M_WAITOK);
        vm_reserv_reclaimed = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vm_reserv_counter_init, SI_SUB_CPU, SI_ORDER_ANY,
    vm_reserv_counter_init, NULL);

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
        vm_reserv_t rv;

        VM_OBJECT_ASSERT_LOCKED(m->object);
        rv = vm_reserv_from_page(m);
        if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
                m = rv->pages;
        else
                m = NULL;

        return (m);
}

#endif  /* VM_NRESERVLEVEL > 0 */