/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *      Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

#ifndef VM_LEVEL_0_ORDER_MAX
#define VM_LEVEL_0_ORDER_MAX    VM_LEVEL_0_ORDER
#endif

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define VM_LEVEL_0_NPAGES       (1 << VM_LEVEL_0_ORDER)
#define VM_LEVEL_0_NPAGES_MAX   (1 << VM_LEVEL_0_ORDER_MAX)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define VM_LEVEL_0_SHIFT        (VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define VM_LEVEL_0_SIZE         (1 << VM_LEVEL_0_SHIFT)
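
/*
 * For example, on amd64 VM_LEVEL_0_ORDER is 9, so VM_LEVEL_0_SHIFT is
 * 9 + 12 = 21 and VM_LEVEL_0_SIZE is 2MB, the size of an amd64 superpage.
 * (Illustrative figures; the order comes from the platform's vmparam.h.)
 */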

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define VM_RESERV_INDEX(object, pindex) \
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
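
/*
 * E.g., with VM_LEVEL_0_NPAGES == 512, pg_color == 40, and pindex == 100,
 * the index is (40 + 100) & 511 == 140, so the page backing that pindex is
 * expected at slot 140 of the reservation.  (Worked example only.)
 */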

/*
 * The size of a population map entry
 */
typedef u_long          popmap_t;

/*
 * The number of bits in a population map entry
 */
#define NBPOPMAP        (NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define NPOPMAP         howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
#define NPOPMAP_MAX     howmany(VM_LEVEL_0_NPAGES_MAX, NBPOPMAP)
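
/*
 * E.g., a 512-page reservation tracked with 64-bit popmap entries needs
 * howmany(512, 64) == 8 entries.  (Illustrative sizing.)
 */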

/*
 * Number of elapsed ticks before we update the LRU queue position.  Used
 * to reduce contention and churn on the list.
 */
#define PARTPOPSLOP     1

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially populated reservation can be broken and reclaimed at any time.
 *
 * c - constant after boot
 * d - vm_reserv_domain_lock
 * o - vm_reserv_object_lock
 * r - vm_reserv_lock
 * s - vm_reserv_domain_scan_lock
 */
struct vm_reserv {
        struct mtx      lock;                   /* reservation lock. */
        TAILQ_ENTRY(vm_reserv) partpopq;        /* (d, r) per-domain queue. */
        LIST_ENTRY(vm_reserv) objq;             /* (o, r) object queue */
        vm_object_t     object;                 /* (o, r) containing object */
        vm_pindex_t     pindex;                 /* (o, r) offset in object */
        vm_page_t       pages;                  /* (c) first page */
        uint16_t        popcnt;                 /* (r) # of pages in use */
        uint8_t         domain;                 /* (c) NUMA domain. */
        char            inpartpopq;             /* (d, r) */
        int             lasttick;               /* (r) last pop update tick. */
        popmap_t        popmap[NPOPMAP_MAX];    /* (r) bit vector, used pages */
};

TAILQ_HEAD(vm_reserv_queue, vm_reserv);

#define vm_reserv_lockptr(rv)           (&(rv)->lock)
#define vm_reserv_assert_locked(rv)                                     \
            mtx_assert(vm_reserv_lockptr(rv), MA_OWNED)
#define vm_reserv_lock(rv)              mtx_lock(vm_reserv_lockptr(rv))
#define vm_reserv_trylock(rv)           mtx_trylock(vm_reserv_lockptr(rv))
#define vm_reserv_unlock(rv)            mtx_unlock(vm_reserv_lockptr(rv))

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of useful reservation
 * structures than there are (physical) superpages.  These "invalid"
 * reservation structures exist to trade off space for time in the
 * implementation of vm_reserv_from_page().  Invalid reservation structures are
 * distinguishable from "valid" reservation structures by inspecting the
 * reservation's "pages" field.  Invalid reservation structures have a NULL
 * "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;
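
/*
 * E.g., with 2MB reservations (VM_LEVEL_0_SHIFT == 21), a page at physical
 * address 0x40212000 maps to vm_reserv_array[0x40212000 >> 21], that is,
 * index 0x201.  (Worked example only.)
 */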

/*
 * The per-domain partially populated reservation queues
 *
 * These queues enable the fast recovery of an unused free small page from a
 * partially populated reservation.  The reservation at the head of a queue
 * is the least recently changed, partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock.
 * Threads reclaiming free pages from the queue must hold the per-domain scan
 * lock.
 */
struct vm_reserv_domain {
        struct mtx              lock;
        struct vm_reserv_queue  partpop;        /* (d) */
        struct vm_reserv        marker;         /* (d, s) scan marker/lock */
} __aligned(CACHE_LINE_SIZE);

static struct vm_reserv_domain vm_rvd[MAXMEMDOM];

#define vm_reserv_domain_lockptr(d)     (&vm_rvd[(d)].lock)
#define vm_reserv_domain_assert_locked(d)       \
        mtx_assert(vm_reserv_domain_lockptr(d), MA_OWNED)
#define vm_reserv_domain_lock(d)        mtx_lock(vm_reserv_domain_lockptr(d))
#define vm_reserv_domain_unlock(d)      mtx_unlock(vm_reserv_domain_lockptr(d))

#define vm_reserv_domain_scan_lock(d)   mtx_lock(&vm_rvd[(d)].marker.lock)
#define vm_reserv_domain_scan_unlock(d) mtx_unlock(&vm_rvd[(d)].marker.lock)

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static counter_u64_t vm_reserv_broken = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, "Cumulative number of broken reservations");

static counter_u64_t vm_reserv_freed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD,
    NULL, 0, sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");

static counter_u64_t vm_reserv_reclaimed = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, "Cumulative number of reclaimed reservations");

/*
 * The object lock pool is used to synchronize the rvq.  We can not use a
 * pool mutex because it is required before malloc works.
 *
 * The "hash" function could be made faster without divide and modulo.
 */
#define VM_RESERV_OBJ_LOCK_COUNT        MAXCPU

struct mtx_padalign vm_reserv_object_mtx[VM_RESERV_OBJ_LOCK_COUNT];

#define vm_reserv_object_lock_idx(object)                       \
            (((uintptr_t)object / sizeof(*object)) % VM_RESERV_OBJ_LOCK_COUNT)
#define vm_reserv_object_lock_ptr(object)                       \
            &vm_reserv_object_mtx[vm_reserv_object_lock_idx((object))]
#define vm_reserv_object_lock(object)                           \
            mtx_lock(vm_reserv_object_lock_ptr((object)))
#define vm_reserv_object_unlock(object)                         \
            mtx_unlock(vm_reserv_object_lock_ptr((object)))

static void             vm_reserv_break(vm_reserv_t rv);
static void             vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t      vm_reserv_from_page(vm_page_t m);
static boolean_t        vm_reserv_has_pindex(vm_reserv_t rv,
                            vm_pindex_t pindex);
static void             vm_reserv_populate(vm_reserv_t rv, int index);
static void             vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring any
 * locks, the returned value is inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        vm_reserv_t rv;
        int fullpop, segind;

        fullpop = 0;
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
                    VM_LEVEL_0_SIZE <= seg->end) {
                        rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
                        fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
        return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        vm_reserv_t rv;
        int counter, error, domain, level, unused_pages;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        sbuf_printf(&sbuf, "\nDOMAIN    LEVEL     SIZE  NUMBER\n\n");
        for (domain = 0; domain < vm_ndomains; domain++) {
                for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
                        counter = 0;
                        unused_pages = 0;
                        vm_reserv_domain_lock(domain);
                        TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
                                if (rv == &vm_rvd[domain].marker)
                                        continue;
                                counter++;
                                unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
                        }
                        vm_reserv_domain_unlock(domain);
                        sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
                            domain, level,
                            unused_pages * ((int)PAGE_SIZE / 1024), counter);
                }
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}

/*
 * Remove a reservation from the object's objq.
 */
static void
vm_reserv_remove(vm_reserv_t rv)
{
        vm_object_t object;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_remove: reserv %p is free", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_remove: reserv %p's inpartpopq is TRUE", rv));
        object = rv->object;
        vm_reserv_object_lock(object);
        LIST_REMOVE(rv, objq);
        rv->object = NULL;
        vm_reserv_object_unlock(object);
}

/*
 * Insert a new reservation into the object's objq.
 */
static void
vm_reserv_insert(vm_reserv_t rv, vm_object_t object, vm_pindex_t pindex)
{
        int i;

        vm_reserv_assert_locked(rv);
        CTR6(KTR_VM,
            "%s: rv %p(%p) object %p new %p popcnt %d",
            __FUNCTION__, rv, rv->pages, rv->object, object,
            rv->popcnt);
        KASSERT(rv->object == NULL,
            ("vm_reserv_insert: reserv %p isn't free", rv));
        KASSERT(rv->popcnt == 0,
            ("vm_reserv_insert: reserv %p's popcnt is corrupted", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_insert: reserv %p's inpartpopq is TRUE", rv));
        for (i = 0; i < NPOPMAP; i++)
                KASSERT(rv->popmap[i] == 0,
                    ("vm_reserv_insert: reserv %p's popmap is corrupted", rv));
        vm_reserv_object_lock(object);
        rv->pindex = pindex;
        rv->object = object;
        rv->lasttick = ticks;
        LIST_INSERT_HEAD(&object->rvq, rv, objq);
        vm_reserv_object_unlock(object);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{
        struct vm_domain *vmd;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_depopulate: reserv %p is free", rv));
        KASSERT(popmap_is_set(rv->popmap, index),
            ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
            index));
        KASSERT(rv->popcnt > 0,
            ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        if (rv->popcnt == VM_LEVEL_0_NPAGES) {
                KASSERT(rv->pages->psind == 1,
                    ("vm_reserv_depopulate: reserv %p is already demoted",
                    rv));
                rv->pages->psind = 0;
        }
        popmap_clear(rv->popmap, index);
        rv->popcnt--;
        if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
            rv->popcnt == 0) {
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                if (rv->popcnt != 0) {
                        rv->inpartpopq = TRUE;
                        TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv,
                            partpopq);
                }
                vm_reserv_domain_unlock(rv->domain);
                rv->lasttick = ticks;
        }
        vmd = VM_DOMAIN(rv->domain);
        if (rv->popcnt == 0) {
                vm_reserv_remove(rv);
                vm_domain_free_lock(vmd);
                vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
                vm_domain_free_unlock(vmd);
                counter_u64_add(vm_reserv_freed, 1);
        }
        vm_domain_freecnt_inc(vmd, 1);
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

        return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}

/*
 * Returns an existing reservation or NULL, and initializes the successor
 * pointer.
 */
static vm_reserv_t
vm_reserv_from_object(vm_object_t object, vm_pindex_t pindex,
    vm_page_t mpred, vm_page_t *msuccp)
{
        vm_reserv_t rv;
        vm_page_t msucc;

        msucc = NULL;
        if (mpred != NULL) {
                KASSERT(mpred->object == object,
                    ("vm_reserv_from_object: object doesn't contain mpred"));
                KASSERT(mpred->pindex < pindex,
                    ("vm_reserv_from_object: mpred doesn't precede pindex"));
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
                msucc = TAILQ_NEXT(mpred, listq);
        } else
                msucc = TAILQ_FIRST(&object->memq);
        if (msucc != NULL) {
                KASSERT(msucc->pindex > pindex,
                    ("vm_reserv_from_object: msucc doesn't succeed pindex"));
                rv = vm_reserv_from_page(msucc);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
        }
        rv = NULL;

found:
        *msuccp = msucc;

        return (rv);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

        return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
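
/*
 * Note: because vm_pindex_t is unsigned, the masking test in
 * vm_reserv_has_pindex() is equivalent to checking
 * rv->pindex <= pindex < rv->pindex + VM_LEVEL_0_NPAGES without branches.
 * (Explanatory note.)
 */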

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially populated reservation queue.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL,
            ("vm_reserv_populate: reserv %p is free", rv));
        KASSERT(popmap_is_clear(rv->popmap, index),
            ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
            index));
        KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
            ("vm_reserv_populate: reserv %p is already full", rv));
        KASSERT(rv->pages->psind == 0,
            ("vm_reserv_populate: reserv %p is already promoted", rv));
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_populate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        popmap_set(rv->popmap, index);
        rv->popcnt++;
        if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
            rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
                return;
        rv->lasttick = ticks;
        vm_reserv_domain_lock(rv->domain);
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
        }
        if (rv->popcnt < VM_LEVEL_0_NPAGES) {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq);
        } else {
                KASSERT(rv->pages->psind == 0,
                    ("vm_reserv_populate: reserv %p is already promoted",
                    rv));
                rv->pages->psind = 1;
        }
        vm_reserv_domain_unlock(rv->domain);
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        struct vm_domain *vmd;
        vm_paddr_t pa, size;
        vm_page_t m, m_ret, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        u_long allocpages, maxpages, minpages;
        int i, index, n;

        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex + npages > object->size)
                return (NULL);

        /*
         * All reservations of a particular size have the same alignment.
         * Assuming that the first page is allocated from a reservation, the
         * least significant bits of its physical address can be determined
         * from its offset from the beginning of the reservation and the size
         * of the reservation.
         *
         * Could the specified index within a reservation of the smallest
         * possible size satisfy the alignment and boundary requirements?
         */
        pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
        if ((pa & (alignment - 1)) != 0)
                return (NULL);
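        /*
         * The boundary check below tests whether the first and last bytes
         * of the proposed run would fall in the same boundary-aligned
         * block.  E.g., with boundary == 0x10000, pa == 0xf000 and
         * size == 0x2000 fail, because 0xf000 ^ 0x10fff has bit 16 set.
         * (Worked example only.)
         */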
        size = npages << PAGE_SHIFT;
        if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
        if (rv != NULL) {
                KASSERT(object != kernel_object || rv->domain == domain,
                    ("vm_reserv_alloc_contig: domain mismatch"));
                index = VM_RESERV_INDEX(object, pindex);
                /* Does the allocation fit within the reservation? */
                if (index + npages > VM_LEVEL_0_NPAGES)
                        return (NULL);
                domain = rv->domain;
                vmd = VM_DOMAIN(domain);
                vm_reserv_lock(rv);
                /* Handle reclaim race. */
                if (rv->object != object)
                        goto out;
                m = &rv->pages[index];
                pa = VM_PAGE_TO_PHYS(m);
                if (pa < low || pa + size > high ||
                    (pa & (alignment - 1)) != 0 ||
                    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                        goto out;
                /* Handle vm_page_rename(m, new_object, ...). */
                for (i = 0; i < npages; i++)
                        if (popmap_is_set(rv->popmap, index + i))
                                goto out;
                if (!vm_domain_allocate(vmd, req, npages))
                        goto out;
                for (i = 0; i < npages; i++)
                        vm_reserv_populate(rv, index + i);
                vm_reserv_unlock(rv);
                return (m);
out:
                vm_reserv_unlock(rv);
                return (NULL);
        }

        /*
         * Could at least one reservation fit between the first index to the
         * left that can be used ("leftcap") and the first index to the right
         * that cannot be used ("rightcap")?
         *
         * We must synchronize with the reserv object lock to protect the
         * pindex/object of the resulting reservations against rename while
         * we are inspecting.
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        minpages = VM_RESERV_INDEX(object, pindex) + npages;
        maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
        allocpages = maxpages;
        vm_reserv_object_lock(object);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + maxpages > rightcap) {
                        if (maxpages == VM_LEVEL_0_NPAGES) {
                                vm_reserv_object_unlock(object);
                                return (NULL);
                        }

                        /*
                         * At least one reservation will fit between "leftcap"
                         * and "rightcap".  However, a reservation for the
                         * last of the requested pages will not fit.  Reduce
                         * the size of the upcoming allocation accordingly.
                         */
                        allocpages = minpages;
                }
        }
        vm_reserv_object_unlock(object);

        /*
         * Would the last new reservation extend past the end of the object?
         *
         * If the object is unlikely to grow don't allocate a reservation for
         * the tail.
         */
        if ((object->flags & OBJ_ANON) == 0 &&
            first + maxpages > object->size) {
                if (maxpages == VM_LEVEL_0_NPAGES)
                        return (NULL);
                allocpages = minpages;
        }

        /*
         * Allocate the physical pages.  The alignment and boundary specified
         * for this allocation may be different from the alignment and
         * boundary specified for the requested pages.  For instance, the
         * specified index may not be the first page within the first new
         * reservation.
         */
        m = NULL;
        vmd = VM_DOMAIN(domain);
        if (vm_domain_allocate(vmd, req, npages)) {
                vm_domain_free_lock(vmd);
                m = vm_phys_alloc_contig(domain, allocpages, low, high,
                    ulmax(alignment, VM_LEVEL_0_SIZE),
                    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
                vm_domain_free_unlock(vmd);
                if (m == NULL) {
                        vm_domain_freecnt_inc(vmd, npages);
                        return (NULL);
                }
        } else
                return (NULL);
        KASSERT(vm_phys_domain(m) == domain,
            ("vm_reserv_alloc_contig: Page domain does not match requested."));

        /*
         * The allocated physical pages always begin at a reservation
         * boundary, but they do not always end at a reservation boundary.
         * Initialize every reservation that is completely covered by the
         * allocated physical pages.
         */
        m_ret = NULL;
        index = VM_RESERV_INDEX(object, pindex);
        do {
                rv = vm_reserv_from_page(m);
                KASSERT(rv->pages == m,
                    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
                    rv));
                vm_reserv_lock(rv);
                vm_reserv_insert(rv, object, first);
                n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
                for (i = 0; i < n; i++)
                        vm_reserv_populate(rv, index + i);
                npages -= n;
                if (m_ret == NULL) {
                        m_ret = &rv->pages[index];
                        index = 0;
                }
                vm_reserv_unlock(rv);
                m += VM_LEVEL_0_NPAGES;
                first += VM_LEVEL_0_NPAGES;
                allocpages -= VM_LEVEL_0_NPAGES;
        } while (allocpages >= VM_LEVEL_0_NPAGES);
        return (m_ret);
}

/*
 * Allocate a physical page from an existing or newly created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{
        struct vm_domain *vmd;
        vm_page_t m, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        int index;

        VM_OBJECT_ASSERT_WLOCKED(object);

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex >= object->size)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
        if (rv != NULL) {
                KASSERT(object != kernel_object || rv->domain == domain,
                    ("vm_reserv_alloc_page: domain mismatch"));
                domain = rv->domain;
                vmd = VM_DOMAIN(domain);
                index = VM_RESERV_INDEX(object, pindex);
                m = &rv->pages[index];
                vm_reserv_lock(rv);
                /* Handle reclaim race. */
                if (rv->object != object ||
                    /* Handle vm_page_rename(m, new_object, ...). */
                    popmap_is_set(rv->popmap, index)) {
                        m = NULL;
                        goto out;
                }
                if (vm_domain_allocate(vmd, req, 1) == 0)
                        m = NULL;
                else
                        vm_reserv_populate(rv, index);
out:
                vm_reserv_unlock(rv);
                return (m);
        }

        /*
         * Could a reservation fit between the first index to the left that
         * can be used and the first index to the right that cannot be used?
         *
         * We must synchronize with the reserv object lock to protect the
         * pindex/object of the resulting reservations against rename while
         * we are inspecting.
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        vm_reserv_object_lock(object);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + VM_LEVEL_0_NPAGES > rightcap) {
                        vm_reserv_object_unlock(object);
                        return (NULL);
                }
        }
        vm_reserv_object_unlock(object);

        /*
         * Would the last new reservation extend past the end of the object?
         *
         * If the object is unlikely to grow don't allocate a reservation for
         * the tail.
         */
        if ((object->flags & OBJ_ANON) == 0 &&
            first + VM_LEVEL_0_NPAGES > object->size)
                return (NULL);

        /*
         * Allocate and populate the new reservation.
         */
        m = NULL;
        vmd = VM_DOMAIN(domain);
        if (vm_domain_allocate(vmd, req, 1)) {
                vm_domain_free_lock(vmd);
                m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
                    VM_LEVEL_0_ORDER);
                vm_domain_free_unlock(vmd);
                if (m == NULL) {
                        vm_domain_freecnt_inc(vmd, 1);
                        return (NULL);
                }
        } else
                return (NULL);
        rv = vm_reserv_from_page(m);
        vm_reserv_lock(rv);
        KASSERT(rv->pages == m,
            ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
        vm_reserv_insert(rv, object, first);
        index = VM_RESERV_INDEX(object, pindex);
        vm_reserv_populate(rv, index);
        vm_reserv_unlock(rv);

        return (&rv->pages[index]);
}

/*
 * Breaks the given reservation.  All free pages in the reservation
 * are returned to the physical memory allocator.  The reservation's
 * population count and map are reset to their initial state.
 *
 * The given reservation must not be in the partially populated reservation
 * queue.
 */
static void
vm_reserv_break(vm_reserv_t rv)
{
        u_long changes;
        int bitpos, hi, i, lo;

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        vm_reserv_remove(rv);
        rv->pages->psind = 0;
        hi = lo = -1;
        for (i = 0; i <= NPOPMAP; i++) {
                /*
                 * "changes" is a bitmask that marks where a new sequence of
                 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1]
                 * considered to be 1 if and only if lo == hi.  The bits of
                 * popmap[-1] and popmap[NPOPMAP] are considered all 1s.
                 */
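                /*
                 * Worked example (illustrative): if popmap[0] ends in
                 * ...0110 and lo == hi, then "changes" ends in ...1011,
                 * marking bit 0 (a run of 0s begins), bit 1 (a run of 1s
                 * begins), and bit 3 (a run of 0s begins again).
                 */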
                if (i == NPOPMAP)
                        changes = lo != hi;
                else {
                        changes = rv->popmap[i];
                        changes ^= (changes << 1) | (lo == hi);
                        rv->popmap[i] = 0;
                }
                while (changes != 0) {
                        /*
                         * If the next change marked begins a run of 0s, set
                         * lo to mark that position.  Otherwise set hi and
                         * free pages from lo up to hi.
                         */
                        bitpos = ffsl(changes) - 1;
                        changes ^= 1UL << bitpos;
                        if (lo == hi)
                                lo = NBPOPMAP * i + bitpos;
                        else {
                                hi = NBPOPMAP * i + bitpos;
                                vm_domain_free_lock(VM_DOMAIN(rv->domain));
                                vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
                                vm_domain_free_unlock(VM_DOMAIN(rv->domain));
                                lo = hi;
                        }
                }
        }
        rv->popcnt = 0;
        counter_u64_add(vm_reserv_broken, 1);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
        vm_reserv_t rv;

        /*
         * This access of object->rvq is unsynchronized so that the
         * object rvq lock can nest after the domain_free lock.  We
         * must check for races in the results.  However, the object
         * lock prevents new additions, so we are guaranteed that when
         * it returns NULL the object is properly empty.
         */
        while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
                vm_reserv_lock(rv);
                /* Reclaim race. */
                if (rv->object != object) {
                        vm_reserv_unlock(rv);
                        continue;
                }
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                vm_reserv_domain_unlock(rv->domain);
                vm_reserv_break(rv);
                vm_reserv_unlock(rv);
        }
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
        vm_reserv_t rv;
        boolean_t ret;

        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        vm_reserv_lock(rv);
        /* Re-validate after lock. */
        if (rv->object != NULL) {
                vm_reserv_depopulate(rv, m - rv->pages);
                ret = TRUE;
        } else
                ret = FALSE;
        vm_reserv_unlock(rv);

        return (ret);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        struct vm_reserv *rv;
        struct vm_reserv_domain *rvd;
        int i, j, segind;

        /*
         * Initialize the reservation array.  Specifically, initialize the
         * "pages" field for every element that has an underlying superpage.
         */
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
                    VM_LEVEL_0_SIZE <= seg->end) {
                        rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
                        rv->pages = PHYS_TO_VM_PAGE(paddr);
                        rv->domain = seg->domain;
                        mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
        for (i = 0; i < MAXMEMDOM; i++) {
                rvd = &vm_rvd[i];
                mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF);
                TAILQ_INIT(&rvd->partpop);
                mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF);

                /*
                 * Fully populated reservations should never be present in the
                 * partially populated reservation queues.
                 */
                rvd->marker.popcnt = VM_LEVEL_0_NPAGES;
                for (j = 0; j < NBPOPMAP; j++)
                        popmap_set(rvd->marker.popmap, j);
        }

        for (i = 0; i < VM_RESERV_OBJ_LOCK_COUNT; i++)
                mtx_init(&vm_reserv_object_mtx[i], "resv obj lock", NULL,
                    MTX_DEF);
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (false);
        return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Remove a partially populated reservation from the queue.
 */
static void
vm_reserv_dequeue(vm_reserv_t rv)
{

        vm_reserv_domain_assert_locked(rv->domain);
        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->inpartpopq,
            ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));

        TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
        rv->inpartpopq = FALSE;
}

/*
 * Breaks the given partially populated reservation, releasing its free pages
 * to the physical memory allocator.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

        vm_reserv_assert_locked(rv);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        if (rv->inpartpopq) {
                vm_reserv_domain_lock(rv->domain);
                vm_reserv_dequeue(rv);
                vm_reserv_domain_unlock(rv->domain);
        }
        vm_reserv_break(rv);
        counter_u64_add(vm_reserv_reclaimed, 1);
}

/*
 * Breaks a reservation near the head of the partially populated reservation
 * queue, releasing its free pages to the physical memory allocator.  Returns
 * TRUE if a reservation is broken and FALSE otherwise.
 */
bool
vm_reserv_reclaim_inactive(int domain)
{
        vm_reserv_t rv;

        vm_reserv_domain_lock(domain);
        TAILQ_FOREACH(rv, &vm_rvd[domain].partpop, partpopq) {
                /*
                 * A locked reservation is likely being updated or reclaimed,
                 * so just skip ahead.
                 */
                if (rv != &vm_rvd[domain].marker && vm_reserv_trylock(rv)) {
                        vm_reserv_dequeue(rv);
                        break;
                }
        }
        vm_reserv_domain_unlock(domain);
        if (rv != NULL) {
                vm_reserv_reclaim(rv);
                vm_reserv_unlock(rv);
                return (true);
        }
        return (false);
}

/*
 * Determine whether this reservation has free pages that satisfy the given
 * request for contiguous physical memory.  Start searching from the lower
 * bound, defined by "low".
 */
static bool
vm_reserv_test_contig(vm_reserv_t rv, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa, size;
        u_long changes;
        int bitpos, bits_left, i, hi, lo, n;

        vm_reserv_assert_locked(rv);
        size = npages << PAGE_SHIFT;
        pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
        lo = (pa < low) ?
            ((low + PAGE_MASK - pa) >> PAGE_SHIFT) : 0;
        i = lo / NBPOPMAP;
        changes = rv->popmap[i] | ((1UL << (lo % NBPOPMAP)) - 1);
        hi = (pa + VM_LEVEL_0_SIZE > high) ?
            ((high + PAGE_MASK - pa) >> PAGE_SHIFT) : VM_LEVEL_0_NPAGES;
        n = hi / NBPOPMAP;
        bits_left = hi % NBPOPMAP;
        hi = lo = -1;
        for (;;) {
                /*
                 * "changes" is a bitmask that marks where a new sequence of
                 * 0s or 1s begins in popmap[i], with last bit in popmap[i-1]
                 * considered to be 1 if and only if lo == hi.  The bits of
                 * popmap[-1] and popmap[NPOPMAP] are considered all 1s.
                 */
                changes ^= (changes << 1) | (lo == hi);
                while (changes != 0) {
                        /*
                         * If the next change marked begins a run of 0s, set
                         * lo to mark that position.  Otherwise set hi and
                         * look for a satisfactory first page from lo up to hi.
                         */
                        bitpos = ffsl(changes) - 1;
                        changes ^= 1UL << bitpos;
                        if (lo == hi) {
                                lo = NBPOPMAP * i + bitpos;
                                continue;
                        }
                        hi = NBPOPMAP * i + bitpos;
                        pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
                        if ((pa & (alignment - 1)) != 0) {
                                /* Skip to next aligned page. */
                                lo += (((pa - 1) | (alignment - 1)) + 1) >>
                                    PAGE_SHIFT;
                                if (lo >= VM_LEVEL_0_NPAGES)
                                        return (false);
                                pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
                        }
                        if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
                                /* Skip to next boundary-matching page. */
                                lo += (((pa - 1) | (boundary - 1)) + 1) >>
                                    PAGE_SHIFT;
                                if (lo >= VM_LEVEL_0_NPAGES)
                                        return (false);
                                pa = VM_PAGE_TO_PHYS(&rv->pages[lo]);
                        }
                        if (lo * PAGE_SIZE + size <= hi * PAGE_SIZE)
                                return (true);
                        lo = hi;
                }
                if (++i < n)
                        changes = rv->popmap[i];
                else if (i == n)
                        changes = bits_left == 0 ? -1UL :
                            (rv->popmap[n] | (-1UL << bits_left));
                else
                        return (false);
        }
}

/*
 * Searches the partially populated reservation queue for the least recently
 * changed reservation with free pages that satisfy the given request for
 * contiguous physical memory.  If a satisfactory reservation is found, it is
 * broken.  Returns true if a reservation is broken and false otherwise.
 */
bool
vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        struct vm_reserv_queue *queue;
        vm_paddr_t pa, size;
        vm_reserv_t marker, rv, rvn;

        if (npages > VM_LEVEL_0_NPAGES - 1)
                return (false);
        marker = &vm_rvd[domain].marker;
        queue = &vm_rvd[domain].partpop;
        size = npages << PAGE_SHIFT;

        vm_reserv_domain_scan_lock(domain);
        vm_reserv_domain_lock(domain);
        TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) {
                pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
                if (pa + VM_LEVEL_0_SIZE - size < low) {
                        /* This entire reservation is too low; go to next. */
                        continue;
                }
                if (pa + size > high) {
                        /* This entire reservation is too high; go to next. */
                        continue;
                }

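                /*
                 * If the reservation is locked, insert the marker after it
                 * to hold this position in the queue, drop the domain lock,
                 * and block on the reservation lock.  The reservation is
                 * still usable afterwards only if it remains in the queue
                 * immediately before the marker; otherwise, resume the scan
                 * from the marker's position.  (Descriptive note.)
                 */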
                if (vm_reserv_trylock(rv) == 0) {
                        TAILQ_INSERT_AFTER(queue, rv, marker, partpopq);
                        vm_reserv_domain_unlock(domain);
                        vm_reserv_lock(rv);
                        if (!rv->inpartpopq ||
                            TAILQ_NEXT(rv, partpopq) != marker) {
                                vm_reserv_unlock(rv);
                                vm_reserv_domain_lock(domain);
                                rvn = TAILQ_NEXT(marker, partpopq);
                                TAILQ_REMOVE(queue, marker, partpopq);
                                continue;
                        }
                        vm_reserv_domain_lock(domain);
                        TAILQ_REMOVE(queue, marker, partpopq);
                }
                vm_reserv_domain_unlock(domain);
                if (vm_reserv_test_contig(rv, npages, low, high,
                    alignment, boundary)) {
                        vm_reserv_domain_scan_unlock(domain);
                        vm_reserv_reclaim(rv);
                        vm_reserv_unlock(rv);
                        return (true);
                }
                vm_reserv_unlock(rv);
                vm_reserv_domain_lock(domain);
        }
        vm_reserv_domain_unlock(domain);
        vm_reserv_domain_scan_unlock(domain);
        return (false);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
        vm_reserv_t rv;

        VM_OBJECT_ASSERT_WLOCKED(new_object);
        rv = vm_reserv_from_page(m);
        if (rv->object == old_object) {
                vm_reserv_lock(rv);
                CTR6(KTR_VM,
                    "%s: rv %p object %p new %p popcnt %d inpartpop %d",
                    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
                    rv->inpartpopq);
                if (rv->object == old_object) {
                        vm_reserv_object_lock(old_object);
                        rv->object = NULL;
                        LIST_REMOVE(rv, objq);
                        vm_reserv_object_unlock(old_object);
                        vm_reserv_object_lock(new_object);
                        rv->object = new_object;
                        rv->pindex -= old_object_offset;
                        LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
                        vm_reserv_object_unlock(new_object);
                }
                vm_reserv_unlock(rv);
        }
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

        switch (level) {
        case 0:
                return (VM_LEVEL_0_SIZE);
        case -1:
                return (PAGE_SIZE);
        default:
                return (0);
        }
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
{
        vm_paddr_t new_end, high_water;
        size_t size;
        int i;

        high_water = phys_avail[1];
        for (i = 0; i < vm_phys_nsegs; i++) {
                if (vm_phys_segs[i].end > high_water)
                        high_water = vm_phys_segs[i].end;
        }

        /* Skip the first chunk.  It is already accounted for. */
        for (i = 2; phys_avail[i + 1] != 0; i += 2) {
                if (phys_avail[i + 1] > high_water)
                        high_water = phys_avail[i + 1];
        }

        /*
         * Calculate the size (in bytes) of the reservation array.  Round up
         * from "high_water" because every small page is mapped to an element
         * in the reservation array based on its physical address.  Thus, the
         * number of elements in the reservation array can be greater than the
         * number of superpages.
         */
        size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
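        /*
         * E.g. (illustrative): with a 16GB high water mark and 2MB
         * reservations, the array has howmany(16GB, 2MB) == 8192 elements.
         */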

        /*
         * Allocate and map the physical memory for the reservation array.  The
         * next available virtual address is returned by reference.
         */
        new_end = end - round_page(size);
        vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
            VM_PROT_READ | VM_PROT_WRITE);
        bzero(vm_reserv_array, size);

        /*
         * Return the next available physical address.
         */
        return (new_end);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation counters.
 */
static void
vm_reserv_counter_init(void *unused)
{

        vm_reserv_freed = counter_u64_alloc(M_WAITOK);
        vm_reserv_broken = counter_u64_alloc(M_WAITOK);
        vm_reserv_reclaimed = counter_u64_alloc(M_WAITOK);
}
SYSINIT(vm_reserv_counter_init, SI_SUB_CPU, SI_ORDER_ANY,
    vm_reserv_counter_init, NULL);

/*
 * Returns the superpage containing the given page.
 */
vm_page_t
vm_reserv_to_superpage(vm_page_t m)
{
        vm_reserv_t rv;

        VM_OBJECT_ASSERT_LOCKED(m->object);
        rv = vm_reserv_from_page(m);
        if (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES)
                m = rv->pages;
        else
                m = NULL;

        return (m);
}

#endif  /* VM_NRESERVLEVEL > 0 */