/* sys/vm/vm_reserv.c (FreeBSD releng/10.2) */
/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2008 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *      Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully-automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define VM_LEVEL_0_NPAGES       (1 << VM_LEVEL_0_ORDER)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define VM_LEVEL_0_SHIFT        (VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define VM_LEVEL_0_SIZE         (1 << VM_LEVEL_0_SHIFT)

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define VM_RESERV_INDEX(object, pindex) \
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
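
/*
 * Illustrative example (editor's sketch): assuming VM_LEVEL_0_ORDER is 9
 * and PAGE_SHIFT is 12, as on amd64, a level 0 reservation covers
 * VM_LEVEL_0_NPAGES = 512 small pages, VM_LEVEL_0_SHIFT is 21, and
 * VM_LEVEL_0_SIZE is 2MB, the amd64 superpage size.  With pg_color 0,
 * VM_RESERV_INDEX(object, 515) = (0 + 515) & 511 = 3, i.e., pindex 515
 * maps to the fourth small page of its reservation.
 */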

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially-
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially-populated reservation can be broken and reclaimed at any time.
 */
struct vm_reserv {
        TAILQ_ENTRY(vm_reserv) partpopq;
        LIST_ENTRY(vm_reserv) objq;
        vm_object_t     object;                 /* containing object */
        vm_pindex_t     pindex;                 /* offset within object */
        vm_page_t       pages;                  /* first page of a superpage */
        int             popcnt;                 /* # of pages in use */
        char            inpartpopq;
};

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in that
 * it may contain more reservation structures than there are (physical)
 * superpages.  The surplus, "invalid" reservation structures exist to trade
 * space for time in the implementation of vm_reserv_from_page().  Invalid
 * reservation structures are distinguishable from "valid" reservation
 * structures by inspecting the reservation's "pages" field.  Invalid
 * reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The partially-populated reservation queue
 *
 * This queue enables the fast recovery of an unused cached or free small page
 * from a partially-populated reservation.  The reservation at the head of
 * this queue is the least-recently-changed, partially-populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
                            TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static long vm_reserv_broken;
SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, 0, "Cumulative number of broken reservations");

static long vm_reserv_freed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, 0, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");

static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");

static void             vm_reserv_depopulate(vm_reserv_t rv);
static vm_reserv_t      vm_reserv_from_page(vm_page_t m);
static boolean_t        vm_reserv_has_pindex(vm_reserv_t rv,
                            vm_pindex_t pindex);
static void             vm_reserv_populate(vm_reserv_t rv);
static void             vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Describes the current state of the partially-populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        vm_reserv_t rv;
        int counter, error, level, unused_pages;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        sbuf_printf(&sbuf, "\nLEVEL     SIZE  NUMBER\n\n");
        for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
                counter = 0;
                unused_pages = 0;
                mtx_lock(&vm_page_queue_free_mtx);
                TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
                        counter++;
                        unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
                }
                mtx_unlock(&vm_page_queue_free_mtx);
                sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
                    unused_pages * ((int)PAGE_SIZE / 1024), counter);
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}
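
/*
 * Usage note (editor's addition): the statistics produced by this handler
 * are readable from userland as "sysctl vm.reserv.partpopq", alongside the
 * vm.reserv.broken, vm.reserv.freed, and vm.reserv.reclaimed counters
 * defined above.  Each row reports, per reservation level, the amount of
 * unused memory held in partially-populated reservations and their count.
 */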

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially-populated reservations queue if the
 * population count is non-zero.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv)
{

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->object != NULL,
            ("vm_reserv_depopulate: reserv %p is free", rv));
        KASSERT(rv->popcnt > 0,
            ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
        } else {
                KASSERT(rv->pages->psind == 1,
                    ("vm_reserv_depopulate: reserv %p is already demoted",
                    rv));
                rv->pages->psind = 0;
        }
        rv->popcnt--;
        if (rv->popcnt == 0) {
                LIST_REMOVE(rv, objq);
                rv->object = NULL;
                vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
                vm_reserv_freed++;
        } else {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
        }
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

        return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}
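
/*
 * Worked example (editor's sketch, assuming VM_LEVEL_0_SHIFT == 21, i.e.,
 * 2MB reservations): a page at physical address 0x40321000 yields
 * reservation number 0x40321000 >> 21 = 0x201, so it shares
 * vm_reserv_array[0x201] with every other small page in the 2MB chunk
 * [0x40200000, 0x40400000).
 */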

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

        return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
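
/*
 * Editor's note: for an unsigned value x, (x & ~(VM_LEVEL_0_NPAGES - 1))
 * == 0 holds exactly when x < VM_LEVEL_0_NPAGES.  The return expression
 * above is therefore a branch-free test that rv->pindex <= pindex <
 * rv->pindex + VM_LEVEL_0_NPAGES: a pindex below rv->pindex wraps around
 * to a huge unsigned difference and fails the mask test.
 */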

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially-populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv)
{

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->object != NULL,
            ("vm_reserv_populate: reserv %p is free", rv));
        KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
            ("vm_reserv_populate: reserv %p is already full", rv));
        KASSERT(rv->pages->psind == 0,
            ("vm_reserv_populate: reserv %p is already promoted", rv));
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
        }
        rv->popcnt++;
        if (rv->popcnt < VM_LEVEL_0_NPAGES) {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
        } else
                rv->pages->psind = 1;
}
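
/*
 * Editor's note: a reservation's state transitions are visible through
 * "psind" on its first page.  vm_reserv_populate() sets psind to 1 only
 * when popcnt reaches VM_LEVEL_0_NPAGES, marking the run as a fully-
 * populated superpage, and vm_reserv_depopulate() clears it on the first
 * removal; other parts of the VM system may treat psind as a superpage
 * promotion hint.
 */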

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa, size;
        vm_page_t m, m_ret, mpred, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        u_long allocpages, maxpages, minpages;
        int i, index, n;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex + npages > object->size)
                return (NULL);

        /*
         * All reservations of a particular size have the same alignment.
         * Assuming that the first page is allocated from a reservation, the
         * least significant bits of its physical address can be determined
         * from its offset from the beginning of the reservation and the size
         * of the reservation.
         *
         * Could the specified index within a reservation of the smallest
         * possible size satisfy the alignment and boundary requirements?
         */
        pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
        if ((pa & (alignment - 1)) != 0)
                return (NULL);
        size = npages << PAGE_SHIFT;
        if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        mpred = vm_radix_lookup_le(&object->rtree, pindex);
        if (mpred != NULL) {
                KASSERT(mpred->pindex < pindex,
                    ("vm_reserv_alloc_contig: pindex already allocated"));
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
                msucc = TAILQ_NEXT(mpred, listq);
        } else
                msucc = TAILQ_FIRST(&object->memq);
        if (msucc != NULL) {
                KASSERT(msucc->pindex > pindex,
                    ("vm_reserv_alloc_contig: pindex already allocated"));
                rv = vm_reserv_from_page(msucc);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
        }

        /*
         * Could at least one reservation fit between the first index to the
         * left that can be used ("leftcap") and the first index to the right
         * that cannot be used ("rightcap")?
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first)
                        return (NULL);
        }
        minpages = VM_RESERV_INDEX(object, pindex) + npages;
        maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
        allocpages = maxpages;
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + maxpages > rightcap) {
                        if (maxpages == VM_LEVEL_0_NPAGES)
                                return (NULL);

                        /*
                         * At least one reservation will fit between "leftcap"
                         * and "rightcap".  However, a reservation for the
                         * last of the requested pages will not fit.  Reduce
                         * the size of the upcoming allocation accordingly.
                         */
                        allocpages = minpages;
                }
        }

        /*
         * Would the last new reservation extend past the end of the object?
         */
        if (first + maxpages > object->size) {
                /*
                 * Don't allocate the last new reservation if the object is a
                 * vnode or backed by another object that is a vnode.
                 */
                if (object->type == OBJT_VNODE ||
                    (object->backing_object != NULL &&
                    object->backing_object->type == OBJT_VNODE)) {
                        if (maxpages == VM_LEVEL_0_NPAGES)
                                return (NULL);
                        allocpages = minpages;
                }
                /* Speculate that the object may grow. */
        }

        /*
         * Allocate the physical pages.  The alignment and boundary specified
         * for this allocation may be different from the alignment and
         * boundary specified for the requested pages.  For instance, the
         * specified index may not be the first page within the first new
         * reservation.
         */
        m = vm_phys_alloc_contig(allocpages, low, high, ulmax(alignment,
            VM_LEVEL_0_SIZE), boundary > VM_LEVEL_0_SIZE ? boundary : 0);
        if (m == NULL)
                return (NULL);

        /*
         * The allocated physical pages always begin at a reservation
         * boundary, but they do not always end at a reservation boundary.
         * Initialize every reservation that is completely covered by the
         * allocated physical pages.
         */
        m_ret = NULL;
        index = VM_RESERV_INDEX(object, pindex);
        do {
                rv = vm_reserv_from_page(m);
                KASSERT(rv->pages == m,
                    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
                    rv));
                KASSERT(rv->object == NULL,
                    ("vm_reserv_alloc_contig: reserv %p isn't free", rv));
                LIST_INSERT_HEAD(&object->rvq, rv, objq);
                rv->object = object;
                rv->pindex = first;
                KASSERT(rv->popcnt == 0,
                    ("vm_reserv_alloc_contig: reserv %p's popcnt is corrupted",
                    rv));
                KASSERT(!rv->inpartpopq,
                    ("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
                    rv));
                n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
                for (i = 0; i < n; i++)
                        vm_reserv_populate(rv);
                npages -= n;
                if (m_ret == NULL) {
                        m_ret = &rv->pages[index];
                        index = 0;
                }
                m += VM_LEVEL_0_NPAGES;
                first += VM_LEVEL_0_NPAGES;
                allocpages -= VM_LEVEL_0_NPAGES;
        } while (allocpages >= VM_LEVEL_0_NPAGES);
        return (m_ret);

        /*
         * Found a matching reservation.
         */
found:
        index = VM_RESERV_INDEX(object, pindex);
        /* Does the allocation fit within the reservation? */
        if (index + npages > VM_LEVEL_0_NPAGES)
                return (NULL);
        m = &rv->pages[index];
        pa = VM_PAGE_TO_PHYS(m);
        if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
            ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                return (NULL);
        /* Handle vm_page_rename(m, new_object, ...). */
        for (i = 0; i < npages; i++)
                if ((rv->pages[index + i].flags & (PG_CACHED | PG_FREE)) == 0)
                        return (NULL);
        for (i = 0; i < npages; i++)
                vm_reserv_populate(rv);
        return (m);
}
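
/*
 * Worked example (editor's sketch) for the boundary test used above,
 * ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0: with pa = 0x1ff000,
 * size = 0x2000 (two 4KB pages), and boundary = 0x200000, the first and
 * last bytes of the run are 0x1ff000 and 0x200fff.  They differ in bit
 * 21, so the XOR survives the mask and the run is rejected for crossing
 * a 2MB boundary; the same run starting at pa = 0x200000 would pass.
 */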

/*
 * Allocates a page from an existing or newly-created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
{
        vm_page_t m, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        VM_OBJECT_ASSERT_WLOCKED(object);

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex >= object->size)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        if (mpred != NULL) {
                KASSERT(mpred->object == object,
                    ("vm_reserv_alloc_page: object doesn't contain mpred"));
                KASSERT(mpred->pindex < pindex,
                    ("vm_reserv_alloc_page: mpred doesn't precede pindex"));
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
                msucc = TAILQ_NEXT(mpred, listq);
        } else
                msucc = TAILQ_FIRST(&object->memq);
        if (msucc != NULL) {
                KASSERT(msucc->pindex > pindex,
                    ("vm_reserv_alloc_page: msucc doesn't succeed pindex"));
                rv = vm_reserv_from_page(msucc);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
        }

        /*
         * Could a reservation fit between the first index to the left that
         * can be used and the first index to the right that cannot be used?
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first)
                        return (NULL);
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + VM_LEVEL_0_NPAGES > rightcap)
                        return (NULL);
        }

        /*
         * Would a new reservation extend past the end of the object?
         */
        if (first + VM_LEVEL_0_NPAGES > object->size) {
                /*
                 * Don't allocate a new reservation if the object is a vnode or
                 * backed by another object that is a vnode.
                 */
                if (object->type == OBJT_VNODE ||
                    (object->backing_object != NULL &&
                    object->backing_object->type == OBJT_VNODE))
                        return (NULL);
                /* Speculate that the object may grow. */
        }

        /*
         * Allocate and populate the new reservation.
         */
        m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER);
        if (m == NULL)
                return (NULL);
        rv = vm_reserv_from_page(m);
        KASSERT(rv->pages == m,
            ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
        KASSERT(rv->object == NULL,
            ("vm_reserv_alloc_page: reserv %p isn't free", rv));
        LIST_INSERT_HEAD(&object->rvq, rv, objq);
        rv->object = object;
        rv->pindex = first;
        KASSERT(rv->popcnt == 0,
            ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv));
        vm_reserv_populate(rv);
        return (&rv->pages[VM_RESERV_INDEX(object, pindex)]);

        /*
         * Found a matching reservation.
         */
found:
        m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
        /* Handle vm_page_rename(m, new_object, ...). */
        if ((m->flags & (PG_CACHED | PG_FREE)) == 0)
                return (NULL);
        vm_reserv_populate(rv);
        return (m);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
        vm_reserv_t rv;
        int i;

        mtx_lock(&vm_page_queue_free_mtx);
        while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
                KASSERT(rv->object == object,
                    ("vm_reserv_break_all: reserv %p is corrupted", rv));
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                LIST_REMOVE(rv, objq);
                rv->object = NULL;
                for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
                        if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
                                vm_phys_free_pages(&rv->pages[i], 0);
                        else
                                rv->popcnt--;
                }
                KASSERT(rv->popcnt == 0,
                    ("vm_reserv_break_all: reserv %p's popcnt is corrupted",
                    rv));
                vm_reserv_broken++;
        }
        mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
        vm_reserv_t rv;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        if ((m->flags & PG_CACHED) != 0 && m->pool != VM_FREEPOOL_CACHE)
                vm_phys_set_pool(VM_FREEPOOL_CACHE, rv->pages,
                    VM_LEVEL_0_ORDER);
        vm_reserv_depopulate(rv);
        return (TRUE);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
        vm_paddr_t paddr;
        int i;

        /*
         * Initialize the reservation array.  Specifically, initialize the
         * "pages" field for every element that has an underlying superpage.
         */
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                paddr = roundup2(phys_avail[i], VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE <= phys_avail[i + 1]) {
                        vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
                            PHYS_TO_VM_PAGE(paddr);
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
}
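
/*
 * Editor's note: phys_avail[] is the kernel's list of usable physical
 * memory, stored as (start, end) pairs and terminated by a zero entry,
 * which is why the loop above advances "i" by 2 until phys_avail[i + 1]
 * is 0.  Only VM_LEVEL_0_SIZE-aligned chunks that fit entirely within one
 * range receive a non-NULL "pages" field; every other element of
 * vm_reserv_array remains a zero-filled, "invalid" structure as described
 * earlier.
 */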

/*
 * Returns a reservation level if the given page belongs to a fully-populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Prepare for the reactivation of a cached page.
 *
 * First, suppose that the given page "m" was allocated individually, i.e., not
 * as part of a reservation, and cached.  Then, suppose a reservation
 * containing "m" is allocated by the same object.  Although "m" and the
 * reservation belong to the same object, "m"'s pindex may not match the
 * reservation's.
 *
 * The free page queue must be locked.
 */
boolean_t
vm_reserv_reactivate_page(vm_page_t m)
{
        vm_reserv_t rv;
        int i, m_index;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        KASSERT((m->flags & PG_CACHED) != 0,
            ("vm_reserv_reactivate_page: page %p is not cached", m));
        if (m->object == rv->object &&
            m->pindex - rv->pindex == VM_RESERV_INDEX(m->object, m->pindex))
                vm_reserv_populate(rv);
        else {
                KASSERT(rv->inpartpopq,
                    ("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
                    rv));
                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
                LIST_REMOVE(rv, objq);
                rv->object = NULL;
                /* Don't vm_phys_free_pages(m, 0). */
                m_index = m - rv->pages;
                for (i = 0; i < m_index; i++) {
                        if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
                                vm_phys_free_pages(&rv->pages[i], 0);
                        else
                                rv->popcnt--;
                }
                for (i++; i < VM_LEVEL_0_NPAGES; i++) {
                        if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
                                vm_phys_free_pages(&rv->pages[i], 0);
                        else
                                rv->popcnt--;
                }
                KASSERT(rv->popcnt == 0,
                    ("vm_reserv_reactivate_page: reserv %p's popcnt is corrupted",
                    rv));
                vm_reserv_broken++;
        }
        return (TRUE);
}

/*
 * Breaks the given partially-populated reservation, releasing its cached and
 * free pages to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{
        int i;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->inpartpopq,
            ("vm_reserv_reclaim: reserv %p's inpartpopq is corrupted", rv));
        TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
        rv->inpartpopq = FALSE;
        KASSERT(rv->object != NULL,
            ("vm_reserv_reclaim: reserv %p is free", rv));
        LIST_REMOVE(rv, objq);
        rv->object = NULL;
        for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
                if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
                        vm_phys_free_pages(&rv->pages[i], 0);
                else
                        rv->popcnt--;
        }
        KASSERT(rv->popcnt == 0,
            ("vm_reserv_reclaim: reserv %p's popcnt is corrupted", rv));
        vm_reserv_reclaimed++;
}

/*
 * Breaks the reservation at the head of the partially-populated reservation
 * queue, releasing its cached and free pages to the physical memory
 * allocator.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(void)
{
        vm_reserv_t rv;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
                vm_reserv_reclaim(rv);
                return (TRUE);
        }
        return (FALSE);
}

/*
 * Searches the partially-populated reservation queue for the least recently
 * active reservation with unused pages, i.e., cached or free, that satisfy the
 * given request for contiguous physical memory.  If a satisfactory reservation
 * is found, it is broken.  Returns TRUE if a reservation is broken and FALSE
 * otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa, pa_length, size;
        vm_reserv_t rv;
        int i;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        if (npages > VM_LEVEL_0_NPAGES - 1)
                return (FALSE);
        size = npages << PAGE_SHIFT;
        TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
                pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
                if (pa + PAGE_SIZE - size < low) {
                        /* This entire reservation is too low; go to next. */
                        continue;
                }
                pa_length = 0;
                for (i = 0; i < VM_LEVEL_0_NPAGES; i++)
                        if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) {
                                pa_length += PAGE_SIZE;
                                if (pa_length == PAGE_SIZE) {
                                        pa = VM_PAGE_TO_PHYS(&rv->pages[i]);
                                        if (pa + size > high) {
                                                /* Skip to next reservation. */
                                                break;
                                        } else if (pa < low ||
                                            (pa & (alignment - 1)) != 0 ||
                                            ((pa ^ (pa + size - 1)) &
                                            ~(boundary - 1)) != 0)
                                                pa_length = 0;
                                }
                                if (pa_length >= size) {
                                        vm_reserv_reclaim(rv);
                                        return (TRUE);
                                }
                        } else
                                pa_length = 0;
        }
        return (FALSE);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
        vm_reserv_t rv;

        VM_OBJECT_ASSERT_WLOCKED(new_object);
        rv = vm_reserv_from_page(m);
        if (rv->object == old_object) {
                mtx_lock(&vm_page_queue_free_mtx);
                if (rv->object == old_object) {
                        LIST_REMOVE(rv, objq);
                        LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
                        rv->object = new_object;
                        rv->pindex -= old_object_offset;
                }
                mtx_unlock(&vm_page_queue_free_mtx);
        }
}
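
/*
 * Editor's note: the repeated rv->object == old_object test above is a
 * double-checked locking idiom.  The first, unlocked test cheaply skips
 * pages with no matching reservation; the second test, made while holding
 * the free page queue lock, confirms that the reservation was not broken
 * or reused concurrently before it is moved to new_object.
 */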

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
        vm_paddr_t new_end;
        size_t size;

        /*
         * Calculate the size (in bytes) of the reservation array.  Round up
         * from "high_water" because every small page is mapped to an element
         * in the reservation array based on its physical address.  Thus, the
         * number of elements in the reservation array can be greater than the
         * number of superpages.
         */
        size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);

        /*
         * Allocate and map the physical memory for the reservation array.  The
         * next available virtual address is returned by reference.
         */
        new_end = end - round_page(size);
        vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
            VM_PROT_READ | VM_PROT_WRITE);
        bzero(vm_reserv_array, size);

        /*
         * Return the next available physical address.
         */
        return (new_end);
}
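
/*
 * Worked example (editor's sketch): assuming 2MB reservations, a 16GB
 * high_water mark, and a 64-byte struct vm_reserv on a 64-bit machine,
 * size = howmany(16GB, 2MB) * 64 = 8192 * 64 bytes, i.e., 512KB, which is
 * rounded up to whole pages and carved off the top of the physical memory
 * that ends at "end".
 */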

#endif  /* VM_NRESERVLEVEL > 0 */