/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int req)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev,
    int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{
}

static struct pagerops deadpagerops = {
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops,		/* OBJT_DEAD */
	&sgpagerops,		/* OBJT_SG */
	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
};

static const int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
struct mtx_padalign pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;
static int bswneeded;
vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	TAILQ_INIT(&bswlist);
	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
	bp = swbuf;

	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
	}

	cluster_pbuf_freecnt = nswbuf / 2;
	vnode_pbuf_freecnt = nswbuf / 2 + 1;
	vnode_async_pbuf_freecnt = nswbuf / 2;
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops != NULL)
		ret = (*ops->pgo_alloc)(handle, size, prot, off, cred);
	else
		ret = NULL;
	return (ret);
}

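/*
 * Illustrative usage sketch (not from the original source): a caller
 * wanting an anonymous, swap-backed object of "size" bytes could go
 * through this dispatcher directly.  The "size" variable and the use of
 * curthread's credential are assumptions made for the example:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_SWAP, NULL, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
 *	if (obj == NULL)
 *		return (ENOMEM);
 *
 * Most callers use type-specific wrappers (e.g. cdev_pager_allocate()
 * for device objects) rather than calling vm_pager_allocate() directly.
 */
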
/*
 * The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	(*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	/*
	 * All pages must be busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.
	 */
	for (int i = 0; i < count; i++) {
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int r;

	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
	if (r != VM_PAGER_OK)
		return (r);

	/*
	 * If the pager has replaced the page, assert that it had
	 * updated the array.  Also assert that the page is still
	 * busied.
	 */
	KASSERT(m[reqpage] == vm_page_lookup(object, m[reqpage]->pindex),
	    ("%s: mismatch page %p pindex %ju", __func__,
	    m[reqpage], (uintmax_t)m[reqpage]->pindex));
	vm_page_assert_xbusied(m[reqpage]);

	/*
	 * Pager didn't fill up the entire page.  Zero out
	 * partially filled data.
	 */
	if (m[reqpage]->valid != VM_PAGE_BITS_ALL)
		vm_page_zero_invalid(m[reqpage], TRUE);

	return (VM_PAGER_OK);
}

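/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * a pager client grabs and busies the page in the object, then asks the
 * pager to fill it.  "obj" and "pindex" are hypothetical, and the page
 * must not already be fully valid (see vm_pager_assert_in() above):
 *
 *	vm_page_t m[1];
 *	int rv;
 *
 *	VM_OBJECT_WLOCK(obj);
 *	m[0] = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL);
 *	rv = vm_pager_get_pages(obj, m, 1, 0);
 *	if (rv != VM_PAGER_OK)
 *		...free the page and return an error...
 *	VM_OBJECT_WUNLOCK(obj);
 */
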
int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int reqpage, pgo_getpages_iodone_t iodone, void *arg)
{

	vm_pager_assert_in(object, m, count);
	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, reqpage, iodone, arg));
}

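/*
 * Illustrative sketch of an asynchronous completion handler; the
 * pgo_getpages_iodone_t callback type is declared in vm/vm_pager.h, and
 * "example_iodone" below is a hypothetical handler, not part of this
 * file:
 *
 *	static void
 *	example_iodone(void *arg, vm_page_t *m, int count, int error)
 *	{
 *		...check "error", unbusy or free the pages, wakeup(arg)...
 *	}
 *
 *	vm_pager_get_pages_async(obj, m, count, reqpage,
 *	    example_iodone, arg);
 */
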
/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}

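/*
 * Illustrative sketch (an assumption about a typical caller): the device
 * pager uses this pattern to share one VM object per device handle,
 * holding its own list lock across the lookup ("dev_pager_mtx" and
 * "dev_pager_object_list" live in vm/device_pager.c):
 *
 *	mtx_lock(&dev_pager_mtx);
 *	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
 *	mtx_unlock(&dev_pager_mtx);
 *	if (object == NULL)
 *		...allocate a fresh object for the handle...
 */
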
/*
 * Free the non-requested pages from the given array.  To remove all pages,
 * the caller should provide an out of range reqpage number.
 */
void
vm_pager_free_nonreq(vm_object_t object, vm_page_t ma[], int reqpage,
    int npages, boolean_t object_locked)
{
	enum { UNLOCKED, CALLER_LOCKED, INTERNALLY_LOCKED } locked;
	int i;

	if (object_locked) {
		VM_OBJECT_ASSERT_WLOCKED(object);
		locked = CALLER_LOCKED;
	} else {
		VM_OBJECT_ASSERT_UNLOCKED(object);
		locked = UNLOCKED;
	}
	for (i = 0; i < npages; ++i) {
		if (i == reqpage)
			continue;
		if (locked == UNLOCKED) {
			VM_OBJECT_WLOCK(object);
			locked = INTERNALLY_LOCKED;
		}
		vm_page_lock(ma[i]);
		vm_page_free(ma[i]);
		vm_page_unlock(ma[i]);
	}
	if (locked == INTERNALLY_LOCKED)
		VM_OBJECT_WUNLOCK(object);
}

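/*
 * Illustrative sketch: per the comment above, passing an out of range
 * reqpage, e.g. -1, frees every page in the array, which is how a pager
 * error path can drop a whole speculative run at once:
 *
 *	vm_pager_free_nonreq(object, ma, -1, npages, FALSE);
 */
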
/*
 * initialize a physical buffer
 */

/*
 * XXX This probably belongs in vfs_bio.c
 */
static void
initpbuf(struct buf *bp)
{

	KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
	KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	bp->b_kvabase = (caddr_t)(MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_data = bp->b_kvabase;
	bp->b_kvasize = MAXPHYS;
	bp->b_flags = 0;
	bp->b_xflags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
}

/*
 * allocate a physical buffer
 *
 *	There are a limited number (nswbuf) of physical buffers.  We need
 *	to make sure that no single subsystem is able to hog all of them,
 *	so each subsystem implements a counter which is typically initialized
 *	to 1/2 nswbuf.  getpbuf() decrements this counter in allocation and
 *	increments it on release, and blocks if the counter hits zero.  A
 *	subsystem may initialize the counter to -1 to disable the feature,
 *	but it must still be sure to match up all uses of getpbuf() with
 *	relpbuf() using the same variable.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	for (;;) {
		if (pfreecnt != NULL) {
			while (*pfreecnt == 0) {
				msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
			}
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt != NULL)
		--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	initpbuf(bp);
	return (bp);
}

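/*
 * Illustrative usage sketch of the counter protocol described above.
 * "md_pbuf_freecnt" is a hypothetical per-subsystem counter, initialized
 * once (e.g. from nswbuf in the subsystem's init routine) and then passed
 * to every matching getpbuf()/relpbuf() pair:
 *
 *	static int md_pbuf_freecnt = -1;	(or nswbuf / 2)
 *
 *	bp = getpbuf(&md_pbuf_freecnt);	(may sleep until a pbuf is free)
 *	...set up bp and perform the I/O...
 *	relpbuf(bp, &md_pbuf_freecnt);	(wakes up sleepers on the counter)
 */
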
/*
 * allocate a physical buffer, if one is available.
 *
 *	Note that there is no NULL hack here - all subsystems using this
 *	call understand how to use pfreecnt.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		mtx_unlock(&pbuf_mtx);
		return (NULL);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	initpbuf(bp);
	return (bp);
}

/*
 * release a physical buffer
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
	KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));

	BUF_UNLOCK(bp);

	mtx_lock(&pbuf_mtx);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt != NULL) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	mtx_unlock(&pbuf_mtx);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * removed from the free list.
 *
 * The buffer is locked.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}

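/*
 * Illustrative sketch (an assumption about typical use): a pager ties a
 * pbuf to a vnode for the duration of one paging I/O and undoes the
 * association in the completion path, pairing pbgetvp() with pbrelvp()
 * (defined below):
 *
 *	bp = getpbuf(&vnode_pbuf_freecnt);
 *	pbgetvp(vp, bp);
 *	...fill in b_iocmd, b_data, b_blkno, etc. and issue the I/O...
 *	bufwait(bp);
 *	pbrelvp(bp);
 *	relpbuf(bp, &vnode_pbuf_freecnt);
 */
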
/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * bufobj or removed from the free list.
 *
 * The buffer is locked.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}