/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

static int pbuf_init(void *, int, int);
static int pbuf_ctor(void *, int, void *, int);
static void pbuf_dtor(void *, int, void *);

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{

}

static struct pagerops deadpagerops = {
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops,		/* OBJT_DEAD */
	&sgpagerops,		/* OBJT_SG */
	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
};

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	/*
	 * Initialize known pagers.
	 */
	for (pgops = pagertab; pgops < &pagertab[nitems(pagertab)]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
}

uma_zone_t pbuf_zone;
static int nswbuf_max;

void
vm_pager_bufferinit(void)
{

	/* Main zone for paging bufs. */
	pbuf_zone = uma_zcreate("pbuf", sizeof(struct buf),
	    pbuf_ctor, pbuf_dtor, pbuf_init, NULL, UMA_ALIGN_CACHE,
	    UMA_ZONE_NOFREE);
	/* Few systems may still use this zone directly, so it needs a limit. */
	nswbuf_max += uma_zone_set_max(pbuf_zone, NSWBUF_MIN);
}

uma_zone_t
pbuf_zsecond_create(const char *name, int max)
{
	uma_zone_t zone;

	zone = uma_zsecond_create(name, pbuf_ctor, pbuf_dtor, NULL, NULL,
	    pbuf_zone);

	/*
	 * uma_prealloc() rounds up to items per slab.  If we preallocated
	 * immediately on every pbuf_zsecond_create(), we could accumulate
	 * too large a difference between the hard limit and the
	 * preallocated items, which means wasted memory.
	 */
	if (nswbuf_max > 0)
		nswbuf_max += uma_zone_set_max(zone, max);
	else
		uma_prealloc(pbuf_zone, uma_zone_set_max(zone, max));

	return (zone);
}
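
/*
 * Illustrative only, not part of the original file: a subsystem that does
 * its own paging I/O would typically carve a secondary zone out of the
 * shared pbuf zone and size it against its expected concurrency.  The
 * zone variable, zone name and sizing below are hypothetical placeholders.
 *
 *	static uma_zone_t mypager_pbuf_zone;
 *
 *	mypager_pbuf_zone = pbuf_zsecond_create("mypagerpbuf", nswbuf / 2);
 *	...
 *	bp = uma_zalloc(mypager_pbuf_zone, M_WAITOK);
 *	...
 *	uma_zfree(mypager_pbuf_zone, bp);
 */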

static void
pbuf_prealloc(void *arg __unused)
{

	uma_prealloc(pbuf_zone, nswbuf_max);
	nswbuf_max = -1;
}

SYSINIT(pbuf, SI_SUB_KTHREAD_BUF, SI_ORDER_ANY, pbuf_prealloc, NULL);

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		ret = (*ops->pgo_alloc)(handle, size, prot, off, cred);
	else
		ret = NULL;
	return (ret);
}
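
/*
 * Illustrative only, not part of the original file: a consumer that wants
 * an anonymous, swap-backed object of "npages" pages might call the
 * allocator like this ("npages" and "obj" are hypothetical names):
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_SWAP, NULL, IDX_TO_OFF(npages),
 *	    VM_PROT_DEFAULT, 0, curthread->td_ucred);
 */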

/*
 * The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	(*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS
	/*
	 * All pages must be consecutive, busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.  Some pages may be the
	 * bogus page, but the first and last pages must be real ones.
	 */

	VM_OBJECT_ASSERT_UNLOCKED(object);
	VM_OBJECT_ASSERT_PAGING(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	for (int i = 0; i < count; i++) {
		if (m[i] == bogus_page) {
			KASSERT(i != 0 && i != count - 1,
			    ("%s: page %d is the bogus page", __func__, i));
			continue;
		}
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
		KASSERT(m[i]->pindex == m[0]->pindex + i,
		    ("%s: page %p isn't consecutive", __func__, m[i]));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
#ifdef INVARIANTS
	vm_pindex_t pindex = m[0]->pindex;
#endif
	int r;

	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
	    rahead);
	if (r != VM_PAGER_OK)
		return (r);

	for (int i = 0; i < count; i++) {
		/*
		 * If the pager has replaced a page, assert that it has
		 * updated the array.
		 */
#ifdef INVARIANTS
		VM_OBJECT_RLOCK(object);
		KASSERT(m[i] == vm_page_lookup(object, pindex++),
		    ("%s: mismatch page %p pindex %ju", __func__,
		    m[i], (uintmax_t)pindex - 1));
		VM_OBJECT_RUNLOCK(object);
#endif
		/*
		 * Zero out partially filled data.
		 */
		if (m[i]->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(m[i], TRUE);
	}
	return (VM_PAGER_OK);
}
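
/*
 * Illustrative only, not part of the original file: a typical synchronous
 * page-in of a single busied, invalid page "m" belonging to "object"
 * (names and error disposition are hypothetical):
 *
 *	int rv;
 *
 *	rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
 *	if (rv != VM_PAGER_OK) {
 *		... dispose of the page and return an error ...
 *	}
 *	... on success "m" is fully valid ...
 */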

int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{

	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, rbehind, rahead, iodone, arg));
}

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */
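
/*
 * Illustrative only, not part of the original file: those inlines are thin
 * dispatchers through pagertab.  A sketch of the has-page case (the real
 * definition lives in vm/vm_pager.h and also asserts the object lock):
 *
 *	static __inline boolean_t
 *	vm_pager_has_page(vm_object_t object, vm_pindex_t offset,
 *	    int *before, int *after)
 *	{
 *		return ((*pagertab[object->type]->pgo_haspage)(object,
 *		    offset, before, after));
 *	}
 */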

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}
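
/*
 * Illustrative only, not part of the original file: a pager that keeps its
 * named objects on such a list, protected by its own mutex, might reuse an
 * existing object this way.  "pager_mtx", "pager_object_list" and "handle"
 * are hypothetical stand-ins for the pager's real lock, list and key.
 *
 *	mtx_lock(&pager_mtx);
 *	object = vm_pager_object_lookup(&pager_object_list, handle);
 *	mtx_unlock(&pager_mtx);
 *	if (object == NULL) {
 *		... allocate and insert a new object ...
 *	}
 */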

static int
pbuf_ctor(void *mem, int size, void *arg, int flags)
{
	struct buf *bp = mem;

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;

	/* copied from initpbuf() */
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	bp->b_data = bp->b_kvabase;
	bp->b_xflags = 0;
	bp->b_flags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);

	return (0);
}

static void
pbuf_dtor(void *mem, int size, void *arg)
{
	struct buf *bp = mem;

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	BUF_UNLOCK(bp);
}

static int
pbuf_init(void *mem, int size, int flags)
{
	struct buf *bp = mem;

	bp->b_kvabase = (void *)kva_alloc(MAXPHYS);
	if (bp->b_kvabase == NULL)
		return (ENOMEM);
	bp->b_kvasize = MAXPHYS;
	BUF_LOCKINIT(bp);
	LIST_INIT(&bp->b_dep);
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_xflags = 0;

	return (0);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets the B_PAGING flag to indicate that the vnode is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * vnode or seen by the filesystem.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets the B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * bufobj or seen by the filesystem.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}
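
/*
 * Illustrative only, not part of the original file: the usual life cycle
 * of a pager buffer around a vnode-backed I/O.  "bp" and "vp" are
 * hypothetical; real pagers add their own I/O setup in the middle and may
 * use a secondary pbuf zone instead of pbuf_zone.
 *
 *	bp = uma_zalloc(pbuf_zone, M_WAITOK);
 *	pbgetvp(vp, bp);
 *	... fill in b_data, b_bcount, b_iodone, etc. and issue the I/O ...
 *	pbrelvp(bp);
 *	uma_zfree(pbuf_zone, bp);
 */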

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}