/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/cdefs.h>

#include "opt_param.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

uma_zone_t pbuf_zone;

static int pbuf_init(void *, int, int);
static int pbuf_ctor(void *, int, void *, int);
static void pbuf_dtor(void *, int, void *);

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);
static void dead_pager_getvp(vm_object_t, struct vnode **, bool *);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
{

	if (prev != NULL)
		*prev = 0;
	if (next != NULL)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{
}

static void
dead_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
	/*
	 * For OBJT_DEAD objects, v_writecount was handled in
	 * vnode_pager_dealloc().
	 */
}

static const struct pagerops deadpagerops = {
	.pgo_kvme_type = KVME_TYPE_DEAD,
	.pgo_alloc = dead_pager_alloc,
	.pgo_dealloc = dead_pager_dealloc,
	.pgo_getpages = dead_pager_getpages,
	.pgo_putpages = dead_pager_putpages,
	.pgo_haspage = dead_pager_haspage,
	.pgo_getvp = dead_pager_getvp,
};

const struct pagerops *pagertab[16] __read_mostly = {
	[OBJT_SWAP] = &swappagerops,
	[OBJT_VNODE] = &vnodepagerops,
	[OBJT_DEVICE] = &devicepagerops,
	[OBJT_PHYS] = &physpagerops,
	[OBJT_DEAD] = &deadpagerops,
	[OBJT_SG] = &sgpagerops,
	[OBJT_MGTDEVICE] = &mgtdevicepagerops,
};

static struct mtx pagertab_lock;

void
vm_pager_init(void)
{
	const struct pagerops **pgops;
	int i;

	mtx_init(&pagertab_lock, "dynpag", NULL, MTX_DEF);

	/*
	 * Initialize known pagers.
	 */
	for (i = 0; i < OBJT_FIRST_DYN; i++) {
		pgops = &pagertab[i];
		if (*pgops != NULL && (*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
	}
}

static int nswbuf_max;

void
vm_pager_bufferinit(void)
{
	/* Main zone for paging bufs. */
	pbuf_zone = uma_zcreate("pbuf",
	    sizeof(struct buf) + PBUF_PAGES * sizeof(vm_page_t),
	    pbuf_ctor, pbuf_dtor, pbuf_init, NULL, UMA_ALIGN_CACHE,
	    UMA_ZONE_NOFREE);
	/* A few systems may still use this zone directly, so it needs a limit. */
	nswbuf_max += uma_zone_set_max(pbuf_zone, NSWBUF_MIN);
}

uma_zone_t
pbuf_zsecond_create(const char *name, int max)
{
	uma_zone_t zone;

	zone = uma_zsecond_create(name, pbuf_ctor, pbuf_dtor, NULL, NULL,
	    pbuf_zone);

#ifdef KMSAN
	/*
	 * Shrink the size of the pbuf pools if KMSAN is enabled, otherwise
	 * the shadows of the large KVA allocations eat up too much memory.
	 */
	max /= 3;
#endif

	/*
	 * uma_prealloc() rounds up to items per slab.  If we preallocated
	 * immediately on every pbuf_zsecond_create(), we could accumulate
	 * too large a difference between the hard limit and the number of
	 * preallocated items, which would mean wasted memory.
	 */
	if (nswbuf_max > 0)
		nswbuf_max += uma_zone_set_max(zone, max);
	else
		uma_prealloc(pbuf_zone, uma_zone_set_max(zone, max));

	return (zone);
}

static void
pbuf_prealloc(void *arg __unused)
{

	uma_prealloc(pbuf_zone, nswbuf_max);
	nswbuf_max = -1;
}

SYSINIT(pbuf, SI_SUB_KTHREAD_BUF, SI_ORDER_ANY, pbuf_prealloc, NULL);
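
/*
 * Example (hypothetical, compiled out): a sketch of how a paging subsystem
 * typically consumes these zones.  It creates a capped secondary pbuf zone
 * at init time and allocates/frees pbufs around each I/O.  The "myfs"
 * names and the cap of 16 are illustrative, not part of this file.
 */
#if 0
static uma_zone_t myfs_pbuf_zone;

static void
myfs_init(void)
{
	/* Shares the pbuf backing store; at most 16 bufs in flight. */
	myfs_pbuf_zone = pbuf_zsecond_create("myfspbuf", 16);
}

static void
myfs_do_io(void)
{
	struct buf *bp;

	/* The zone ctor returns the pbuf already exclusively locked. */
	bp = uma_zalloc(myfs_pbuf_zone, M_WAITOK);
	/* ... fill in b_data/b_pages and issue the I/O ... */
	uma_zfree(myfs_pbuf_zone, bp);
}
#endif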

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	vm_object_t object;

	MPASS(type < nitems(pagertab));

	object = (*pagertab[type]->pgo_alloc)(handle, size, prot, off, cred);
	if (object != NULL)
		object->type = type;
	return (object);
}
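
/*
 * Example (hypothetical, compiled out): a sketch of a caller creating an
 * unnamed swap-backed object, in the style of shm/tmpfs consumers.  The
 * wrapper name is illustrative; only vm_pager_allocate() is from this file.
 */
#if 0
static vm_object_t
my_alloc_anon_object(vm_ooffset_t size, struct ucred *cred)
{
	/* NULL handle: an unnamed object; the swap pager backs the pages. */
	return (vm_pager_allocate(OBJT_SWAP, NULL, size,
	    VM_PROT_READ | VM_PROT_WRITE, 0, cred));
}
#endif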

/*
 * The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(object->type < nitems(pagertab));
	(*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS
	/*
	 * All pages must be consecutive, busied, not mapped, not fully valid,
	 * not dirty, and belong to the proper object.  Some pages may be the
	 * bogus page, but the first and last pages must be real ones.
	 */
	VM_OBJECT_ASSERT_UNLOCKED(object);
	VM_OBJECT_ASSERT_PAGING(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	for (int i = 0; i < count; i++) {
		if (m[i] == bogus_page) {
			KASSERT(i != 0 && i != count - 1,
			    ("%s: page %d is the bogus page", __func__, i));
			continue;
		}
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
		KASSERT(m[i]->pindex == m[0]->pindex + i,
		    ("%s: page %p isn't consecutive", __func__, m[i]));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
#ifdef INVARIANTS
	vm_pindex_t pindex = m[0]->pindex;
#endif
	int r;

	MPASS(object->type < nitems(pagertab));
	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
	    rahead);
	if (r != VM_PAGER_OK)
		return (r);

	for (int i = 0; i < count; i++) {
		/*
		 * If the pager has replaced a page, assert that it has
		 * updated the array.
		 */
		KASSERT(m[i] == vm_page_relookup(object, pindex++),
		    ("%s: mismatch page %p pindex %ju", __func__,
		    m[i], (uintmax_t)pindex - 1));

		/*
		 * Zero out partially filled data.
		 */
		if (m[i]->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(m[i], TRUE);
	}
	return (VM_PAGER_OK);
}
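
/*
 * Example (hypothetical, compiled out): a minimal sketch of the calling
 * protocol that vm_pager_assert_in() enforces: the page is grabbed
 * exclusive-busy, paging-in-progress is entered, and the object lock is
 * dropped around the pager call.  Error handling is simplified; real
 * callers (e.g. vm_fault()) also cope with page replacement.
 */
#if 0
static int
my_pagein_one(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	int rv;

	VM_OBJECT_WLOCK(object);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
	if (vm_page_all_valid(m)) {
		/* Already resident and valid; nothing to page in. */
		vm_page_xunbusy(m);
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	vm_object_pip_add(object, 1);
	VM_OBJECT_WUNLOCK(object);

	rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);

	VM_OBJECT_WLOCK(object);
	vm_object_pip_wakeup(object);
	if (rv == VM_PAGER_OK)
		vm_page_xunbusy(m);
	else
		vm_page_free(m);	/* simplified error path */
	VM_OBJECT_WUNLOCK(object);
	return (rv == VM_PAGER_OK ? 0 : EIO);
}
#endif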

int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{

	MPASS(object->type < nitems(pagertab));
	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, rbehind, rahead, iodone, arg));
}

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}
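
/*
 * Example (hypothetical, compiled out): a sketch of the expected calling
 * pattern.  The list and the lock protecting it belong to the caller;
 * the "mydev_*" names are illustrative.
 */
#if 0
static struct pagerlst mydev_object_list;
static struct mtx mydev_list_mtx;

static vm_object_t
mydev_lookup_object(void *handle)
{
	vm_object_t object;

	mtx_lock(&mydev_list_mtx);
	object = vm_pager_object_lookup(&mydev_object_list, handle);
	mtx_unlock(&mydev_list_mtx);
	return (object);	/* referenced, or NULL if absent or dead */
}
#endif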

int
vm_pager_alloc_dyn_type(struct pagerops *ops, int base_type)
{
	int res;

	mtx_lock(&pagertab_lock);
	MPASS(base_type == -1 ||
	    (base_type >= OBJT_SWAP && base_type < nitems(pagertab)));
	for (res = OBJT_FIRST_DYN; res < nitems(pagertab); res++) {
		if (pagertab[res] == NULL)
			break;
	}
	if (res == nitems(pagertab)) {
		mtx_unlock(&pagertab_lock);
		return (-1);
	}
	if (base_type != -1) {
		MPASS(pagertab[base_type] != NULL);
#define	FIX(n)								\
		if (ops->pgo_##n == NULL)				\
			ops->pgo_##n = pagertab[base_type]->pgo_##n
		FIX(init);
		FIX(alloc);
		FIX(dealloc);
		FIX(getpages);
		FIX(getpages_async);
		FIX(putpages);
		FIX(haspage);
		FIX(populate);
		FIX(pageunswapped);
		FIX(update_writecount);
		FIX(release_writecount);
		FIX(set_writeable_dirty);
		FIX(mightbedirty);
		FIX(getvp);
		FIX(freespace);
		FIX(page_inserted);
		FIX(page_removed);
		FIX(can_alloc_page);
#undef FIX
	}
	pagertab[res] = ops;	/* XXXKIB should be rel, but acq is too much */
	mtx_unlock(&pagertab_lock);
	return (res);
}

void
vm_pager_free_dyn_type(objtype_t type)
{
	MPASS(type >= OBJT_FIRST_DYN && type < nitems(pagertab));

	mtx_lock(&pagertab_lock);
	MPASS(pagertab[type] != NULL);
	pagertab[type] = NULL;
	mtx_unlock(&pagertab_lock);
}
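
/*
 * Example (hypothetical, compiled out): a sketch of how a loadable module
 * might register a dynamic pager type derived from the swap pager and
 * release it on unload.  The "mymod_*" names are illustrative.
 */
#if 0
static struct pagerops mymod_pagerops = {
	.pgo_kvme_type = KVME_TYPE_SWAP,
	/* Methods left NULL are inherited from the base type below. */
};
static int mymod_objtype = -1;

static int
mymod_load(void)
{
	mymod_objtype = vm_pager_alloc_dyn_type(&mymod_pagerops, OBJT_SWAP);
	return (mymod_objtype == -1 ? ENOSPC : 0);
}

static void
mymod_unload(void)
{
	if (mymod_objtype != -1)
		vm_pager_free_dyn_type(mymod_objtype);
}
#endif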

static int
pbuf_ctor(void *mem, int size, void *arg, int flags)
{
	struct buf *bp = mem;

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;

	/* copied from initpbuf() */
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* on no queue (QUEUE_NONE) */
	bp->b_data = bp->b_kvabase;
	bp->b_xflags = 0;
	bp->b_flags = B_MAXPHYS;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);

	return (0);
}

static void
pbuf_dtor(void *mem, int size, void *arg)
{
	struct buf *bp = mem;

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	BUF_UNLOCK(bp);
}

static const char pbuf_wmesg[] = "pbufwait";

static int
pbuf_init(void *mem, int size, int flags)
{
	struct buf *bp = mem;

	bp->b_kvabase = (void *)kva_alloc(ptoa(PBUF_PAGES));
	if (bp->b_kvabase == NULL)
		return (ENOMEM);
	bp->b_kvasize = ptoa(PBUF_PAGES);
	BUF_LOCKINIT(bp, pbuf_wmesg);
	LIST_INIT(&bp->b_dep);
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_xflags = 0;

	return (0);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets the B_PAGING flag to indicate that the vnode is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * vnode or removed from the free list.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets the B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer, i.e. the bp has not been linked into the
 * bufobj or removed from the free list.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}
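
/*
 * Example (hypothetical, compiled out): a pbuf borrowed for a vnode paging
 * I/O is bracketed by pbgetvp()/pbrelvp(); while B_PAGING is set the buffer
 * never appears on the vnode's clean/dirty lists.  "my_vnode_pagein" and
 * its wait message are illustrative.
 */
#if 0
static void
my_vnode_pagein(struct vnode *vp, struct buf *bp)
{
	pbgetvp(vp, bp);
	bp->b_iocmd = BIO_READ;
	/* ... set up b_data/b_pages/b_blkno, then issue the I/O ... */
	bstrategy(bp);
	bwait(bp, PVM, "myvnio");
	pbrelvp(bp);
}
#endif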

void
vm_object_set_writeable_dirty(vm_object_t object)
{
	pgo_set_writeable_dirty_t *method;

	MPASS(object->type < nitems(pagertab));

	method = pagertab[object->type]->pgo_set_writeable_dirty;
	if (method != NULL)
		method(object);
}

bool
vm_object_mightbedirty(vm_object_t object)
{
	pgo_mightbedirty_t *method;

	MPASS(object->type < nitems(pagertab));

	method = pagertab[object->type]->pgo_mightbedirty;
	if (method == NULL)
		return (false);
	return (method(object));
}

/*
 * Return the kvme type of the given object.
 * If vpp is not NULL, set it to the object's vm_object_vnode() or NULL.
 */
int
vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
{
	VM_OBJECT_ASSERT_LOCKED(object);
	MPASS(object->type < nitems(pagertab));

	if (vpp != NULL)
		*vpp = vm_object_vnode(object);
	return (pagertab[object->type]->pgo_kvme_type);
}