/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static vm_object_t dead_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
	vm_ooffset_t));
static void dead_pager_putpages __P((vm_object_t, vm_page_t *, int, int, int *));
static boolean_t dead_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));
static void dead_pager_dealloc __P((vm_object_t));

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int req)
{
	return VM_PAGER_FAIL;
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off)
{
	return NULL;
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev,
    int *next)
{
	if (prev)
		*prev = 0;
	if (next)
		*next = 0;
	return FALSE;
}

static void
dead_pager_dealloc(vm_object_t object)
{
	return;
}

static struct pagerops deadpagerops = {
	NULL,			/* pgo_init */
	dead_pager_alloc,
	dead_pager_dealloc,
	dead_pager_getpages,
	dead_pager_putpages,
	dead_pager_haspage
	/* remaining members (e.g. pgo_pageunswapped) default to NULL */
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
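
/*
 * Dispatch sketch (illustration only, not compiled): a VM object's
 * pager is selected by indexing pagertab with the object's type, so
 * every call in this file reduces to the pattern:
 *
 *	struct pagerops *ops = pagertab[object->type];
 *	(*ops->pgo_dealloc)(object);
 *
 * A new OBJT_* object type therefore needs a pagerops slot at the
 * matching index above.
 */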

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) times the maximum swap cluster
 * size (MAXPHYS == 64k) if you want to get the most efficiency; that
 * worst case works out to 64 * 64KB = 4MB, which fits in the 8MB map
 * defined below.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
static int bswneeded;
static vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_init != NULL))
			(*(*pgops)->pgo_init) ();
}

void
vm_pager_bufferinit()
{
	struct buf *bp;
	int i;

	mtx_init(&pbuf_mtx, "pbuf mutex", MTX_DEF);
	bp = swbuf;

	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_xflags = 0;
	}

	cluster_pbuf_freecnt = nswbuf / 2;

	/* Reserve MAXPHYS bytes of KVA for each physical buffer header. */
	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		ret = (*ops->pgo_alloc) (handle, size, prot, off);
	else
		ret = NULL;
	return (ret);
}
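
/*
 * Usage sketch (hypothetical caller, not compiled): the handle is
 * pager-specific - a vnode for OBJT_VNODE, a device for OBJT_DEVICE,
 * NULL for anonymous memory - so a vnode-backed object would be
 * obtained roughly as:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_VNODE, vp, size, VM_PROT_ALL, 0);
 *	if (obj == NULL)
 *		-- no pager for this type, or the pager refused
 */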

void
vm_pager_deallocate(object)
	vm_object_t object;
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 *	vm_pager_strategy:
 *
 *	Called with no specific spl.  Hands the bio directly to the
 *	object's pager strategy routine; if the pager provides none,
 *	the request is failed with ENXIO.
 */
void
vm_pager_strategy(vm_object_t object, struct bio *bp)
{
	if (pagertab[object->type]->pgo_strategy) {
		(*pagertab[object->type]->pgo_strategy)(object, bp);
	} else {
		bp->bio_flags |= BIO_ERROR;
		bp->bio_error = ENXIO;
		biodone(bp);
	}
}
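
/*
 * Caller sketch (hypothetical, not compiled): the caller owns a
 * filled-in struct bio; it either reaches the pager's strategy routine
 * or completes immediately with ENXIO:
 *
 *	bp->bio_cmd = BIO_READ;
 *	bp->bio_offset = offset;
 *	bp->bio_done = my_done;		-- my_done is hypothetical
 *	vm_pager_strategy(object, bp);
 */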

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */
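
/*
 * Sketch of the inline wrappers in action (hypothetical fault-path
 * fragment, not compiled): each wrapper dispatches through pagertab
 * exactly like the routines in this file:
 *
 *	if (vm_pager_has_page(object, pindex, NULL, NULL)) {
 *		rv = vm_pager_get_pages(object, &m, 1, 0);
 *		if (rv != VM_PAGER_OK)
 *			-- the backing store could not supply the page
 *	}
 */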

/*
 *	vm_pager_sync:
 *
 *	Called by the pageout daemon before going back to sleep.
 *	Gives pagers a chance to clean up any completed async paging
 *	operations.
 */
void
vm_pager_sync()
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

/*
 * Map a single page into the shared pager KVA, sleeping until space in
 * pager_map becomes available.  Paired with vm_pager_unmap_page().
 */
vm_offset_t
vm_pager_map_page(m)
	vm_page_t m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
	return (kva);
}

void
vm_pager_unmap_page(kva)
	vm_offset_t kva;
{
	pmap_kremove(kva);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}
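
/*
 * Usage sketch (hypothetical): a pager that must touch a page's
 * contents from kernel context brackets the access with the two
 * routines above:
 *
 *	vm_offset_t kva = vm_pager_map_page(m);
 *	bcopy(src, (caddr_t)kva, PAGE_SIZE);	-- src is hypothetical
 *	vm_pager_unmap_page(kva);
 */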

vm_object_t
vm_pager_object_lookup(pg_list, handle)
	struct pagerlst *pg_list;
	void *handle;
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list)
		if (object->handle == handle)
			return (object);
	return (NULL);
}
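
/*
 * Example: the device pager uses this to reuse an existing object for
 * a handle instead of allocating a duplicate, roughly:
 *
 *	object = vm_pager_object_lookup(&device_pager_object_list, handle);
 *	if (object == NULL)
 *		-- allocate a fresh object and enqueue it
 */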

/*
 * Initialize a physical buffer.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = QUEUE_NONE;
	/* Each pbuf owns a fixed MAXPHYS-sized slice of the swapbkva KVA. */
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_kvabase = bp->b_data;
	bp->b_kvasize = MAXPHYS;
	bp->b_xflags = 0;
	bp->b_flags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	bp->b_magic = B_MAGIC_BIO;
	bp->b_op = &buf_ops_bio;
	BUF_LOCK(bp, LK_EXCLUSIVE);
}

/*
 * Allocate a physical buffer.
 *
 *	There are a limited number (nswbuf) of physical buffers.  We need
 *	to make sure that no single subsystem is able to hog all of them,
 *	so each subsystem implements a counter which is typically initialized
 *	to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 *	increments it on release, and blocks if the counter hits zero.  A
 *	subsystem may initialize the counter to -1 to disable the feature,
 *	but it must still be sure to match up all uses of getpbuf() with
 *	relpbuf() using the same variable.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(pfreecnt)
	int *pfreecnt;
{
	int s;
	struct buf *bp;

	s = splvm();
	mtx_lock(&pbuf_mtx);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0) {
				msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
			}
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt)
		--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	splx(s);

	initpbuf(bp);
	return bp;
}
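
/*
 * Usage sketch (hypothetical subsystem): the same counter must be
 * threaded through both getpbuf() and relpbuf(), typically initialized
 * to nswbuf / 2 (as cluster_pbuf_freecnt is in this file):
 *
 *	static int mysubsys_pbuf_freecnt = -1;	-- set to nswbuf / 2
 *						-- at initialization
 *
 *	bp = getpbuf(&mysubsys_pbuf_freecnt);	-- may sleep
 *	... fill in bp->b_data, issue and await the I/O ...
 *	relpbuf(bp, &mysubsys_pbuf_freecnt);	-- same counter
 */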

/*
 * Allocate a physical buffer, if one is available.
 *
 *	Note that there is no NULL hack here - all subsystems using this
 *	call understand how to use pfreecnt.
 */
struct buf *
trypbuf(pfreecnt)
	int *pfreecnt;
{
	int s;
	struct buf *bp;

	s = splvm();
	mtx_lock(&pbuf_mtx);
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		mtx_unlock(&pbuf_mtx);
		splx(s);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	splx(s);

	initpbuf(bp);
	return bp;
}
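
/*
 * Sketch: a caller that must not sleep probes with trypbuf() and backs
 * off on NULL instead of blocking:
 *
 *	if ((bp = trypbuf(&mysubsys_pbuf_freecnt)) == NULL)
 *		return;		-- try again later
 */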

/*
 * Release a physical buffer.
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(bp, pfreecnt)
	struct buf *bp;
	int *pfreecnt;
{
	int s;

	s = splvm();
	mtx_lock(&pbuf_mtx);

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	if (bp->b_vp)
		pbrelvp(bp);

	BUF_UNLOCK(bp);

	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	mtx_unlock(&pbuf_mtx);
	splx(s);
}