/*-
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static void
phys_pager_init(void)
{

	TAILQ_INIT(&phys_pager_object_list);
	mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}
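
/*
 * Note (for reference): pgo_init handlers are run once at boot, when
 * vm_pager_init() walks the pager switch; no pager objects exist at
 * that point, so the list and mutex above can be set up without
 * additional synchronization.
 */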

static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
	vm_object_t object, object1;
	vm_pindex_t pindex;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	pindex = OFF_TO_IDX(foff + PAGE_MASK + size);
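
	/*
	 * Illustrative example (assuming 4KB pages): with foff == 0 and
	 * size == 5 * PAGE_SIZE + 1, OFF_TO_IDX(foff + PAGE_MASK + size)
	 * yields 6, i.e. the requested byte range is rounded up to whole
	 * pages.
	 */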
	if (handle != NULL) {
		mtx_lock(&phys_pager_mtx);
		/*
		 * Look up pager, creating as necessary.
		 */
		object1 = NULL;
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 * Drop the list mutex while allocating, then recheck
			 * for a racing allocation.
			 */
			mtx_unlock(&phys_pager_mtx);
			object1 = vm_object_allocate(OBJT_PHYS, pindex);
			mtx_lock(&phys_pager_mtx);
			object = vm_pager_object_lookup(&phys_pager_object_list,
			    handle);
			if (object != NULL) {
				/*
				 * We raced with another thread while
				 * allocating the object; keep the winner
				 * and grow it if needed.
				 */
				if (pindex > object->size)
					object->size = pindex;
			} else {
				object = object1;
				object1 = NULL;
				object->handle = handle;
				TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
				    pager_object_list);
			}
		} else {
			if (pindex > object->size)
				object->size = pindex;
		}
		mtx_unlock(&phys_pager_mtx);
		vm_object_deallocate(object1);
	} else {
		object = vm_object_allocate(OBJT_PHYS, pindex);
	}

	return (object);
}
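
/*
 * Illustrative usage (not part of this file): consumers normally reach
 * pgo_alloc through the generic pager interface, e.g.
 *
 *	obj = vm_pager_allocate(OBJT_PHYS, handle, size, VM_PROT_DEFAULT,
 *	    0, cred);
 *
 * which dispatches to phys_pager_alloc() for OBJT_PHYS objects.
 */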

static void
phys_pager_dealloc(vm_object_t object)
{

	if (object->handle != NULL) {
		/*
		 * Temporarily drop the object lock while removing the
		 * object from the pager list.
		 */
		VM_OBJECT_WUNLOCK(object);
		mtx_lock(&phys_pager_mtx);
		TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
		mtx_unlock(&phys_pager_mtx);
		VM_OBJECT_WLOCK(object);
	}
	object->handle = NULL;
	object->type = OBJT_DEAD;
}

/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	for (i = 0; i < count; i++) {
		if (m[i]->valid == 0) {
			if ((m[i]->flags & PG_ZERO) == 0)
				pmap_zero_page(m[i]);
			m[i]->valid = VM_PAGE_BITS_ALL;
		}
		KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
		    ("phys_pager_getpages: partially valid page %p", m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("phys_pager_getpages: dirty page %p", m[i]));
		/* The requested page must remain busy, the others not. */
		if (i == reqpage) {
			vm_page_lock(m[i]);
			vm_page_flash(m[i]);
			vm_page_unlock(m[i]);
		} else
			vm_page_xunbusy(m[i]);
	}
	return (VM_PAGER_OK);
}

static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

	panic("phys_pager_putpage called");
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * XXX 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER	1024
#endif
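
/*
 * Illustrative arithmetic: with PHYSCLUSTER == 1024, a fault at
 * pindex 1500 gives base = 1500 & ~1023 = 1024 and end = 2047, so
 * *before = 476 and *after = 547, i.e. the hint covers the entire
 * 1024-page (4MB) window containing the faulting page.
 */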
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	vm_pindex_t base, end;

	base = pindex & (~(PHYSCLUSTER - 1));
	end = base + (PHYSCLUSTER - 1);
	if (before != NULL)
		*before = pindex - base;
	if (after != NULL)
		*after = end - pindex;
	return (TRUE);
}

struct pagerops physpagerops = {
	.pgo_init =	phys_pager_init,
	.pgo_alloc =	phys_pager_alloc,
	.pgo_dealloc =	phys_pager_dealloc,
	.pgo_getpages =	phys_pager_getpages,
	.pgo_putpages =	phys_pager_putpages,
	.pgo_haspage =	phys_pager_haspage,
};
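
/*
 * Note (for reference): this pagerops vector is registered in the
 * pager switch (pagertab[] in vm/vm_pager.c), indexed by object type,
 * so OBJT_PHYS objects are serviced by the routines above.
 */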