sys/vm/phys_pager.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static int default_phys_pager_getpages(vm_object_t object, vm_page_t *m,
    int count, int *rbehind, int *rahead);
static int default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last);
static boolean_t default_phys_pager_haspage(vm_object_t object,
    vm_pindex_t pindex, int *before, int *after);
struct phys_pager_ops default_phys_pg_ops = {
        .phys_pg_getpages = default_phys_pager_getpages,
        .phys_pg_populate = default_phys_pager_populate,
        .phys_pg_haspage = default_phys_pager_haspage,
        .phys_pg_ctor = NULL,
        .phys_pg_dtor = NULL,
};

static void
phys_pager_init(void)
{

        TAILQ_INIT(&phys_pager_object_list);
        mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

vm_object_t
phys_pager_allocate(void *handle, struct phys_pager_ops *ops, void *data,
    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
        vm_object_t object, object1;
        vm_pindex_t pindex;
        bool init;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        pindex = OFF_TO_IDX(foff + PAGE_MASK + size);
        init = true;

        if (handle != NULL) {
                mtx_lock(&phys_pager_mtx);
                /*
                 * Look up pager, creating as necessary.
                 */
                object1 = NULL;
                object = vm_pager_object_lookup(&phys_pager_object_list, handle);
                if (object == NULL) {
                        /*
                         * Allocate object and associate it with the pager.
                         */
                        mtx_unlock(&phys_pager_mtx);
                        object1 = vm_object_allocate(OBJT_PHYS, pindex);
                        mtx_lock(&phys_pager_mtx);
                        object = vm_pager_object_lookup(&phys_pager_object_list,
                            handle);
                        if (object != NULL) {
                                /*
                                 * We raced with another thread while
                                 * allocating the object.
                                 */
                                if (pindex > object->size)
                                        object->size = pindex;
                                init = false;
                        } else {
                                object = object1;
                                object1 = NULL;
                                object->handle = handle;
                                object->un_pager.phys.ops = ops;
                                object->un_pager.phys.data_ptr = data;
                                if (ops->phys_pg_populate != NULL)
                                        vm_object_set_flag(object, OBJ_POPULATE);
                                TAILQ_INSERT_TAIL(&phys_pager_object_list,
                                    object, pager_object_list);
                        }
                } else {
                        if (pindex > object->size)
                                object->size = pindex;
                }
                mtx_unlock(&phys_pager_mtx);
                vm_object_deallocate(object1);
        } else {
                object = vm_object_allocate(OBJT_PHYS, pindex);
                object->un_pager.phys.ops = ops;
                object->un_pager.phys.data_ptr = data;
                if (ops->phys_pg_populate != NULL)
                        vm_object_set_flag(object, OBJ_POPULATE);
        }
        if (init && ops->phys_pg_ctor != NULL)
                ops->phys_pg_ctor(object, prot, foff, cred);

        return (object);
}
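
/*
 * Illustrative sketch, not part of this file: a hypothetical consumer
 * of phys_pager_allocate() above would supply its own phys_pager_ops
 * when the default zero-fill behavior is not sufficient.  The foo_*
 * names below are invented for the example; only the phys_pager_ops
 * field names and the phys_pager_allocate() signature come from this
 * file.
 *
 *      static struct phys_pager_ops foo_phys_pg_ops = {
 *              .phys_pg_getpages = foo_pg_getpages,
 *              .phys_pg_populate = foo_pg_populate,
 *              .phys_pg_haspage = foo_pg_haspage,
 *              .phys_pg_ctor = foo_pg_ctor,
 *              .phys_pg_dtor = foo_pg_dtor,
 *      };
 *
 *      obj = phys_pager_allocate(handle, &foo_phys_pg_ops, foo_softc,
 *          size, VM_PROT_DEFAULT, 0, cred);
 *
 * With a non-NULL handle the object is entered into
 * phys_pager_object_list and reused by later calls with the same
 * handle; with a NULL handle an anonymous object is created and the
 * phys_pg_ctor callback (if any) always runs.
 */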

static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *ucred)
{
        return (phys_pager_allocate(handle, &default_phys_pg_ops, NULL,
            size, prot, foff, ucred));
}

static void
phys_pager_dealloc(vm_object_t object)
{

        if (object->handle != NULL) {
                VM_OBJECT_WUNLOCK(object);
                mtx_lock(&phys_pager_mtx);
                TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
                mtx_unlock(&phys_pager_mtx);
                VM_OBJECT_WLOCK(object);
        }
        object->type = OBJT_DEAD;
        if (object->un_pager.phys.ops->phys_pg_dtor != NULL)
                object->un_pager.phys.ops->phys_pg_dtor(object);
        object->handle = NULL;
}

/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
default_phys_pager_getpages(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead)
{
        int i;

        VM_OBJECT_ASSERT_WLOCKED(object);
        for (i = 0; i < count; i++) {
                if (m[i]->valid == 0) {
                        if ((m[i]->flags & PG_ZERO) == 0)
                                pmap_zero_page(m[i]);
                        m[i]->valid = VM_PAGE_BITS_ALL;
                }
                KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
                    ("phys_pager_getpages: partially valid page %p", m[i]));
                KASSERT(m[i]->dirty == 0,
                    ("phys_pager_getpages: dirty page %p", m[i]));
        }
        if (rbehind)
                *rbehind = 0;
        if (rahead)
                *rahead = 0;
        return (VM_PAGER_OK);
}

static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
        return (object->un_pager.phys.ops->phys_pg_getpages(object, m,
            count, rbehind, rahead));
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
static int phys_pager_cluster = PHYSCLUSTER;
SYSCTL_INT(_vm, OID_AUTO, phys_pager_cluster, CTLFLAG_RWTUN,
    &phys_pager_cluster, 0,
    "prefault window size for phys pager");

/*
 * Max hint to vm_page_alloc() about the further allocation needs
 * inside the phys_pager_populate() loop.  The number of bits used to
 * implement VM_ALLOC_COUNT() determines the hard limit on this value.
 * That limit is currently 65535.
 */
#define PHYSALLOC       16

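/*
 * Worked example, assuming the defaults above (phys_pager_cluster =
 * 1024, PHYSALLOC = 16): a fault on pindex 3000 yields
 * base = rounddown(3000, 1024) = 2048 and end = 2048 + 1024 - 1 = 3071.
 * Both bounds are then clipped to the object size and to the
 * *first/*last range supplied by the fault handler, and each
 * vm_page_alloc() call below hints that up to 16 further pages are
 * about to be requested.
 */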
static int
default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type __unused, vm_prot_t max_prot __unused, vm_pindex_t *first,
    vm_pindex_t *last)
{
        vm_page_t m;
        vm_pindex_t base, end, i;
        int ahead;

        base = rounddown(pidx, phys_pager_cluster);
        end = base + phys_pager_cluster - 1;
        if (end >= object->size)
                end = object->size - 1;
        if (*first > base)
                base = *first;
        if (end > *last)
                end = *last;
        *first = base;
        *last = end;

        for (i = base; i <= end; i++) {
retry:
                m = vm_page_lookup(object, i);
                if (m == NULL) {
                        ahead = MIN(end - i, PHYSALLOC);
                        m = vm_page_alloc(object, i, VM_ALLOC_NORMAL |
                            VM_ALLOC_ZERO | VM_ALLOC_WAITFAIL |
                            VM_ALLOC_COUNT(ahead));
                        if (m == NULL)
                                goto retry;
                        if ((m->flags & PG_ZERO) == 0)
                                pmap_zero_page(m);
                        m->valid = VM_PAGE_BITS_ALL;
                } else if (vm_page_xbusied(m)) {
                        vm_page_lock(m);
                        VM_OBJECT_WUNLOCK(object);
                        vm_page_busy_sleep(m, "physb", true);
                        VM_OBJECT_WLOCK(object);
                        goto retry;
                } else {
                        vm_page_xbusy(m);
                        if (m->valid != VM_PAGE_BITS_ALL)
                                vm_page_zero_invalid(m, TRUE);
                }

                KASSERT(m->valid == VM_PAGE_BITS_ALL,
                    ("phys_pager_populate: partially valid page %p", m));
                KASSERT(m->dirty == 0,
                    ("phys_pager_populate: dirty page %p", m));
        }
        return (VM_PAGER_OK);
}

static int
phys_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
        return (object->un_pager.phys.ops->phys_pg_populate(object, pidx,
            fault_type, max_prot, first, last));
}

static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

        panic("phys_pager_putpage called");
}

static boolean_t
default_phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        vm_pindex_t base, end;

        base = rounddown(pindex, phys_pager_cluster);
        end = base + phys_pager_cluster - 1;
        if (before != NULL)
                *before = pindex - base;
        if (after != NULL)
                *after = end - pindex;
        return (TRUE);
}
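
/*
 * Example, assuming the default phys_pager_cluster of 1024: a query
 * for pindex 3000 falls in the window [2048, 3071], so *before is
 * reported as 3000 - 2048 = 952 and *after as 3071 - 3000 = 71; the
 * default implementation answers TRUE unconditionally.
 */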

static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        return (object->un_pager.phys.ops->phys_pg_haspage(object, pindex,
            before, after));
}

struct pagerops physpagerops = {
        .pgo_init =     phys_pager_init,
        .pgo_alloc =    phys_pager_alloc,
        .pgo_dealloc =  phys_pager_dealloc,
        .pgo_getpages = phys_pager_getpages,
        .pgo_putpages = phys_pager_putpages,
        .pgo_haspage =  phys_pager_haspage,
        .pgo_populate = phys_pager_populate,
};