/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static int default_phys_pager_getpages(vm_object_t object, vm_page_t *m,
    int count, int *rbehind, int *rahead);
static int default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last);
static boolean_t default_phys_pager_haspage(vm_object_t object,
    vm_pindex_t pindex, int *before, int *after);
const struct phys_pager_ops default_phys_pg_ops = {
        .phys_pg_getpages = default_phys_pager_getpages,
        .phys_pg_populate = default_phys_pager_populate,
        .phys_pg_haspage = default_phys_pager_haspage,
        .phys_pg_ctor = NULL,
        .phys_pg_dtor = NULL,
};

static void
phys_pager_init(void)
{

        TAILQ_INIT(&phys_pager_object_list);
        mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

vm_object_t
phys_pager_allocate(void *handle, const struct phys_pager_ops *ops, void *data,
    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
        vm_object_t object, object1;
        vm_pindex_t pindex;
        bool init;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        pindex = OFF_TO_IDX(foff + PAGE_MASK + size);
        init = true;

        if (handle != NULL) {
                mtx_lock(&phys_pager_mtx);
                /*
                 * Look up pager, creating as necessary.
                 */
                object1 = NULL;
                object = vm_pager_object_lookup(&phys_pager_object_list, handle);
                if (object == NULL) {
                        /*
                         * Allocate object and associate it with the pager.
                         */
                        mtx_unlock(&phys_pager_mtx);
                        object1 = vm_object_allocate(OBJT_PHYS, pindex);
                        mtx_lock(&phys_pager_mtx);
                        object = vm_pager_object_lookup(&phys_pager_object_list,
                            handle);
                        if (object != NULL) {
                                /*
                                 * We raced with another thread while
                                 * allocating the object.
                                 */
                                if (pindex > object->size)
                                        object->size = pindex;
                                init = false;
                        } else {
                                object = object1;
                                object1 = NULL;
                                object->handle = handle;
                                object->un_pager.phys.ops = ops;
                                object->un_pager.phys.data_ptr = data;
                                if (ops->phys_pg_populate != NULL)
                                        vm_object_set_flag(object, OBJ_POPULATE);
                                TAILQ_INSERT_TAIL(&phys_pager_object_list,
                                    object, pager_object_list);
                        }
                } else {
                        if (pindex > object->size)
                                object->size = pindex;
                }
                mtx_unlock(&phys_pager_mtx);
                vm_object_deallocate(object1);
        } else {
                object = vm_object_allocate(OBJT_PHYS, pindex);
                object->un_pager.phys.ops = ops;
                object->un_pager.phys.data_ptr = data;
                if (ops->phys_pg_populate != NULL)
                        vm_object_set_flag(object, OBJ_POPULATE);
        }
        if (init && ops->phys_pg_ctor != NULL)
                ops->phys_pg_ctor(object, prot, foff, cred);

        return (object);
}
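
/*
 * Illustrative usage (editor's sketch, not part of the original
 * source): a consumer with a custom populate hook might create a
 * phys-backed object roughly like this; "foo_phys_ops", "foo_populate"
 * and "sc" are hypothetical names.  Ops left NULL are simply not
 * invoked by the wrappers that check for them (ctor/dtor/populate).
 *
 *        static const struct phys_pager_ops foo_phys_ops = {
 *                .phys_pg_populate = foo_populate,
 *        };
 *
 *        obj = phys_pager_allocate(handle, &foo_phys_ops, sc, size,
 *            VM_PROT_READ | VM_PROT_WRITE, 0, td->td_ucred);
 *
 * With a non-NULL handle the object is shared via
 * phys_pager_object_list; with a NULL handle a fresh OBJT_PHYS object
 * is always allocated.
 */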

static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *ucred)
{
        return (phys_pager_allocate(handle, &default_phys_pg_ops, NULL,
            size, prot, foff, ucred));
}

static void
phys_pager_dealloc(vm_object_t object)
{

        if (object->handle != NULL) {
                VM_OBJECT_WUNLOCK(object);
                mtx_lock(&phys_pager_mtx);
                TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
                mtx_unlock(&phys_pager_mtx);
                VM_OBJECT_WLOCK(object);
        }
        object->type = OBJT_DEAD;
        if (object->un_pager.phys.ops->phys_pg_dtor != NULL)
                object->un_pager.phys.ops->phys_pg_dtor(object);
        object->handle = NULL;
}

/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
default_phys_pager_getpages(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead)
{
        int i;

        for (i = 0; i < count; i++) {
                if (vm_page_none_valid(m[i])) {
                        if ((m[i]->flags & PG_ZERO) == 0)
                                pmap_zero_page(m[i]);
                        vm_page_valid(m[i]);
                }
                KASSERT(vm_page_all_valid(m[i]),
                    ("phys_pager_getpages: partially valid page %p", m[i]));
                KASSERT(m[i]->dirty == 0,
                    ("phys_pager_getpages: dirty page %p", m[i]));
        }
        if (rbehind)
                *rbehind = 0;
        if (rahead)
                *rahead = 0;
        return (VM_PAGER_OK);
}

static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
        return (object->un_pager.phys.ops->phys_pg_getpages(object, m,
            count, rbehind, rahead));
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
static int phys_pager_cluster = PHYSCLUSTER;
SYSCTL_INT(_vm, OID_AUTO, phys_pager_cluster, CTLFLAG_RWTUN,
    &phys_pager_cluster, 0,
    "prefault window size for phys pager");
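
/*
 * Worked example (editor's note, assuming 4KB pages): with
 * phys_pager_cluster == 1024, the window covers 1024 * 4KB = 4MB of
 * the object.  A fault at pindex 1500 gives
 * rounddown(1500, 1024) == 1024, so the hinted window is pindices
 * [1024, 2047], subject to the clamping against object->size and
 * *first/*last performed in default_phys_pager_populate() below.
 */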

/*
 * Maximum hint to vm_page_alloc() about further allocation needs
 * inside the phys_pager_populate() loop.  The number of bits used to
 * implement VM_ALLOC_COUNT() determines the hard limit on this value.
 * That limit is currently 65535.
 */
#define PHYSALLOC       16
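
/*
 * Editor's illustration: at loop index i in
 * default_phys_pager_populate(), the hint is ahead = MIN(end - i, 16),
 * so vm_page_grab() is told that at most 16 further pages are about
 * to be needed; 65535 is the largest hint the 16-bit field behind
 * VM_ALLOC_COUNT() can carry.
 */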

static int
default_phys_pager_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type __unused, vm_prot_t max_prot __unused, vm_pindex_t *first,
    vm_pindex_t *last)
{
        vm_page_t m;
        vm_pindex_t base, end, i;
        int ahead;

        base = rounddown(pidx, phys_pager_cluster);
        end = base + phys_pager_cluster - 1;
        if (end >= object->size)
                end = object->size - 1;
        if (*first > base)
                base = *first;
        if (end > *last)
                end = *last;
        *first = base;
        *last = end;

        for (i = base; i <= end; i++) {
                ahead = MIN(end - i, PHYSALLOC);
                m = vm_page_grab(object, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_COUNT(ahead));
                if (!vm_page_all_valid(m))
                        vm_page_zero_invalid(m, TRUE);
                KASSERT(m->dirty == 0,
                    ("phys_pager_populate: dirty page %p", m));
        }
        return (VM_PAGER_OK);
}
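
/*
 * Worked example (editor's note): with phys_pager_cluster == 1024,
 * object->size == 1536, *first == 0 and *last == 1535, a fault at
 * pidx == 1400 starts from base == 1024 and end == 2047, clamps end
 * to 1535 via object->size, and returns *first == 1024 and
 * *last == 1535; pages 1024..1535 are then grabbed and zero-filled
 * as needed.
 */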

static int
phys_pager_populate(vm_object_t object, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
        return (object->un_pager.phys.ops->phys_pg_populate(object, pidx,
            fault_type, max_prot, first, last));
}

static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{

        panic("phys_pager_putpages called");
}

static boolean_t
default_phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        vm_pindex_t base, end;

        base = rounddown(pindex, phys_pager_cluster);
        end = base + phys_pager_cluster - 1;
        if (before != NULL)
                *before = pindex - base;
        if (after != NULL)
                *after = end - pindex;
        return (TRUE);
}

static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        return (object->un_pager.phys.ops->phys_pg_haspage(object, pindex,
            before, after));
}

const struct pagerops physpagerops = {
        .pgo_kvme_type = KVME_TYPE_PHYS,
        .pgo_init =     phys_pager_init,
        .pgo_alloc =    phys_pager_alloc,
        .pgo_dealloc =  phys_pager_dealloc,
        .pgo_getpages = phys_pager_getpages,
        .pgo_putpages = phys_pager_putpages,
        .pgo_haspage =  phys_pager_haspage,
        .pgo_populate = phys_pager_populate,
};