/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)device_pager.c      8.1 (Berkeley) 6/11/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int,
    boolean_t, int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *,
    int *);
static void dev_pager_free_page(vm_object_t object, vm_page_t m);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
/* protect list manipulation */
static struct mtx dev_pager_mtx;

struct pagerops devicepagerops = {
        .pgo_init =     dev_pager_init,
        .pgo_alloc =    dev_pager_alloc,
        .pgo_dealloc =  dev_pager_dealloc,
        .pgo_getpages = dev_pager_getpages,
        .pgo_putpages = dev_pager_putpages,
        .pgo_haspage =  dev_pager_haspage,
};

struct pagerops mgtdevicepagerops = {
        .pgo_alloc =    dev_pager_alloc,
        .pgo_dealloc =  dev_pager_dealloc,
        .pgo_getpages = dev_pager_getpages,
        .pgo_putpages = dev_pager_putpages,
        .pgo_haspage =  dev_pager_haspage,
};

static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color);
static void old_dev_pager_dtor(void *handle);
static int old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

static struct cdev_pager_ops old_dev_pager_ops = {
        .cdev_pg_ctor = old_dev_pager_ctor,
        .cdev_pg_dtor = old_dev_pager_dtor,
        .cdev_pg_fault = old_dev_pager_fault
};

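/*
 * One-time pager initialization, invoked through pgo_init at boot:
 * set up the global list of device pager objects and the mutex that
 * protects it.
 */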
static void
dev_pager_init(void)
{
        TAILQ_INIT(&dev_pager_object_list);
        mtx_init(&dev_pager_mtx, "dev_pager list", NULL, MTX_DEF);
}

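/*
 * Return the pager object associated with the given handle, with an
 * additional reference taken by vm_pager_object_lookup(), or NULL if
 * no such object exists.
 */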
vm_object_t
cdev_pager_lookup(void *handle)
{
        vm_object_t object;

        mtx_lock(&dev_pager_mtx);
        object = vm_pager_object_lookup(&dev_pager_object_list, handle);
        mtx_unlock(&dev_pager_mtx);
        return (object);
}

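/*
 * Create a device pager object of the requested type for the handle,
 * or grow an existing one.  The supplied ops' constructor validates
 * the mapping and reports the initial page color.  Since the
 * constructor and the object allocation may sleep, the global mutex
 * is dropped in between, and a lost race with a concurrent allocator
 * is resolved by discarding the extra object.
 */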
vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
        vm_object_t object, object1;
        vm_pindex_t pindex;
        u_short color;

        if (tp != OBJT_DEVICE && tp != OBJT_MGTDEVICE)
                return (NULL);

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        size = round_page(size);
        pindex = OFF_TO_IDX(foff + size);

        if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
                return (NULL);
        mtx_lock(&dev_pager_mtx);

        /*
         * Look up pager, creating as necessary.
         */
        object1 = NULL;
        object = vm_pager_object_lookup(&dev_pager_object_list, handle);
        if (object == NULL) {
                /*
                 * Allocate object and associate it with the pager.  Initialize
                 * the object's pg_color based upon the physical address of the
                 * device's memory.
                 */
                mtx_unlock(&dev_pager_mtx);
                object1 = vm_object_allocate(tp, pindex);
                object1->flags |= OBJ_COLORED;
                object1->pg_color = color;
                object1->handle = handle;
                object1->un_pager.devp.ops = ops;
                TAILQ_INIT(&object1->un_pager.devp.devp_pglist);
                mtx_lock(&dev_pager_mtx);
                object = vm_pager_object_lookup(&dev_pager_object_list, handle);
                if (object != NULL) {
                        /*
                         * We raced with another thread while allocating the
                         * object.
                         */
                        if (pindex > object->size)
                                object->size = pindex;
                } else {
                        object = object1;
                        object1 = NULL;
                        object->handle = handle;
                        TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
                            pager_object_list);
                        KASSERT(object->type == tp,
                            ("Inconsistent device pager type %p %d", object, tp));
                }
        } else {
                if (pindex > object->size)
                        object->size = pindex;
        }
        mtx_unlock(&dev_pager_mtx);
        if (object1 != NULL) {
                object1->handle = object1;
                mtx_lock(&dev_pager_mtx);
                TAILQ_INSERT_TAIL(&dev_pager_object_list, object1,
                    pager_object_list);
                mtx_unlock(&dev_pager_mtx);
                vm_object_deallocate(object1);
        }
        return (object);
}

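/*
 * Legacy pgo_alloc entry point: allocate an OBJT_DEVICE object backed
 * by the cdevsw d_mmap() interface.
 */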
static vm_object_t
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{

        return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
            size, prot, foff, cred));
}

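/*
 * Release a page belonging to a device pager object.  Managed pages
 * (OBJT_MGTDEVICE) are unmapped and disassociated from the object;
 * unmanaged fake pages (OBJT_DEVICE) are freed outright.
 */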
void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        if (object->type == OBJT_MGTDEVICE) {
                KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
                pmap_remove_all(m);
                vm_page_lock(m);
                vm_page_remove(m);
                vm_page_unlock(m);
        } else if (object->type == OBJT_DEVICE)
                dev_pager_free_page(object, m);
}

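/*
 * Unlink a fake page from the object's page list and return it to the
 * fake page allocator.
 */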
static void
dev_pager_free_page(vm_object_t object, vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT((object->type == OBJT_DEVICE &&
            (m->oflags & VPO_UNMANAGED) != 0),
            ("Managed device or page obj %p m %p", object, m));
        TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
        vm_page_putfake(m);
}

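/*
 * Tear down a device pager object: run the destructor (with the object
 * lock dropped, as the destructor may sleep), unlink the object from
 * the global list, and free any remaining fake pages.
 */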
static void
dev_pager_dealloc(vm_object_t object)
{
        vm_page_t m;

        VM_OBJECT_UNLOCK(object);
        object->un_pager.devp.ops->cdev_pg_dtor(object->handle);

        mtx_lock(&dev_pager_mtx);
        TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
        mtx_unlock(&dev_pager_mtx);
        VM_OBJECT_LOCK(object);

        if (object->type == OBJT_DEVICE) {
                /*
                 * Free up our fake pages.
                 */
                while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist))
                    != NULL)
                        dev_pager_free_page(object, m);
        }
}

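/*
 * Fault in the requested page through the pager ops.  Only the
 * requested page is populated; the other pages passed in are freed.
 * On success, a fake page backing an OBJT_DEVICE object is queued on
 * the object's page list so that it can be reclaimed at dealloc time.
 */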
static int
dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
{
        int error, i;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        error = object->un_pager.devp.ops->cdev_pg_fault(object,
            IDX_TO_OFF(ma[reqpage]->pindex), PROT_READ, &ma[reqpage]);

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

        for (i = 0; i < count; i++) {
                if (i != reqpage) {
                        vm_page_lock(ma[i]);
                        vm_page_free(ma[i]);
                        vm_page_unlock(ma[i]);
                }
        }

        if (error == VM_PAGER_OK) {
                KASSERT((object->type == OBJT_DEVICE &&
                     (ma[reqpage]->oflags & VPO_UNMANAGED) != 0) ||
                    (object->type == OBJT_MGTDEVICE &&
                     (ma[reqpage]->oflags & VPO_UNMANAGED) == 0),
                    ("Wrong page type %p %p", ma[reqpage], object));
                if (object->type == OBJT_DEVICE) {
                        TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
                            ma[reqpage], pageq);
                }
        }

        return (error);
}

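/*
 * Legacy fault handler: translate the object offset to a physical
 * address with the driver's d_mmap() routine, then either retarget
 * the caller's fake page to that address or replace the caller's page
 * with a newly allocated fake page.
 */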
static int
old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
        vm_pindex_t pidx;
        vm_paddr_t paddr;
        vm_page_t m_paddr, page;
        struct cdev *dev;
        struct cdevsw *csw;
        struct file *fpop;
        struct thread *td;
        vm_memattr_t memattr;
        int ref, ret;

        pidx = OFF_TO_IDX(offset);
        memattr = object->memattr;

        VM_OBJECT_UNLOCK(object);

        dev = object->handle;
        csw = dev_refthread(dev, &ref);
        if (csw == NULL) {
                VM_OBJECT_LOCK(object);
                return (VM_PAGER_FAIL);
        }
        td = curthread;
        fpop = td->td_fpop;
        td->td_fpop = NULL;
        ret = csw->d_mmap(dev, offset, &paddr, prot, &memattr);
        td->td_fpop = fpop;
        dev_relthread(dev, ref);
        if (ret != 0) {
                printf(
            "WARNING: dev_pager_getpage: map function returns error %d\n", ret);
                VM_OBJECT_LOCK(object);
                return (VM_PAGER_FAIL);
        }

        /* If "paddr" is a real page, perform a sanity check on "memattr". */
        if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
            pmap_page_get_memattr(m_paddr) != memattr) {
                memattr = pmap_page_get_memattr(m_paddr);
                printf(
            "WARNING: A device driver has set \"memattr\" inconsistently.\n");
        }
        if (((*mres)->flags & PG_FICTITIOUS) != 0) {
                /*
                 * If the passed in result page is a fake page, update it with
                 * the new physical address.
                 */
                page = *mres;
                VM_OBJECT_LOCK(object);
                vm_page_updatefake(page, paddr, memattr);
        } else {
                /*
                 * Replace the passed in reqpage page with our own fake page
                 * and free up all of the original pages.
                 */
                page = vm_page_getfake(paddr, memattr);
                VM_OBJECT_LOCK(object);
                vm_page_lock(*mres);
                vm_page_free(*mres);
                vm_page_unlock(*mres);
                *mres = page;
                vm_page_insert(page, object, pidx);
        }
        page->valid = VM_PAGE_BITS_ALL;
        return (VM_PAGER_OK);
}

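/*
 * Device memory is never paged out, so a pageout request indicates a
 * bug elsewhere in the VM system.
 */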
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

        panic("dev_pager_putpages called");
}

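/*
 * All offsets within the object are reported resident, and no
 * read-ahead or read-behind is offered; the pager constructor is
 * expected to have validated the mappable range up front.
 */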
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{

        if (before != NULL)
                *before = 0;
        if (after != NULL)
                *after = 0;
        return (TRUE);
}

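/*
 * Legacy constructor: probe every page of the requested range through
 * d_mmap() to confirm that the mapping is allowed, take a reference on
 * the device, and derive the pager color from the physical address of
 * the range's final page.
 */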
static int
old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
        struct cdev *dev;
        struct cdevsw *csw;
        vm_memattr_t dummy;
        vm_ooffset_t off;
        vm_paddr_t paddr;
        unsigned int npages;
        int ref;

        /*
         * Make sure this device can be mapped.
         */
        dev = handle;
        csw = dev_refthread(dev, &ref);
        if (csw == NULL)
                return (ENXIO);

        /*
         * Check that the specified range of the device allows the desired
         * protection.
         *
         * XXX assumes VM_PROT_* == PROT_*
         */
        npages = OFF_TO_IDX(size);
        for (off = foff; npages--; off += PAGE_SIZE) {
                if (csw->d_mmap(dev, off, &paddr, (int)prot, &dummy) != 0) {
                        dev_relthread(dev, ref);
                        return (EINVAL);
                }
        }

        dev_ref(dev);
        dev_relthread(dev, ref);
        *color = atop(paddr) - OFF_TO_IDX(off - PAGE_SIZE);
        return (0);
}

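/*
 * Legacy destructor: drop the device reference taken by the
 * constructor.
 */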
static void
old_dev_pager_dtor(void *handle)
{

        dev_rel(handle);
}