/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)device_pager.c      8.1 (Berkeley) 6/11/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/uma.h>

static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int,
                boolean_t, int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *,
                int *);
static void dev_pager_free_page(vm_object_t object, vm_page_t m);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
/* protect list manipulation */
static struct mtx dev_pager_mtx;

struct pagerops devicepagerops = {
        .pgo_init =     dev_pager_init,
        .pgo_alloc =    dev_pager_alloc,
        .pgo_dealloc =  dev_pager_dealloc,
        .pgo_getpages = dev_pager_getpages,
        .pgo_putpages = dev_pager_putpages,
        .pgo_haspage =  dev_pager_haspage,
};

struct pagerops mgtdevicepagerops = {
        .pgo_alloc =    dev_pager_alloc,
        .pgo_dealloc =  dev_pager_dealloc,
        .pgo_getpages = dev_pager_getpages,
        .pgo_putpages = dev_pager_putpages,
        .pgo_haspage =  dev_pager_haspage,
};

static int old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color);
static void old_dev_pager_dtor(void *handle);
static int old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

static struct cdev_pager_ops old_dev_pager_ops = {
        .cdev_pg_ctor = old_dev_pager_ctor,
        .cdev_pg_dtor = old_dev_pager_dtor,
        .cdev_pg_fault = old_dev_pager_fault
};

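/*
 * Initialize the global list of device pager objects and the mutex that
 * protects it.
 */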
static void
dev_pager_init(void)
{
        TAILQ_INIT(&dev_pager_object_list);
        mtx_init(&dev_pager_mtx, "dev_pager list", NULL, MTX_DEF);
}

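/*
 * Return the device pager object associated with the given handle, or
 * NULL if there is none.
 */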
vm_object_t
cdev_pager_lookup(void *handle)
{
        vm_object_t object;

        mtx_lock(&dev_pager_mtx);
        object = vm_pager_object_lookup(&dev_pager_object_list, handle);
        mtx_unlock(&dev_pager_mtx);
        return (object);
}

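/*
 * Create, or look up and resize, the device pager object for the given
 * handle.  The constructor callback is invoked before any locks are
 * taken; because the list mutex must be dropped while a fresh object is
 * allocated, the lookup is repeated afterwards and a lost race falls
 * back to the object installed by the other thread.
 */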
vm_object_t
cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
    vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred)
{
        vm_object_t object, object1;
        vm_pindex_t pindex;
        u_short color;

        if (tp != OBJT_DEVICE && tp != OBJT_MGTDEVICE)
                return (NULL);

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        size = round_page(size);
        pindex = OFF_TO_IDX(foff + size);

        if (ops->cdev_pg_ctor(handle, size, prot, foff, cred, &color) != 0)
                return (NULL);
        mtx_lock(&dev_pager_mtx);

        /*
         * Look up pager, creating as necessary.
         */
        object1 = NULL;
        object = vm_pager_object_lookup(&dev_pager_object_list, handle);
        if (object == NULL) {
                /*
                 * Allocate object and associate it with the pager.  Initialize
                 * the object's pg_color based upon the physical address of the
                 * device's memory.
                 */
                mtx_unlock(&dev_pager_mtx);
                object1 = vm_object_allocate(tp, pindex);
                object1->flags |= OBJ_COLORED;
                object1->pg_color = color;
                object1->handle = handle;
                object1->un_pager.devp.ops = ops;
                TAILQ_INIT(&object1->un_pager.devp.devp_pglist);
                mtx_lock(&dev_pager_mtx);
                object = vm_pager_object_lookup(&dev_pager_object_list, handle);
                if (object != NULL) {
                        /*
                         * We raced with another thread while allocating the
                         * object.
                         */
                        if (pindex > object->size)
                                object->size = pindex;
                } else {
                        object = object1;
                        object1 = NULL;
                        object->handle = handle;
                        TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
                            pager_object_list);
                        KASSERT(object->type == tp,
                            ("Inconsistent device pager type %p %d", object, tp));
                }
        } else {
                if (pindex > object->size)
                        object->size = pindex;
        }
        mtx_unlock(&dev_pager_mtx);
        if (object1 != NULL) {
                object1->handle = object1;
                mtx_lock(&dev_pager_mtx);
                TAILQ_INSERT_TAIL(&dev_pager_object_list, object1,
                    pager_object_list);
                mtx_unlock(&dev_pager_mtx);
                vm_object_deallocate(object1);
        }
        return (object);
}

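/*
 * Illustrative sketch (not part of this file): a driver that supplies
 * its own cdev_pager_ops might create a managed device object roughly
 * as follows; all mydrv_* names are hypothetical.
 *
 *      static struct cdev_pager_ops mydrv_pager_ops = {
 *              .cdev_pg_ctor =  mydrv_pager_ctor,
 *              .cdev_pg_dtor =  mydrv_pager_dtor,
 *              .cdev_pg_fault = mydrv_pager_fault
 *      };
 *
 *      obj = cdev_pager_allocate(mydrv_handle, OBJT_MGTDEVICE,
 *          &mydrv_pager_ops, size, nprot, *offset, curthread->td_ucred);
 *      if (obj == NULL)
 *              return (EINVAL);
 *
 * The constructor runs before the object exists and may reject the
 * mapping; the fault callback is later invoked from dev_pager_getpages()
 * with the object lock held.
 */

/*
 * The legacy pgo_alloc entry point: device special files are mapped as
 * unmanaged OBJT_DEVICE objects backed by the old_dev_pager_ops
 * callbacks, which go through the cdevsw d_mmap interface.
 */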
static vm_object_t
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{

        return (cdev_pager_allocate(handle, OBJT_DEVICE, &old_dev_pager_ops,
            size, prot, foff, cred));
}

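/*
 * Release a page on behalf of a device pager.  Managed (OBJT_MGTDEVICE)
 * pages are unmapped and removed from their object; unmanaged
 * OBJT_DEVICE fake pages are handed to dev_pager_free_page() below.
 */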
void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        if (object->type == OBJT_MGTDEVICE) {
                KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
                pmap_remove_all(m);
                vm_page_lock(m);
                vm_page_remove(m);
                vm_page_unlock(m);
        } else if (object->type == OBJT_DEVICE)
                dev_pager_free_page(object, m);
}

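/*
 * Unlink an unmanaged fake page from the object's page list and release
 * the fictitious page.
 */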
static void
dev_pager_free_page(vm_object_t object, vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT((object->type == OBJT_DEVICE &&
            (m->oflags & VPO_UNMANAGED) != 0),
            ("Managed device or page obj %p m %p", object, m));
        TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
        vm_page_putfake(m);
}

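/*
 * Tear down a device pager object: run the destructor callback, unlink
 * the object from the global list, and free any remaining fake pages.
 */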
static void
dev_pager_dealloc(vm_object_t object)
{
        vm_page_t m;

        VM_OBJECT_UNLOCK(object);
        object->un_pager.devp.ops->cdev_pg_dtor(object->handle);

        mtx_lock(&dev_pager_mtx);
        TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
        mtx_unlock(&dev_pager_mtx);
        VM_OBJECT_LOCK(object);

        if (object->type == OBJT_DEVICE) {
                /*
                 * Free up our fake pages.
                 */
                while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist))
                    != NULL)
                        dev_pager_free_page(object, m);
        }
}

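/*
 * Satisfy a page-in request by calling the pager's fault callback for
 * the requested page; the other pages in the run are simply freed.  On
 * success, OBJT_DEVICE objects track the resulting fake page on their
 * devp_pglist.
 */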
static int
dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
{
        int error, i;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        error = object->un_pager.devp.ops->cdev_pg_fault(object,
            IDX_TO_OFF(ma[reqpage]->pindex), PROT_READ, &ma[reqpage]);

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

        for (i = 0; i < count; i++) {
                if (i != reqpage) {
                        vm_page_lock(ma[i]);
                        vm_page_free(ma[i]);
                        vm_page_unlock(ma[i]);
                }
        }

        if (error == VM_PAGER_OK) {
                KASSERT((object->type == OBJT_DEVICE &&
                     (ma[reqpage]->oflags & VPO_UNMANAGED) != 0) ||
                    (object->type == OBJT_MGTDEVICE &&
                     (ma[reqpage]->oflags & VPO_UNMANAGED) == 0),
                    ("Wrong page type %p %p", ma[reqpage], object));
                if (object->type == OBJT_DEVICE) {
                        TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
                            ma[reqpage], pageq);
                }
        }

        return (error);
}

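/*
 * Fault handler for the legacy interface: translate the offset to a
 * physical address through the device's d_mmap entry point and install
 * (or update) a fictitious page mapping it.
 */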
static int
old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
        vm_pindex_t pidx;
        vm_paddr_t paddr;
        vm_page_t m_paddr, page;
        struct cdev *dev;
        struct cdevsw *csw;
        struct file *fpop;
        struct thread *td;
        vm_memattr_t memattr;
        int ref, ret;

        pidx = OFF_TO_IDX(offset);
        memattr = object->memattr;

        VM_OBJECT_UNLOCK(object);

        dev = object->handle;
        csw = dev_refthread(dev, &ref);
        if (csw == NULL) {
                VM_OBJECT_LOCK(object);
                return (VM_PAGER_FAIL);
        }
        td = curthread;
        fpop = td->td_fpop;
        td->td_fpop = NULL;
        ret = csw->d_mmap(dev, offset, &paddr, prot, &memattr);
        td->td_fpop = fpop;
        dev_relthread(dev, ref);
        if (ret != 0) {
                printf(
            "WARNING: dev_pager_getpage: map function returns error %d\n", ret);
                VM_OBJECT_LOCK(object);
                return (VM_PAGER_FAIL);
        }

        /* If "paddr" is a real page, perform a sanity check on "memattr". */
        if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
            pmap_page_get_memattr(m_paddr) != memattr) {
                memattr = pmap_page_get_memattr(m_paddr);
                printf(
            "WARNING: A device driver has set \"memattr\" inconsistently.\n");
        }
        if (((*mres)->flags & PG_FICTITIOUS) != 0) {
                /*
                 * If the passed in result page is a fake page, update it with
                 * the new physical address.
                 */
                page = *mres;
                VM_OBJECT_LOCK(object);
                vm_page_updatefake(page, paddr, memattr);
        } else {
                /*
                 * Replace the passed-in reqpage page with our own fake page
                 * and free up all of the original pages.
                 */
                page = vm_page_getfake(paddr, memattr);
                VM_OBJECT_LOCK(object);
                vm_page_lock(*mres);
                vm_page_free(*mres);
                vm_page_unlock(*mres);
                *mres = page;
                vm_page_insert(page, object, pidx);
        }
        page->valid = VM_PAGE_BITS_ALL;
        return (VM_PAGER_OK);
}

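/*
 * Device-backed pages are never paged out, so reaching this entry point
 * indicates a bug.
 */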
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

        panic("dev_pager_putpages called");
}

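/*
 * Every offset within the object is nominally backed by the device, so
 * report the page as present with no read-ahead or read-behind.
 */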
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{

        if (before != NULL)
                *before = 0;
        if (after != NULL)
                *after = 0;
        return (TRUE);
}

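/*
 * Constructor for the legacy pager: verify that the whole range can be
 * mapped with the requested protection via d_mmap(), take a reference
 * on the device, and derive the object's color from the physical
 * address of the last page probed.
 */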
static int
old_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
        struct cdev *dev;
        struct cdevsw *csw;
        vm_memattr_t dummy;
        vm_ooffset_t off;
        vm_paddr_t paddr;
        unsigned int npages;
        int ref;

        /*
         * Make sure this device can be mapped.
         */
        dev = handle;
        csw = dev_refthread(dev, &ref);
        if (csw == NULL)
                return (ENXIO);

        /*
         * Check that the specified range of the device allows the desired
         * protection.
         *
         * XXX assumes VM_PROT_* == PROT_*
         */
        npages = OFF_TO_IDX(size);
        for (off = foff; npages--; off += PAGE_SIZE) {
                if (csw->d_mmap(dev, off, &paddr, (int)prot, &dummy) != 0) {
                        dev_relthread(dev, ref);
                        return (EINVAL);
                }
        }

        dev_ref(dev);
        dev_relthread(dev, ref);
        *color = atop(paddr) - OFF_TO_IDX(off - PAGE_SIZE);
        return (0);
}

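/*
 * Destructor for the legacy pager: drop the device reference taken in
 * old_dev_pager_ctor().
 */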
static void
old_dev_pager_dtor(void *handle)
{

        dev_rel(handle);
}