/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)device_pager.c	8.1 (Berkeley) 6/11/93
 */
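
/*
 * Device pager: backs OBJT_DEVICE VM objects with device memory.  The pages
 * handed out are "fictitious" pages that shadow physical addresses returned
 * by the driver's d_mmap() routine; they are created on demand in
 * dev_pager_getpages() and released in dev_pager_dealloc().
 */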

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t,
    int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
/* protect list manipulation */
static struct mtx dev_pager_mtx;

struct pagerops devicepagerops = {
	.pgo_init =	dev_pager_init,
	.pgo_alloc =	dev_pager_alloc,
	.pgo_dealloc =	dev_pager_dealloc,
	.pgo_getpages =	dev_pager_getpages,
	.pgo_putpages =	dev_pager_putpages,
	.pgo_haspage =	dev_pager_haspage,
};

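/*
 * Called once from vm_pager_init() through the pgo_init method to set up
 * the pager object list and the mutex that protects it.
 */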
static void
dev_pager_init(void)
{

	TAILQ_INIT(&dev_pager_object_list);
	mtx_init(&dev_pager_mtx, "dev_pager list", NULL, MTX_DEF);
}

/*
 * MPSAFE
 */
static vm_object_t
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
	struct cdev *dev;
	vm_object_t object, object1;
	vm_pindex_t pindex;
	unsigned int npages;
	vm_paddr_t paddr;
	vm_ooffset_t off;
	vm_memattr_t dummy;
	struct cdevsw *csw;
	int ref;

	/*
	 * The offset must be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page(size);
	pindex = OFF_TO_IDX(foff + size);

	/*
	 * Make sure this device can be mapped.
	 */
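	/*
	 * dev_refthread() returns the device's cdevsw and holds a temporary
	 * reference on the device so that it cannot be destroyed while its
	 * d_mmap() routine is probed below.
	 */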
	dev = handle;
	csw = dev_refthread(dev, &ref);
	if (csw == NULL)
		return (NULL);

	/*
	 * Check that the specified range of the device allows the desired
	 * protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	npages = OFF_TO_IDX(size);
	for (off = foff; npages--; off += PAGE_SIZE)
		if (csw->d_mmap(dev, off, &paddr, (int)prot, &dummy) != 0) {
			dev_relthread(dev, ref);
			return (NULL);
		}

	mtx_lock(&dev_pager_mtx);

	/*
	 * Look up the pager, creating it as necessary.
	 */
	object1 = NULL;
	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
	if (object == NULL) {
		/*
		 * Allocate the object and associate it with the pager.
		 * Initialize the object's pg_color based upon the physical
		 * address of the device's memory.
		 */
		mtx_unlock(&dev_pager_mtx);
		object1 = vm_object_allocate(OBJT_DEVICE, pindex);
		object1->flags |= OBJ_COLORED;
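		/*
		 * On exit from the probe loop above, "off" is foff + size and
		 * "paddr" holds the physical address of the last probed page,
		 * so this computes the physical page index that corresponds
		 * to object offset zero.
		 */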
		object1->pg_color = atop(paddr) - OFF_TO_IDX(off - PAGE_SIZE);
		TAILQ_INIT(&object1->un_pager.devp.devp_pglist);
		mtx_lock(&dev_pager_mtx);
		object = vm_pager_object_lookup(&dev_pager_object_list, handle);
		if (object != NULL) {
			/*
			 * We raced with another thread while allocating the
			 * object.
			 */
			if (pindex > object->size)
				object->size = pindex;
		} else {
			object = object1;
			object1 = NULL;
			object->handle = handle;
			TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
			    pager_object_list);
		}
	} else {
		if (pindex > object->size)
			object->size = pindex;
	}
	mtx_unlock(&dev_pager_mtx);
	dev_relthread(dev, ref);
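	/*
	 * If we lost the race, dispose of the spare object: point its handle
	 * at itself so that it cannot match a future lookup, insert it on the
	 * list (dev_pager_dealloc() expects to remove it), and drop the last
	 * reference to destroy it.
	 */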
	if (object1 != NULL) {
		object1->handle = object1;
		mtx_lock(&dev_pager_mtx);
		TAILQ_INSERT_TAIL(&dev_pager_object_list, object1,
		    pager_object_list);
		mtx_unlock(&dev_pager_mtx);
		vm_object_deallocate(object1);
	}
	return (object);
}

static void
dev_pager_dealloc(vm_object_t object)
{
	vm_page_t m;

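	/*
	 * Drop the object lock before taking dev_pager_mtx: the lookup path
	 * in dev_pager_alloc() acquires object locks while dev_pager_mtx is
	 * held, so taking the two in the opposite order could deadlock.
	 */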
	VM_OBJECT_UNLOCK(object);
	mtx_lock(&dev_pager_mtx);
	TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
	mtx_unlock(&dev_pager_mtx);
	VM_OBJECT_LOCK(object);
	/*
	 * Free up our fake pages.
	 */
	while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) != NULL) {
		TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
		vm_page_putfake(m);
	}
}

static int
dev_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	vm_pindex_t offset;
	vm_paddr_t paddr;
	vm_page_t m_paddr, page;
	vm_memattr_t memattr;
	struct cdev *dev;
	int i, ref, ret;
	struct cdevsw *csw;
	struct thread *td;
	struct file *fpop;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	dev = object->handle;
	page = m[reqpage];
	offset = page->pindex;
	memattr = object->memattr;
	VM_OBJECT_UNLOCK(object);
	csw = dev_refthread(dev, &ref);
	if (csw == NULL) {
		VM_OBJECT_LOCK(object);
		return (VM_PAGER_FAIL);
	}
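	/*
	 * Clear td_fpop around the d_mmap() call: devfs uses td_fpop to pass
	 * the current struct file to cdevsw methods, and a stale value left
	 * in this thread must not be visible to the driver here.
	 */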
	td = curthread;
	fpop = td->td_fpop;
	td->td_fpop = NULL;
	ret = csw->d_mmap(dev, (vm_ooffset_t)offset << PAGE_SHIFT, &paddr,
	    PROT_READ, &memattr);
	KASSERT(ret == 0,
	    ("dev_pager_getpages: map function returned an error"));
	td->td_fpop = fpop;
	dev_relthread(dev, ref);
	/* If "paddr" is a real page, perform a sanity check on "memattr". */
	if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
	    pmap_page_get_memattr(m_paddr) != memattr) {
		memattr = pmap_page_get_memattr(m_paddr);
		printf(
	    "WARNING: A device driver has set \"memattr\" inconsistently.\n");
	}
	if ((page->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in reqpage page is a fake page, update it with
		 * the new physical address.
		 */
		VM_OBJECT_LOCK(object);
		vm_page_updatefake(page, paddr, memattr);
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_free(m[i]);
				vm_page_unlock(m[i]);
			}
		}
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free all of the original pages.
		 */
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_LOCK(object);
		TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page,
		    pageq);
		for (i = 0; i < count; i++) {
			vm_page_lock(m[i]);
			vm_page_free(m[i]);
			vm_page_unlock(m[i]);
		}
		vm_page_insert(page, object, offset);
		m[reqpage] = page;
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}

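/*
 * The device pager never writes pages back to the device: its pages are
 * fictitious and never dirty, so reaching this function indicates a bug.
 */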
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{

	panic("dev_pager_putpages called");
}

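/*
 * Device memory is always available as far as the pager is concerned, so
 * report the requested page present with no read-ahead or read-behind
 * clustering.
 */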
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{

	if (before != NULL)
		*before = 0;
	if (after != NULL)
		*after = 0;
	return (TRUE);
}