/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)device_pager.c      8.1 (Berkeley) 6/11/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
                vm_ooffset_t);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int,
                boolean_t, int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *,
                int *);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
/* protect list manipulation */
static struct mtx dev_pager_mtx;

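/* zone from which the fictitious pages used below are allocated */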
static uma_zone_t fakepg_zone;

static vm_page_t dev_pager_getfake(vm_paddr_t);
static void dev_pager_putfake(vm_page_t);
static void dev_pager_updatefake(vm_page_t, vm_paddr_t);

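/*
 * Pager method table for OBJT_DEVICE objects; the VM system dispatches to
 * these routines through the pager operations table in vm/vm_pager.c.
 */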
struct pagerops devicepagerops = {
        .pgo_init =     dev_pager_init,
        .pgo_alloc =    dev_pager_alloc,
        .pgo_dealloc =  dev_pager_dealloc,
        .pgo_getpages = dev_pager_getpages,
        .pgo_putpages = dev_pager_putpages,
        .pgo_haspage =  dev_pager_haspage,
};

static void
dev_pager_init(void)
{
        TAILQ_INIT(&dev_pager_object_list);
        mtx_init(&dev_pager_mtx, "dev_pager list", NULL, MTX_DEF);
        fakepg_zone = uma_zcreate("DP fakepg", sizeof(struct vm_page),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_NOFREE|UMA_ZONE_VM);
}

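/*
 * A character device participates in the device pager through its d_mmap
 * entry point, which translates a byte offset within the device into a
 * physical address.  A minimal sketch of such a routine, for a hypothetical
 * foo(4) driver (foo_mmap, foo_membase and FOO_MEM_SIZE are illustrative
 * names only, not part of this file):
 *
 *      static int
 *      foo_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr,
 *          int nprot)
 *      {
 *
 *              if (offset >= FOO_MEM_SIZE)
 *                      return (-1);    * any non-zero value rejects it *
 *              *paddr = foo_membase + offset;
 *              return (0);
 *      }
 *
 * dev_pager_alloc() below probes d_mmap for every page of the requested
 * range before creating the object, and dev_pager_getpages() calls it again
 * at fault time to learn the physical address to map.
 */
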
/*
 * MPSAFE
 */
static vm_object_t
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff)
{
        struct cdev *dev;
        vm_object_t object, object1;
        vm_pindex_t pindex;
        unsigned int npages;
        vm_paddr_t paddr;
        vm_offset_t off;
        struct cdevsw *csw;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        size = round_page(size);
        pindex = OFF_TO_IDX(foff + size);

        /*
         * Make sure this device can be mapped.
         */
        dev = handle;
        csw = dev_refthread(dev);
        if (csw == NULL)
                return (NULL);

        /*
         * Check that the specified range of the device allows the desired
         * protection.
         *
         * XXX assumes VM_PROT_* == PROT_*
         */
        npages = OFF_TO_IDX(size);
        for (off = foff; npages--; off += PAGE_SIZE)
                if ((*csw->d_mmap)(dev, off, &paddr, (int)prot) != 0) {
                        dev_relthread(dev);
                        return (NULL);
                }

        mtx_lock(&dev_pager_mtx);

        /*
         * Look up pager, creating as necessary.
         */
        object1 = NULL;
        object = vm_pager_object_lookup(&dev_pager_object_list, handle);
        if (object == NULL) {
                /*
                 * Allocate object and associate it with the pager.  Initialize
                 * the object's pg_color based upon the physical address of the
                 * device's memory.
                 */
                mtx_unlock(&dev_pager_mtx);
                object1 = vm_object_allocate(OBJT_DEVICE, pindex);
                object1->flags |= OBJ_COLORED;
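                /*
                 * After the probe loop above, off is one page past the end
                 * of the checked range and paddr holds the physical address
                 * of its last page, so the difference below is the physical
                 * page index that device offset 0 corresponds to.
                 */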
                object1->pg_color = atop(paddr) - OFF_TO_IDX(off - PAGE_SIZE);
                mtx_lock(&dev_pager_mtx);
                object = vm_pager_object_lookup(&dev_pager_object_list, handle);
                if (object != NULL) {
                        /*
                         * We raced with another thread while allocating
                         * the object.
                         */
                        if (pindex > object->size)
                                object->size = pindex;
                } else {
                        object = object1;
                        object1 = NULL;
                        object->handle = handle;
                        TAILQ_INIT(&object->un_pager.devp.devp_pglist);
                        TAILQ_INSERT_TAIL(&dev_pager_object_list, object,
                            pager_object_list);
                }
        } else {
                if (pindex > object->size)
                        object->size = pindex;
        }
        mtx_unlock(&dev_pager_mtx);
        dev_relthread(dev);
        vm_object_deallocate(object1);
        return (object);
}

static void
dev_pager_dealloc(vm_object_t object)
{
        vm_page_t m;

        VM_OBJECT_UNLOCK(object);
        mtx_lock(&dev_pager_mtx);
        TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
        mtx_unlock(&dev_pager_mtx);
        VM_OBJECT_LOCK(object);
        /*
         * Free up our fake pages.
         */
        while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) != NULL) {
                TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
                dev_pager_putfake(m);
        }
}

static int
dev_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
        vm_pindex_t offset;
        vm_paddr_t paddr;
        vm_page_t page;
        struct cdev *dev;
        int i, ret;
        int prot;
        struct cdevsw *csw;
        struct thread *td;
        struct file *fpop;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        dev = object->handle;
        offset = m[reqpage]->pindex;
        VM_OBJECT_UNLOCK(object);
        csw = dev_refthread(dev);
        if (csw == NULL)
                panic("dev_pager_getpages: no cdevsw");
        prot = PROT_READ;       /* XXX should pass in? */

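        /*
         * The fault may arrive while this thread is in the middle of an
         * unrelated devfs file operation; clear td_fpop around the d_mmap
         * call so the driver does not consult per-descriptor state for the
         * wrong file.
         */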
        td = curthread;
        fpop = td->td_fpop;
        td->td_fpop = NULL;
        ret = (*csw->d_mmap)(dev, (vm_offset_t)offset << PAGE_SHIFT, &paddr, prot);
        KASSERT(ret == 0, ("dev_pager_getpages: map function returned error"));
        td->td_fpop = fpop;
        dev_relthread(dev);

        if ((m[reqpage]->flags & PG_FICTITIOUS) != 0) {
                /*
                 * If the passed in reqpage page is a fake page, update it with
                 * the new physical address.
                 */
                VM_OBJECT_LOCK(object);
                dev_pager_updatefake(m[reqpage], paddr);
                if (count > 1) {
                        vm_page_lock_queues();
                        for (i = 0; i < count; i++) {
                                if (i != reqpage)
                                        vm_page_free(m[i]);
                        }
                        vm_page_unlock_queues();
                }
        } else {
                /*
                 * Replace the passed in reqpage page with our own fake page
                 * and free up all of the original pages.
                 */
                page = dev_pager_getfake(paddr);
                VM_OBJECT_LOCK(object);
                TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq);
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        vm_page_free(m[i]);
                vm_page_unlock_queues();
                vm_page_insert(page, object, offset);
                m[reqpage] = page;
        }

        return (VM_PAGER_OK);
}

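/*
 * Device memory is never paged out: the object's pages are fictitious,
 * wired and always clean, so this should never be reached.
 */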
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{
        panic("dev_pager_putpages called");
}

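/*
 * Any page within the object's range can be materialized on demand through
 * d_mmap, so always report the page as present and offer no read-ahead or
 * read-behind.
 */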
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        if (before != NULL)
                *before = 0;
        if (after != NULL)
                *after = 0;
        return (TRUE);
}

/*
 * Instantiate a fictitious page.  Unlike physical memory pages, only
 * the machine-independent fields must be initialized.
 */
static vm_page_t
dev_pager_getfake(vm_paddr_t paddr)
{
        vm_page_t m;

        m = uma_zalloc(fakepg_zone, M_WAITOK);

        m->flags = PG_FICTITIOUS;
        m->oflags = VPO_BUSY;
        m->valid = VM_PAGE_BITS_ALL;
        m->dirty = 0;
        m->busy = 0;
        m->queue = PQ_NONE;
        m->object = NULL;

        m->wire_count = 1;
        m->hold_count = 0;
        m->phys_addr = paddr;

        return (m);
}

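/*
 * Release a fictitious page back to the zone it was allocated from.
 */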
static void
dev_pager_putfake(vm_page_t m)
{
        if (!(m->flags & PG_FICTITIOUS))
                panic("dev_pager_putfake: bad page");
        uma_zfree(fakepg_zone, m);
}

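/*
 * Retarget an existing fictitious page at a new physical address and mark
 * it fully valid again.
 */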
static void
dev_pager_updatefake(vm_page_t m, vm_paddr_t paddr)
{
        if (!(m->flags & PG_FICTITIOUS))
                panic("dev_pager_updatefake: bad page");
        m->phys_addr = paddr;
        m->valid = VM_PAGE_BITS_ALL;
}