/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)device_pager.c      8.1 (Berkeley) 6/11/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

static void dev_pager_init(void);
static vm_object_t dev_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
                vm_ooffset_t);
static void dev_pager_dealloc(vm_object_t);
static int dev_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void dev_pager_putpages(vm_object_t, vm_page_t *, int,
                boolean_t, int *);
static boolean_t dev_pager_haspage(vm_object_t, vm_pindex_t, int *,
                int *);

/* list of device pager objects */
static struct pagerlst dev_pager_object_list;
/* protect against object creation */
static struct sx dev_pager_sx;
/* protect list manipulation */
static struct mtx dev_pager_mtx;

static uma_zone_t fakepg_zone;

static vm_page_t dev_pager_getfake(vm_paddr_t);
static void dev_pager_putfake(vm_page_t);
static void dev_pager_updatefake(vm_page_t, vm_paddr_t);

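/*
 * Pager operations for OBJT_DEVICE objects; this table is how the rest of
 * the VM system invokes the device pager.
 */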
struct pagerops devicepagerops = {
        .pgo_init =     dev_pager_init,
        .pgo_alloc =    dev_pager_alloc,
        .pgo_dealloc =  dev_pager_dealloc,
        .pgo_getpages = dev_pager_getpages,
        .pgo_putpages = dev_pager_putpages,
        .pgo_haspage =  dev_pager_haspage,
};

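/*
 * Initialize the device pager: the pager object list, the locks that
 * protect it, and the UMA zone used to allocate fictitious pages.
 */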
static void
dev_pager_init(void)
{
        TAILQ_INIT(&dev_pager_object_list);
        sx_init(&dev_pager_sx, "dev_pager create");
        mtx_init(&dev_pager_mtx, "dev_pager list", NULL, MTX_DEF);
        fakepg_zone = uma_zcreate("DP fakepg", sizeof(struct vm_page),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_NOFREE|UMA_ZONE_VM);
}

/*
 * MPSAFE
 */
static vm_object_t
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff)
{
        struct cdev *dev;
        vm_object_t object;
        vm_pindex_t pindex;
        unsigned int npages;
        vm_paddr_t paddr;
        vm_offset_t off;
        struct cdevsw *csw;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        size = round_page(size);
        pindex = OFF_TO_IDX(foff + size);

        /*
         * Make sure this device can be mapped.
         */
        dev = handle;
        csw = dev_refthread(dev);
        if (csw == NULL)
                return (NULL);
        mtx_lock(&Giant);

        /*
         * Check that the specified range of the device allows the desired
         * protection.
         *
         * XXX assumes VM_PROT_* == PROT_*
         */
        npages = OFF_TO_IDX(size);
        for (off = foff; npages--; off += PAGE_SIZE)
                if ((*csw->d_mmap)(dev, off, &paddr, (int)prot) != 0) {
                        mtx_unlock(&Giant);
                        dev_relthread(dev);
                        return (NULL);
                }

        /*
         * Lock to prevent object creation race condition.
         */
        sx_xlock(&dev_pager_sx);

        /*
         * Look up pager, creating as necessary.
         */
        object = vm_pager_object_lookup(&dev_pager_object_list, handle);
        if (object == NULL) {
                /*
                 * Allocate object and associate it with the pager.
                 */
                object = vm_object_allocate(OBJT_DEVICE, pindex);
                object->handle = handle;
                TAILQ_INIT(&object->un_pager.devp.devp_pglist);
                mtx_lock(&dev_pager_mtx);
                TAILQ_INSERT_TAIL(&dev_pager_object_list, object, pager_object_list);
                mtx_unlock(&dev_pager_mtx);
        } else {
                /*
                 * Gain a reference to the object.
                 */
                vm_object_reference(object);
                if (pindex > object->size)
                        object->size = pindex;
        }

        sx_xunlock(&dev_pager_sx);
        mtx_unlock(&Giant);
        dev_relthread(dev);
        return (object);
}

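/*
 * Both the protection probe in dev_pager_alloc() above and the fault path in
 * dev_pager_getpages() below lean entirely on the device's d_mmap cdevsw
 * entry point: given a byte offset into the device, the driver either fills
 * in the physical address that backs the page or returns a non-zero value to
 * reject the mapping.  A minimal sketch of such a callback follows; the
 * driver name and the base/size variables are purely illustrative and are
 * not part of this file.
 *
 *      static int
 *      foo_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr,
 *          int nprot)
 *      {
 *              if (offset >= foo_size)
 *                      return (-1);    (* any non-zero value rejects *)
 *              *paddr = foo_base + offset;
 *              return (0);
 *      }
 *
 * A non-zero return makes dev_pager_alloc() refuse to create the object and
 * would trip the KASSERT in dev_pager_getpages().
 */
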
static void
dev_pager_dealloc(vm_object_t object)
{
        vm_page_t m;

        mtx_lock(&dev_pager_mtx);
        TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
        mtx_unlock(&dev_pager_mtx);
        /*
         * Free up our fake pages.
         */
        while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) != NULL) {
                TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
                dev_pager_putfake(m);
        }
}

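/*
 * Satisfy a fault on an OBJT_DEVICE object: ask the driver for the physical
 * address backing the requested page and hand back a fictitious page that
 * points at it.
 */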
static int
dev_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
        vm_pindex_t offset;
        vm_paddr_t paddr;
        vm_page_t page;
        struct cdev *dev;
        int i, ret;
        int prot;
        struct cdevsw *csw;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        dev = object->handle;
        offset = m[reqpage]->pindex;
        VM_OBJECT_UNLOCK(object);
        csw = dev_refthread(dev);
        if (csw == NULL)
                panic("dev_pager_getpages: no cdevsw");
        mtx_lock(&Giant);
        prot = PROT_READ;       /* XXX should pass in? */

        ret = (*csw->d_mmap)(dev, (vm_offset_t)offset << PAGE_SHIFT, &paddr, prot);
        KASSERT(ret == 0, ("dev_pager_getpages: map function returns error"));
        mtx_unlock(&Giant);
        dev_relthread(dev);

        if ((m[reqpage]->flags & PG_FICTITIOUS) != 0) {
                /*
                 * If the passed in reqpage page is a fake page, update it with
                 * the new physical address.
                 */
                VM_OBJECT_LOCK(object);
                dev_pager_updatefake(m[reqpage], paddr);
                if (count > 1) {
                        vm_page_lock_queues();
                        for (i = 0; i < count; i++) {
                                if (i != reqpage)
                                        vm_page_free(m[i]);
                        }
                        vm_page_unlock_queues();
                }
        } else {
                /*
                 * Replace the passed in reqpage page with our own fake page
                 * and free up all of the original pages.
                 */
                page = dev_pager_getfake(paddr);
                VM_OBJECT_LOCK(object);
                TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq);
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        vm_page_free(m[i]);
                vm_page_unlock_queues();
                vm_page_insert(page, object, offset);
                m[reqpage] = page;
        }

        return (VM_PAGER_OK);
}

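/*
 * Device memory is never paged out, so putpages should be unreachable.
 */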
static void
dev_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
    int *rtvals)
{
        panic("dev_pager_putpages called");
}

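/*
 * Every page of a device object is nominally resident in the device, so
 * report that the requested page exists and that no read-ahead or
 * read-behind is worthwhile.
 */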
static boolean_t
dev_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        if (before != NULL)
                *before = 0;
        if (after != NULL)
                *after = 0;
        return (TRUE);
}

/*
 * Instantiate a fictitious page.  Unlike physical memory pages, only
 * the machine-independent fields must be initialized.
 */
static vm_page_t
dev_pager_getfake(vm_paddr_t paddr)
{
        vm_page_t m;

        m = uma_zalloc(fakepg_zone, M_WAITOK);

        m->flags = PG_FICTITIOUS;
        m->oflags = VPO_BUSY;
        m->valid = VM_PAGE_BITS_ALL;
        m->dirty = 0;
        m->busy = 0;
        m->queue = PQ_NONE;
        m->object = NULL;

        m->wire_count = 1;
        m->hold_count = 0;
        m->phys_addr = paddr;

        return (m);
}

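/*
 * Release a fictitious page previously obtained from dev_pager_getfake().
 */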
static void
dev_pager_putfake(vm_page_t m)
{
        if (!(m->flags & PG_FICTITIOUS))
                panic("dev_pager_putfake: bad page");
        uma_zfree(fakepg_zone, m);
}

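/*
 * Point an existing fictitious page at a new physical address and mark it
 * valid; used by dev_pager_getpages() when the faulting page is already
 * fictitious.
 */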
static void
dev_pager_updatefake(vm_page_t m, vm_paddr_t paddr)
{
        if (!(m->flags & PG_FICTITIOUS))
                panic("dev_pager_updatefake: bad page");
        m->phys_addr = paddr;
        m->valid = VM_PAGE_BITS_ALL;
}