/*-
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static void
phys_pager_init(void)
{

        TAILQ_INIT(&phys_pager_object_list);
        mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

/*
 * MPSAFE
 */
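/*
 * Allocate (or look up) a VM object backed by the phys pager.  A non-NULL
 * handle names a shared object: it is looked up on phys_pager_object_list
 * and created there if missing, so later allocations with the same handle
 * return the same object.  The list mutex is dropped around
 * vm_object_allocate(), since object allocation may sleep, and the lookup
 * is repeated afterwards to catch a racing allocation of the same handle.
 * With a NULL handle an anonymous, unlisted object is returned.
 */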
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
        vm_object_t object, object1;
        vm_pindex_t pindex;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        pindex = OFF_TO_IDX(foff + PAGE_MASK + size);

        if (handle != NULL) {
                mtx_lock(&phys_pager_mtx);
                /*
                 * Look up pager, creating as necessary.
                 */
                object1 = NULL;
                object = vm_pager_object_lookup(&phys_pager_object_list, handle);
                if (object == NULL) {
                        /*
                         * Allocate object and associate it with the pager.
                         */
                        mtx_unlock(&phys_pager_mtx);
                        object1 = vm_object_allocate(OBJT_PHYS, pindex);
                        mtx_lock(&phys_pager_mtx);
                        object = vm_pager_object_lookup(&phys_pager_object_list,
                            handle);
                        if (object != NULL) {
                                /*
                                 * We raced with another thread while
                                 * allocating the object.
                                 */
                                if (pindex > object->size)
                                        object->size = pindex;
                        } else {
                                object = object1;
                                object1 = NULL;
                                object->handle = handle;
                                TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
                                    pager_object_list);
                        }
                } else {
                        if (pindex > object->size)
                                object->size = pindex;
                }
                mtx_unlock(&phys_pager_mtx);
                vm_object_deallocate(object1);
        } else {
                object = vm_object_allocate(OBJT_PHYS, pindex);
        }

        return (object);
}

/*
 * MPSAFE
 */
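/*
 * Tear down pager state for an object being destroyed.  Named objects are
 * unlinked from phys_pager_object_list; the object lock (held by the
 * caller) is released around the list manipulation so that phys_pager_mtx
 * is not acquired with the object lock held, and is reacquired before
 * returning.
 */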
static void
phys_pager_dealloc(vm_object_t object)
{

        if (object->handle != NULL) {
                VM_OBJECT_UNLOCK(object);
                mtx_lock(&phys_pager_mtx);
                TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
                mtx_unlock(&phys_pager_mtx);
                VM_OBJECT_LOCK(object);
        }
}

/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
        int i;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
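        /*
         * There is no backing store to read from: any page that is still
         * invalid is simply zero-filled (unless it was already pre-zeroed,
         * as indicated by PG_ZERO) and marked fully valid.
         */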
        for (i = 0; i < count; i++) {
                if (m[i]->valid == 0) {
                        if ((m[i]->flags & PG_ZERO) == 0)
                                pmap_zero_page(m[i]);
                        m[i]->valid = VM_PAGE_BITS_ALL;
                }
                KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
                    ("phys_pager_getpages: partially valid page %p", m[i]));
                KASSERT(m[i]->dirty == 0,
                    ("phys_pager_getpages: dirty page %p", m[i]));
                /* The requested page must remain busy, the others not. */
                if (i == reqpage)
                        vm_page_flash(m[i]);
                else
                        vm_page_wakeup(m[i]);
        }
        return (VM_PAGER_OK);
}

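/*
 * OBJT_PHYS objects have no backing store and their pages are never
 * written back, so a putpages request indicates a bug elsewhere.
 */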
static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
                    int *rtvals)
{

        panic("phys_pager_putpage called");
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * XXX 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
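/*
 * Example, assuming PHYSCLUSTER == 1024 and 4KB pages: a fault at
 * pindex 1500 computes base = 1024 and end = 2047, reporting
 * *before = 476 and *after = 547, i.e. the entire surrounding
 * 4MB-aligned window is offered for prefaulting.
 */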
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
                   int *after)
{
        vm_pindex_t base, end;

        base = pindex & (~(PHYSCLUSTER - 1));
        end = base + (PHYSCLUSTER - 1);
        if (before != NULL)
                *before = pindex - base;
        if (after != NULL)
                *after = end - pindex;
        return (TRUE);
}

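/*
 * Pager method table for OBJT_PHYS objects; vm_pager.c dispatches to these
 * routines through its pager operations table.
 */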
struct pagerops physpagerops = {
        .pgo_init =     phys_pager_init,
        .pgo_alloc =    phys_pager_alloc,
        .pgo_dealloc =  phys_pager_dealloc,
        .pgo_getpages = phys_pager_getpages,
        .pgo_putpages = phys_pager_putpages,
        .pgo_haspage =  phys_pager_haspage,
};