/*-
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/* list of phys pager objects */
static struct pagerlst phys_pager_object_list;
/* protect access to phys_pager_object_list */
static struct mtx phys_pager_mtx;

static void
phys_pager_init(void)
{

        TAILQ_INIT(&phys_pager_object_list);
        mtx_init(&phys_pager_mtx, "phys_pager list", NULL, MTX_DEF);
}

/*
 * MPSAFE
 */
static vm_object_t
phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
        vm_object_t object, object1;
        vm_pindex_t pindex;

        /*
         * Offset should be page aligned.
         */
        if (foff & PAGE_MASK)
                return (NULL);

        /*
         * Round the size up to a whole number of pages; e.g., with 4K
         * pages, foff = 0 and size = 5000 yields a 2-page object.
         */
        pindex = OFF_TO_IDX(foff + PAGE_MASK + size);

        if (handle != NULL) {
                mtx_lock(&phys_pager_mtx);
                /*
                 * Look up pager, creating as necessary.
                 */
                object1 = NULL;
                object = vm_pager_object_lookup(&phys_pager_object_list, handle);
                if (object == NULL) {
                        /*
                         * Allocate object and associate it with the pager.
                         * The mutex cannot be held across the allocation,
                         * so recheck for a racing insertion afterwards.
                         */
                        mtx_unlock(&phys_pager_mtx);
                        object1 = vm_object_allocate(OBJT_PHYS, pindex);
                        mtx_lock(&phys_pager_mtx);
                        object = vm_pager_object_lookup(&phys_pager_object_list,
                            handle);
                        if (object != NULL) {
                                /*
                                 * We raced with another thread while
                                 * allocating the object; keep the winner
                                 * and grow it if necessary.
                                 */
                                if (pindex > object->size)
                                        object->size = pindex;
                        } else {
                                object = object1;
                                object1 = NULL;
                                object->handle = handle;
                                TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
                                    pager_object_list);
                        }
                } else {
                        if (pindex > object->size)
                                object->size = pindex;
                }
                mtx_unlock(&phys_pager_mtx);
                vm_object_deallocate(object1);
        } else {
                object = vm_object_allocate(OBJT_PHYS, pindex);
        }

        return (object);
}

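/*
 * Usage sketch (illustrative only, not part of this file): consumers do
 * not call phys_pager_alloc() directly; they go through
 * vm_pager_allocate() with OBJT_PHYS.  The `dev_handle' below is a
 * hypothetical per-device token used so that repeated calls share one
 * object per device.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_PHYS, dev_handle, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
 *	if (obj == NULL)
 *		return (EINVAL);
 */
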
/*
 * MPSAFE
 */
static void
phys_pager_dealloc(vm_object_t object)
{

        if (object->handle != NULL) {
                VM_OBJECT_WUNLOCK(object);
                mtx_lock(&phys_pager_mtx);
                TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
                mtx_unlock(&phys_pager_mtx);
                VM_OBJECT_WLOCK(object);
        }
}

/*
 * Fill as many pages as vm_fault has allocated for us.  There is no
 * backing store to read from: a page is "paged in" simply by zeroing
 * it and marking it fully valid.
 */
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
        int i;

        VM_OBJECT_ASSERT_WLOCKED(object);
        for (i = 0; i < count; i++) {
                if (m[i]->valid == 0) {
                        if ((m[i]->flags & PG_ZERO) == 0)
                                pmap_zero_page(m[i]);
                        m[i]->valid = VM_PAGE_BITS_ALL;
                }
                KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
                    ("phys_pager_getpages: partially valid page %p", m[i]));
                KASSERT(m[i]->dirty == 0,
                    ("phys_pager_getpages: dirty page %p", m[i]));
                /* The requested page must remain busy, the others not. */
                if (i == reqpage) {
                        vm_page_lock(m[i]);
                        vm_page_flash(m[i]);
                        vm_page_unlock(m[i]);
                } else
                        vm_page_xunbusy(m[i]);
        }
        return (VM_PAGER_OK);
}

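/*
 * Caller-side sketch of the getpages contract (illustrative, not code
 * taken from vm_fault itself): the fault handler passes an array of
 * exclusive-busied pages and expects only m[reqpage] to remain busied
 * on a VM_PAGER_OK return.
 *
 *	rv = vm_pager_get_pages(object, marray, count, reqpage);
 *	if (rv == VM_PAGER_OK)
 *		...marray[reqpage] is now fully valid and still xbusied...
 */
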
static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
                    int *rtvals)
{

        panic("phys_pager_putpages called");
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * XXX 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER 1024
#endif
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
                   int *after)
{
        vm_pindex_t base, end;

        /* PHYSCLUSTER must be a power of two for this masking to work. */
        base = pindex & (~(PHYSCLUSTER - 1));
        end = base + (PHYSCLUSTER - 1);
        if (before != NULL)
                *before = pindex - base;
        if (after != NULL)
                *after = end - pindex;
        return (TRUE);
}

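/*
 * Worked example of the window arithmetic above (assuming the default
 * PHYSCLUSTER of 1024): for pindex 1100, base = 1100 & ~1023 = 1024 and
 * end = 2047, so *before = 76 and *after = 947; i.e., the cluster hint
 * covers pages 1024..2047 around the faulting page.
 */
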
struct pagerops physpagerops = {
        .pgo_init =     phys_pager_init,
        .pgo_alloc =    phys_pager_alloc,
        .pgo_dealloc =  phys_pager_dealloc,
        .pgo_getpages = phys_pager_getpages,
        .pgo_putpages = phys_pager_putpages,
        .pgo_haspage =  phys_pager_haspage,
};
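
/*
 * A sketch of how these ops are reached (see vm_pager.c for the
 * authoritative table): physpagerops is registered in pagertab[] under
 * OBJT_PHYS, so a vm_pager_allocate(OBJT_PHYS, ...) call dispatches to
 * phys_pager_alloc() above, and faults on the resulting object reach
 * phys_pager_getpages().
 */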