/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_page.c     7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_extern.h>

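/*
 * Attempt to launder the given page.  The page and the page queues must be
 * locked on entry.  Returns 0 if the page was cleaned or cached, EAGAIN if
 * the caller should simply skip the page, and EBUSY if the page queue scan
 * must be restarted.
 */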
static int
vm_contig_launder_page(vm_page_t m, vm_page_t *next)
{
        vm_object_t object;
        vm_page_t m_tmp;
        struct vnode *vp;
        struct mount *mp;
        int vfslocked;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        vm_page_lock_assert(m, MA_OWNED);
        object = m->object;
        if (!VM_OBJECT_TRYLOCK(object) &&
            (!vm_pageout_fallback_object_lock(m, next) || m->hold_count != 0)) {
                vm_page_unlock(m);
                VM_OBJECT_UNLOCK(object);
                return (EAGAIN);
        }
        if (vm_page_sleep_if_busy(m, TRUE, "vpctw0")) {
                VM_OBJECT_UNLOCK(object);
                vm_page_lock_queues();
                return (EBUSY);
        }
        vm_page_test_dirty(m);
        if (m->dirty == 0)
                pmap_remove_all(m);
        if (m->dirty != 0) {
                vm_page_unlock(m);
                if ((object->flags & OBJ_DEAD) != 0) {
                        VM_OBJECT_UNLOCK(object);
                        return (EAGAIN);
                }
                if (object->type == OBJT_VNODE) {
                        vm_page_unlock_queues();
                        vp = object->handle;
                        vm_object_reference_locked(object);
                        VM_OBJECT_UNLOCK(object);
                        (void) vn_start_write(vp, &mp, V_WAIT);
                        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
                        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                        VM_OBJECT_LOCK(object);
                        vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
                        VM_OBJECT_UNLOCK(object);
                        VOP_UNLOCK(vp, 0);
                        VFS_UNLOCK_GIANT(vfslocked);
                        vm_object_deallocate(object);
                        vn_finished_write(mp);
                        vm_page_lock_queues();
                        return (0);
                } else if (object->type == OBJT_SWAP ||
                           object->type == OBJT_DEFAULT) {
                        vm_page_unlock_queues();
                        m_tmp = m;
                        vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC, 0, NULL);
                        VM_OBJECT_UNLOCK(object);
                        vm_page_lock_queues();
                        return (0);
                }
        } else {
                vm_page_cache(m);
                vm_page_unlock(m);
        }
        VM_OBJECT_UNLOCK(object);
        return (0);
}

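/*
 * Scan the given page queue for pages that lie entirely within the physical
 * address range [low, high] and try to launder them.  Returns TRUE as soon
 * as one page is successfully laundered and FALSE once the scan completes
 * or must be abandoned.
 */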
static int
vm_contig_launder(int queue, vm_paddr_t low, vm_paddr_t high)
{
        vm_page_t m, next;
        vm_paddr_t pa;
        int error;

        TAILQ_FOREACH_SAFE(m, &vm_page_queues[queue].pl, pageq, next) {

                /* Skip marker pages */
                if ((m->flags & PG_MARKER) != 0)
                        continue;

                pa = VM_PAGE_TO_PHYS(m);
                if (pa < low || pa + PAGE_SIZE > high)
                        continue;

                if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
                        vm_page_unlock(m);
                        continue;
                }
                KASSERT(m->queue == queue,
                    ("vm_contig_launder: page %p's queue is not %d", m, queue));
                error = vm_contig_launder_page(m, &next);
                vm_page_lock_assert(m, MA_NOTOWNED);
                if (error == 0)
                        return (TRUE);
                if (error == EBUSY)
                        return (FALSE);
        }
        return (FALSE);
}

/*
 *      Frees the given physically contiguous pages.
 *
 *      N.B.: Any pages with PG_ZERO set must, in fact, be zero filled.
 */
static void
vm_page_release_contig(vm_page_t m, vm_pindex_t count)
{

        while (count--) {
                /* Leave PG_ZERO unchanged. */
                vm_page_free_toq(m);
                m++;
        }
}

/*
 * Increase the number of cached pages.  The "tries" argument controls how
 * aggressively the page queues are laundered: 0 launders nothing, 1
 * launders the inactive queue, and 2 or more launders the active queue as
 * well.
 */
void
vm_contig_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
{
        int actl, actmax, inactl, inactmax;

        vm_page_lock_queues();
        inactl = 0;
        inactmax = tries < 1 ? 0 : cnt.v_inactive_count;
        actl = 0;
        actmax = tries < 2 ? 0 : cnt.v_active_count;
again:
        if (inactl < inactmax && vm_contig_launder(PQ_INACTIVE, low, high)) {
                inactl++;
                goto again;
        }
        if (actl < actmax && vm_contig_launder(PQ_ACTIVE, low, high)) {
                actl++;
                goto again;
        }
        vm_page_unlock_queues();
}
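
/*
 * Callers invoke vm_contig_grow_cache() with an escalating "tries"
 * argument, so that the first retry launders nothing, the second launders
 * only the inactive queue, and the third launders the active queue as
 * well.  A minimal sketch of that retry idiom (a hypothetical caller,
 * mirroring the loops in kmem_alloc_attr() and kmem_alloc_contig() below):
 *
 *      tries = 0;
 *      while ((m = vm_phys_alloc_contig(1, low, high, PAGE_SIZE, 0)) ==
 *          NULL) {
 *              if (tries >= ((flags & M_NOWAIT) != 0 ? 1 : 3))
 *                      break;
 *              vm_contig_grow_cache(tries, low, high);
 *              tries++;
 *      }
 */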

/*
 * Allocates a region from the kernel address map, allocates pages within
 * the specified physical address range to the kernel object, creates a
 * wired mapping from the region to these pages, and returns the region's
 * starting virtual address.  The allocated pages are not necessarily
 * physically contiguous.  If M_ZERO is specified through the given flags,
 * then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
        vm_object_t object = kernel_object;
        vm_offset_t addr, i, offset;
        vm_page_t m;
        int tries;

        size = round_page(size);
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(object);
        vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
            VM_PROT_ALL, 0);
        VM_OBJECT_LOCK(object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                tries = 0;
retry:
                m = vm_phys_alloc_contig(1, low, high, PAGE_SIZE, 0);
                if (m == NULL) {
                        if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
                                VM_OBJECT_UNLOCK(object);
                                vm_map_unlock(map);
                                vm_contig_grow_cache(tries, low, high);
                                vm_map_lock(map);
                                VM_OBJECT_LOCK(object);
                                tries++;
                                goto retry;
                        }
                        while (i != 0) {
                                i -= PAGE_SIZE;
                                m = vm_page_lookup(object, OFF_TO_IDX(offset +
                                    i));
                                vm_page_free(m);
                        }
                        VM_OBJECT_UNLOCK(object);
                        vm_map_delete(map, addr, addr + size);
                        vm_map_unlock(map);
                        return (0);
                }
                if (memattr != VM_MEMATTR_DEFAULT)
                        pmap_page_set_memattr(m, memattr);
                vm_page_insert(m, object, OFF_TO_IDX(offset + i));
                if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                m->valid = VM_PAGE_BITS_ALL;
        }
        VM_OBJECT_UNLOCK(object);
        vm_map_unlock(map);
        vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
            VM_MAP_WIRE_NOHOLES);
        return (addr);
}
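
/*
 * A sketch of a typical kmem_alloc_attr() call (hypothetical example; the
 * attribute assumes a platform that defines VM_MEMATTR_WRITE_COMBINING):
 * allocate a wired, zeroed 64KB buffer backed by pages below 4GB and
 * mapped write-combining:
 *
 *      vm_offset_t va;
 *
 *      va = kmem_alloc_attr(kernel_map, 64 * 1024, M_WAITOK | M_ZERO,
 *          0, 0xffffffff, VM_MEMATTR_WRITE_COMBINING);
 *      if (va == 0)
 *              return (ENOMEM);
 *
 * The backing pages are wired but not necessarily physically contiguous;
 * use kmem_alloc_contig() below when contiguity is required.
 */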

/*
 *      Allocates a region from the kernel address map, inserts the
 *      given physically contiguous pages into the kernel object,
 *      creates a wired mapping from the region to the pages, and
 *      returns the region's starting virtual address.  If M_ZERO is
 *      specified through the given flags, then the pages are zeroed
 *      before they are mapped.
 */
static vm_offset_t
contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, vm_memattr_t memattr,
    int flags)
{
        vm_object_t object = kernel_object;
        vm_offset_t addr, tmp_addr;

        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                return (0);
        }
        vm_object_reference(object);
        vm_map_insert(map, object, addr - VM_MIN_KERNEL_ADDRESS,
            addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
        VM_OBJECT_LOCK(object);
        for (tmp_addr = addr; tmp_addr < addr + size; tmp_addr += PAGE_SIZE) {
                if (memattr != VM_MEMATTR_DEFAULT)
                        pmap_page_set_memattr(m, memattr);
                vm_page_insert(m, object,
                    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
                if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                m->valid = VM_PAGE_BITS_ALL;
                m++;
        }
        VM_OBJECT_UNLOCK(object);
        vm_map_wire(map, addr, addr + size,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        return (addr);
}

void *
contigmalloc(
        unsigned long size,     /* should be size_t here and for malloc() */
        struct malloc_type *type,
        int flags,
        vm_paddr_t low,
        vm_paddr_t high,
        unsigned long alignment,
        unsigned long boundary)
{
        void *ret;

        ret = (void *)kmem_alloc_contig(kernel_map, size, flags, low, high,
            alignment, boundary, VM_MEMATTR_DEFAULT);
        if (ret != NULL)
                malloc_type_allocated(type, round_page(size));
        return (ret);
}
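
/*
 * A sketch of typical contigmalloc()/contigfree() usage (hypothetical
 * driver example): allocate an 8KB DMA buffer that is physically
 * contiguous, lies below 4GB, is page aligned, and does not cross a 64KB
 * boundary:
 *
 *      void *buf;
 *
 *      buf = contigmalloc(8192, M_DEVBUF, M_WAITOK, 0, 0xffffffff,
 *          PAGE_SIZE, 64 * 1024);
 *      if (buf == NULL)
 *              return (ENOMEM);
 *      ...
 *      contigfree(buf, 8192, M_DEVBUF);
 */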

vm_offset_t
kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, unsigned long alignment, unsigned long boundary,
    vm_memattr_t memattr)
{
        vm_offset_t ret;
        vm_page_t pages;
        unsigned long npgs;
        int tries;

        size = round_page(size);
        npgs = size >> PAGE_SHIFT;
        tries = 0;
retry:
        pages = vm_phys_alloc_contig(npgs, low, high, alignment, boundary);
        if (pages == NULL) {
                if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
                        vm_contig_grow_cache(tries, low, high);
                        tries++;
                        goto retry;
                }
                ret = 0;
        } else {
                ret = contigmapping(map, size, pages, memattr, flags);
                if (ret == 0)
                        vm_page_release_contig(pages, npgs);
        }
        return (ret);
}
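
/*
 * contigmalloc() above is a thin wrapper around kmem_alloc_contig(); the
 * latter may also be called directly when a non-default memory attribute
 * is needed.  A sketch (hypothetical example, assuming the platform
 * defines VM_MEMATTR_UNCACHEABLE):
 *
 *      vm_offset_t va;
 *
 *      va = kmem_alloc_contig(kernel_map, 2 * PAGE_SIZE, M_WAITOK, 0,
 *          0xffffffff, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
 */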

void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

        kmem_free(kernel_map, (vm_offset_t)addr, size);
        malloc_type_freed(type, round_page(size));
}