2 * Copyright (c) 1991 Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
56 * Carnegie Mellon requests users of this software to return to
58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
59 * School of Computer Science
60 * Carnegie Mellon University
61 * Pittsburgh PA 15213-3890
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
67 #include <sys/param.h>
68 #include <sys/systm.h>
70 #include <sys/malloc.h>
71 #include <sys/mutex.h>
73 #include <sys/vmmeter.h>
74 #include <sys/vnode.h>
77 #include <vm/vm_param.h>
78 #include <vm/vm_kern.h>
80 #include <vm/vm_map.h>
81 #include <vm/vm_object.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_pageout.h>
84 #include <vm/vm_pager.h>
85 #include <vm/vm_extern.h>
/*
 * NOTE(review): fragmentary numbered listing.  The leading integer on each
 * line is the original source line number; the gaps in numbering show that
 * many lines are missing from this view, including the contigmalloc1()
 * function header itself (original lines ~92-104) and several statements
 * and closing braces.  Code lines are left byte-identical below; only
 * comments are added.  Do not treat this fragment as compilable.
 */
88 * This interface is for merging with malloc() someday.
89 * Even if we never implement compaction so that contiguous allocation
90 * works after initialization time, malloc()'s data structures are good
91 * for statistics and for allocations of less than a page.
/*
 * Parameters visible in this fragment (others -- flags, low, high, map --
 * are referenced by the body but their declarations are in the missing
 * lines; presumably the usual contigmalloc1() signature -- TODO confirm
 * against the full source):
 *   size      - requested allocation size in bytes; rounded up to a page
 *               multiple below.
 *   type      - malloc type accounting tag (unused in the visible lines).
 *   alignment - required physical alignment; must be a power of 2.
 *   boundary  - physical boundary the allocation must not cross; must be
 *               a power of 2.
 * Returns a kernel virtual address for the contiguous physical range,
 * as seen at the final "return ((void *)addr)".
 */
95 unsigned long size, /* should be size_t here and for malloc() */
96 struct malloc_type *type,
100 unsigned long alignment,
101 unsigned long boundary,
105 vm_offset_t addr, phys, tmp_addr;
107 vm_page_t pga = vm_page_array;
/* Argument sanity checks.  The "if (size == 0)" guard for the first
 * panic is in a missing line (original 110). */
109 size = round_page(size);
111 panic("contigmalloc1: size must not be 0");
112 if ((alignment & (alignment - 1)) != 0)
113 panic("contigmalloc1: alignment must be a power of 2");
114 if ((boundary & (boundary - 1)) != 0)
115 panic("contigmalloc1: boundary must be a power of 2");
/* Up to two passes: the second pass presumably retries after the
 * page-laundering below frees more memory -- TODO confirm. */
118 for (pass = 0; pass <= 1; pass++) {
122 * Find first page in array that is free, within range, aligned, and
123 * such that the boundary won't be crossed.
/* Linear scan of the page array for a candidate start page that is
 * free or cached, inside [low, high), alignment-satisfying, and whose
 * run of `size` bytes does not cross a `boundary` multiple (checked by
 * XORing first/last physical addresses under ~(boundary - 1)). */
125 for (i = start; i < cnt.v_page_count; i++) {
127 phys = VM_PAGE_TO_PHYS(&pga[i]);
128 pqtype = pga[i].queue - pga[i].pc;
129 if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
130 (phys >= low) && (phys < high) &&
131 ((phys & (alignment - 1)) == 0) &&
132 (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
137 * If the above failed or we will exceed the upper bound, fail.
139 if ((i == cnt.v_page_count) ||
140 ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
/*
 * Laundering: walk the inactive queue and push dirty pages to their
 * backing store (vnode pages via vm_object_page_clean() under an
 * exclusive vnode lock; swap/default pages via vm_pageout_flush())
 * so that more pages become free/cacheable for a retry.
 */
144 for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
148 KASSERT(m->queue == PQ_INACTIVE,
149 ("contigmalloc1: page %p is not PQ_INACTIVE", m));
/* Grab the successor first: cleaning may unlink m from the queue. */
151 next = TAILQ_NEXT(m, pageq);
152 if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
154 vm_page_test_dirty(m);
156 if (m->object->type == OBJT_VNODE) {
157 vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
158 vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
159 VOP_UNLOCK(m->object->handle, 0, curthread);
161 } else if (m->object->type == OBJT_SWAP ||
162 m->object->type == OBJT_DEFAULT) {
163 vm_pageout_flush(&m, 1, 0);
/* A now-clean, unbusied, unheld page can be reclaimed (the action
 * taken on success is in a missing line -- presumably vm_page_cache()
 * or vm_page_free() -- TODO confirm). */
167 if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
/* Same laundering loop, repeated for the active queue. */
171 for (m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
175 KASSERT(m->queue == PQ_ACTIVE,
176 ("contigmalloc1: page %p is not PQ_ACTIVE", m));
178 next = TAILQ_NEXT(m, pageq);
179 if (vm_page_sleep_busy(m, TRUE, "vpctw1"))
181 vm_page_test_dirty(m);
183 if (m->object->type == OBJT_VNODE) {
184 vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
185 vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
186 VOP_UNLOCK(m->object->handle, 0, curthread);
188 } else if (m->object->type == OBJT_SWAP ||
189 m->object->type == OBJT_DEFAULT) {
190 vm_pageout_flush(&m, 1, 0);
194 if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
204 * Check successive pages for contiguous and free.
/* Verify the candidate run: each page must be physically adjacent to
 * its predecessor and still free or cached. */
206 for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
208 pqtype = pga[i].queue - pga[i].pc;
209 if ((VM_PAGE_TO_PHYS(&pga[i]) !=
210 (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
211 ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
/*
 * Claim every page in the run: cached pages get special handling
 * (detail in missing lines), each page is pulled off its free/cache
 * queue, marked fully valid, and the pre-zeroed-page count is
 * adjusted if the page carried PG_ZERO.
 */
217 for (i = start; i < (start + size / PAGE_SIZE); i++) {
219 vm_page_t m = &pga[i];
221 pqtype = m->queue - m->pc;
222 if (pqtype == PQ_CACHE) {
227 TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
228 vm_page_queues[m->queue].lcnt--;
230 m->valid = VM_PAGE_BITS_ALL;
231 if (m->flags & PG_ZERO)
232 vm_page_zero_count--;
234 KASSERT(m->dirty == 0, ("contigmalloc1: page %p was dirty", m));
242 * We've found a contiguous chunk that meets our requirements.
243 * Allocate kernel VM, unfree and assign the physical pages to it and
244 * return kernel VM pointer.
/* Reserve kernel virtual address space, then map the claimed physical
 * pages into kernel_object at the matching offset. */
247 if (vm_map_findspace(map, vm_map_min(map), size, &addr) !=
250 * XXX We almost never run out of kernel virtual
251 * space, so we don't make the allocated memory
258 vm_object_reference(kernel_object);
259 vm_map_insert(map, kernel_object, addr - VM_MIN_KERNEL_ADDRESS,
260 addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
/* Insert each physical page at consecutive offsets in kernel_object
 * (tmp_addr's initialization to addr is in a missing line). */
264 for (i = start; i < (start + size / PAGE_SIZE); i++) {
265 vm_page_t m = &pga[i];
266 vm_page_insert(m, kernel_object,
267 OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
268 tmp_addr += PAGE_SIZE;
/* Wire the new range so it cannot be paged out, then hand the kernel
 * virtual address back to the caller. */
270 vm_map_pageable(map, addr, addr + size, FALSE);
273 return ((void *)addr);
/*
 * NOTE(review): fragment of the contigmalloc() wrapper.  The function
 * header, the flags/low/high parameter lines, the local declaration of
 * `ret`, the trailing argument of the contigmalloc1() call (presumably
 * kernel_map -- TODO confirm), and the return statement are all in
 * missing lines.  The visible code simply forwards the request to
 * contigmalloc1().  Code lines left byte-identical; comments only.
 */
280 unsigned long size, /* should be size_t here and for malloc() */
281 struct malloc_type *type,
285 unsigned long alignment,
286 unsigned long boundary)
291 ret = contigmalloc1(size, type, flags, low, high, alignment, boundary,
/*
 * NOTE(review): fragment of contigfree() -- the return-type line and
 * braces are in missing lines.  Releases a contiguous allocation made
 * by contigmalloc() by freeing its kernel VA range (and the pages
 * mapped there) back through kmem_free() on kernel_map.  The `type`
 * accounting tag is unused in the visible line.
 */
298 contigfree(void *addr, unsigned long size, struct malloc_type *type)
301 kmem_free(kernel_map, (vm_offset_t)addr, size);
/*
 * NOTE(review): fragment of vm_page_alloc_contig() -- the return-type
 * line, the size/low/high parameter lines, the declaration of `ret`,
 * and the return statement are in missing lines.  The visible code
 * delegates to contigmalloc1() with M_DEVBUF/M_NOWAIT, no boundary
 * restriction (0ul), and kernel_map, casting the result to a
 * vm_offset_t (presumably returned to the caller -- TODO confirm).
 */
305 vm_page_alloc_contig(
309 vm_offset_t alignment)
314 ret = ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
315 alignment, 0ul, kernel_map));