 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 * The Regents of the University of California. All rights reserved.
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
 * @(#)vm_unix.c 8.1 (Berkeley) 6/11/93
39 #include "opt_compat.h"
42 * Traditional sbrk/grow interface to VM
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
48 #include <sys/param.h>
50 #include <sys/mutex.h>
52 #include <sys/racct.h>
53 #include <sys/resourcevar.h>
54 #include <sys/sysent.h>
55 #include <sys/sysproto.h>
56 #include <sys/systm.h>
59 #include <vm/vm_param.h>
61 #include <vm/vm_map.h>
63 #ifndef _SYS_SYSPROTO_H_
76 struct obreak_args *uap;
78 struct vmspace *vm = td->td_proc->p_vmspace;
79 vm_offset_t new, old, base;
80 rlim_t datalim, lmemlim, vmemlim;
83 boolean_t do_map_wirefuture;
85 PROC_LOCK(td->td_proc);
86 datalim = lim_cur(td->td_proc, RLIMIT_DATA);
87 lmemlim = lim_cur(td->td_proc, RLIMIT_MEMLOCK);
88 vmemlim = lim_cur(td->td_proc, RLIMIT_VMEM);
89 PROC_UNLOCK(td->td_proc);
91 do_map_wirefuture = FALSE;
92 new = round_page((vm_offset_t)uap->nsize);
93 vm_map_lock(&vm->vm_map);
95 base = round_page((vm_offset_t) vm->vm_daddr);
96 old = base + ctob(vm->vm_dsize);
99 * Check the resource limit, but allow a process to reduce
100 * its usage, even if it remains over the limit.
102 if (new - base > datalim && new > old) {
106 if (new > vm_map_max(&vm->vm_map)) {
110 } else if (new < base) {
112 * This is simply an invalid value. If someone wants to
113 * do fancy address space manipulations, mmap and munmap
114 * can do most of what the user would want.
120 if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
121 if (ptoa(vmspace_wired_count(td->td_proc->p_vmspace)) +
122 (new - old) > lmemlim) {
127 if (vm->vm_map.size + (new - old) > vmemlim) {
132 PROC_LOCK(td->td_proc);
133 error = racct_set(td->td_proc, RACCT_DATA, new - base);
135 PROC_UNLOCK(td->td_proc);
139 error = racct_set(td->td_proc, RACCT_VMEM,
140 vm->vm_map.size + (new - old));
142 racct_set_force(td->td_proc, RACCT_DATA, old - base);
143 PROC_UNLOCK(td->td_proc);
147 if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
148 error = racct_set(td->td_proc, RACCT_MEMLOCK,
149 ptoa(vmspace_wired_count(td->td_proc->p_vmspace)) +
152 racct_set_force(td->td_proc, RACCT_DATA,
154 racct_set_force(td->td_proc, RACCT_VMEM,
156 PROC_UNLOCK(td->td_proc);
161 PROC_UNLOCK(td->td_proc);
164 #ifdef COMPAT_FREEBSD32
165 #if defined(__amd64__) || defined(__ia64__)
166 if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32))
167 prot |= VM_PROT_EXECUTE;
170 rv = vm_map_insert(&vm->vm_map, NULL, 0, old, new,
171 prot, VM_PROT_ALL, 0);
172 if (rv != KERN_SUCCESS) {
174 PROC_LOCK(td->td_proc);
175 racct_set_force(td->td_proc, RACCT_DATA, old - base);
176 racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
177 if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
178 racct_set_force(td->td_proc, RACCT_MEMLOCK,
179 ptoa(vmspace_wired_count(
180 td->td_proc->p_vmspace)));
182 PROC_UNLOCK(td->td_proc);
187 vm->vm_dsize += btoc(new - old);
189 * Handle the MAP_WIREFUTURE case for legacy applications,
190 * by marking the newly mapped range of pages as wired.
191 * We are not required to perform a corresponding
192 * vm_map_unwire() before vm_map_delete() below, as
193 * it will forcibly unwire the pages in the range.
195 * XXX If the pages cannot be wired, no error is returned.
197 if ((vm->vm_map.flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) {
199 printf("obreak: MAP_WIREFUTURE set\n");
200 do_map_wirefuture = TRUE;
202 } else if (new < old) {
203 rv = vm_map_delete(&vm->vm_map, new, old);
204 if (rv != KERN_SUCCESS) {
208 vm->vm_dsize -= btoc(old - new);
210 PROC_LOCK(td->td_proc);
211 racct_set_force(td->td_proc, RACCT_DATA, new - base);
212 racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
213 if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
214 racct_set_force(td->td_proc, RACCT_MEMLOCK,
215 ptoa(vmspace_wired_count(td->td_proc->p_vmspace)));
217 PROC_UNLOCK(td->td_proc);
221 vm_map_unlock(&vm->vm_map);
223 if (do_map_wirefuture)
224 (void) vm_map_wire(&vm->vm_map, old, new,
225 VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
230 #ifndef _SYS_SYSPROTO_H_
231 struct ovadvise_args {
241 sys_ovadvise(td, uap)
243 struct ovadvise_args *uap;
245 /* START_GIANT_OPTIONAL */
246 /* END_GIANT_OPTIONAL */