]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/vm/vm_unix.c
Fix a bug in the device pager code that can trigger an assertion
[FreeBSD/FreeBSD.git] / sys / vm / vm_unix.c
1 /*-
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1991, 1993
4  *      The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
35  *
36  *      @(#)vm_unix.c   8.1 (Berkeley) 6/11/93
37  */
38
39 #include "opt_compat.h"
40
41 /*
42  * Traditional sbrk/grow interface to VM
43  */
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47
48 #include <sys/param.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/proc.h>
52 #include <sys/racct.h>
53 #include <sys/resourcevar.h>
54 #include <sys/sysent.h>
55 #include <sys/sysproto.h>
56 #include <sys/systm.h>
57
58 #include <vm/vm.h>
59 #include <vm/vm_param.h>
60 #include <vm/pmap.h>
61 #include <vm/vm_map.h>
62
#ifndef _SYS_SYSPROTO_H_
/*
 * Argument structure for obreak(); normally machine-generated into
 * <sys/sysproto.h>, so this fallback is only compiled when that
 * header is not available.
 */
struct obreak_args {
	char *nsize;		/* requested new end ("break") of the data segment */
};
#endif
68
/*
 * obreak() -- traditional brk()/sbrk() back end: grow or shrink the
 * process data segment so that it ends at (page-rounded) uap->nsize.
 *
 * Growing allocates anonymous memory between the old and new break and
 * charges it against RLIMIT_DATA, RLIMIT_VMEM and (when MAP_WIREFUTURE
 * is in effect) RLIMIT_MEMLOCK, plus the corresponding RACCT resources.
 * Shrinking unmaps the released range.  Returns 0, ENOMEM or EINVAL.
 *
 * MPSAFE
 */
/* ARGSUSED */
int
sys_obreak(td, uap)
	struct thread *td;
	struct obreak_args *uap;
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	vm_offset_t new, old, base;
	rlim_t datalim, lmemlim, vmemlim;
	int prot, rv;
	int error = 0;
	boolean_t do_map_wirefuture;

	/* Snapshot the resource limits under the process lock. */
	PROC_LOCK(td->td_proc);
	datalim = lim_cur(td->td_proc, RLIMIT_DATA);
	lmemlim = lim_cur(td->td_proc, RLIMIT_MEMLOCK);
	vmemlim = lim_cur(td->td_proc, RLIMIT_VMEM);
	PROC_UNLOCK(td->td_proc);

	do_map_wirefuture = FALSE;
	new = round_page((vm_offset_t)uap->nsize);
	vm_map_lock(&vm->vm_map);

	/*
	 * base is the page-rounded start of the data segment; old is the
	 * current break (vm_dsize is kept in pages, hence ctob()).
	 */
	base = round_page((vm_offset_t) vm->vm_daddr);
	old = base + ctob(vm->vm_dsize);
	if (new > base) {
		/*
		 * Check the resource limit, but allow a process to reduce
		 * its usage, even if it remains over the limit.
		 */
		if (new - base > datalim && new > old) {
			error = ENOMEM;
			goto done;
		}
		/* The new break must stay within the map's address range. */
		if (new > vm_map_max(&vm->vm_map)) {
			error = ENOMEM;
			goto done;
		}
	} else if (new < base) {
		/*
		 * This is simply an invalid value.  If someone wants to
		 * do fancy address space manipulations, mmap and munmap
		 * can do most of what the user would want.
		 */
		error = EINVAL;
		goto done;
	}
	if (new > old) {
		/*
		 * Growing.  If future mappings are wired (and the legacy
		 * old_mlock accounting mode is off), the new pages count
		 * against RLIMIT_MEMLOCK up front.
		 */
		if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
			if (ptoa(vmspace_wired_count(td->td_proc->p_vmspace)) +
			    (new - old) > lmemlim) {
				error = ENOMEM;
				goto done;
			}
		}
		if (vm->vm_map.size + (new - old) > vmemlim) {
			error = ENOMEM;
			goto done;
		}
#ifdef RACCT
		/*
		 * Charge the RACCT resources before mapping; on any
		 * failure, roll back the charges already made (via
		 * racct_set_force, which cannot fail) in reverse order.
		 */
		PROC_LOCK(td->td_proc);
		error = racct_set(td->td_proc, RACCT_DATA, new - base);
		if (error != 0) {
			PROC_UNLOCK(td->td_proc);
			error = ENOMEM;
			goto done;
		}
		error = racct_set(td->td_proc, RACCT_VMEM,
		    vm->vm_map.size + (new - old));
		if (error != 0) {
			racct_set_force(td->td_proc, RACCT_DATA, old - base);
			PROC_UNLOCK(td->td_proc);
			error = ENOMEM;
			goto done;
		}
		if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
			error = racct_set(td->td_proc, RACCT_MEMLOCK,
			    ptoa(vmspace_wired_count(td->td_proc->p_vmspace)) +
			    (new - old));
			if (error != 0) {
				racct_set_force(td->td_proc, RACCT_DATA,
				    old - base);
				racct_set_force(td->td_proc, RACCT_VMEM,
				    vm->vm_map.size);
				PROC_UNLOCK(td->td_proc);
				error = ENOMEM;
				goto done;
			}
		}
		PROC_UNLOCK(td->td_proc);
#endif
		prot = VM_PROT_RW;
#ifdef COMPAT_FREEBSD32
#if defined(__amd64__) || defined(__ia64__)
		/*
		 * 32-bit compat processes may be granted execute
		 * permission on the heap when i386_read_exec is enabled,
		 * matching historical i386 read-implies-exec behavior.
		 */
		if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32))
			prot |= VM_PROT_EXECUTE;
#endif
#endif
		/* NULL object => anonymous, demand-zero backing. */
		rv = vm_map_insert(&vm->vm_map, NULL, 0, old, new,
		    prot, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
#ifdef RACCT
			/* Undo all of the RACCT charges made above. */
			PROC_LOCK(td->td_proc);
			racct_set_force(td->td_proc, RACCT_DATA, old - base);
			racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
			if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
				racct_set_force(td->td_proc, RACCT_MEMLOCK,
				    ptoa(vmspace_wired_count(
				    td->td_proc->p_vmspace)));
			}
			PROC_UNLOCK(td->td_proc);
#endif
			error = ENOMEM;
			goto done;
		}
		vm->vm_dsize += btoc(new - old);
		/*
		 * Handle the MAP_WIREFUTURE case for legacy applications,
		 * by marking the newly mapped range of pages as wired.
		 * We are not required to perform a corresponding
		 * vm_map_unwire() before vm_map_delete() below, as
		 * it will forcibly unwire the pages in the range.
		 *
		 * XXX If the pages cannot be wired, no error is returned.
		 */
		if ((vm->vm_map.flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) {
			if (bootverbose)
				printf("obreak: MAP_WIREFUTURE set\n");
			/* Defer the wiring until after the map is unlocked. */
			do_map_wirefuture = TRUE;
		}
	} else if (new < old) {
		/* Shrinking: release the [new, old) range back to the map. */
		rv = vm_map_delete(&vm->vm_map, new, old);
		if (rv != KERN_SUCCESS) {
			error = ENOMEM;
			goto done;
		}
		vm->vm_dsize -= btoc(old - new);
#ifdef RACCT
		/* Shrinking cannot fail the limits; force the new charges. */
		PROC_LOCK(td->td_proc);
		racct_set_force(td->td_proc, RACCT_DATA, new - base);
		racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
		if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
			racct_set_force(td->td_proc, RACCT_MEMLOCK,
			    ptoa(vmspace_wired_count(td->td_proc->p_vmspace)));
		}
		PROC_UNLOCK(td->td_proc);
#endif
	}
done:
	vm_map_unlock(&vm->vm_map);

	/*
	 * Wire after dropping the map lock -- presumably because
	 * vm_map_wire() acquires the map lock itself; TODO confirm.
	 */
	if (do_map_wirefuture)
		(void) vm_map_wire(&vm->vm_map, old, new,
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

	return (error);
}
229
#ifndef _SYS_SYSPROTO_H_
/*
 * Argument structure for ovadvise(); normally machine-generated into
 * <sys/sysproto.h>, so this fallback is only compiled when that
 * header is not available.
 */
struct ovadvise_args {
	int anom;		/* historical advice value; ignored by sys_ovadvise() */
};
#endif
235
236 /*
237  * MPSAFE
238  */
239 /* ARGSUSED */
240 int
241 sys_ovadvise(td, uap)
242         struct thread *td;
243         struct ovadvise_args *uap;
244 {
245         /* START_GIANT_OPTIONAL */
246         /* END_GIANT_OPTIONAL */
247         return (EINVAL);
248 }