/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *      Virtual memory maps provide for the mapping, protection,
 *      and sharing of virtual memory objects.  In addition,
 *      this module provides for an efficient virtual copy of
 *      memory from one map to another.
 *
 *      Synchronization is required prior to most operations.
 *
 *      Maps consist of an ordered doubly-linked list of simple
 *      entries; a self-adjusting binary search tree of these
 *      entries is used to speed up lookups.
 *
 *      Since portions of maps are specified by start/end addresses,
 *      which may not align with existing map entries, all
 *      routines merely "clip" entries to these start/end values.
 *      [That is, an entry is split into two, bordering at a
 *      start or end value.]  Note that these clippings may not
 *      always be necessary (as the two resulting entries are then
 *      not changed); however, the clipping is done for convenience.
 *
 *      As mentioned above, virtual copy operations are performed
 *      by copying VM object references from one map to
 *      another, and then marking both regions as copy-on-write.
 */
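
/*
 * An illustration of the clipping described above (sketch only, with
 * hypothetical addresses): an operation on a range that begins inside an
 * existing entry first splits that entry at the range boundary, e.g.
 *
 *      before clipping:   [0x1000, 0x5000)
 *      clip at 0x3000:    [0x1000, 0x3000) [0x3000, 0x5000)
 *
 * so that the operation can then be applied to whole entries only.
 */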

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define PROC_VMSPACE_LOCK(p) do { } while (0)
#define PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *      VM_MAP_RANGE_CHECK:     [ internal use only ]
 *
 *      Clips the starting and ending region addresses
 *      so that they fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)             \
                {                                       \
                if (start < vm_map_min(map))            \
                        start = vm_map_min(map);        \
                if (end > vm_map_max(map))              \
                        end = vm_map_max(map);          \
                if (start > end)                        \
                        start = end;                    \
                }
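
/*
 * Example (hypothetical fragment, not part of this file): how a bogus
 * caller-supplied range is clipped by VM_MAP_RANGE_CHECK().
 */
#if 0
        vm_offset_t start = 0;                  /* below vm_map_min(map) */
        vm_offset_t end = (vm_offset_t)-1;      /* above vm_map_max(map) */

        VM_MAP_RANGE_CHECK(map, start, end);
        /* Now vm_map_min(map) <= start <= end <= vm_map_max(map). */
#endif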

/*
 *      vm_map_startup:
 *
 *      Initialize the vm_map module.  Must be called before
 *      any other vm_map routines.
 *
 *      Map and entry structures are allocated from the general
 *      purpose memory pool with some exceptions:
 *
 *      - The kernel map and kmem submap are allocated statically.
 *      - Kernel map entries are allocated out of a static pool.
 *
 *      These restrictions are necessary since malloc() uses the
 *      maps and requires map entries.
 */

void
vm_map_startup(void)
{
        mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
        mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
            vm_map_zdtor,
#else
            NULL,
#endif
            vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        uma_prealloc(mapzone, MAX_KMAP);
        kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
        mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
            vmspace_zdtor,
#else
            NULL,
#endif
            vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm->vm_map.pmap = NULL;
        (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
        PMAP_LOCK_INIT(vmspace_pmap(vm));
        return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        memset(map, 0, sizeof(*map));
        mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
        sx_init(&map->lock, "vm map (user)");
        return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        KASSERT(map->nentries == 0,
            ("map %p nentries == %d on free.",
            map, map->nentries));
        KASSERT(map->size == 0,
            ("map %p size == %lu on free.",
            map, (unsigned long)map->size));
}
#endif  /* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
        struct vmspace *vm;

        vm = uma_zalloc(vmspace_zone, M_WAITOK);
        KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
        if (pinit == NULL)
                pinit = &pmap_pinit;
        if (!pinit(vmspace_pmap(vm))) {
                uma_zfree(vmspace_zone, vm);
                return (NULL);
        }
        CTR1(KTR_VM, "vmspace_alloc: %p", vm);
        _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
        vm->vm_refcnt = 1;
        vm->vm_shm = NULL;
        vm->vm_swrss = 0;
        vm->vm_tsize = 0;
        vm->vm_dsize = 0;
        vm->vm_ssize = 0;
        vm->vm_taddr = 0;
        vm->vm_daddr = 0;
        vm->vm_maxsaddr = 0;
        return (vm);
}
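
/*
 * Example (hypothetical caller, sketch only): allocate a fresh vmspace
 * with the default pmap initializer and release the single reference that
 * vmspace_alloc() hands out.  The address bounds are placeholders.
 */
#if 0
        struct vmspace *vm;

        vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, NULL);
        if (vm != NULL) {
                /* ... use vm ... */
                vmspace_free(vm);       /* drops the refcnt set to 1 above */
        }
#endif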

#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

        PROC_LOCK(p);
        racct_set(p, RACCT_DATA, 0);
        racct_set(p, RACCT_STACK, 0);
        racct_set(p, RACCT_RSS, 0);
        racct_set(p, RACCT_MEMLOCK, 0);
        racct_set(p, RACCT_VMEM, 0);
        PROC_UNLOCK(p);
}
#endif

static inline void
vmspace_dofree(struct vmspace *vm)
{

        CTR1(KTR_VM, "vmspace_free: %p", vm);

        /*
         * Make sure any SysV shm is freed, it might not have been in
         * exit1().
         */
        shmexit(vm);

        /*
         * Lock the map, to wait out all other references to it.
         * Delete all of the mappings and pages they hold, then call
         * the pmap module to reclaim anything left.
         */
        (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
            vm_map_max(&vm->vm_map));

        pmap_release(vmspace_pmap(vm));
        vm->vm_map.pmap = NULL;
        uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "vmspace_free() called");

        if (vm->vm_refcnt == 0)
                panic("vmspace_free: attempt to free already freed vmspace");

        if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
                vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
        struct vmspace *vm;

        PROC_VMSPACE_LOCK(p);
        vm = p->p_vmspace;
        p->p_vmspace = NULL;
        PROC_VMSPACE_UNLOCK(p);
        KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
        vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
        int refcnt;
        struct vmspace *vm;
        struct proc *p;

        /*
         * Release user portion of address space.
         * This releases references to vnodes,
         * which could cause I/O if the file has been unlinked.
         * Need to do this early enough that we can still sleep.
         *
         * The last exiting process to reach this point releases as
         * much of the environment as it can. vmspace_dofree() is the
         * slower fallback in case another process had a temporary
         * reference to the vmspace.
         */

        p = td->td_proc;
        vm = p->p_vmspace;
        atomic_add_int(&vmspace0.vm_refcnt, 1);
        do {
                refcnt = vm->vm_refcnt;
                if (refcnt > 1 && p->p_vmspace != &vmspace0) {
                        /* Switch now since other proc might free vmspace */
                        PROC_VMSPACE_LOCK(p);
                        p->p_vmspace = &vmspace0;
                        PROC_VMSPACE_UNLOCK(p);
                        pmap_activate(td);
                }
        } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
        if (refcnt == 1) {
                if (p->p_vmspace != vm) {
                        /* vmspace not yet freed, switch back */
                        PROC_VMSPACE_LOCK(p);
                        p->p_vmspace = vm;
                        PROC_VMSPACE_UNLOCK(p);
                        pmap_activate(td);
                }
                pmap_remove_pages(vmspace_pmap(vm));
                /* Switch now since this proc will free vmspace */
                PROC_VMSPACE_LOCK(p);
                p->p_vmspace = &vmspace0;
                PROC_VMSPACE_UNLOCK(p);
                pmap_activate(td);
                vmspace_dofree(vm);
        }
#ifdef RACCT
        if (racct_enable)
                vmspace_container_reset(p);
#endif
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
        struct vmspace *vm;
        int refcnt;

        PROC_VMSPACE_LOCK(p);
        vm = p->p_vmspace;
        if (vm == NULL) {
                PROC_VMSPACE_UNLOCK(p);
                return (NULL);
        }
        do {
                refcnt = vm->vm_refcnt;
                if (refcnt <= 0) {      /* Avoid 0->1 transition */
                        PROC_VMSPACE_UNLOCK(p);
                        return (NULL);
                }
        } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
        if (vm != p->p_vmspace) {
                PROC_VMSPACE_UNLOCK(p);
                vmspace_free(vm);
                return (NULL);
        }
        PROC_VMSPACE_UNLOCK(p);
        return (vm);
}
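
/*
 * Example (hypothetical caller): borrowing another process's vmspace.
 * Every successful vmspace_acquire_ref() must be paired with a
 * vmspace_free() once the caller is done inspecting the address space.
 */
#if 0
        struct vmspace *vm;

        vm = vmspace_acquire_ref(p);
        if (vm != NULL) {
                /* ... examine the target address space ... */
                vmspace_free(vm);
        }
#endif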

/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The AIO kernel processes switch to and from a user process's
 * vmspace while performing an I/O operation on behalf of a user
 * process.  The new vmspace is either the vmspace of a user process
 * obtained from an active AIO request or the initial vmspace of the
 * AIO kernel process (when it is idling).  Because user processes
 * will block to drain any active AIO requests before proceeding in
 * exit() or execve(), the vmspace reference count for these vmspaces
 * can never be 0.  This allows for a much simpler implementation than
 * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
 * processes hold an extra reference on their initial vmspace for the
 * life of the process so that this guarantee is true for any vmspace
 * passed as 'newvm'.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
        struct vmspace *oldvm;

        /* XXX: Need some way to assert that this is an aio daemon. */

        KASSERT(newvm->vm_refcnt > 0,
            ("vmspace_switch_aio: newvm unreferenced"));

        oldvm = curproc->p_vmspace;
        if (oldvm == newvm)
                return;

        /*
         * Point to the new address space and refer to it.
         */
        curproc->p_vmspace = newvm;
        atomic_add_int(&newvm->vm_refcnt, 1);

        /* Activate the new mapping. */
        pmap_activate(curthread);

        /* Remove the daemon's reference to the old address space. */
        KASSERT(oldvm->vm_refcnt > 1,
            ("vmspace_switch_aio: oldvm dropping last reference"));
        vmspace_free(oldvm);
}
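
/*
 * Example (hypothetical AIO daemon body, sketch only): switch into a user
 * process's vmspace for one request and back afterwards.  'job_vm' stands
 * for a vmspace obtained from an active AIO request; it is illustrative
 * and not part of this file.
 */
#if 0
        struct vmspace *myvm;

        myvm = curproc->p_vmspace;
        vmspace_switch_aio(job_vm);     /* borrow the user vmspace */
        /* ... perform the I/O on behalf of the user process ... */
        vmspace_switch_aio(myvm);       /* return to the daemon's vmspace */
#endif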

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_lock_flags_(&map->system_mtx, 0, file, line);
        else
                sx_xlock_(&map->lock, file, line);
        map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
        struct thread *td;
        vm_map_entry_t entry, next;
        vm_object_t object;

        td = curthread;
        entry = td->td_map_def_user;
        td->td_map_def_user = NULL;
        while (entry != NULL) {
                next = entry->next;
                if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
                        /*
                         * Decrement the object's writemappings and
                         * possibly the vnode's v_writecount.
                         */
                        KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
                            ("Submap with writecount"));
                        object = entry->object.vm_object;
                        KASSERT(object != NULL, ("No object for writecount"));
                        vnode_pager_release_writecount(object, entry->start,
                            entry->end);
                }
                vm_map_entry_deallocate(entry, FALSE);
                entry = next;
        }
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_unlock_flags_(&map->system_mtx, 0, file, line);
        else {
                sx_xunlock_(&map->lock, file, line);
                vm_map_process_deferred();
        }
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_lock_flags_(&map->system_mtx, 0, file, line);
        else
                sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_unlock_flags_(&map->system_mtx, 0, file, line);
        else {
                sx_sunlock_(&map->lock, file, line);
                vm_map_process_deferred();
        }
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
        int error;

        error = map->system_map ?
            !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
            !sx_try_xlock_(&map->lock, file, line);
        if (error == 0)
                map->timestamp++;
        return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
        int error;

        error = map->system_map ?
            !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
            !sx_try_slock_(&map->lock, file, line);
        return (error == 0);
}

/*
 *      _vm_map_lock_upgrade:   [ internal use only ]
 *
 *      Tries to upgrade a read (shared) lock on the specified map to a write
 *      (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *      non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *      returned without a read or write lock held.
 *
 *      Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
        unsigned int last_timestamp;

        if (map->system_map) {
                mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
        } else {
                if (!sx_try_upgrade_(&map->lock, file, line)) {
                        last_timestamp = map->timestamp;
                        sx_sunlock_(&map->lock, file, line);
                        vm_map_process_deferred();
                        /*
                         * If the map's timestamp does not change while the
                         * map is unlocked, then the upgrade succeeds.
                         */
                        sx_xlock_(&map->lock, file, line);
                        if (last_timestamp != map->timestamp) {
                                sx_xunlock_(&map->lock, file, line);
                                return (1);
                        }
                }
        }
        map->timestamp++;
        return (0);
}
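
/*
 * Example (hypothetical fragment): the retry pattern implied by the
 * upgrade semantics above.  When the upgrade fails, the lock was dropped
 * and other threads may have changed the map, so the inspection must be
 * redone from scratch.
 */
#if 0
again:
        vm_map_lock_read(map);
        /* ... read-only inspection of the map ... */
        if (vm_map_lock_upgrade(map) != 0)
                goto again;             /* lock was lost; state may differ */
        /* ... modify the map under the exclusive lock ... */
        vm_map_unlock(map);
#endif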

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

        if (map->system_map) {
                mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
        } else
                sx_downgrade_(&map->lock, file, line);
}

/*
 *      vm_map_locked:
 *
 *      Returns a non-zero value if the caller holds a write (exclusive) lock
 *      on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

        if (map->system_map)
                return (mtx_owned(&map->system_mtx));
        else
                return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
        else
                sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *      _vm_map_unlock_and_wait:
 *
 *      Atomically releases the lock on the specified map and puts the calling
 *      thread to sleep.  The calling thread will remain asleep until either
 *      vm_map_wakeup() is performed on the map or the specified timeout is
 *      exceeded.
 *
 *      WARNING!  This function does not perform deferred deallocations of
 *      objects and map entries.  Therefore, the calling thread is expected to
 *      reacquire the map lock after reawakening and later perform an ordinary
 *      unlock operation, such as vm_map_unlock(), before completing its
 *      operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

        mtx_lock(&map_sleep_mtx);
        if (map->system_map)
                mtx_unlock_flags_(&map->system_mtx, 0, file, line);
        else
                sx_xunlock_(&map->lock, file, line);
        return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
            timo));
}

/*
 *      vm_map_wakeup:
 *
 *      Awaken any threads that have slept on the map using
 *      vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

        /*
         * Acquire and release map_sleep_mtx to prevent a wakeup()
         * from being performed (and lost) between the map unlock
         * and the msleep() in _vm_map_unlock_and_wait().
         */
        mtx_lock(&map_sleep_mtx);
        mtx_unlock(&map_sleep_mtx);
        wakeup(&map->root);
}
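
/*
 * Example (hypothetical waiter, sketch only): the protocol demanded by
 * the WARNING above -- reacquire the lock after waking and finish with an
 * ordinary unlock so that deferred deallocations are still performed.
 * 'done' is an illustrative condition, not part of this file.
 */
#if 0
        vm_map_lock(map);
        while (!done) {
                map->needs_wakeup = TRUE;
                (void)vm_map_unlock_and_wait(map, 0);
                vm_map_lock(map);       /* relock before rechecking */
        }
        /* ... operate on the map ... */
        vm_map_unlock(map);             /* runs vm_map_process_deferred() */
#endif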

void
vm_map_busy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        KASSERT(map->busy, ("vm_map_unbusy: not busy"));
        if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
                vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
                wakeup(&map->busy);
        }
}

void
vm_map_wait_busy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        while (map->busy) {
                vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
                if (map->system_map)
                        msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
                else
                        sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
        }
        map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
        return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *      vm_map_create:
 *
 *      Creates and returns a new empty VM map with
 *      the given physical map structure, and having
 *      the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
        vm_map_t result;

        result = uma_zalloc(mapzone, M_WAITOK);
        CTR1(KTR_VM, "vm_map_create: %p", result);
        _vm_map_init(result, pmap, min, max);
        return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

        map->header.next = map->header.prev = &map->header;
        map->needs_wakeup = FALSE;
        map->system_map = 0;
        map->pmap = pmap;
        map->header.end = min;
        map->header.start = max;
        map->flags = 0;
        map->root = NULL;
        map->timestamp = 0;
        map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

        _vm_map_init(map, pmap, min, max);
        mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
        sx_init(&map->lock, "user map");
}

/*
 *      vm_map_entry_dispose:   [ internal use only ]
 *
 *      Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
        uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *      vm_map_entry_create:    [ internal use only ]
 *
 *      Allocates a VM map entry for insertion.
 *      No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
        vm_map_entry_t new_entry;

        if (map->system_map)
                new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
        else
                new_entry = uma_zalloc(mapentzone, M_WAITOK);
        if (new_entry == NULL)
                panic("vm_map_entry_create: kernel resources exhausted");
        return (new_entry);
}

/*
 *      vm_map_entry_set_behavior:
 *
 *      Set the expected access behavior, either normal, random, or
 *      sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
        entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
            (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *      vm_map_entry_set_max_free:
 *
 *      Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

        entry->max_free = entry->adj_free;
        if (entry->left != NULL && entry->left->max_free > entry->max_free)
                entry->max_free = entry->left->max_free;
        if (entry->right != NULL && entry->right->max_free > entry->max_free)
                entry->max_free = entry->right->max_free;
}

/*
 *      vm_map_entry_splay:
 *
 *      The Sleator and Tarjan top-down splay algorithm with the
 *      following variation.  Max_free must be computed bottom-up, so
 *      on the downward pass, maintain the left and right spines in
 *      reverse order.  Then, make a second pass up each side to fix
 *      the pointers and compute max_free.  The time bound is O(log n)
 *      amortized.
 *
 *      The new root is the vm_map_entry containing "addr", or else an
 *      adjacent entry (lower or higher) if addr is not in the tree.
 *
 *      The map must be locked, and leaves it so.
 *
 *      Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
        vm_map_entry_t llist, rlist;
        vm_map_entry_t ltree, rtree;
        vm_map_entry_t y;

        /* Special case of empty tree. */
        if (root == NULL)
                return (root);

        /*
         * Pass One: Splay down the tree until we find addr or a NULL
         * pointer where addr would go.  llist and rlist are the two
         * sides in reverse order (bottom-up), with llist linked by
         * the right pointer and rlist linked by the left pointer in
         * the vm_map_entry.  Wait until Pass Two to set max_free on
         * the two spines.
         */
        llist = NULL;
        rlist = NULL;
        for (;;) {
                /* root is never NULL in here. */
                if (addr < root->start) {
                        y = root->left;
                        if (y == NULL)
                                break;
                        if (addr < y->start && y->left != NULL) {
                                /* Rotate right and put y on rlist. */
                                root->left = y->right;
                                y->right = root;
                                vm_map_entry_set_max_free(root);
                                root = y->left;
                                y->left = rlist;
                                rlist = y;
                        } else {
                                /* Put root on rlist. */
                                root->left = rlist;
                                rlist = root;
                                root = y;
                        }
                } else if (addr >= root->end) {
                        y = root->right;
                        if (y == NULL)
                                break;
                        if (addr >= y->end && y->right != NULL) {
                                /* Rotate left and put y on llist. */
                                root->right = y->left;
                                y->left = root;
                                vm_map_entry_set_max_free(root);
                                root = y->right;
                                y->right = llist;
                                llist = y;
                        } else {
                                /* Put root on llist. */
                                root->right = llist;
                                llist = root;
                                root = y;
                        }
                } else
                        break;
        }

        /*
         * Pass Two: Walk back up the two spines, flip the pointers
         * and set max_free.  The subtrees of the root go at the
         * bottom of llist and rlist.
         */
        ltree = root->left;
        while (llist != NULL) {
                y = llist->right;
                llist->right = ltree;
                vm_map_entry_set_max_free(llist);
                ltree = llist;
                llist = y;
        }
        rtree = root->right;
        while (rlist != NULL) {
                y = rlist->left;
                rlist->left = rtree;
                vm_map_entry_set_max_free(rlist);
                rtree = rlist;
                rlist = y;
        }

        /*
         * Final assembly: add ltree and rtree as subtrees of root.
         */
        root->left = ltree;
        root->right = rtree;
        vm_map_entry_set_max_free(root);

        return (root);
}
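
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * invariant that vm_map_entry_set_max_free() maintains and that the splay
 * above re-establishes bottom-up along both spines.
 */
#if 0
static vm_size_t
example_subtree_max_free(vm_map_entry_t e)
{
        vm_size_t max_free;

        if (e == NULL)
                return (0);
        max_free = e->adj_free;
        max_free = MAX(max_free, example_subtree_max_free(e->left));
        max_free = MAX(max_free, example_subtree_max_free(e->right));
        return (max_free);              /* must equal e->max_free */
}
#endif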

/*
 *      vm_map_entry_{un,}link:
 *
 *      Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
                  vm_map_entry_t after_where,
                  vm_map_entry_t entry)
{

        CTR4(KTR_VM,
            "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
            map->nentries, entry, after_where);
        VM_MAP_ASSERT_LOCKED(map);
        KASSERT(after_where->end <= entry->start,
            ("vm_map_entry_link: prev end %jx new start %jx overlap",
            (uintmax_t)after_where->end, (uintmax_t)entry->start));
        KASSERT(entry->end <= after_where->next->start,
            ("vm_map_entry_link: new end %jx next start %jx overlap",
            (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

        map->nentries++;
        entry->prev = after_where;
        entry->next = after_where->next;
        entry->next->prev = entry;
        after_where->next = entry;

        if (after_where != &map->header) {
                if (after_where != map->root)
                        vm_map_entry_splay(after_where->start, map->root);
                entry->right = after_where->right;
                entry->left = after_where;
                after_where->right = NULL;
                after_where->adj_free = entry->start - after_where->end;
                vm_map_entry_set_max_free(after_where);
        } else {
                entry->right = map->root;
                entry->left = NULL;
        }
        entry->adj_free = entry->next->start - entry->end;
        vm_map_entry_set_max_free(entry);
        map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
                    vm_map_entry_t entry)
{
        vm_map_entry_t next, prev, root;

        VM_MAP_ASSERT_LOCKED(map);
        if (entry != map->root)
                vm_map_entry_splay(entry->start, map->root);
        if (entry->left == NULL)
                root = entry->right;
        else {
                root = vm_map_entry_splay(entry->start, entry->left);
                root->right = entry->right;
                root->adj_free = entry->next->start - root->end;
                vm_map_entry_set_max_free(root);
        }
        map->root = root;

        prev = entry->prev;
        next = entry->next;
        next->prev = prev;
        prev->next = next;
        map->nentries--;
        CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
            map->nentries, entry);
}

/*
 *      vm_map_entry_resize_free:
 *
 *      Recompute the amount of free space following a vm_map_entry
 *      and propagate that value up the tree.  Call this function after
 *      resizing a map entry in-place, that is, without a call to
 *      vm_map_entry_link() or _unlink().
 *
 *      The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

        /*
         * Using splay trees without parent pointers, propagating
         * max_free up the tree is done by moving the entry to the
         * root and making the change there.
         */
        if (entry != map->root)
                map->root = vm_map_entry_splay(entry->start, map->root);

        entry->adj_free = entry->next->start - entry->end;
        vm_map_entry_set_max_free(entry);
}

/*
 *      vm_map_lookup_entry:    [ internal use only ]
 *
 *      Finds the map entry containing (or
 *      immediately preceding) the specified address
 *      in the given map; the entry is returned
 *      in the "entry" parameter.  The boolean
 *      result indicates whether the address is
 *      actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
        vm_map_t map,
        vm_offset_t address,
        vm_map_entry_t *entry)  /* OUT */
{
        vm_map_entry_t cur;
        boolean_t locked;

        /*
         * If the map is empty, then the map entry immediately preceding
         * "address" is the map's header.
         */
        cur = map->root;
        if (cur == NULL)
                *entry = &map->header;
        else if (address >= cur->start && cur->end > address) {
                *entry = cur;
                return (TRUE);
        } else if ((locked = vm_map_locked(map)) ||
            sx_try_upgrade(&map->lock)) {
                /*
                 * Splay requires a write lock on the map.  However, it only
                 * restructures the binary search tree; it does not otherwise
                 * change the map.  Thus, the map's timestamp need not change
                 * on a temporary upgrade.
                 */
                map->root = cur = vm_map_entry_splay(address, cur);
                if (!locked)
                        sx_downgrade(&map->lock);

                /*
                 * If "address" is contained within a map entry, the new root
                 * is that map entry.  Otherwise, the new root is a map entry
                 * immediately before or after "address".
                 */
                if (address >= cur->start) {
                        *entry = cur;
                        if (cur->end > address)
                                return (TRUE);
                } else
                        *entry = cur->prev;
        } else
                /*
                 * Since the map is only locked for read access, perform a
                 * standard binary search tree lookup for "address".
                 */
                for (;;) {
                        if (address < cur->start) {
                                if (cur->left == NULL) {
                                        *entry = cur->prev;
                                        break;
                                }
                                cur = cur->left;
                        } else if (cur->end > address) {
                                *entry = cur;
                                return (TRUE);
                        } else {
                                if (cur->right == NULL) {
                                        *entry = cur;
                                        break;
                                }
                                cur = cur->right;
                        }
                }
        return (FALSE);
}
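
/*
 * Example (hypothetical fragment): the usual way the result is consumed.
 * On TRUE, *entry contains the address; on FALSE, *entry is its
 * predecessor (possibly &map->header) and entry->next is the first entry
 * beginning above the address.
 */
#if 0
        vm_map_entry_t entry;

        if (vm_map_lookup_entry(map, addr, &entry)) {
                /* addr lies inside [entry->start, entry->end). */
        } else {
                /* Gap: entry->end <= addr < entry->next->start. */
        }
#endif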

/*
 *      vm_map_insert:
 *
 *      Inserts the given whole VM object into the target
 *      map at the specified address range.  The object's
 *      size should match that of the address range.
 *
 *      Requires that the map be locked, and leaves it so.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
        vm_map_entry_t new_entry, prev_entry, temp_entry;
        struct ucred *cred;
        vm_eflags_t protoeflags;
        vm_inherit_t inheritance;

        VM_MAP_ASSERT_LOCKED(map);
        KASSERT((object != kmem_object && object != kernel_object) ||
            (cow & MAP_COPY_ON_WRITE) == 0,
            ("vm_map_insert: kmem or kernel object and COW"));
        KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
            ("vm_map_insert: paradoxical MAP_NOFAULT request"));
        KASSERT((prot & ~max) == 0,
            ("prot %#x is not subset of max_prot %#x", prot, max));

        /*
         * Check that the start and end points are not bogus.
         */
        if (start < vm_map_min(map) || end > vm_map_max(map) ||
            start >= end)
                return (KERN_INVALID_ADDRESS);

        /*
         * Find the entry prior to the proposed starting address; if it's part
         * of an existing entry, this range is bogus.
         */
        if (vm_map_lookup_entry(map, start, &temp_entry))
                return (KERN_NO_SPACE);

        prev_entry = temp_entry;

        /*
         * Assert that the next entry doesn't overlap the end point.
         */
        if (prev_entry->next->start < end)
                return (KERN_NO_SPACE);

        if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
            max != VM_PROT_NONE))
                return (KERN_INVALID_ARGUMENT);

        protoeflags = 0;
        if (cow & MAP_COPY_ON_WRITE)
                protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
        if (cow & MAP_NOFAULT)
                protoeflags |= MAP_ENTRY_NOFAULT;
        if (cow & MAP_DISABLE_SYNCER)
                protoeflags |= MAP_ENTRY_NOSYNC;
        if (cow & MAP_DISABLE_COREDUMP)
                protoeflags |= MAP_ENTRY_NOCOREDUMP;
        if (cow & MAP_STACK_GROWS_DOWN)
                protoeflags |= MAP_ENTRY_GROWS_DOWN;
        if (cow & MAP_STACK_GROWS_UP)
                protoeflags |= MAP_ENTRY_GROWS_UP;
        if (cow & MAP_VN_WRITECOUNT)
                protoeflags |= MAP_ENTRY_VN_WRITECNT;
        if ((cow & MAP_CREATE_GUARD) != 0)
                protoeflags |= MAP_ENTRY_GUARD;
        if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
                protoeflags |= MAP_ENTRY_STACK_GAP_DN;
        if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
                protoeflags |= MAP_ENTRY_STACK_GAP_UP;
        if (cow & MAP_INHERIT_SHARE)
                inheritance = VM_INHERIT_SHARE;
        else
                inheritance = VM_INHERIT_DEFAULT;

        cred = NULL;
        if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
                goto charged;
        if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
            ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
                if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
                        return (KERN_RESOURCE_SHORTAGE);
                KASSERT(object == NULL ||
                    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
                    object->cred == NULL,
                    ("overcommit: vm_map_insert o %p", object));
                cred = curthread->td_ucred;
        }

charged:
        /* Expand the kernel pmap, if necessary. */
        if (map == kernel_map && end > kernel_vm_end)
                pmap_growkernel(end);
        if (object != NULL) {
                /*
                 * OBJ_ONEMAPPING must be cleared unless this mapping
                 * is trivially proven to be the only mapping for any
                 * of the object's pages.  (Object granularity
                 * reference counting is insufficient to recognize
                 * aliases with precision.)
                 */
                VM_OBJECT_WLOCK(object);
                if (object->ref_count > 1 || object->shadow_count != 0)
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);
                VM_OBJECT_WUNLOCK(object);
        } else if (prev_entry != &map->header &&
            prev_entry->eflags == protoeflags &&
            (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
            prev_entry->end == start && prev_entry->wired_count == 0 &&
            (prev_entry->cred == cred ||
            (prev_entry->object.vm_object != NULL &&
            prev_entry->object.vm_object->cred == cred)) &&
            vm_object_coalesce(prev_entry->object.vm_object,
            prev_entry->offset,
            (vm_size_t)(prev_entry->end - prev_entry->start),
            (vm_size_t)(end - prev_entry->end), cred != NULL &&
            (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
                /*
                 * We were able to extend the object.  Determine if we
                 * can extend the previous map entry to include the
                 * new range as well.
                 */
                if (prev_entry->inheritance == inheritance &&
                    prev_entry->protection == prot &&
                    prev_entry->max_protection == max) {
                        if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
                                map->size += end - prev_entry->end;
                        prev_entry->end = end;
                        vm_map_entry_resize_free(map, prev_entry);
                        vm_map_simplify_entry(map, prev_entry);
                        return (KERN_SUCCESS);
                }

                /*
                 * If we can extend the object but cannot extend the
                 * map entry, we have to create a new map entry.  We
                 * must bump the ref count on the extended object to
                 * account for it.  object may be NULL.
                 */
                object = prev_entry->object.vm_object;
                offset = prev_entry->offset +
                    (prev_entry->end - prev_entry->start);
                vm_object_reference(object);
                if (cred != NULL && object != NULL && object->cred != NULL &&
                    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
                        /* Object already accounts for this uid. */
                        cred = NULL;
                }
        }
        if (cred != NULL)
                crhold(cred);

        /*
         * Create a new entry
         */
        new_entry = vm_map_entry_create(map);
        new_entry->start = start;
        new_entry->end = end;
        new_entry->cred = NULL;

        new_entry->eflags = protoeflags;
        new_entry->object.vm_object = object;
        new_entry->offset = offset;

        new_entry->inheritance = inheritance;
        new_entry->protection = prot;
        new_entry->max_protection = max;
        new_entry->wired_count = 0;
        new_entry->wiring_thread = NULL;
        new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
        new_entry->next_read = start;

        KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
            ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
        new_entry->cred = cred;

        /*
         * Insert the new entry into the list
         */
        vm_map_entry_link(map, prev_entry, new_entry);
        if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
                map->size += new_entry->end - new_entry->start;

        /*
         * Try to coalesce the new entry with both the previous and next
         * entries in the list.  Previously, we only attempted to coalesce
         * with the previous entry when object is NULL.  Here, we handle the
         * other cases, which are less common.
         */
        vm_map_simplify_entry(map, new_entry);

        if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
                vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
                    end - start, cow & MAP_PREFAULT_PARTIAL);
        }

        return (KERN_SUCCESS);
}

/*
 *      vm_map_findspace:
 *
 *      Find the first fit (lowest VM address) for "length" free bytes
 *      beginning at address >= start in the given map.
 *
 *      In a vm_map_entry, "adj_free" is the amount of free space
 *      adjacent (higher address) to this entry, and "max_free" is the
 *      maximum amount of contiguous free space in its subtree.  This
 *      allows finding a free region in one path down the tree, so
 *      O(log n) amortized with splay trees.
 *
 *      The map must be locked, and leaves it so.
 *
 *      Returns: 0 on success, and starting address in *addr,
 *               1 if insufficient space.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)  /* OUT */
{
        vm_map_entry_t entry;
        vm_offset_t st;

        /*
         * Request must fit within min/max VM address and must avoid
         * address wrap.
         */
        start = MAX(start, vm_map_min(map));
        if (start + length > vm_map_max(map) || start + length < start)
                return (1);

        /* Empty tree means wide open address space. */
        if (map->root == NULL) {
                *addr = start;
                return (0);
        }

        /*
         * After splay, if start comes before root node, then there
         * must be a gap from start to the root.
         */
        map->root = vm_map_entry_splay(start, map->root);
        if (start + length <= map->root->start) {
                *addr = start;
                return (0);
        }

        /*
         * Root is the last node that might begin its gap before
         * start, and this is the last comparison where address
         * wrap might be a problem.
         */
        st = (start > map->root->end) ? start : map->root->end;
        if (length <= map->root->end + map->root->adj_free - st) {
                *addr = st;
                return (0);
        }

        /* With max_free, can immediately tell if no solution. */
        entry = map->root->right;
        if (entry == NULL || length > entry->max_free)
                return (1);

        /*
         * Search the right subtree in the order: left subtree, root,
         * right subtree (first fit).  The previous splay implies that
         * all regions in the right subtree have addresses > start.
         */
        while (entry != NULL) {
                if (entry->left != NULL && entry->left->max_free >= length)
                        entry = entry->left;
                else if (entry->adj_free >= length) {
                        *addr = entry->end;
                        return (0);
                } else
                        entry = entry->right;
        }

        /* Can't get here, so panic if we do. */
        panic("vm_map_findspace: max_free corrupt");
}
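
/*
 * Example (hypothetical fragment, sketch only): pairing
 * vm_map_findspace() with vm_map_insert() under a single lock hold, much
 * as vm_map_find() does below.
 */
#if 0
        vm_offset_t addr;

        vm_map_lock(map);
        if (vm_map_findspace(map, start, length, &addr) == 0)
                (void)vm_map_insert(map, NULL, 0, addr, addr + length,
                    VM_PROT_RW, VM_PROT_RW, 0);
        vm_map_unlock(map);
#endif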
1446
1447 int
1448 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1449     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1450     vm_prot_t max, int cow)
1451 {
1452         vm_offset_t end;
1453         int result;
1454
1455         end = start + length;
1456         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1457             object == NULL,
1458             ("vm_map_fixed: non-NULL backing object for stack"));
1459         vm_map_lock(map);
1460         VM_MAP_RANGE_CHECK(map, start, end);
1461         if ((cow & MAP_CHECK_EXCL) == 0)
1462                 vm_map_delete(map, start, end);
1463         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1464                 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1465                     prot, max, cow);
1466         } else {
1467                 result = vm_map_insert(map, object, offset, start, end,
1468                     prot, max, cow);
1469         }
1470         vm_map_unlock(map);
1471         return (result);
1472 }
1473
1474 /*
1475  *      vm_map_find finds an unallocated region in the target address
1476  *      map with the given length.  The search is defined to be
1477  *      first-fit from the specified address; the region found is
1478  *      returned in the same parameter.
1479  *
1480  *      If object is non-NULL, ref count must be bumped by caller
1481  *      prior to making call to account for the new entry.
1482  */
1483 int
1484 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1485             vm_offset_t *addr,  /* IN/OUT */
1486             vm_size_t length, vm_offset_t max_addr, int find_space,
1487             vm_prot_t prot, vm_prot_t max, int cow)
1488 {
1489         vm_offset_t alignment, initial_addr, start;
1490         int result;
1491
1492         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1493             object == NULL,
1494             ("vm_map_find: non-NULL backing object for stack"));
1495         MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
1496             (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
1497         if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1498             (object->flags & OBJ_COLORED) == 0))
1499                 find_space = VMFS_ANY_SPACE;
1500         if (find_space >> 8 != 0) {
1501                 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1502                 alignment = (vm_offset_t)1 << (find_space >> 8);
1503         } else
1504                 alignment = 0;
1505         initial_addr = *addr;
1506 again:
1507         start = initial_addr;
1508         vm_map_lock(map);
1509         do {
1510                 if (find_space != VMFS_NO_SPACE) {
1511                         if (vm_map_findspace(map, start, length, addr) ||
1512                             (max_addr != 0 && *addr + length > max_addr)) {
1513                                 vm_map_unlock(map);
1514                                 if (find_space == VMFS_OPTIMAL_SPACE) {
1515                                         find_space = VMFS_ANY_SPACE;
1516                                         goto again;
1517                                 }
1518                                 return (KERN_NO_SPACE);
1519                         }
1520                         switch (find_space) {
1521                         case VMFS_SUPER_SPACE:
1522                         case VMFS_OPTIMAL_SPACE:
1523                                 pmap_align_superpage(object, offset, addr,
1524                                     length);
1525                                 break;
1526                         case VMFS_ANY_SPACE:
1527                                 break;
1528                         default:
1529                                 if ((*addr & (alignment - 1)) != 0) {
1530                                         *addr &= ~(alignment - 1);
1531                                         *addr += alignment;
1532                                 }
1533                                 break;
1534                         }
1535
1536                         start = *addr;
1537                 } else if ((cow & MAP_REMAP) != 0) {
1538                         if (start < vm_map_min(map) ||
1539                             start + length > vm_map_max(map) ||
1540                             start + length <= length) {
1541                                 result = KERN_INVALID_ADDRESS;
1542                                 break;
1543                         }
1544                         vm_map_delete(map, start, start + length);
1545                 }
1546                 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1547                         result = vm_map_stack_locked(map, start, length,
1548                             sgrowsiz, prot, max, cow);
1549                 } else {
1550                         result = vm_map_insert(map, object, offset, start,
1551                             start + length, prot, max, cow);
1552                 }
1553         } while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
1554             find_space != VMFS_ANY_SPACE);
1555         vm_map_unlock(map);
1556         return (result);
1557 }
1558
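/*
 * Illustrative sketch of asking vm_map_find() for an anonymous,
 * 2MB-aligned region; the helper is hypothetical.  The find_space
 * encoding matches the decoding above: values above 0xff carry log2 of
 * the required alignment in the high bits (vm_map.h provides
 * VMFS_ALIGNED_SPACE() to pack it), while VMFS_ANY_SPACE and friends
 * are plain enumerators.  A max_addr of 0 means no upper bound.
 */
static int
example_find_2m_aligned(vm_map_t map, vm_size_t length, vm_offset_t *addr)
{
        *addr = vm_map_min(map);        /* search starts at this address */
        return (vm_map_find(map, NULL, 0, addr, length, 0,
            VMFS_ALIGNED_SPACE(21),     /* 1 << 21 == 2MB alignment */
            VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0));
}
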
1559 /*
1560  *      vm_map_find_min() is a variant of vm_map_find() that takes an
1561  *      additional parameter (min_addr) and treats the given address
1562  *      (*addr) differently.  Specifically, it treats *addr as a hint
1563  *      and not as the minimum address where the mapping is created.
1564  *
1565  *      This function works in two phases.  First, it tries to
1566  *      allocate above the hint.  If that fails and the hint is
1567  *      greater than min_addr, it performs a second pass, replacing
1568  *      the hint with min_addr as the minimum address for the
1569  *      allocation.
1570  */
1571 int
1572 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1573     vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
1574     vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
1575     int cow)
1576 {
1577         vm_offset_t hint;
1578         int rv;
1579
1580         hint = *addr;
1581         for (;;) {
1582                 rv = vm_map_find(map, object, offset, addr, length, max_addr,
1583                     find_space, prot, max, cow);
1584                 if (rv == KERN_SUCCESS || min_addr >= hint)
1585                         return (rv);
1586                 *addr = hint = min_addr;
1587         }
1588 }
1589
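/*
 * Illustrative example: with min_addr = 64KB and a caller hint of
 * *addr = 1MB, the loop above first searches upward from 1MB.  Only if
 * that fails does it retry with *addr = 64KB, widening the search
 * downward to the true minimum.  When the hint is already at or below
 * min_addr, the first failure is final, since a retry could not search
 * any additional space.
 */
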
1590 /*
1591  *      vm_map_simplify_entry:
1592  *
1593  *      Simplify the given map entry by merging with either neighbor.  This
1594  *      routine also has the ability to merge with both neighbors.
1595  *
1596  *      The map must be locked.
1597  *
1598  *      This routine guarantees that the passed entry remains valid (though
1599  *      possibly extended).  When merging, this routine may delete one or
1600  *      both neighbors.
1601  */
1602 void
1603 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1604 {
1605         vm_map_entry_t next, prev;
1606         vm_size_t prevsize, esize;
1607
1608         if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
1609             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
1610                 return;
1611
1612         prev = entry->prev;
1613         if (prev != &map->header) {
1614                 prevsize = prev->end - prev->start;
1615                 if ((prev->end == entry->start) &&
1616                     (prev->object.vm_object == entry->object.vm_object) &&
1617                     (!prev->object.vm_object ||
1618                         (prev->offset + prevsize == entry->offset)) &&
1619                     (prev->eflags == entry->eflags) &&
1620                     (prev->protection == entry->protection) &&
1621                     (prev->max_protection == entry->max_protection) &&
1622                     (prev->inheritance == entry->inheritance) &&
1623                     (prev->wired_count == entry->wired_count) &&
1624                     (prev->cred == entry->cred)) {
1625                         vm_map_entry_unlink(map, prev);
1626                         entry->start = prev->start;
1627                         entry->offset = prev->offset;
1628                         if (entry->prev != &map->header)
1629                                 vm_map_entry_resize_free(map, entry->prev);
1630
1631                         /*
1632                          * If the backing object is a vnode object,
1633                          * vm_object_deallocate() calls vrele().
1634                          * However, vrele() does not lock the vnode
1635                          * because the vnode has additional
1636                          * references.  Thus, the map lock can be kept
1637                          * without causing a lock-order reversal with
1638                          * the vnode lock.
1639                          *
1640                          * Since we count the number of virtual page
1641                          * mappings in object->un_pager.vnp.writemappings,
1642                          * the writemappings value should not be adjusted
1643                          * when the entry is disposed of.
1644                          */
1645                         if (prev->object.vm_object)
1646                                 vm_object_deallocate(prev->object.vm_object);
1647                         if (prev->cred != NULL)
1648                                 crfree(prev->cred);
1649                         vm_map_entry_dispose(map, prev);
1650                 }
1651         }
1652
1653         next = entry->next;
1654         if (next != &map->header) {
1655                 esize = entry->end - entry->start;
1656                 if ((entry->end == next->start) &&
1657                     (next->object.vm_object == entry->object.vm_object) &&
1658                     (!entry->object.vm_object ||
1659                         (entry->offset + esize == next->offset)) &&
1660                     (next->eflags == entry->eflags) &&
1661                     (next->protection == entry->protection) &&
1662                     (next->max_protection == entry->max_protection) &&
1663                     (next->inheritance == entry->inheritance) &&
1664                     (next->wired_count == entry->wired_count) &&
1665                     (next->cred == entry->cred)) {
1666                         vm_map_entry_unlink(map, next);
1667                         entry->end = next->end;
1668                         vm_map_entry_resize_free(map, entry);
1669
1670                         /*
1671                          * See comment above.
1672                          */
1673                         if (next->object.vm_object)
1674                                 vm_object_deallocate(next->object.vm_object);
1675                         if (next->cred != NULL)
1676                                 crfree(next->cred);
1677                         vm_map_entry_dispose(map, next);
1678                 }
1679         }
1680 }
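
/*
 * Illustrative example: two entries merge only when they are adjacent
 * in both the address space and the backing object.  An entry [A, B)
 * at object offset 0 merges with a neighbor [B, C) at offset B - A,
 * provided the eflags, protections, inheritance, wired count, and cred
 * all match; the survivor then spans [A, C) at offset 0, and the
 * absorbed neighbor's object reference and cred are released.
 */
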
1681 /*
1682  *      vm_map_clip_start:      [ internal use only ]
1683  *
1684  *      Asserts that the given entry begins at or after
1685  *      the specified address; if necessary,
1686  *      it splits the entry into two.
1687  */
1688 #define vm_map_clip_start(map, entry, startaddr) \
1689 { \
1690         if (startaddr > entry->start) \
1691                 _vm_map_clip_start(map, entry, startaddr); \
1692 }
1693
1694 /*
1695  *      This routine is called only when it is known that
1696  *      the entry must be split.
1697  */
1698 static void
1699 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1700 {
1701         vm_map_entry_t new_entry;
1702
1703         VM_MAP_ASSERT_LOCKED(map);
1704         KASSERT(entry->end > start && entry->start < start,
1705             ("_vm_map_clip_start: invalid clip of entry %p", entry));
1706
1707         /*
1708          * Split off the front portion -- note that we must insert the new
1709          * entry BEFORE this one, so that this entry has the specified
1710          * starting address.
1711          */
1712         vm_map_simplify_entry(map, entry);
1713
1714         /*
1715          * If there is no object backing this entry, we might as well create
1716          * one now.  If we defer it, an object can get created after the map
1717          * is clipped, and individual objects will be created for the split-up
1718          * map.  This is a bit of a hack, but is also about the best place to
1719          * put this improvement.
1720          */
1721         if (entry->object.vm_object == NULL && !map->system_map &&
1722             (entry->eflags & MAP_ENTRY_GUARD) == 0) {
1723                 vm_object_t object;
1724                 object = vm_object_allocate(OBJT_DEFAULT,
1725                                 atop(entry->end - entry->start));
1726                 entry->object.vm_object = object;
1727                 entry->offset = 0;
1728                 if (entry->cred != NULL) {
1729                         object->cred = entry->cred;
1730                         object->charge = entry->end - entry->start;
1731                         entry->cred = NULL;
1732                 }
1733         } else if (entry->object.vm_object != NULL &&
1734                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1735                    entry->cred != NULL) {
1736                 VM_OBJECT_WLOCK(entry->object.vm_object);
1737                 KASSERT(entry->object.vm_object->cred == NULL,
1738                     ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1739                 entry->object.vm_object->cred = entry->cred;
1740                 entry->object.vm_object->charge = entry->end - entry->start;
1741                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
1742                 entry->cred = NULL;
1743         }
1744
1745         new_entry = vm_map_entry_create(map);
1746         *new_entry = *entry;
1747
1748         new_entry->end = start;
1749         entry->offset += (start - entry->start);
1750         entry->start = start;
1751         if (new_entry->cred != NULL)
1752                 crhold(entry->cred);
1753
1754         vm_map_entry_link(map, entry->prev, new_entry);
1755
1756         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1757                 vm_object_reference(new_entry->object.vm_object);
1758                  * The object's un_pager.vnp.writemappings for a
1759                  * MAP_ENTRY_VN_WRITECNT entry shall be kept as is
1760                  * here.  The virtual page mappings are
1761                  * redistributed among the clipped entries, so the
1762                  * sum is left the same.
1763                  * left the same.
1764                  */
1765         }
1766 }
1767
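/*
 * Illustrative example of the clip arithmetic above: splitting [A, C)
 * at S, where A < S < C, leaves new_entry covering [A, S) at the old
 * object offset, while the original entry becomes [S, C) with its
 * offset advanced by S - A.  Every address therefore still translates
 * to the same object page after the split.
 */
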
1768 /*
1769  *      vm_map_clip_end:        [ internal use only ]
1770  *
1771  *      Asserts that the given entry ends at or before
1772  *      the specified address; if necessary,
1773  *      it splits the entry into two.
1774  */
1775 #define vm_map_clip_end(map, entry, endaddr) \
1776 { \
1777         if ((endaddr) < (entry->end)) \
1778                 _vm_map_clip_end((map), (entry), (endaddr)); \
1779 }
1780
1781 /*
1782  *      This routine is called only when it is known that
1783  *      the entry must be split.
1784  */
1785 static void
1786 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1787 {
1788         vm_map_entry_t new_entry;
1789
1790         VM_MAP_ASSERT_LOCKED(map);
1791         KASSERT(entry->start < end && entry->end > end,
1792             ("_vm_map_clip_end: invalid clip of entry %p", entry));
1793
1794         /*
1795          * If there is no object backing this entry, we might as well create
1796          * one now.  If we defer it, an object can get created after the map
1797          * is clipped, and individual objects will be created for the split-up
1798          * map.  This is a bit of a hack, but is also about the best place to
1799          * put this improvement.
1800          */
1801         if (entry->object.vm_object == NULL && !map->system_map &&
1802             (entry->eflags & MAP_ENTRY_GUARD) == 0) {
1803                 vm_object_t object;
1804                 object = vm_object_allocate(OBJT_DEFAULT,
1805                                 atop(entry->end - entry->start));
1806                 entry->object.vm_object = object;
1807                 entry->offset = 0;
1808                 if (entry->cred != NULL) {
1809                         object->cred = entry->cred;
1810                         object->charge = entry->end - entry->start;
1811                         entry->cred = NULL;
1812                 }
1813         } else if (entry->object.vm_object != NULL &&
1814                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1815                    entry->cred != NULL) {
1816                 VM_OBJECT_WLOCK(entry->object.vm_object);
1817                 KASSERT(entry->object.vm_object->cred == NULL,
1818                     ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1819                 entry->object.vm_object->cred = entry->cred;
1820                 entry->object.vm_object->charge = entry->end - entry->start;
1821                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
1822                 entry->cred = NULL;
1823         }
1824
1825         /*
1826          * Create a new entry and insert it AFTER the specified entry
1827          */
1828         new_entry = vm_map_entry_create(map);
1829         *new_entry = *entry;
1830
1831         new_entry->start = entry->end = end;
1832         new_entry->offset += (end - entry->start);
1833         if (new_entry->cred != NULL)
1834                 crhold(entry->cred);
1835
1836         vm_map_entry_link(map, entry, new_entry);
1837
1838         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1839                 vm_object_reference(new_entry->object.vm_object);
1840         }
1841 }
1842
1843 /*
1844  *      vm_map_submap:          [ kernel use only ]
1845  *
1846  *      Mark the given range as handled by a subordinate map.
1847  *
1848  *      This range must have been created with vm_map_find,
1849  *      and no other operations may have been performed on this
1850  *      range prior to calling vm_map_submap.
1851  *
1852  *      Only a limited number of operations can be performed
1853  *      within this range after calling vm_map_submap:
1854  *              vm_fault
1855  *      [Don't try vm_map_copy!]
1856  *
1857  *      To remove a submapping, one must first remove the
1858  *      range from the superior map, and then destroy the
1859  *      submap (if desired).  [Better yet, don't try it.]
1860  */
1861 int
1862 vm_map_submap(
1863         vm_map_t map,
1864         vm_offset_t start,
1865         vm_offset_t end,
1866         vm_map_t submap)
1867 {
1868         vm_map_entry_t entry;
1869         int result = KERN_INVALID_ARGUMENT;
1870
1871         vm_map_lock(map);
1872
1873         VM_MAP_RANGE_CHECK(map, start, end);
1874
1875         if (vm_map_lookup_entry(map, start, &entry)) {
1876                 vm_map_clip_start(map, entry, start);
1877         } else
1878                 entry = entry->next;
1879
1880         vm_map_clip_end(map, entry, end);
1881
1882         if ((entry->start == start) && (entry->end == end) &&
1883             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1884             (entry->object.vm_object == NULL)) {
1885                 entry->object.sub_map = submap;
1886                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1887                 result = KERN_SUCCESS;
1888         }
1889         vm_map_unlock(map);
1890
1891         return (result);
1892 }
1893
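/*
 * Illustrative sketch of carving a kernel submap out of an existing
 * map, in the order the comment above prescribes: reserve the range
 * with vm_map_find(), then install the submap.  The helper is
 * hypothetical and the error handling is abbreviated; compare
 * kmem_suballoc() for the real pattern.
 */
static vm_map_t
example_make_submap(vm_map_t parent, vm_size_t size)
{
        vm_map_t submap;
        vm_offset_t addr;

        addr = vm_map_min(parent);
        if (vm_map_find(parent, NULL, 0, &addr, size, 0, VMFS_ANY_SPACE,
            VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_NO_CHARGE) != KERN_SUCCESS)
                return (NULL);
        submap = vm_map_create(vm_map_pmap(parent), addr, addr + size);
        if (vm_map_submap(parent, addr, addr + size, submap) != KERN_SUCCESS)
                return (NULL);
        return (submap);
}
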
1894 /*
1895  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
1896  */
1897 #define MAX_INIT_PT     96
1898
1899 /*
1900  *      vm_map_pmap_enter:
1901  *
1902  *      Preload the specified map's pmap with mappings to the specified
1903  *      object's memory-resident pages.  No further physical pages are
1904  *      allocated, and no further virtual pages are retrieved from secondary
1905  *      storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
1906  *      limited number of page mappings are created at the low-end of the
1907  *      specified address range.  (For this purpose, a superpage mapping
1908  *      counts as one page mapping.)  Otherwise, all resident pages within
1909  *      the specified address range are mapped.
1910  */
1911 static void
1912 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1913     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1914 {
1915         vm_offset_t start;
1916         vm_page_t p, p_start;
1917         vm_pindex_t mask, psize, threshold, tmpidx;
1918
1919         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1920                 return;
1921         VM_OBJECT_RLOCK(object);
1922         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1923                 VM_OBJECT_RUNLOCK(object);
1924                 VM_OBJECT_WLOCK(object);
1925                 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1926                         pmap_object_init_pt(map->pmap, addr, object, pindex,
1927                             size);
1928                         VM_OBJECT_WUNLOCK(object);
1929                         return;
1930                 }
1931                 VM_OBJECT_LOCK_DOWNGRADE(object);
1932         }
1933
1934         psize = atop(size);
1935         if (psize + pindex > object->size) {
1936                 if (object->size < pindex) {
1937                         VM_OBJECT_RUNLOCK(object);
1938                         return;
1939                 }
1940                 psize = object->size - pindex;
1941         }
1942
1943         start = 0;
1944         p_start = NULL;
1945         threshold = MAX_INIT_PT;
1946
1947         p = vm_page_find_least(object, pindex);
1948         /*
1949          * Assert: the variable p is either (1) the page with the
1950          * least pindex greater than or equal to the parameter pindex
1951          * or (2) NULL.
1952          */
1953         for (;
1954              p != NULL && (tmpidx = p->pindex - pindex) < psize;
1955              p = TAILQ_NEXT(p, listq)) {
1956                 /*
1957                  * Don't allow a madvise to blow away our truly
1958                  * free pages by allocating pv entries.
1959                  */
1960                 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
1961                     vm_cnt.v_free_count < vm_cnt.v_free_reserved) ||
1962                     ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
1963                     tmpidx >= threshold)) {
1964                         psize = tmpidx;
1965                         break;
1966                 }
1967                 if (p->valid == VM_PAGE_BITS_ALL) {
1968                         if (p_start == NULL) {
1969                                 start = addr + ptoa(tmpidx);
1970                                 p_start = p;
1971                         }
1972                         /* Jump ahead if a superpage mapping is possible. */
1973                         if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
1974                             (pagesizes[p->psind] - 1)) == 0) {
1975                                 mask = atop(pagesizes[p->psind]) - 1;
1976                                 if (tmpidx + mask < psize &&
1977                                     vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
1978                                         p += mask;
1979                                         threshold += mask;
1980                                 }
1981                         }
1982                 } else if (p_start != NULL) {
1983                         pmap_enter_object(map->pmap, start, addr +
1984                             ptoa(tmpidx), p_start, prot);
1985                         p_start = NULL;
1986                 }
1987         }
1988         if (p_start != NULL)
1989                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1990                     p_start, prot);
1991         VM_OBJECT_RUNLOCK(object);
1992 }
1993
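/*
 * Illustrative example of the superpage jump-ahead above, assuming
 * 4KB base pages and 2MB superpages (pagesizes[1]): mask becomes
 * atop(2MB) - 1 = 511.  When a run's first page is 2MB-aligned and all
 * 512 pages are valid, the run can be entered as a single superpage
 * mapping, so p advances by 511 (the loop increment supplies the
 * 512th) and threshold grows by the same amount, charging the run as
 * one page mapping against the MAP_PREFAULT_PARTIAL budget.
 */
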
1994 /*
1995  *      vm_map_protect:
1996  *
1997  *      Sets the protection of the specified address
1998  *      region in the target map.  If "set_max" is
1999  *      specified, the maximum protection is to be set;
2000  *      otherwise, only the current protection is affected.
2001  */
2002 int
2003 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2004                vm_prot_t new_prot, boolean_t set_max)
2005 {
2006         vm_map_entry_t current, entry;
2007         vm_object_t obj;
2008         struct ucred *cred;
2009         vm_prot_t old_prot;
2010
2011         if (start == end)
2012                 return (KERN_SUCCESS);
2013
2014         vm_map_lock(map);
2015
2016         /*
2017          * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
2018          * need to fault pages into the map and will drop the map lock while
2019          * doing so, and the VM object may end up in an inconsistent state if we
2020          * update the protection on the map entry in between faults.
2021          */
2022         vm_map_wait_busy(map);
2023
2024         VM_MAP_RANGE_CHECK(map, start, end);
2025
2026         if (vm_map_lookup_entry(map, start, &entry)) {
2027                 vm_map_clip_start(map, entry, start);
2028         } else {
2029                 entry = entry->next;
2030         }
2031
2032         /*
2033          * Make a first pass to check for protection violations.
2034          */
2035         for (current = entry; current->start < end; current = current->next) {
2036                 if ((current->eflags & MAP_ENTRY_GUARD) != 0)
2037                         continue;
2038                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2039                         vm_map_unlock(map);
2040                         return (KERN_INVALID_ARGUMENT);
2041                 }
2042                 if ((new_prot & current->max_protection) != new_prot) {
2043                         vm_map_unlock(map);
2044                         return (KERN_PROTECTION_FAILURE);
2045                 }
2046         }
2047
2048         /*
2049          * Do an accounting pass for private read-only mappings that
2050          * will now do copy-on-write due to newly allowed write access
2051          * (e.g., a debugger setting a breakpoint in the text segment).
2052          */
2053         for (current = entry; current->start < end; current = current->next) {
2054
2055                 vm_map_clip_end(map, current, end);
2056
2057                 if (set_max ||
2058                     ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
2059                     ENTRY_CHARGED(current) ||
2060                     (current->eflags & MAP_ENTRY_GUARD) != 0) {
2061                         continue;
2062                 }
2063
2064                 cred = curthread->td_ucred;
2065                 obj = current->object.vm_object;
2066
2067                 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
2068                         if (!swap_reserve(current->end - current->start)) {
2069                                 vm_map_unlock(map);
2070                                 return (KERN_RESOURCE_SHORTAGE);
2071                         }
2072                         crhold(cred);
2073                         current->cred = cred;
2074                         continue;
2075                 }
2076
2077                 VM_OBJECT_WLOCK(obj);
2078                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
2079                         VM_OBJECT_WUNLOCK(obj);
2080                         continue;
2081                 }
2082
2083                 /*
2084                  * Charge for the whole object allocation now, since
2085                  * we cannot distinguish between non-charged and
2086                  * charged clipped mapping of the same object later.
2087                  */
2088                 KASSERT(obj->charge == 0,
2089                     ("vm_map_protect: object %p overcharged (entry %p)",
2090                     obj, current));
2091                 if (!swap_reserve(ptoa(obj->size))) {
2092                         VM_OBJECT_WUNLOCK(obj);
2093                         vm_map_unlock(map);
2094                         return (KERN_RESOURCE_SHORTAGE);
2095                 }
2096
2097                 crhold(cred);
2098                 obj->cred = cred;
2099                 obj->charge = ptoa(obj->size);
2100                 VM_OBJECT_WUNLOCK(obj);
2101         }
2102
2103         /*
2104          * Go back and fix up protections. [Note that clipping is not
2105          * necessary the second time.]
2106          */
2107         for (current = entry; current->start < end; current = current->next) {
2108                 if ((current->eflags & MAP_ENTRY_GUARD) != 0)
2109                         continue;
2110
2111                 old_prot = current->protection;
2112
2113                 if (set_max)
2114                         current->protection =
2115                             (current->max_protection = new_prot) &
2116                             old_prot;
2117                 else
2118                         current->protection = new_prot;
2119
2120                 /*
2121                  * For user wired map entries, the normal lazy evaluation of
2122                  * write access upgrades through soft page faults is
2123                  * undesirable.  Instead, immediately copy any pages that are
2124                  * copy-on-write and enable write access in the physical map.
2125                  */
2126                 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2127                     (current->protection & VM_PROT_WRITE) != 0 &&
2128                     (old_prot & VM_PROT_WRITE) == 0)
2129                         vm_fault_copy_entry(map, map, current, current, NULL);
2130
2131                 /*
2132                  * When restricting access, update the physical map.  Worry
2133                  * about copy-on-write here.
2134                  */
2135                 if ((old_prot & ~current->protection) != 0) {
2136 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2137                                                         VM_PROT_ALL)
2138                         pmap_protect(map->pmap, current->start,
2139                             current->end,
2140                             current->protection & MASK(current));
2141 #undef  MASK
2142                 }
2143                 vm_map_simplify_entry(map, current);
2144         }
2145         vm_map_unlock(map);
2146         return (KERN_SUCCESS);
2147 }
2148
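/*
 * Illustrative example of the set_max rule above: the new current
 * protection is the old one intersected with the new maximum.  With
 * old_prot = VM_PROT_READ | VM_PROT_WRITE and set_max lowering the
 * maximum to VM_PROT_READ | VM_PROT_EXECUTE, the entry keeps only
 * VM_PROT_READ, and the revoked VM_PROT_WRITE is withdrawn from the
 * pmap by pmap_protect(), with MASK() ensuring that a MAP_ENTRY_COW
 * entry never exposes write access through the physical map.
 */
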
2149 /*
2150  *      vm_map_madvise:
2151  *
2152  *      This routine traverses a process's map handling the madvise
2153  *      system call.  Advisories are classified as either those affecting
2154  *      the vm_map_entry structure or those affecting the underlying
2155  *      objects.
2156  */
2157 int
2158 vm_map_madvise(
2159         vm_map_t map,
2160         vm_offset_t start,
2161         vm_offset_t end,
2162         int behav)
2163 {
2164         vm_map_entry_t current, entry;
2165         int modify_map = 0;
2166
2167         /*
2168          * Some madvise calls directly modify the vm_map_entry, in which case
2169          * we need to use an exclusive lock on the map and we need to perform
2170          * various clipping operations.  Otherwise we only need a read-lock
2171          * on the map.
2172          */
2173         switch (behav) {
2174         case MADV_NORMAL:
2175         case MADV_SEQUENTIAL:
2176         case MADV_RANDOM:
2177         case MADV_NOSYNC:
2178         case MADV_AUTOSYNC:
2179         case MADV_NOCORE:
2180         case MADV_CORE:
2181                 if (start == end)
2182                         return (KERN_SUCCESS);
2183                 modify_map = 1;
2184                 vm_map_lock(map);
2185                 break;
2186         case MADV_WILLNEED:
2187         case MADV_DONTNEED:
2188         case MADV_FREE:
2189                 if (start == end)
2190                         return (KERN_SUCCESS);
2191                 vm_map_lock_read(map);
2192                 break;
2193         default:
2194                 return (KERN_INVALID_ARGUMENT);
2195         }
2196
2197         /*
2198          * Locate starting entry and clip if necessary.
2199          */
2200         VM_MAP_RANGE_CHECK(map, start, end);
2201
2202         if (vm_map_lookup_entry(map, start, &entry)) {
2203                 if (modify_map)
2204                         vm_map_clip_start(map, entry, start);
2205         } else {
2206                 entry = entry->next;
2207         }
2208
2209         if (modify_map) {
2210                 /*
2211                  * madvise behaviors that are implemented in the vm_map_entry.
2212                  *
2213                  * We clip the vm_map_entry so that behavioral changes are
2214                  * limited to the specified address range.
2215                  */
2216                 for (current = entry; current->start < end;
2217                     current = current->next) {
2218                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2219                                 continue;
2220
2221                         vm_map_clip_end(map, current, end);
2222
2223                         switch (behav) {
2224                         case MADV_NORMAL:
2225                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2226                                 break;
2227                         case MADV_SEQUENTIAL:
2228                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2229                                 break;
2230                         case MADV_RANDOM:
2231                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2232                                 break;
2233                         case MADV_NOSYNC:
2234                                 current->eflags |= MAP_ENTRY_NOSYNC;
2235                                 break;
2236                         case MADV_AUTOSYNC:
2237                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
2238                                 break;
2239                         case MADV_NOCORE:
2240                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
2241                                 break;
2242                         case MADV_CORE:
2243                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2244                                 break;
2245                         default:
2246                                 break;
2247                         }
2248                         vm_map_simplify_entry(map, current);
2249                 }
2250                 vm_map_unlock(map);
2251         } else {
2252                 vm_pindex_t pstart, pend;
2253
2254                 /*
2255                  * madvise behaviors that are implemented in the underlying
2256                  * vm_object.
2257                  *
2258                  * Since we don't clip the vm_map_entry, we have to clip
2259                  * the vm_object pindex and count.
2260                  */
2261                 for (current = entry; current->start < end;
2262                     current = current->next) {
2263                         vm_offset_t useEnd, useStart;
2264
2265                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2266                                 continue;
2267
2268                         pstart = OFF_TO_IDX(current->offset);
2269                         pend = pstart + atop(current->end - current->start);
2270                         useStart = current->start;
2271                         useEnd = current->end;
2272
2273                         if (current->start < start) {
2274                                 pstart += atop(start - current->start);
2275                                 useStart = start;
2276                         }
2277                         if (current->end > end) {
2278                                 pend -= atop(current->end - end);
2279                                 useEnd = end;
2280                         }
2281
2282                         if (pstart >= pend)
2283                                 continue;
2284
2285                         /*
2286                          * Perform the pmap_advise() before clearing
2287                          * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2288                          * concurrent pmap operation, such as pmap_remove(),
2289                          * could clear a reference in the pmap and set
2290                          * PGA_REFERENCED on the page before the pmap_advise()
2291                          * had completed.  Consequently, the page would appear
2292                          * referenced based upon an old reference that
2293                          * occurred before this pmap_advise() ran.
2294                          */
2295                         if (behav == MADV_DONTNEED || behav == MADV_FREE)
2296                                 pmap_advise(map->pmap, useStart, useEnd,
2297                                     behav);
2298
2299                         vm_object_madvise(current->object.vm_object, pstart,
2300                             pend, behav);
2301
2302                         /*
2303                          * Pre-populate paging structures in the
2304                          * WILLNEED case.  For wired entries, the
2305                          * paging structures are already populated.
2306                          */
2307                         if (behav == MADV_WILLNEED &&
2308                             current->wired_count == 0) {
2309                                 vm_map_pmap_enter(map,
2310                                     useStart,
2311                                     current->protection,
2312                                     current->object.vm_object,
2313                                     pstart,
2314                                     ptoa(pend - pstart),
2315                                     MAP_PREFAULT_MADVISE
2316                                 );
2317                         }
2318                 }
2319                 vm_map_unlock_read(map);
2320         }
2321         return (0);
2322 }
2323
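/*
 * Illustrative example of the object clipping above, assuming 4KB
 * pages: for an entry spanning [16KB, 32KB) at object offset 8KB,
 * madvising [20KB, 28KB) yields pstart = OFF_TO_IDX(8KB) +
 * atop(20KB - 16KB) = 3 and pend = (2 + 4) - atop(32KB - 28KB) = 5, so
 * vm_object_madvise() sees object pages [3, 5) -- exactly the advised
 * bytes -- without any vm_map_entry clipping under the read lock.
 */
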
2324
2325 /*
2326  *      vm_map_inherit:
2327  *
2328  *      Sets the inheritance of the specified address
2329  *      range in the target map.  Inheritance
2330  *      affects how the map will be shared with
2331  *      child maps at the time of vmspace_fork.
2332  */
2333 int
2334 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2335                vm_inherit_t new_inheritance)
2336 {
2337         vm_map_entry_t entry;
2338         vm_map_entry_t temp_entry;
2339
2340         switch (new_inheritance) {
2341         case VM_INHERIT_NONE:
2342         case VM_INHERIT_COPY:
2343         case VM_INHERIT_SHARE:
2344         case VM_INHERIT_ZERO:
2345                 break;
2346         default:
2347                 return (KERN_INVALID_ARGUMENT);
2348         }
2349         if (start == end)
2350                 return (KERN_SUCCESS);
2351         vm_map_lock(map);
2352         VM_MAP_RANGE_CHECK(map, start, end);
2353         if (vm_map_lookup_entry(map, start, &temp_entry)) {
2354                 entry = temp_entry;
2355                 vm_map_clip_start(map, entry, start);
2356         } else
2357                 entry = temp_entry->next;
2358         while (entry->start < end) {
2359                 vm_map_clip_end(map, entry, end);
2360                 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
2361                     new_inheritance != VM_INHERIT_ZERO)
2362                         entry->inheritance = new_inheritance;
2363                 vm_map_simplify_entry(map, entry);
2364                 entry = entry->next;
2365         }
2366         vm_map_unlock(map);
2367         return (KERN_SUCCESS);
2368 }
2369
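/*
 * Illustrative note on the accepted inheritance values, as applied by
 * vmspace_fork(): VM_INHERIT_SHARE gives the child a mapping of the
 * same object, VM_INHERIT_COPY gives a copy-on-write snapshot,
 * VM_INHERIT_NONE leaves the range unmapped in the child, and
 * VM_INHERIT_ZERO maps fresh zero-filled memory.  Guard entries keep
 * their inheritance when VM_INHERIT_ZERO is requested, as the check in
 * the loop above shows.
 */
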
2370 /*
2371  *      vm_map_unwire:
2372  *
2373  *      Implements both kernel and user unwiring.
2374  */
2375 int
2376 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2377     int flags)
2378 {
2379         vm_map_entry_t entry, first_entry, tmp_entry;
2380         vm_offset_t saved_start;
2381         unsigned int last_timestamp;
2382         int rv;
2383         boolean_t need_wakeup, result, user_unwire;
2384
2385         if (start == end)
2386                 return (KERN_SUCCESS);
2387         user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2388         vm_map_lock(map);
2389         VM_MAP_RANGE_CHECK(map, start, end);
2390         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2391                 if (flags & VM_MAP_WIRE_HOLESOK)
2392                         first_entry = first_entry->next;
2393                 else {
2394                         vm_map_unlock(map);
2395                         return (KERN_INVALID_ADDRESS);
2396                 }
2397         }
2398         last_timestamp = map->timestamp;
2399         entry = first_entry;
2400         while (entry->start < end) {
2401                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2402                         /*
2403                          * We have not yet clipped the entry.
2404                          */
2405                         saved_start = (start >= entry->start) ? start :
2406                             entry->start;
2407                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2408                         if (vm_map_unlock_and_wait(map, 0)) {
2409                                 /*
2410                                  * Allow interruption of user unwiring?
2411                                  */
2412                         }
2413                         vm_map_lock(map);
2414                         if (last_timestamp + 1 != map->timestamp) {
2415                                 /*
2416                                  * Look again for the entry because the map was
2417                                  * modified while it was unlocked.
2418                                  * Specifically, the entry may have been
2419                                  * clipped, merged, or deleted.
2420                                  */
2421                                 if (!vm_map_lookup_entry(map, saved_start,
2422                                     &tmp_entry)) {
2423                                         if (flags & VM_MAP_WIRE_HOLESOK)
2424                                                 tmp_entry = tmp_entry->next;
2425                                         else {
2426                                                 if (saved_start == start) {
2427                                                         /*
2428                                                          * first_entry has been deleted.
2429                                                          */
2430                                                         vm_map_unlock(map);
2431                                                         return (KERN_INVALID_ADDRESS);
2432                                                 }
2433                                                 end = saved_start;
2434                                                 rv = KERN_INVALID_ADDRESS;
2435                                                 goto done;
2436                                         }
2437                                 }
2438                                 if (entry == first_entry)
2439                                         first_entry = tmp_entry;
2440                                 else
2441                                         first_entry = NULL;
2442                                 entry = tmp_entry;
2443                         }
2444                         last_timestamp = map->timestamp;
2445                         continue;
2446                 }
2447                 vm_map_clip_start(map, entry, start);
2448                 vm_map_clip_end(map, entry, end);
2449                 /*
2450                  * Mark the entry in case the map lock is released.  (See
2451                  * above.)
2452                  */
2453                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2454                     entry->wiring_thread == NULL,
2455                     ("owned map entry %p", entry));
2456                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2457                 entry->wiring_thread = curthread;
2458                 /*
2459                  * Check the map for holes in the specified region.
2460                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2461                  */
2462                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2463                     (entry->end < end && entry->next->start > entry->end)) {
2464                         end = entry->end;
2465                         rv = KERN_INVALID_ADDRESS;
2466                         goto done;
2467                 }
2468                 /*
2469                  * If system unwiring, require that the entry is system wired.
2470                  */
2471                 if (!user_unwire &&
2472                     vm_map_entry_system_wired_count(entry) == 0) {
2473                         end = entry->end;
2474                         rv = KERN_INVALID_ARGUMENT;
2475                         goto done;
2476                 }
2477                 entry = entry->next;
2478         }
2479         rv = KERN_SUCCESS;
2480 done:
2481         need_wakeup = FALSE;
2482         if (first_entry == NULL) {
2483                 result = vm_map_lookup_entry(map, start, &first_entry);
2484                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2485                         first_entry = first_entry->next;
2486                 else
2487                         KASSERT(result, ("vm_map_unwire: lookup failed"));
2488         }
2489         for (entry = first_entry; entry->start < end; entry = entry->next) {
2490                 /*
2491                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
2492                  * space in the unwired region could have been mapped
2493                  * while the map lock was dropped for draining
2494                  * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2495                  * could be simultaneously wiring this new mapping
2496                  * entry.  Detect these cases and skip any entries
2497                  * marked as in transition by us.
2498                  */
2499                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2500                     entry->wiring_thread != curthread) {
2501                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2502                             ("vm_map_unwire: !HOLESOK and new/changed entry"));
2503                         continue;
2504                 }
2505
2506                 if (rv == KERN_SUCCESS && (!user_unwire ||
2507                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2508                         if (user_unwire)
2509                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2510                         if (entry->wired_count == 1)
2511                                 vm_map_entry_unwire(map, entry);
2512                         else
2513                                 entry->wired_count--;
2514                 }
2515                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2516                     ("vm_map_unwire: in-transition flag missing %p", entry));
2517                 KASSERT(entry->wiring_thread == curthread,
2518                     ("vm_map_unwire: alien wire %p", entry));
2519                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2520                 entry->wiring_thread = NULL;
2521                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2522                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2523                         need_wakeup = TRUE;
2524                 }
2525                 vm_map_simplify_entry(map, entry);
2526         }
2527         vm_map_unlock(map);
2528         if (need_wakeup)
2529                 vm_map_wakeup(map);
2530         return (rv);
2531 }
2532
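/*
 * Illustrative summary of the in-transition protocol used above and in
 * vm_map_wire() below: the first pass claims each entry by setting
 * MAP_ENTRY_IN_TRANSITION and wiring_thread = curthread, sleeping and
 * rescanning from saved_start whenever another thread already owns an
 * entry.  Because the map lock is dropped along the way, the code
 * compares map->timestamp against last_timestamp + 1 (the increment
 * accounts for this thread's own unlock) to detect whether the entry
 * list changed and must be looked up again.  The cleanup pass then
 * skips entries whose wiring_thread is not curthread, since under
 * VM_MAP_WIRE_HOLESOK they may belong to a concurrent operation.
 */
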
2533 /*
2534  *      vm_map_wire_entry_failure:
2535  *
2536  *      Handle a wiring failure on the given entry.
2537  *
2538  *      The map should be locked.
2539  */
2540 static void
2541 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
2542     vm_offset_t failed_addr)
2543 {
2544
2545         VM_MAP_ASSERT_LOCKED(map);
2546         KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
2547             entry->wired_count == 1,
2548             ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
2549         KASSERT(failed_addr < entry->end,
2550             ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
2551
2552         /*
2553          * If any pages at the start of this entry were successfully wired,
2554          * then unwire them.
2555          */
2556         if (failed_addr > entry->start) {
2557                 pmap_unwire(map->pmap, entry->start, failed_addr);
2558                 vm_object_unwire(entry->object.vm_object, entry->offset,
2559                     failed_addr - entry->start, PQ_ACTIVE);
2560         }
2561
2562         /*
2563          * Assign an out-of-range value to represent the failure to wire this
2564          * entry.
2565          */
2566         entry->wired_count = -1;
2567 }
2568
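/*
 * Illustrative note: the -1 stored above is a sentinel, not a count.
 * The cleanup pass at the end of vm_map_wire() distinguishes three
 * cases per entry: wired_count == -1 (the entry itself failed, so
 * nothing remains wired and the count is reset to 0), a failure on a
 * later entry (this entry's wiring is undone or its count decremented),
 * and overall success (MAP_ENTRY_USER_WIRED is set for user wirings).
 */
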
2569 /*
2570  *      vm_map_wire:
2571  *
2572  *      Implements both kernel and user wiring.
2573  */
2574 int
2575 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2576     int flags)
2577 {
2578         vm_map_entry_t entry, first_entry, tmp_entry;
2579         vm_offset_t faddr, saved_end, saved_start;
2580         unsigned int last_timestamp;
2581         int rv;
2582         boolean_t need_wakeup, result, user_wire;
2583         vm_prot_t prot;
2584
2585         if (start == end)
2586                 return (KERN_SUCCESS);
2587         prot = 0;
2588         if (flags & VM_MAP_WIRE_WRITE)
2589                 prot |= VM_PROT_WRITE;
2590         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2591         vm_map_lock(map);
2592         VM_MAP_RANGE_CHECK(map, start, end);
2593         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2594                 if (flags & VM_MAP_WIRE_HOLESOK)
2595                         first_entry = first_entry->next;
2596                 else {
2597                         vm_map_unlock(map);
2598                         return (KERN_INVALID_ADDRESS);
2599                 }
2600         }
2601         last_timestamp = map->timestamp;
2602         entry = first_entry;
2603         while (entry->start < end) {
2604                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2605                         /*
2606                          * We have not yet clipped the entry.
2607                          */
2608                         saved_start = (start >= entry->start) ? start :
2609                             entry->start;
2610                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2611                         if (vm_map_unlock_and_wait(map, 0)) {
2612                                 /*
2613                                  * Allow interruption of user wiring?
2614                                  */
2615                         }
2616                         vm_map_lock(map);
2617                         if (last_timestamp + 1 != map->timestamp) {
2618                                 /*
2619                                  * Look again for the entry because the map was
2620                                  * modified while it was unlocked.
2621                                  * Specifically, the entry may have been
2622                                  * clipped, merged, or deleted.
2623                                  */
2624                                 if (!vm_map_lookup_entry(map, saved_start,
2625                                     &tmp_entry)) {
2626                                         if (flags & VM_MAP_WIRE_HOLESOK)
2627                                                 tmp_entry = tmp_entry->next;
2628                                         else {
2629                                                 if (saved_start == start) {
2630                                                         /*
2631                                                          * first_entry has been deleted.
2632                                                          */
2633                                                         vm_map_unlock(map);
2634                                                         return (KERN_INVALID_ADDRESS);
2635                                                 }
2636                                                 end = saved_start;
2637                                                 rv = KERN_INVALID_ADDRESS;
2638                                                 goto done;
2639                                         }
2640                                 }
2641                                 if (entry == first_entry)
2642                                         first_entry = tmp_entry;
2643                                 else
2644                                         first_entry = NULL;
2645                                 entry = tmp_entry;
2646                         }
2647                         last_timestamp = map->timestamp;
2648                         continue;
2649                 }
2650                 vm_map_clip_start(map, entry, start);
2651                 vm_map_clip_end(map, entry, end);
2652                 /*
2653                  * Mark the entry in case the map lock is released.  (See
2654                  * above.)
2655                  */
2656                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2657                     entry->wiring_thread == NULL,
2658                     ("owned map entry %p", entry));
2659                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2660                 entry->wiring_thread = curthread;
2661                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2662                     || (entry->protection & prot) != prot) {
2663                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2664                         if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2665                                 end = entry->end;
2666                                 rv = KERN_INVALID_ADDRESS;
2667                                 goto done;
2668                         }
2669                         goto next_entry;
2670                 }
2671                 if (entry->wired_count == 0) {
2672                         entry->wired_count++;
2673                         saved_start = entry->start;
2674                         saved_end = entry->end;
2675
2676                         /*
2677                          * Release the map lock, relying on the in-transition
2678                          * mark.  Mark the map busy for fork.
2679                          */
2680                         vm_map_busy(map);
2681                         vm_map_unlock(map);
2682
2683                         faddr = saved_start;
2684                         do {
2685                                 /*
2686                                  * Simulate a fault to get the page and enter
2687                                  * it into the physical map.
2688                                  */
2689                                 if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
2690                                     VM_FAULT_WIRE)) != KERN_SUCCESS)
2691                                         break;
2692                         } while ((faddr += PAGE_SIZE) < saved_end);
2693                         vm_map_lock(map);
2694                         vm_map_unbusy(map);
2695                         if (last_timestamp + 1 != map->timestamp) {
2696                                 /*
2697                                  * Look again for the entry because the map was
2698                                  * modified while it was unlocked.  The entry
2699                                  * may have been clipped, but NOT merged or
2700                                  * deleted.
2701                                  */
2702                                 result = vm_map_lookup_entry(map, saved_start,
2703                                     &tmp_entry);
2704                                 KASSERT(result, ("vm_map_wire: lookup failed"));
2705                                 if (entry == first_entry)
2706                                         first_entry = tmp_entry;
2707                                 else
2708                                         first_entry = NULL;
2709                                 entry = tmp_entry;
2710                                 while (entry->end < saved_end) {
2711                                         /*
2712                                          * In case of failure, handle entries
2713                                          * that were not fully wired here;
2714                                          * fully wired entries are handled
2715                                          * later.
2716                                          */
2717                                         if (rv != KERN_SUCCESS &&
2718                                             faddr < entry->end)
2719                                                 vm_map_wire_entry_failure(map,
2720                                                     entry, faddr);
2721                                         entry = entry->next;
2722                                 }
2723                         }
2724                         last_timestamp = map->timestamp;
2725                         if (rv != KERN_SUCCESS) {
2726                                 vm_map_wire_entry_failure(map, entry, faddr);
2727                                 end = entry->end;
2728                                 goto done;
2729                         }
2730                 } else if (!user_wire ||
2731                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2732                         entry->wired_count++;
2733                 }
2734                 /*
2735                  * Check the map for holes in the specified region.
2736                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2737                  */
2738         next_entry:
2739                 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 &&
2740                     entry->end < end && entry->next->start > entry->end) {
2741                         end = entry->end;
2742                         rv = KERN_INVALID_ADDRESS;
2743                         goto done;
2744                 }
2745                 entry = entry->next;
2746         }
2747         rv = KERN_SUCCESS;
2748 done:
2749         need_wakeup = FALSE;
2750         if (first_entry == NULL) {
2751                 result = vm_map_lookup_entry(map, start, &first_entry);
2752                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2753                         first_entry = first_entry->next;
2754                 else
2755                         KASSERT(result, ("vm_map_wire: lookup failed"));
2756         }
2757         for (entry = first_entry; entry->start < end; entry = entry->next) {
2758                 /*
2759                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
2760                  * space in the unwired region could have been mapped
2761                  * while the map lock was dropped for faulting in the
2762                  * pages or draining MAP_ENTRY_IN_TRANSITION.
2763                  * Moreover, another thread could be simultaneously
2764                  * wiring this new mapping entry.  Detect these cases
2765                  * and skip any entries marked as in transition not by us.
2766                  */
2767                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2768                     entry->wiring_thread != curthread) {
2769                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2770                             ("vm_map_wire: !HOLESOK and new/changed entry"));
2771                         continue;
2772                 }
2773
2774                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2775                         goto next_entry_done;
2776
2777                 if (rv == KERN_SUCCESS) {
2778                         if (user_wire)
2779                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
2780                 } else if (entry->wired_count == -1) {
2781                         /*
2782                          * Wiring failed on this entry.  Thus, unwiring is
2783                          * unnecessary.
2784                          */
2785                         entry->wired_count = 0;
2786                 } else if (!user_wire ||
2787                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2788                         /*
2789                          * Undo the wiring.  Wiring succeeded on this entry
2790                          * but failed on a later entry.
2791                          */
2792                         if (entry->wired_count == 1)
2793                                 vm_map_entry_unwire(map, entry);
2794                         else
2795                                 entry->wired_count--;
2796                 }
2797         next_entry_done:
2798                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2799                     ("vm_map_wire: in-transition flag missing %p", entry));
2800                 KASSERT(entry->wiring_thread == curthread,
2801                     ("vm_map_wire: alien wire %p", entry));
2802                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2803                     MAP_ENTRY_WIRE_SKIPPED);
2804                 entry->wiring_thread = NULL;
2805                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2806                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2807                         need_wakeup = TRUE;
2808                 }
2809                 vm_map_simplify_entry(map, entry);
2810         }
2811         vm_map_unlock(map);
2812         if (need_wakeup)
2813                 vm_map_wakeup(map);
2814         return (rv);
2815 }
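
/*
 * Illustrative sketch (not part of the original file): mlock(2) and
 * mlockall(2) reach the wiring loop above through vm_map_wire() with
 * the user-wiring flags; "map", "start", and "end" are hypothetical.
 *
 *      error = vm_map_wire(map, start, end,
 *          VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */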
2816
2817 /*
2818  * vm_map_sync
2819  *
2820  * Push any dirty cached pages in the address range to their pager.
2821  * If syncio is TRUE, dirty pages are written synchronously.
2822  * If invalidate is TRUE, any cached pages are freed as well.
2823  *
2824  * If the size of the region from start to end is zero, we are
2825  * supposed to flush all modified pages within the region containing
2826  * start.  Unfortunately, a region can be split or coalesced with
2827  * neighboring regions, making it difficult to determine what the
2828  * original region was.  Therefore, we approximate this requirement by
2829  * flushing the current region containing start.
2830  *
2831  * Returns an error if any part of the specified range is not mapped.
2832  */
2833 int
2834 vm_map_sync(
2835         vm_map_t map,
2836         vm_offset_t start,
2837         vm_offset_t end,
2838         boolean_t syncio,
2839         boolean_t invalidate)
2840 {
2841         vm_map_entry_t current;
2842         vm_map_entry_t entry;
2843         vm_size_t size;
2844         vm_object_t object;
2845         vm_ooffset_t offset;
2846         unsigned int last_timestamp;
2847         boolean_t failed;
2848
2849         vm_map_lock_read(map);
2850         VM_MAP_RANGE_CHECK(map, start, end);
2851         if (!vm_map_lookup_entry(map, start, &entry)) {
2852                 vm_map_unlock_read(map);
2853                 return (KERN_INVALID_ADDRESS);
2854         } else if (start == end) {
2855                 start = entry->start;
2856                 end = entry->end;
2857         }
2858         /*
2859          * Make a first pass to check for user-wired memory and holes.
2860          */
2861         for (current = entry; current->start < end; current = current->next) {
2862                 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2863                         vm_map_unlock_read(map);
2864                         return (KERN_INVALID_ARGUMENT);
2865                 }
2866                 if (end > current->end &&
2867                     current->end != current->next->start) {
2868                         vm_map_unlock_read(map);
2869                         return (KERN_INVALID_ADDRESS);
2870                 }
2871         }
2872
2873         if (invalidate)
2874                 pmap_remove(map->pmap, start, end);
2875         failed = FALSE;
2876
2877         /*
2878          * Make a second pass, cleaning/uncaching pages from the indicated
2879          * objects as we go.
2880          */
2881         for (current = entry; current->start < end;) {
2882                 offset = current->offset + (start - current->start);
2883                 size = (end <= current->end ? end : current->end) - start;
2884                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2885                         vm_map_t smap;
2886                         vm_map_entry_t tentry;
2887                         vm_size_t tsize;
2888
2889                         smap = current->object.sub_map;
2890                         vm_map_lock_read(smap);
2891                         (void) vm_map_lookup_entry(smap, offset, &tentry);
2892                         tsize = tentry->end - offset;
2893                         if (tsize < size)
2894                                 size = tsize;
2895                         object = tentry->object.vm_object;
2896                         offset = tentry->offset + (offset - tentry->start);
2897                         vm_map_unlock_read(smap);
2898                 } else {
2899                         object = current->object.vm_object;
2900                 }
2901                 vm_object_reference(object);
2902                 last_timestamp = map->timestamp;
2903                 vm_map_unlock_read(map);
2904                 if (!vm_object_sync(object, offset, size, syncio, invalidate))
2905                         failed = TRUE;
2906                 start += size;
2907                 vm_object_deallocate(object);
2908                 vm_map_lock_read(map);
2909                 if (last_timestamp == map->timestamp ||
2910                     !vm_map_lookup_entry(map, start, &current))
2911                         current = current->next;
2912         }
2913
2914         vm_map_unlock_read(map);
2915         return (failed ? KERN_FAILURE : KERN_SUCCESS);
2916 }
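
/*
 * Illustrative sketch (an assumption about the calling path, not code
 * in this file): vm_map_sync() is the backend of msync(2).  A userland
 * caller flushing a dirty shared file mapping might look like this,
 * where "fd" and "len" are hypothetical:
 *
 *      char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *          MAP_SHARED, fd, 0);
 *      p[0] = 1;                               (dirty a page)
 *      if (msync(p, len, MS_SYNC) == -1)       (the syncio path)
 *              err(1, "msync");
 */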
2917
2918 /*
2919  *      vm_map_entry_unwire:    [ internal use only ]
2920  *
2921  *      Make the region specified by this entry pageable.
2922  *
2923  *      The map in question should be locked.
2924  *      [This is the reason for this routine's existence.]
2925  */
2926 static void
2927 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2928 {
2929
2930         VM_MAP_ASSERT_LOCKED(map);
2931         KASSERT(entry->wired_count > 0,
2932             ("vm_map_entry_unwire: entry %p isn't wired", entry));
2933         pmap_unwire(map->pmap, entry->start, entry->end);
2934         vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
2935             entry->start, PQ_ACTIVE);
2936         entry->wired_count = 0;
2937 }
2938
2939 static void
2940 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2941 {
2942
2943         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2944                 vm_object_deallocate(entry->object.vm_object);
2945         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2946 }
2947
2948 /*
2949  *      vm_map_entry_delete:    [ internal use only ]
2950  *
2951  *      Deallocate the given entry from the target map.
2952  */
2953 static void
2954 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2955 {
2956         vm_object_t object;
2957         vm_pindex_t offidxstart, offidxend, count, size1;
2958         vm_size_t size;
2959
2960         vm_map_entry_unlink(map, entry);
2961         object = entry->object.vm_object;
2962
2963         if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
2964                 MPASS(entry->cred == NULL);
2965                 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
2966                 MPASS(object == NULL);
2967                 vm_map_entry_deallocate(entry, map->system_map);
2968                 return;
2969         }
2970
2971         size = entry->end - entry->start;
2972         map->size -= size;
2973
2974         if (entry->cred != NULL) {
2975                 swap_release_by_cred(size, entry->cred);
2976                 crfree(entry->cred);
2977         }
2978
2979         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2980             (object != NULL)) {
2981                 KASSERT(entry->cred == NULL || object->cred == NULL ||
2982                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2983                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2984                 count = atop(size);
2985                 offidxstart = OFF_TO_IDX(entry->offset);
2986                 offidxend = offidxstart + count;
2987                 VM_OBJECT_WLOCK(object);
2988                 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
2989                     OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2990                     object == kernel_object || object == kmem_object)) {
2991                         vm_object_collapse(object);
2992
2993                         /*
2994                          * The option OBJPR_NOTMAPPED can be passed here
2995                          * because vm_map_delete() already performed
2996                          * pmap_remove() on the only mapping to this range
2997                          * of pages.
2998                          */
2999                         vm_object_page_remove(object, offidxstart, offidxend,
3000                             OBJPR_NOTMAPPED);
3001                         if (object->type == OBJT_SWAP)
3002                                 swap_pager_freespace(object, offidxstart,
3003                                     count);
3004                         if (offidxend >= object->size &&
3005                             offidxstart < object->size) {
3006                                 size1 = object->size;
3007                                 object->size = offidxstart;
3008                                 if (object->cred != NULL) {
3009                                         size1 -= object->size;
3010                                         KASSERT(object->charge >= ptoa(size1),
3011                                             ("object %p charge < 0", object));
3012                                         swap_release_by_cred(ptoa(size1),
3013                                             object->cred);
3014                                         object->charge -= ptoa(size1);
3015                                 }
3016                         }
3017                 }
3018                 VM_OBJECT_WUNLOCK(object);
3019         } else
3020                 entry->object.vm_object = NULL;
3021         if (map->system_map)
3022                 vm_map_entry_deallocate(entry, TRUE);
3023         else {
3024                 entry->next = curthread->td_map_def_user;
3025                 curthread->td_map_def_user = entry;
3026         }
3027 }
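
/*
 * Illustrative note (not in the original file): entries queued on
 * td_map_def_user above are drained by vm_map_process_deferred() once
 * the map lock has been dropped, so the final object deallocation,
 * which may need to acquire a vnode lock, never runs with a map lock
 * held.  vmspace_fork() below relies on the same mechanism.
 */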
3028
3029 /*
3030  *      vm_map_delete:  [ internal use only ]
3031  *
3032  *      Deallocates the given address range from the target
3033  *      map.
3034  */
3035 int
3036 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
3037 {
3038         vm_map_entry_t entry;
3039         vm_map_entry_t first_entry;
3040
3041         VM_MAP_ASSERT_LOCKED(map);
3042         if (start == end)
3043                 return (KERN_SUCCESS);
3044
3045         /*
3046          * Find the start of the region, and clip it
3047          */
3048         if (!vm_map_lookup_entry(map, start, &first_entry))
3049                 entry = first_entry->next;
3050         else {
3051                 entry = first_entry;
3052                 vm_map_clip_start(map, entry, start);
3053         }
3054
3055         /*
3056          * Step through all entries in this region
3057          */
3058         while (entry->start < end) {
3059                 vm_map_entry_t next;
3060
3061                 /*
3062                  * Wait for wiring or unwiring of an entry to complete.
3063                  * Also wait for any system wirings to disappear on
3064                  * user maps.
3065                  */
3066                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
3067                     (vm_map_pmap(map) != kernel_pmap &&
3068                     vm_map_entry_system_wired_count(entry) != 0)) {
3069                         unsigned int last_timestamp;
3070                         vm_offset_t saved_start;
3071                         vm_map_entry_t tmp_entry;
3072
3073                         saved_start = entry->start;
3074                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3075                         last_timestamp = map->timestamp;
3076                         (void) vm_map_unlock_and_wait(map, 0);
3077                         vm_map_lock(map);
3078                         if (last_timestamp + 1 != map->timestamp) {
3079                                 /*
3080                                  * Look again for the entry because the map was
3081                                  * modified while it was unlocked.
3082                                  * Specifically, the entry may have been
3083                                  * clipped, merged, or deleted.
3084                                  */
3085                                 if (!vm_map_lookup_entry(map, saved_start,
3086                                                          &tmp_entry))
3087                                         entry = tmp_entry->next;
3088                                 else {
3089                                         entry = tmp_entry;
3090                                         vm_map_clip_start(map, entry,
3091                                                           saved_start);
3092                                 }
3093                         }
3094                         continue;
3095                 }
3096                 vm_map_clip_end(map, entry, end);
3097
3098                 next = entry->next;
3099
3100                 /*
3101                  * Unwire before removing addresses from the pmap; otherwise,
3102                  * unwiring will put the entries back in the pmap.
3103                  */
3104                 if (entry->wired_count != 0)
3105                         vm_map_entry_unwire(map, entry);
3106
3107                 /*
3108                  * Remove mappings for the pages, but only if the
3109                  * mappings could exist.  For instance, it does not
3110                  * make sense to call pmap_remove() for guard entries.
3111                  */
3112                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
3113                     entry->object.vm_object != NULL)
3114                         pmap_remove(map->pmap, entry->start, entry->end);
3115
3116                 /*
3117                  * Delete the entry only after removing all pmap
3118                  * entries pointing to its pages.  (Otherwise, its
3119                  * page frames may be reallocated, and any modify bits
3120                  * will be set in the wrong object!)
3121                  */
3122                 vm_map_entry_delete(map, entry);
3123                 entry = next;
3124         }
3125         return (KERN_SUCCESS);
3126 }
3127
3128 /*
3129  *      vm_map_remove:
3130  *
3131  *      Remove the given address range from the target map.
3132  *      This is the exported form of vm_map_delete.
3133  */
3134 int
3135 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3136 {
3137         int result;
3138
3139         vm_map_lock(map);
3140         VM_MAP_RANGE_CHECK(map, start, end);
3141         result = vm_map_delete(map, start, end);
3142         vm_map_unlock(map);
3143         return (result);
3144 }
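
/*
 * Illustrative sketch (hypothetical names, not code in this file): a
 * kernel consumer tearing down a previously mapped range calls the
 * exported wrapper rather than vm_map_delete() directly.
 *
 *      (void)vm_map_remove(map, base, base + size);
 */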
3145
3146 /*
3147  *      vm_map_check_protection:
3148  *
3149  *      Assert that the target map allows the specified privilege on the
3150  *      entire address region given.  The entire region must be allocated.
3151  *
3152  *      WARNING!  This code does not and should not check whether the
3153  *      contents of the region are accessible.  For example, a smaller
3154  *      file might be mapped into a larger address space.
3155  *
3156  *      NOTE!  This code is also called by munmap().
3157  *
3158  *      The map must be locked.  A read lock is sufficient.
3159  */
3160 boolean_t
3161 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3162                         vm_prot_t protection)
3163 {
3164         vm_map_entry_t entry;
3165         vm_map_entry_t tmp_entry;
3166
3167         if (!vm_map_lookup_entry(map, start, &tmp_entry))
3168                 return (FALSE);
3169         entry = tmp_entry;
3170
3171         while (start < end) {
3172                 /*
3173                  * No holes allowed!
3174                  */
3175                 if (start < entry->start)
3176                         return (FALSE);
3177                 /*
3178                  * Check protection associated with entry.
3179                  */
3180                 if ((entry->protection & protection) != protection)
3181                         return (FALSE);
3182                 /* go to next entry */
3183                 start = entry->end;
3184                 entry = entry->next;
3185         }
3186         return (TRUE);
3187 }
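
/*
 * Illustrative sketch (hypothetical names): per the contract above,
 * the caller must hold at least a read lock across the check.
 *
 *      vm_map_lock_read(map);
 *      ok = vm_map_check_protection(map, trunc_page(uaddr),
 *          round_page(uaddr + len), VM_PROT_READ);
 *      vm_map_unlock_read(map);
 */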
3188
3189 /*
3190  *      vm_map_copy_entry:
3191  *
3192  *      Copies the contents of the source entry to the destination
3193  *      entry.  The entries *must* be aligned properly.
3194  */
3195 static void
3196 vm_map_copy_entry(
3197         vm_map_t src_map,
3198         vm_map_t dst_map,
3199         vm_map_entry_t src_entry,
3200         vm_map_entry_t dst_entry,
3201         vm_ooffset_t *fork_charge)
3202 {
3203         vm_object_t src_object;
3204         vm_map_entry_t fake_entry;
3205         vm_offset_t size;
3206         struct ucred *cred;
3207         int charged;
3208
3209         VM_MAP_ASSERT_LOCKED(dst_map);
3210
3211         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3212                 return;
3213
3214         if (src_entry->wired_count == 0 ||
3215             (src_entry->protection & VM_PROT_WRITE) == 0) {
3216                 /*
3217                  * If the source entry is marked needs_copy, it is already
3218                  * write-protected.
3219                  */
3220                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3221                     (src_entry->protection & VM_PROT_WRITE) != 0) {
3222                         pmap_protect(src_map->pmap,
3223                             src_entry->start,
3224                             src_entry->end,
3225                             src_entry->protection & ~VM_PROT_WRITE);
3226                 }
3227
3228                 /*
3229                  * Make a copy of the object.
3230                  */
3231                 size = src_entry->end - src_entry->start;
3232                 if ((src_object = src_entry->object.vm_object) != NULL) {
3233                         VM_OBJECT_WLOCK(src_object);
3234                         charged = ENTRY_CHARGED(src_entry);
3235                         if (src_object->handle == NULL &&
3236                             (src_object->type == OBJT_DEFAULT ||
3237                             src_object->type == OBJT_SWAP)) {
3238                                 vm_object_collapse(src_object);
3239                                 if ((src_object->flags & (OBJ_NOSPLIT |
3240                                     OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3241                                         vm_object_split(src_entry);
3242                                         src_object =
3243                                             src_entry->object.vm_object;
3244                                 }
3245                         }
3246                         vm_object_reference_locked(src_object);
3247                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3248                         if (src_entry->cred != NULL &&
3249                             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3250                                 KASSERT(src_object->cred == NULL,
3251                                     ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3252                                      src_object));
3253                                 src_object->cred = src_entry->cred;
3254                                 src_object->charge = size;
3255                         }
3256                         VM_OBJECT_WUNLOCK(src_object);
3257                         dst_entry->object.vm_object = src_object;
3258                         if (charged) {
3259                                 cred = curthread->td_ucred;
3260                                 crhold(cred);
3261                                 dst_entry->cred = cred;
3262                                 *fork_charge += size;
3263                                 if (!(src_entry->eflags &
3264                                       MAP_ENTRY_NEEDS_COPY)) {
3265                                         crhold(cred);
3266                                         src_entry->cred = cred;
3267                                         *fork_charge += size;
3268                                 }
3269                         }
3270                         src_entry->eflags |= MAP_ENTRY_COW |
3271                             MAP_ENTRY_NEEDS_COPY;
3272                         dst_entry->eflags |= MAP_ENTRY_COW |
3273                             MAP_ENTRY_NEEDS_COPY;
3274                         dst_entry->offset = src_entry->offset;
3275                         if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3276                                 /*
3277                                  * MAP_ENTRY_VN_WRITECNT cannot
3278                                  * indicate write reference from
3279                                  * src_entry, since the entry is
3280                                  * marked as needs copy.  Allocate a
3281                                  * fake entry that is used to
3282                                  * decrement object->un_pager.vnp.writecount
3283                                  * at the appropriate time.  Attach
3284                                  * fake_entry to the deferred list.
3285                                  */
3286                                 fake_entry = vm_map_entry_create(dst_map);
3287                                 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3288                                 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3289                                 vm_object_reference(src_object);
3290                                 fake_entry->object.vm_object = src_object;
3291                                 fake_entry->start = src_entry->start;
3292                                 fake_entry->end = src_entry->end;
3293                                 fake_entry->next = curthread->td_map_def_user;
3294                                 curthread->td_map_def_user = fake_entry;
3295                         }
3296
3297                         pmap_copy(dst_map->pmap, src_map->pmap,
3298                             dst_entry->start, dst_entry->end - dst_entry->start,
3299                             src_entry->start);
3300                 } else {
3301                         dst_entry->object.vm_object = NULL;
3302                         dst_entry->offset = 0;
3303                         if (src_entry->cred != NULL) {
3304                                 dst_entry->cred = curthread->td_ucred;
3305                                 crhold(dst_entry->cred);
3306                                 *fork_charge += size;
3307                         }
3308                 }
3309         } else {
3310                 /*
3311                  * We don't want to make writeable wired pages copy-on-write.
3312                  * Immediately copy these pages into the new map by simulating
3313                  * page faults.  The new pages are pageable.
3314                  */
3315                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3316                     fork_charge);
3317         }
3318 }
3319
3320 /*
3321  * vmspace_map_entry_forked:
3322  * Update the newly-forked vmspace each time a map entry is inherited
3323  * or copied.  The values for vm_dsize and vm_tsize are approximate
3324  * (and mostly obsolete in the face of mmap(2) et al.).
3325  */
3326 static void
3327 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3328     vm_map_entry_t entry)
3329 {
3330         vm_size_t entrysize;
3331         vm_offset_t newend;
3332
3333         if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
3334                 return;
3335         entrysize = entry->end - entry->start;
3336         vm2->vm_map.size += entrysize;
3337         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3338                 vm2->vm_ssize += btoc(entrysize);
3339         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3340             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3341                 newend = MIN(entry->end,
3342                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3343                 vm2->vm_dsize += btoc(newend - entry->start);
3344         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3345             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3346                 newend = MIN(entry->end,
3347                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3348                 vm2->vm_tsize += btoc(newend - entry->start);
3349         }
3350 }
3351
3352 /*
3353  * vmspace_fork:
3354  * Create a new process vmspace structure and vm_map
3355  * based on those of an existing process.  The new map
3356  * is based on the old map, according to the inheritance
3357  * values on the regions in that map.
3358  *
3359  * XXX It might be worth coalescing the entries added to the new vmspace.
3360  *
3361  * The source map must not be locked.
3362  */
3363 struct vmspace *
3364 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3365 {
3366         struct vmspace *vm2;
3367         vm_map_t new_map, old_map;
3368         vm_map_entry_t new_entry, old_entry;
3369         vm_object_t object;
3370         int locked;
3371         vm_inherit_t inh;
3372
3373         old_map = &vm1->vm_map;
3374         /* Copy immutable fields of vm1 to vm2. */
3375         vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
3376             pmap_pinit);
3377         if (vm2 == NULL)
3378                 return (NULL);
3379         vm2->vm_taddr = vm1->vm_taddr;
3380         vm2->vm_daddr = vm1->vm_daddr;
3381         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3382         vm_map_lock(old_map);
3383         if (old_map->busy)
3384                 vm_map_wait_busy(old_map);
3385         new_map = &vm2->vm_map;
3386         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3387         KASSERT(locked, ("vmspace_fork: lock failed"));
3388
3389         old_entry = old_map->header.next;
3390
3391         while (old_entry != &old_map->header) {
3392                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3393                         panic("vm_map_fork: encountered a submap");
3394
3395                 inh = old_entry->inheritance;
3396                 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
3397                     inh != VM_INHERIT_NONE)
3398                         inh = VM_INHERIT_COPY;
3399
3400                 switch (inh) {
3401                 case VM_INHERIT_NONE:
3402                         break;
3403
3404                 case VM_INHERIT_SHARE:
3405                         /*
3406                          * Clone the entry, creating the shared object if necessary.
3407                          */
3408                         object = old_entry->object.vm_object;
3409                         if (object == NULL) {
3410                                 object = vm_object_allocate(OBJT_DEFAULT,
3411                                         atop(old_entry->end - old_entry->start));
3412                                 old_entry->object.vm_object = object;
3413                                 old_entry->offset = 0;
3414                                 if (old_entry->cred != NULL) {
3415                                         object->cred = old_entry->cred;
3416                                         object->charge = old_entry->end -
3417                                             old_entry->start;
3418                                         old_entry->cred = NULL;
3419                                 }
3420                         }
3421
3422                         /*
3423                          * Add the reference before calling vm_object_shadow
3424                          * to ensure that a shadow object is created.
3425                          */
3426                         vm_object_reference(object);
3427                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3428                                 vm_object_shadow(&old_entry->object.vm_object,
3429                                     &old_entry->offset,
3430                                     old_entry->end - old_entry->start);
3431                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3432                                 /* Transfer the second reference too. */
3433                                 vm_object_reference(
3434                                     old_entry->object.vm_object);
3435
3436                                 /*
3437                                  * As in vm_map_simplify_entry(), the
3438                                  * vnode lock will not be acquired in
3439                                  * this call to vm_object_deallocate().
3440                                  */
3441                                 vm_object_deallocate(object);
3442                                 object = old_entry->object.vm_object;
3443                         }
3444                         VM_OBJECT_WLOCK(object);
3445                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
3446                         if (old_entry->cred != NULL) {
3447                                 KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3448                                 object->cred = old_entry->cred;
3449                                 object->charge = old_entry->end - old_entry->start;
3450                                 old_entry->cred = NULL;
3451                         }
3452
3453                         /*
3454                          * Assert the correct state of the vnode
3455                          * v_writecount while the object is locked, so
3456                          * that it does not have to be relocked later
3457                          * just for the assertion.
3458                          */
3459                         if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3460                             object->type == OBJT_VNODE) {
3461                                 KASSERT(((struct vnode *)object->handle)->
3462                                     v_writecount > 0,
3463                                     ("vmspace_fork: v_writecount %p", object));
3464                                 KASSERT(object->un_pager.vnp.writemappings > 0,
3465                                     ("vmspace_fork: vnp.writecount %p",
3466                                     object));
3467                         }
3468                         VM_OBJECT_WUNLOCK(object);
3469
3470                         /*
3471                          * Clone the entry, referencing the shared object.
3472                          */
3473                         new_entry = vm_map_entry_create(new_map);
3474                         *new_entry = *old_entry;
3475                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3476                             MAP_ENTRY_IN_TRANSITION);
3477                         new_entry->wiring_thread = NULL;
3478                         new_entry->wired_count = 0;
3479                         if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3480                                 vnode_pager_update_writecount(object,
3481                                     new_entry->start, new_entry->end);
3482                         }
3483
3484                         /*
3485                          * Insert the entry into the new map -- we know we're
3486                          * inserting at the end of the new map.
3487                          */
3488                         vm_map_entry_link(new_map, new_map->header.prev,
3489                             new_entry);
3490                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3491
3492                         /*
3493                          * Update the physical map
3494                          */
3495                         pmap_copy(new_map->pmap, old_map->pmap,
3496                             new_entry->start,
3497                             (old_entry->end - old_entry->start),
3498                             old_entry->start);
3499                         break;
3500
3501                 case VM_INHERIT_COPY:
3502                         /*
3503                          * Clone the entry and link into the map.
3504                          */
3505                         new_entry = vm_map_entry_create(new_map);
3506                         *new_entry = *old_entry;
3507                         /*
3508                          * Copied entry is COW over the old object.
3509                          */
3510                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3511                             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3512                         new_entry->wiring_thread = NULL;
3513                         new_entry->wired_count = 0;
3514                         new_entry->object.vm_object = NULL;
3515                         new_entry->cred = NULL;
3516                         vm_map_entry_link(new_map, new_map->header.prev,
3517                             new_entry);
3518                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3519                         vm_map_copy_entry(old_map, new_map, old_entry,
3520                             new_entry, fork_charge);
3521                         break;
3522
3523                 case VM_INHERIT_ZERO:
3524                         /*
3525                          * Create a new anonymous mapping entry modelled
3526                          * on the old one.
3527                          */
3528                         new_entry = vm_map_entry_create(new_map);
3529                         memset(new_entry, 0, sizeof(*new_entry));
3530
3531                         new_entry->start = old_entry->start;
3532                         new_entry->end = old_entry->end;
3533                         new_entry->eflags = old_entry->eflags &
3534                             ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
3535                             MAP_ENTRY_VN_WRITECNT);
3536                         new_entry->protection = old_entry->protection;
3537                         new_entry->max_protection = old_entry->max_protection;
3538                         new_entry->inheritance = VM_INHERIT_ZERO;
3539
3540                         vm_map_entry_link(new_map, new_map->header.prev,
3541                             new_entry);
3542                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3543
3544                         new_entry->cred = curthread->td_ucred;
3545                         crhold(new_entry->cred);
3546                         *fork_charge += (new_entry->end - new_entry->start);
3547
3548                         break;
3549                 }
3550                 old_entry = old_entry->next;
3551         }
3552         /*
3553          * Use inlined vm_map_unlock() to postpone handling the deferred
3554          * map entries, which cannot be done until both old_map and
3555          * new_map locks are released.
3556          */
3557         sx_xunlock(&old_map->lock);
3558         sx_xunlock(&new_map->lock);
3559         vm_map_process_deferred();
3560
3561         return (vm2);
3562 }
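
/*
 * Illustrative sketch (an assumption about the userland-facing path,
 * not code in this file): the per-entry inheritance consulted above
 * is set with minherit(2).  Marking a region VM_INHERIT_ZERO, for
 * example, makes the child observe zero-filled pages after fork(2);
 * "p" and "len" are hypothetical.
 *
 *      minherit(p, len, INHERIT_ZERO);
 *      if (fork() == 0)
 *              assert(((char *)p)[0] == 0);
 */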
3563
3564 /*
3565  * Create a process's stack for exec_new_vmspace().  This function is never
3566  * asked to wire the newly created stack.
3567  */
3568 int
3569 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3570     vm_prot_t prot, vm_prot_t max, int cow)
3571 {
3572         vm_size_t growsize, init_ssize;
3573         rlim_t vmemlim;
3574         int rv;
3575
3576         MPASS((map->flags & MAP_WIREFUTURE) == 0);
3577         growsize = sgrowsiz;
3578         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3579         vm_map_lock(map);
3580         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
3581         /* If we would blow our VMEM resource limit, no go */
3582         if (map->size + init_ssize > vmemlim) {
3583                 rv = KERN_NO_SPACE;
3584                 goto out;
3585         }
3586         rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
3587             max, cow);
3588 out:
3589         vm_map_unlock(map);
3590         return (rv);
3591 }
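
/*
 * Illustrative sketch (an assumption about the userland-facing path,
 * not code in this file): mmap(2) with MAP_STACK requests the same
 * grows-down layout that vm_map_stack_locked() builds below; "size"
 * is hypothetical and must leave room for the guard pages.
 *
 *      void *stk = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *          MAP_STACK | MAP_ANON, -1, 0);
 */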
3592
3593 static int stack_guard_page = 1;
3594 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
3595     &stack_guard_page, 0,
3596     "Specifies the number of guard pages for a stack that grows");
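
/*
 * Illustrative note (not in the original file): being CTLFLAG_RWTUN,
 * this knob is settable both as a boot-time tunable and at runtime,
 * e.g. "sysctl security.bsd.stack_guard_page=N".
 */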
3597
3598 static int
3599 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3600     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
3601 {
3602         vm_map_entry_t new_entry, prev_entry;
3603         vm_offset_t bot, gap_bot, gap_top, top;
3604         vm_size_t init_ssize, sgp;
3605         int orient, rv;
3606
3607         /*
3608          * The stack orientation is piggybacked with the cow argument.
3609          * Extract it into orient and mask the cow argument so that we
3610          * don't pass it around further.
3611          */
3612         orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
3613         KASSERT(orient != 0, ("No stack grow direction"));
3614         KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
3615             ("bi-dir stack"));
3616
3617         if (addrbos < vm_map_min(map) ||
3618             addrbos + max_ssize > vm_map_max(map) ||
3619             addrbos + max_ssize <= addrbos)
3620                 return (KERN_INVALID_ADDRESS);
3621         sgp = (vm_size_t)stack_guard_page * PAGE_SIZE;
3622         if (sgp >= max_ssize)
3623                 return (KERN_INVALID_ARGUMENT);
3624
3625         init_ssize = growsize;
3626         if (max_ssize < init_ssize + sgp)
3627                 init_ssize = max_ssize - sgp;
3628
3629         /* If addr is already mapped, no go */
3630         if (vm_map_lookup_entry(map, addrbos, &prev_entry))
3631                 return (KERN_NO_SPACE);
3632
3633         /*
3634          * If we can't accommodate max_ssize in the current mapping, no go.
3635          */
3636         if (prev_entry->next->start < addrbos + max_ssize)
3637                 return (KERN_NO_SPACE);
3638
3639         /*
3640          * We initially map a stack of only init_ssize.  We will grow as
3641          * needed later.  Depending on the orientation of the stack (i.e.
3642          * the grow direction) we either map at the top of the range, the
3643          * bottom of the range or in the middle.
3644          *
3645          * Note: we would normally expect prot and max to be VM_PROT_ALL,
3646          * and cow to be 0.  Possibly we should eliminate these as input
3647          * parameters, and just pass these values here in the insert call.
3648          */
3649         if (orient == MAP_STACK_GROWS_DOWN) {
3650                 bot = addrbos + max_ssize - init_ssize;
3651                 top = bot + init_ssize;
3652                 gap_bot = addrbos;
3653                 gap_top = bot;
3654         } else /* if (orient == MAP_STACK_GROWS_UP) */ {
3655                 bot = addrbos;
3656                 top = bot + init_ssize;
3657                 gap_bot = top;
3658                 gap_top = addrbos + max_ssize;
3659         }
3660         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3661         if (rv != KERN_SUCCESS)
3662                 return (rv);
3663         new_entry = prev_entry->next;
3664         KASSERT(new_entry->end == top || new_entry->start == bot,
3665             ("Bad entry start/end for new stack entry"));
3666         KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
3667             (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
3668             ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
3669         KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
3670             (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
3671             ("new entry lacks MAP_ENTRY_GROWS_UP"));
3672         rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
3673             VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
3674             MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
3675         if (rv != KERN_SUCCESS)
3676                 (void)vm_map_delete(map, bot, top);
3677         return (rv);
3678 }
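
/*
 * Illustrative layout sketch, derived from the code above, for the
 * MAP_STACK_GROWS_DOWN case: the committed stack sits at the top of
 * the reservation and the remainder becomes the gap entry that
 * vm_map_growstack() consumes as the stack grows.
 *
 *      addrbos                       bot                  top
 *         |<-------- gap entry ------>|<--- init_ssize --->|
 *         | (MAP_CREATE_STACK_GAP_DN) |   (stack entry)    |
 *         |<-------------------- max_ssize --------------->|
 */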
3679
3680 /*
3681  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
3682  * successfully grow the stack.
3683  */
3684 static int
3685 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
3686 {
3687         vm_map_entry_t stack_entry;
3688         struct proc *p;
3689         struct vmspace *vm;
3690         struct ucred *cred;
3691         vm_offset_t gap_end, gap_start, grow_start;
3692         size_t grow_amount, guard, max_grow;
3693         rlim_t lmemlim, stacklim, vmemlim;
3694         int rv, rv1;
3695         bool gap_deleted, grow_down, is_procstack;
3696 #ifdef notyet
3697         uint64_t limit;
3698 #endif
3699 #ifdef RACCT
3700         int error;
3701 #endif
3702
3703         p = curproc;
3704         vm = p->p_vmspace;
3705
3706         /*
3707          * Disallow stack growth when the access is performed by a
3708          * debugger or AIO daemon.  The reason is that the wrong
3709          * resource limits are applied.
3710          */
3711         if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL)
3712                 return (KERN_FAILURE);
3713
3714         MPASS(!map->system_map);
3715
3716         guard = stack_guard_page * PAGE_SIZE;
3717         lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
3718         stacklim = lim_cur(curthread, RLIMIT_STACK);
3719         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
3720 retry:
3721         /* If addr is not in a hole for a stack grow area, no need to grow. */
3722         if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
3723                 return (KERN_FAILURE);
3724         if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
3725                 return (KERN_SUCCESS);
3726         if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
3727                 stack_entry = gap_entry->next;
3728                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
3729                     stack_entry->start != gap_entry->end)
3730                         return (KERN_FAILURE);
3731                 grow_amount = round_page(stack_entry->start - addr);
3732                 grow_down = true;
3733         } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
3734                 stack_entry = gap_entry->prev;
3735                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
3736                     stack_entry->end != gap_entry->start)
3737                         return (KERN_FAILURE);
3738                 grow_amount = round_page(addr + 1 - stack_entry->end);
3739                 grow_down = false;
3740         } else {
3741                 return (KERN_FAILURE);
3742         }
3743         max_grow = gap_entry->end - gap_entry->start;
3744         if (guard > max_grow)
3745                 return (KERN_NO_SPACE);
3746         max_grow -= guard;
3747         if (grow_amount > max_grow)
3748                 return (KERN_NO_SPACE);
3749
3750         /*
3751          * If this is the main process stack, see if we're over the stack
3752          * limit.
3753          */
3754         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
3755             addr < (vm_offset_t)p->p_sysent->sv_usrstack;
3756         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
3757                 return (KERN_NO_SPACE);
3758
3759 #ifdef RACCT
3760         if (racct_enable) {
3761                 PROC_LOCK(p);
3762                 if (is_procstack && racct_set(p, RACCT_STACK,
3763                     ctob(vm->vm_ssize) + grow_amount)) {
3764                         PROC_UNLOCK(p);
3765                         return (KERN_NO_SPACE);
3766                 }
3767                 PROC_UNLOCK(p);
3768         }
3769 #endif
3770
3771         grow_amount = roundup(grow_amount, sgrowsiz);
3772         if (grow_amount > max_grow)
3773                 grow_amount = max_grow;
3774         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3775                 grow_amount = trunc_page((vm_size_t)stacklim) -
3776                     ctob(vm->vm_ssize);
3777         }
3778
3779 #ifdef notyet
3780         PROC_LOCK(p);
3781         limit = racct_get_available(p, RACCT_STACK);
3782         PROC_UNLOCK(p);
3783         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3784                 grow_amount = limit - ctob(vm->vm_ssize);
3785 #endif
3786
3787         if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
3788                 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3789                         rv = KERN_NO_SPACE;
3790                         goto out;
3791                 }
3792 #ifdef RACCT
3793                 if (racct_enable) {
3794                         PROC_LOCK(p);
3795                         if (racct_set(p, RACCT_MEMLOCK,
3796                             ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3797                                 PROC_UNLOCK(p);
3798                                 rv = KERN_NO_SPACE;
3799                                 goto out;
3800                         }
3801                         PROC_UNLOCK(p);
3802                 }
3803 #endif
3804         }
3805
3806         /* If we would blow our VMEM resource limit, no go */
3807         if (map->size + grow_amount > vmemlim) {
3808                 rv = KERN_NO_SPACE;
3809                 goto out;
3810         }
3811 #ifdef RACCT
3812         if (racct_enable) {
3813                 PROC_LOCK(p);
3814                 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3815                         PROC_UNLOCK(p);
3816                         rv = KERN_NO_SPACE;
3817                         goto out;
3818                 }
3819                 PROC_UNLOCK(p);
3820         }
3821 #endif
3822
3823         if (vm_map_lock_upgrade(map)) {
3824                 gap_entry = NULL;
3825                 vm_map_lock_read(map);
3826                 goto retry;
3827         }
3828
3829         if (grow_down) {
3830                 grow_start = gap_entry->end - grow_amount;
3831                 if (gap_entry->start + grow_amount == gap_entry->end) {
3832                         gap_start = gap_entry->start;
3833                         gap_end = gap_entry->end;
3834                         vm_map_entry_delete(map, gap_entry);
3835                         gap_deleted = true;
3836                 } else {
3837                         MPASS(gap_entry->start < gap_entry->end - grow_amount);
3838                         gap_entry->end -= grow_amount;
3839                         vm_map_entry_resize_free(map, gap_entry);
3840                         gap_deleted = false;
3841                 }
3842                 rv = vm_map_insert(map, NULL, 0, grow_start,
3843                     grow_start + grow_amount,
3844                     stack_entry->protection, stack_entry->max_protection,
3845                     MAP_STACK_GROWS_DOWN);
3846                 if (rv != KERN_SUCCESS) {
3847                         if (gap_deleted) {
3848                                 rv1 = vm_map_insert(map, NULL, 0, gap_start,
3849                                     gap_end, VM_PROT_NONE, VM_PROT_NONE,
3850                                     MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
3851                                 MPASS(rv1 == KERN_SUCCESS);
3852                         } else {
3853                                 gap_entry->end += grow_amount;
3854                                 vm_map_entry_resize_free(map, gap_entry);
3855                         }
3856                 }
3857         } else {
3858                 grow_start = stack_entry->end;
3859                 cred = stack_entry->cred;
3860                 if (cred == NULL && stack_entry->object.vm_object != NULL)
3861                         cred = stack_entry->object.vm_object->cred;
3862                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3863                         rv = KERN_NO_SPACE;
3864                 /* Grow the underlying object if applicable. */
3865                 else if (stack_entry->object.vm_object == NULL ||
3866                     vm_object_coalesce(stack_entry->object.vm_object,
3867                     stack_entry->offset,
3868                     (vm_size_t)(stack_entry->end - stack_entry->start),
3869                     (vm_size_t)grow_amount, cred != NULL)) {
3870                         if (gap_entry->start + grow_amount == gap_entry->end)
3871                                 vm_map_entry_delete(map, gap_entry);
3872                         else
3873                                 gap_entry->start += grow_amount;
3874                         stack_entry->end += grow_amount;
3875                         map->size += grow_amount;
3876                         vm_map_entry_resize_free(map, stack_entry);
3877                         rv = KERN_SUCCESS;
3878                 } else
3879                         rv = KERN_FAILURE;
3880         }
3881         if (rv == KERN_SUCCESS && is_procstack)
3882                 vm->vm_ssize += btoc(grow_amount);
3883
3884         /*
3885          * Heed the MAP_WIREFUTURE flag if it was set for this process.
3886          */
3887         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
3888                 vm_map_unlock(map);
3889                 vm_map_wire(map, grow_start, grow_start + grow_amount,
3890                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
3891                 vm_map_lock_read(map);
3892         } else
3893                 vm_map_lock_downgrade(map);
3894
3895 out:
3896 #ifdef RACCT
3897         if (racct_enable && rv != KERN_SUCCESS) {
3898                 PROC_LOCK(p);
3899                 error = racct_set(p, RACCT_VMEM, map->size);
3900                 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3901                 if (!old_mlock) {
3902                         error = racct_set(p, RACCT_MEMLOCK,
3903                             ptoa(pmap_wired_count(map->pmap)));
3904                         KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3905                 }
3906                 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3907                 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3908                 PROC_UNLOCK(p);
3909         }
3910 #endif
3911
3912         return (rv);
3913 }
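
/*
 * Illustrative note (not in the original file): growth is normally
 * triggered from vm_map_lookup() below, which calls vm_map_growstack()
 * when a fault tagged VM_PROT_FAULT_LOOKUP lands on a guard entry
 * carrying MAP_ENTRY_STACK_GAP_DN or MAP_ENTRY_STACK_GAP_UP.
 */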
3914
3915 /*
3916  * Unshare the specified VM space for exec.  A new vmspace is always
3917  * created; the new vmspace starts out with no mappings.
3918  */
3919 int
3920 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3921 {
3922         struct vmspace *oldvmspace = p->p_vmspace;
3923         struct vmspace *newvmspace;
3924
3925         KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3926             ("vmspace_exec recursed"));
3927         newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
3928         if (newvmspace == NULL)
3929                 return (ENOMEM);
3930         newvmspace->vm_swrss = oldvmspace->vm_swrss;
3931         /*
3932          * This code is written like this for prototype purposes.  The
3933          * goal is to avoid running down the vmspace here, but to let
3934          * the other processes still using the vmspace run it down
3935          * eventually.  Even though there is little or no chance of
3936          * blocking here, it is a good idea to keep this form for future mods.
3937          */
3938         PROC_VMSPACE_LOCK(p);
3939         p->p_vmspace = newvmspace;
3940         PROC_VMSPACE_UNLOCK(p);
3941         if (p == curthread->td_proc)
3942                 pmap_activate(curthread);
3943         curthread->td_pflags |= TDP_EXECVMSPC;
3944         return (0);
3945 }
3946
3947 /*
3948  * Unshare the specified VM space for forcing COW.  This
3949  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3950  */
3951 int
3952 vmspace_unshare(struct proc *p)
3953 {
3954         struct vmspace *oldvmspace = p->p_vmspace;
3955         struct vmspace *newvmspace;
3956         vm_ooffset_t fork_charge;
3957
3958         if (oldvmspace->vm_refcnt == 1)
3959                 return (0);
3960         fork_charge = 0;
3961         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3962         if (newvmspace == NULL)
3963                 return (ENOMEM);
3964         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3965                 vmspace_free(newvmspace);
3966                 return (ENOMEM);
3967         }
3968         PROC_VMSPACE_LOCK(p);
3969         p->p_vmspace = newvmspace;
3970         PROC_VMSPACE_UNLOCK(p);
3971         if (p == curthread->td_proc)
3972                 pmap_activate(curthread);
3973         vmspace_free(oldvmspace);
3974         return (0);
3975 }
3976
3977 /*
3978  *      vm_map_lookup:
3979  *
3980  *      Finds the VM object, offset, and
3981  *      protection for a given virtual address in the
3982  *      specified map, assuming a page fault of the
3983  *      type specified.
3984  *
3985  *      Leaves the map in question locked for read; return
3986  *      values are guaranteed until a vm_map_lookup_done
3987  *      call is performed.  Note that the map argument
3988  *      is in/out; the returned map must be used in
3989  *      the call to vm_map_lookup_done.
3990  *
3991  *      A handle (out_entry) is returned for use in
3992  *      vm_map_lookup_done, to make that fast.
3993  *
3994  *      If a lookup is requested with "write protection"
3995  *      specified, the map may be changed to perform virtual
3996  *      copying operations, although the data referenced will
3997  *      remain the same.
3998  */
3999 int
4000 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
4001               vm_offset_t vaddr,
4002               vm_prot_t fault_typea,
4003               vm_map_entry_t *out_entry,        /* OUT */
4004               vm_object_t *object,              /* OUT */
4005               vm_pindex_t *pindex,              /* OUT */
4006               vm_prot_t *out_prot,              /* OUT */
4007               boolean_t *wired)                 /* OUT */
4008 {
4009         vm_map_entry_t entry;
4010         vm_map_t map = *var_map;
4011         vm_prot_t prot;
4012         vm_prot_t fault_type = fault_typea;
4013         vm_object_t eobject;
4014         vm_size_t size;
4015         struct ucred *cred;
4016
4017 RetryLookup:
4018
4019         vm_map_lock_read(map);
4020
4021 RetryLookupLocked:
4022         /*
4023          * Lookup the faulting address.
4024          */
4025         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
4026                 vm_map_unlock_read(map);
4027                 return (KERN_INVALID_ADDRESS);
4028         }
4029
4030         entry = *out_entry;
4031
4032         /*
4033          * Handle submaps.
4034          */
4035         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4036                 vm_map_t old_map = map;
4037
4038                 *var_map = map = entry->object.sub_map;
4039                 vm_map_unlock_read(old_map);
4040                 goto RetryLookup;
4041         }
4042
4043         /*
4044          * Check whether this task is allowed to have this page.
4045          */
4046         prot = entry->protection;
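	/*
	 * A fault on a guard entry that marks a stack gap may be
	 * resolvable by growing the stack: if vm_map_growstack()
	 * fills the gap, the lookup is retried with the lock held.
	 */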
4047         if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
4048                 fault_typea &= ~VM_PROT_FAULT_LOOKUP;
4049                 if (prot == VM_PROT_NONE && map != kernel_map &&
4050                     (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4051                     (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
4052                     MAP_ENTRY_STACK_GAP_UP)) != 0 &&
4053                     vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
4054                         goto RetryLookupLocked;
4055         }
4056         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4057         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4058                 vm_map_unlock_read(map);
4059                 return (KERN_PROTECTION_FAILURE);
4060         }
4061         KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4062             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4063             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4064             ("entry %p flags %x", entry, entry->eflags));
4065         if ((fault_typea & VM_PROT_COPY) != 0 &&
4066             (entry->max_protection & VM_PROT_WRITE) == 0 &&
4067             (entry->eflags & MAP_ENTRY_COW) == 0) {
4068                 vm_map_unlock_read(map);
4069                 return (KERN_PROTECTION_FAILURE);
4070         }
4071
4072         /*
4073          * If this page is not pageable, we have to get it for all possible
4074          * accesses.
4075          */
4076         *wired = (entry->wired_count != 0);
4077         if (*wired)
4078                 fault_type = entry->protection;
4079         size = entry->end - entry->start;
4080         /*
4081          * If the entry was copy-on-write, resolve the copy or demote the permissions.
4082          */
4083         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4084                 /*
4085                  * If we want to write the page, we may as well handle that
4086                  * now since we've got the map locked.
4087                  *
4088                  * If we don't need to write the page, we just demote the
4089                  * permissions allowed.
4090                  */
4091                 if ((fault_type & VM_PROT_WRITE) != 0 ||
4092                     (fault_typea & VM_PROT_COPY) != 0) {
4093                         /*
4094                          * Make a new object, and place it in the object
4095                          * chain.  Note that no new references have appeared
4096                          * -- one just moved from the map to the new
4097                          * object.
4098                          */
4099                         if (vm_map_lock_upgrade(map))
4100                                 goto RetryLookup;
4101
4102                         if (entry->cred == NULL) {
4103                                 /*
4104                                  * Charge the current thread's cred,
4105                                  * e.g. the debugger's, for the memory.
4106                                  */
4107                                 cred = curthread->td_ucred;
4108                                 crhold(cred);
4109                                 if (!swap_reserve_by_cred(size, cred)) {
4110                                         crfree(cred);
4111                                         vm_map_unlock(map);
4112                                         return (KERN_RESOURCE_SHORTAGE);
4113                                 }
4114                                 entry->cred = cred;
4115                         }
4116                         vm_object_shadow(&entry->object.vm_object,
4117                             &entry->offset, size);
4118                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4119                         eobject = entry->object.vm_object;
4120                         if (eobject->cred != NULL) {
4121                                 /*
4122                                  * The object was not shadowed.
4123                                  */
4124                                 swap_release_by_cred(size, entry->cred);
4125                                 crfree(entry->cred);
4126                                 entry->cred = NULL;
4127                         } else if (entry->cred != NULL) {
4128                                 VM_OBJECT_WLOCK(eobject);
4129                                 eobject->cred = entry->cred;
4130                                 eobject->charge = size;
4131                                 VM_OBJECT_WUNLOCK(eobject);
4132                                 entry->cred = NULL;
4133                         }
4134
4135                         vm_map_lock_downgrade(map);
4136                 } else {
4137                         /*
4138                          * We're attempting to read a copy-on-write page --
4139                          * don't allow writes.
4140                          */
4141                         prot &= ~VM_PROT_WRITE;
4142                 }
4143         }
4144
4145         /*
4146          * Create an object if necessary.
4147          */
4148         if (entry->object.vm_object == NULL &&
4149             !map->system_map) {
4150                 if (vm_map_lock_upgrade(map))
4151                         goto RetryLookup;
4152                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
4153                     atop(size));
4154                 entry->offset = 0;
4155                 if (entry->cred != NULL) {
4156                         VM_OBJECT_WLOCK(entry->object.vm_object);
4157                         entry->object.vm_object->cred = entry->cred;
4158                         entry->object.vm_object->charge = size;
4159                         VM_OBJECT_WUNLOCK(entry->object.vm_object);
4160                         entry->cred = NULL;
4161                 }
4162                 vm_map_lock_downgrade(map);
4163         }
4164
4165         /*
4166          * Return the object/offset from this entry.  If the entry was
4167          * copy-on-write or empty, it has been fixed up.
4168          */
4169         *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset);
4170         *object = entry->object.vm_object;
4171
4172         *out_prot = prot;
4173         return (KERN_SUCCESS);
4174 }
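
/*
 * Illustrative sketch (not compiled): the lookup/done protocol as a
 * fault handler uses it, after the pattern of vm_fault().  Note that
 * the map argument is in/out: submap traversal or a lock upgrade may
 * replace it, so vm_map_lookup_done() must get the returned map.
 * "map", "vaddr", and "fault_type" are assumed caller context.
 */
#if 0
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int result;

	result = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
	    &pindex, &prot, &wired);
	if (result != KERN_SUCCESS)
		return (result);
	/* ... resolve the fault against (object, pindex) under "prot" ... */
	vm_map_lookup_done(map, entry);
#endif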
4175
4176 /*
4177  *      vm_map_lookup_locked:
4178  *
4179  *      Lookup the faulting address.  A version of vm_map_lookup that
4180  *      returns KERN_FAILURE instead of blocking on the map lock or memory allocation.
4181  */
4182 int
4183 vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
4184                      vm_offset_t vaddr,
4185                      vm_prot_t fault_typea,
4186                      vm_map_entry_t *out_entry, /* OUT */
4187                      vm_object_t *object,       /* OUT */
4188                      vm_pindex_t *pindex,       /* OUT */
4189                      vm_prot_t *out_prot,       /* OUT */
4190                      boolean_t *wired)          /* OUT */
4191 {
4192         vm_map_entry_t entry;
4193         vm_map_t map = *var_map;
4194         vm_prot_t prot;
4195         vm_prot_t fault_type = fault_typea;
4196
4197         /*
4198          * Lookup the faulting address.
4199          */
4200         if (!vm_map_lookup_entry(map, vaddr, out_entry))
4201                 return (KERN_INVALID_ADDRESS);
4202
4203         entry = *out_entry;
4204
4205         /*
4206          * Fail if the entry refers to a submap.
4207          */
4208         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4209                 return (KERN_FAILURE);
4210
4211         /*
4212          * Check whether this task is allowed to have this page.
4213          */
4214         prot = entry->protection;
4215         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4216         if ((fault_type & prot) != fault_type)
4217                 return (KERN_PROTECTION_FAILURE);
4218
4219         /*
4220          * If this page is not pageable, we have to get it for all possible
4221          * accesses.
4222          */
4223         *wired = (entry->wired_count != 0);
4224         if (*wired)
4225                 fault_type = entry->protection;
4226
4227         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4228                 /*
4229                  * Fail if the entry was copy-on-write for a write fault.
4230                  */
4231                 if (fault_type & VM_PROT_WRITE)
4232                         return (KERN_FAILURE);
4233                 /*
4234                  * We're attempting to read a copy-on-write page --
4235                  * don't allow writes.
4236                  */
4237                 prot &= ~VM_PROT_WRITE;
4238         }
4239
4240         /*
4241          * Fail if an object should be created.
4242          */
4243         if (entry->object.vm_object == NULL && !map->system_map)
4244                 return (KERN_FAILURE);
4245
4246         /*
4247          * Return the object/offset from this entry.  If the entry was
4248          * copy-on-write or empty, it has been fixed up.
4249          */
4250         *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset);
4251         *object = entry->object.vm_object;
4252
4253         *out_prot = prot;
4254         return (KERN_SUCCESS);
4255 }
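
/*
 * Illustrative sketch (not compiled): revalidating an earlier lookup
 * while the map read lock is already held, as the fault code does
 * after reacquiring the lock.  On failure the caller falls back to
 * the full, potentially blocking vm_map_lookup().  The retry_* names
 * are illustrative.
 */
#if 0
	result = vm_map_lookup_locked(&map, vaddr, fault_type, &retry_entry,
	    &retry_object, &retry_pindex, &retry_prot, &wired);
	if (result != KERN_SUCCESS) {
		vm_map_unlock_read(map);
		goto RetryFault;	/* redo the blocking lookup */
	}
#endif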
4256
4257 /*
4258  *      vm_map_lookup_done:
4259  *
4260  *      Releases locks acquired by a vm_map_lookup
4261  *      (according to the handle returned by that lookup).
4262  */
4263 void
4264 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4265 {
4266         /*
4267          * Unlock the main-level map
4268          */
4269         vm_map_unlock_read(map);
4270 }
4271
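/*
 * Out-of-line accessors for use by kernel modules.  The corresponding
 * inline accessors in vm_map.h depend on the layout of struct vm_map;
 * these wrappers keep the module KBI stable across layout changes.
 */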
4272 vm_offset_t
4273 vm_map_max_KBI(const struct vm_map *map)
4274 {
4275
4276         return (vm_map_max(map));
4277 }
4278
4279 vm_offset_t
4280 vm_map_min_KBI(const struct vm_map *map)
4281 {
4282
4283         return (vm_map_min(map));
4284 }
4285
4286 pmap_t
4287 vm_map_pmap_KBI(vm_map_t map)
4288 {
4289
4290         return (map->pmap);
4291 }
4292
4293 #include "opt_ddb.h"
4294 #ifdef DDB
4295 #include <sys/kernel.h>
4296
4297 #include <ddb/ddb.h>
4298
4299 static void
4300 vm_map_print(vm_map_t map)
4301 {
4302         vm_map_entry_t entry;
4303
4304         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4305             (void *)map,
4306             (void *)map->pmap, map->nentries, map->timestamp);
4307
4308         db_indent += 2;
4309         for (entry = map->header.next; entry != &map->header;
4310             entry = entry->next) {
4311                 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x\n",
4312                     (void *)entry, (void *)entry->start, (void *)entry->end,
4313                     entry->eflags);
4314                 {
4315                         static const char * const inheritance_name[4] =
4316                         {"share", "copy", "none", "donate_copy"};
4317
4318                         db_iprintf(" prot=%x/%x/%s",
4319                             entry->protection,
4320                             entry->max_protection,
4321                             inheritance_name[(int)(unsigned char)entry->inheritance]);
4322                         if (entry->wired_count != 0)
4323                                 db_printf(", wired");
4324                 }
4325                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4326                         db_printf(", share=%p, offset=0x%jx\n",
4327                             (void *)entry->object.sub_map,
4328                             (uintmax_t)entry->offset);
4329                         if ((entry->prev == &map->header) ||
4330                             (entry->prev->object.sub_map !=
4331                                 entry->object.sub_map)) {
4332                                 db_indent += 2;
4333                                 vm_map_print((vm_map_t)entry->object.sub_map);
4334                                 db_indent -= 2;
4335                         }
4336                 } else {
4337                         if (entry->cred != NULL)
4338                                 db_printf(", ruid %d", entry->cred->cr_ruid);
4339                         db_printf(", object=%p, offset=0x%jx",
4340                             (void *)entry->object.vm_object,
4341                             (uintmax_t)entry->offset);
4342                         if (entry->object.vm_object && entry->object.vm_object->cred)
4343                                 db_printf(", obj ruid %d charge %jx",
4344                                     entry->object.vm_object->cred->cr_ruid,
4345                                     (uintmax_t)entry->object.vm_object->charge);
4346                         if (entry->eflags & MAP_ENTRY_COW)
4347                                 db_printf(", copy (%s)",
4348                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4349                         db_printf("\n");
4350
4351                         if ((entry->prev == &map->header) ||
4352                             (entry->prev->object.vm_object !=
4353                                 entry->object.vm_object)) {
4354                                 db_indent += 2;
4355                                 vm_object_print((db_expr_t)(intptr_t)
4356                                                 entry->object.vm_object,
4357                                                 0, 0, (char *)0);
4358                                 db_indent -= 2;
4359                         }
4360                 }
4361         }
4362         db_indent -= 2;
4363 }
4364
4365 DB_SHOW_COMMAND(map, map)
4366 {
4367
4368         if (!have_addr) {
4369                 db_printf("usage: show map <addr>\n");
4370                 return;
4371         }
4372         vm_map_print((vm_map_t)addr);
4373 }
4374
4375 DB_SHOW_COMMAND(procvm, procvm)
4376 {
4377         struct proc *p;
4378
4379         if (have_addr) {
4380                 p = db_lookup_proc(addr);
4381         } else {
4382                 p = curproc;
4383         }
4384
4385         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4386             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4387             (void *)vmspace_pmap(p->p_vmspace));
4388
4389         vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4390 }
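
/*
 * Example usage from the ddb(4) prompt (addresses are illustrative):
 *
 *	db> show map 0xfffff80005bb1000
 *	db> show procvm 0xfffff80003a5c530
 *
 * "show procvm" without an address dumps the current process's map.
 */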
4391
4392 #endif /* DDB */