/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *      Virtual memory maps provide for the mapping, protection,
 *      and sharing of virtual memory objects.  In addition,
 *      this module provides for an efficient virtual copy of
 *      memory from one map to another.
 *
 *      Synchronization is required prior to most operations.
 *
 *      Maps consist of an ordered doubly-linked list of simple
 *      entries; a self-adjusting binary search tree of these
 *      entries is used to speed up lookups.
 *
 *      Since portions of maps are specified by start/end addresses,
 *      which may not align with existing map entries, all
 *      routines merely "clip" entries to these start/end values.
 *      [That is, an entry is split into two, bordering at a
 *      start or end value.]  Note that these clippings may not
 *      always be necessary (as the two resulting entries are then
 *      not changed); however, the clipping is done for convenience.
 *
 *      As mentioned above, virtual copy operations are performed
 *      by copying VM object references from one map to
 *      another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static struct vm_object kmapentobj;
static int vmspace_zinit(void *mem, int size, int flags);
static void vmspace_zfini(void *mem, int size);
static int vm_map_zinit(void *mem, int size, int flags);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif

#define ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

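/*
 * Illustrative restatement (not in the original source): an entry is
 * "charged" for swap accounting when it carries its own credential, or
 * when it maps an object that is itself charged and the entry no longer
 * needs a private copy of that object's pages.
 */
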
/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define PROC_VMSPACE_LOCK(p) do { } while (0)
#define PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *      VM_MAP_RANGE_CHECK:     [ internal use only ]
 *
 *      Asserts that the starting and ending region
 *      addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)             \
                {                                       \
                if (start < vm_map_min(map))            \
                        start = vm_map_min(map);        \
                if (end > vm_map_max(map))              \
                        end = vm_map_max(map);          \
                if (start > end)                        \
                        start = end;                    \
                }

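/*
 * Example (illustrative only): for a map whose valid range is
 * [0x1000, 0x9000), VM_MAP_RANGE_CHECK() clamps a request for
 * [0x0, 0xffff) to [0x1000, 0x9000), and degenerates a request lying
 * entirely above the map, such as [0xa000, 0xb000), to the empty range
 * [0x9000, 0x9000).
 */
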
/*
 *      vm_map_startup:
 *
 *      Initialize the vm_map module.  Must be called before
 *      any other vm_map routines.
 *
 *      Map and entry structures are allocated from the general
 *      purpose memory pool with some exceptions:
 *
 *      - The kernel map and kmem submap are allocated statically.
 *      - Kernel map entries are allocated out of a static pool.
 *
 *      These restrictions are necessary since malloc() uses the
 *      maps and requires map entries.
 */

void
vm_map_startup(void)
{
        mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
        mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
            vm_map_zdtor,
#else
            NULL,
#endif
            vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        uma_prealloc(mapzone, MAX_KMAP);
        kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
        uma_prealloc(kmapentzone, MAX_KMAPENT);
        mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static void
vmspace_zfini(void *mem, int size)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;
        vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm->vm_map.pmap = NULL;
        (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
        return (0);
}

static void
vm_map_zfini(void *mem, int size)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        mtx_destroy(&map->system_mtx);
        sx_destroy(&map->lock);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        map->nentries = 0;
        map->size = 0;
        mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
        sx_init(&map->lock, "user map");
        return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        KASSERT(map->nentries == 0,
            ("map %p nentries == %d on free.",
            map, map->nentries));
        KASSERT(map->size == 0,
            ("map %p size == %lu on free.",
            map, (unsigned long)map->size));
}
#endif  /* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
        struct vmspace *vm;

        vm = uma_zalloc(vmspace_zone, M_WAITOK);
        if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) {
                uma_zfree(vmspace_zone, vm);
                return (NULL);
        }
        CTR1(KTR_VM, "vmspace_alloc: %p", vm);
        _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
        vm->vm_refcnt = 1;
        vm->vm_shm = NULL;
        vm->vm_swrss = 0;
        vm->vm_tsize = 0;
        vm->vm_dsize = 0;
        vm->vm_ssize = 0;
        vm->vm_taddr = 0;
        vm->vm_daddr = 0;
        vm->vm_maxsaddr = 0;
        return (vm);
}

void
vm_init2(void)
{
        uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
            (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
             maxproc * 2 + maxfiles);
        vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
            vmspace_zdtor,
#else
            NULL,
#endif
            vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static inline void
vmspace_dofree(struct vmspace *vm)
{

        CTR1(KTR_VM, "vmspace_free: %p", vm);

        /*
         * Make sure any SysV shm is freed, it might not have been in
         * exit1().
         */
        shmexit(vm);

        /*
         * Lock the map, to wait out all other references to it.
         * Delete all of the mappings and pages they hold, then call
         * the pmap module to reclaim anything left.
         */
        (void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
            vm->vm_map.max_offset);

        pmap_release(vmspace_pmap(vm));
        vm->vm_map.pmap = NULL;
        uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

        if (vm->vm_refcnt == 0)
                panic("vmspace_free: attempt to free already freed vmspace");

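        /*
         * Illustrative note: atomic_fetchadd_int() returns the value
         * held before the subtraction, so a result of 1 means this
         * call dropped the last reference.
         */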
        if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
                vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
        struct vmspace *vm;

        PROC_VMSPACE_LOCK(p);
        vm = p->p_vmspace;
        p->p_vmspace = NULL;
        PROC_VMSPACE_UNLOCK(p);
        KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
        vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
        int refcnt;
        struct vmspace *vm;
        struct proc *p;

        /*
         * Release user portion of address space.
         * This releases references to vnodes,
         * which could cause I/O if the file has been unlinked.
         * Need to do this early enough that we can still sleep.
         *
         * The last exiting process to reach this point releases as
         * much of the environment as it can. vmspace_dofree() is the
         * slower fallback in case another process had a temporary
         * reference to the vmspace.
         */

        p = td->td_proc;
        vm = p->p_vmspace;
        atomic_add_int(&vmspace0.vm_refcnt, 1);
        do {
                refcnt = vm->vm_refcnt;
                if (refcnt > 1 && p->p_vmspace != &vmspace0) {
                        /* Switch now since other proc might free vmspace */
                        PROC_VMSPACE_LOCK(p);
                        p->p_vmspace = &vmspace0;
                        PROC_VMSPACE_UNLOCK(p);
                        pmap_activate(td);
                }
        } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
        if (refcnt == 1) {
                if (p->p_vmspace != vm) {
                        /* vmspace not yet freed, switch back */
                        PROC_VMSPACE_LOCK(p);
                        p->p_vmspace = vm;
                        PROC_VMSPACE_UNLOCK(p);
                        pmap_activate(td);
                }
                pmap_remove_pages(vmspace_pmap(vm));
                /* Switch now since this proc will free vmspace */
                PROC_VMSPACE_LOCK(p);
                p->p_vmspace = &vmspace0;
                PROC_VMSPACE_UNLOCK(p);
                pmap_activate(td);
                vmspace_dofree(vm);
        }
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
        struct vmspace *vm;
        int refcnt;

        PROC_VMSPACE_LOCK(p);
        vm = p->p_vmspace;
        if (vm == NULL) {
                PROC_VMSPACE_UNLOCK(p);
                return (NULL);
        }
        do {
                refcnt = vm->vm_refcnt;
                if (refcnt <= 0) {      /* Avoid 0->1 transition */
                        PROC_VMSPACE_UNLOCK(p);
                        return (NULL);
                }
        } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
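        /*
         * Re-check that the vmspace still belongs to the process: if it
         * changed while the reference was being acquired, drop the
         * reference just taken and fail rather than return a stale
         * vmspace.
         */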
        if (vm != p->p_vmspace) {
                PROC_VMSPACE_UNLOCK(p);
                vmspace_free(vm);
                return (NULL);
        }
        PROC_VMSPACE_UNLOCK(p);
        return (vm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                _mtx_lock_flags(&map->system_mtx, 0, file, line);
        else
                (void)_sx_xlock(&map->lock, 0, file, line);
        map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
        struct thread *td;
        vm_map_entry_t entry;

        td = curthread;

        while ((entry = td->td_map_def_user) != NULL) {
                td->td_map_def_user = entry->next;
                vm_map_entry_deallocate(entry, FALSE);
        }
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                _mtx_unlock_flags(&map->system_mtx, 0, file, line);
        else {
                _sx_xunlock(&map->lock, file, line);
                vm_map_process_deferred();
        }
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                _mtx_lock_flags(&map->system_mtx, 0, file, line);
        else
                (void)_sx_slock(&map->lock, 0, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                _mtx_unlock_flags(&map->system_mtx, 0, file, line);
        else {
                _sx_sunlock(&map->lock, file, line);
                vm_map_process_deferred();
        }
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
        int error;

        error = map->system_map ?
            !_mtx_trylock(&map->system_mtx, 0, file, line) :
            !_sx_try_xlock(&map->lock, file, line);
        if (error == 0)
                map->timestamp++;
        return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
        int error;

        error = map->system_map ?
            !_mtx_trylock(&map->system_mtx, 0, file, line) :
            !_sx_try_slock(&map->lock, file, line);
        return (error == 0);
}

/*
 *      _vm_map_lock_upgrade:   [ internal use only ]
 *
 *      Tries to upgrade a read (shared) lock on the specified map to a write
 *      (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *      non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *      returned without a read or write lock held.
 *
 *      Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
        unsigned int last_timestamp;

        if (map->system_map) {
#ifdef INVARIANTS
                _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
        } else {
                if (!_sx_try_upgrade(&map->lock, file, line)) {
                        last_timestamp = map->timestamp;
                        _sx_sunlock(&map->lock, file, line);
                        vm_map_process_deferred();
                        /*
                         * If the map's timestamp does not change while the
                         * map is unlocked, then the upgrade succeeds.
                         */
                        (void)_sx_xlock(&map->lock, 0, file, line);
                        if (last_timestamp != map->timestamp) {
                                _sx_xunlock(&map->lock, file, line);
                                return (1);
                        }
                }
        }
        map->timestamp++;
        return (0);
}

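/*
 * Typical caller pattern (illustrative sketch; vm_map_lock_upgrade() is
 * the wrapper macro defined in vm_map.h):
 *
 *      vm_map_lock_read(map);
 *      ...
 *      if (vm_map_lock_upgrade(map) != 0) {
 *              (no lock is held here: reacquire the write lock and
 *               revalidate any state derived under the read lock)
 *              vm_map_lock(map);
 *      }
 */
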
void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

        if (map->system_map) {
#ifdef INVARIANTS
                _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
        } else
                _sx_downgrade(&map->lock, file, line);
}

/*
 *      vm_map_locked:
 *
 *      Returns a non-zero value if the caller holds a write (exclusive) lock
 *      on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

        if (map->system_map)
                return (mtx_owned(&map->system_mtx));
        else
                return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
        else
                _sx_assert(&map->lock, SA_XLOCKED, file, line);
}

#if 0
static void
_vm_map_assert_locked_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
        else
                _sx_assert(&map->lock, SA_SLOCKED, file, line);
}
#endif

#define VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#define VM_MAP_ASSERT_LOCKED_READ(map) \
    _vm_map_assert_locked_read(map, LOCK_FILE, LOCK_LINE)
#else
#define VM_MAP_ASSERT_LOCKED(map)
#define VM_MAP_ASSERT_LOCKED_READ(map)
#endif

/*
 *      _vm_map_unlock_and_wait:
 *
 *      Atomically releases the lock on the specified map and puts the calling
 *      thread to sleep.  The calling thread will remain asleep until either
 *      vm_map_wakeup() is performed on the map or the specified timeout is
 *      exceeded.
 *
 *      WARNING!  This function does not perform deferred deallocations of
 *      objects and map entries.  Therefore, the calling thread is expected to
 *      reacquire the map lock after reawakening and later perform an ordinary
 *      unlock operation, such as vm_map_unlock(), before completing its
 *      operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

        mtx_lock(&map_sleep_mtx);
        if (map->system_map)
                _mtx_unlock_flags(&map->system_mtx, 0, file, line);
        else
                _sx_xunlock(&map->lock, file, line);
        return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
            timo));
}

/*
 *      vm_map_wakeup:
 *
 *      Awaken any threads that have slept on the map using
 *      vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

        /*
         * Acquire and release map_sleep_mtx to prevent a wakeup()
         * from being performed (and lost) between the map unlock
         * and the msleep() in _vm_map_unlock_and_wait().
         */
        mtx_lock(&map_sleep_mtx);
        mtx_unlock(&map_sleep_mtx);
        wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        KASSERT(map->busy, ("vm_map_unbusy: not busy"));
        if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
                vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
                wakeup(&map->busy);
        }
}

void
vm_map_wait_busy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        while (map->busy) {
                vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
                if (map->system_map)
                        msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
                else
                        sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
        }
        map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
        return pmap_resident_count(vmspace_pmap(vmspace));
}

long
vmspace_wired_count(struct vmspace *vmspace)
{
        return pmap_wired_count(vmspace_pmap(vmspace));
}

/*
 *      vm_map_create:
 *
 *      Creates and returns a new empty VM map with
 *      the given physical map structure, and having
 *      the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
        vm_map_t result;

        result = uma_zalloc(mapzone, M_WAITOK);
        CTR1(KTR_VM, "vm_map_create: %p", result);
        _vm_map_init(result, pmap, min, max);
        return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

        map->header.next = map->header.prev = &map->header;
        map->needs_wakeup = FALSE;
        map->system_map = 0;
        map->pmap = pmap;
        map->min_offset = min;
        map->max_offset = max;
        map->flags = 0;
        map->root = NULL;
        map->timestamp = 0;
        map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

        _vm_map_init(map, pmap, min, max);
        mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
        sx_init(&map->lock, "user map");
}

/*
 *      vm_map_entry_dispose:   [ internal use only ]
 *
 *      Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
        uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *      vm_map_entry_create:    [ internal use only ]
 *
 *      Allocates a VM map entry for insertion.
 *      No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
        vm_map_entry_t new_entry;

        if (map->system_map)
                new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
        else
                new_entry = uma_zalloc(mapentzone, M_WAITOK);
        if (new_entry == NULL)
                panic("vm_map_entry_create: kernel resources exhausted");
        return (new_entry);
}

/*
 *      vm_map_entry_set_behavior:
 *
 *      Set the expected access behavior, either normal, random, or
 *      sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
        entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
            (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *      vm_map_entry_set_max_free:
 *
 *      Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

        entry->max_free = entry->adj_free;
        if (entry->left != NULL && entry->left->max_free > entry->max_free)
                entry->max_free = entry->left->max_free;
        if (entry->right != NULL && entry->right->max_free > entry->max_free)
                entry->max_free = entry->right->max_free;
}

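/*
 * Illustrative note: after this call the subtree rooted at "entry"
 * satisfies
 *
 *      entry->max_free == max(entry->adj_free,
 *          entry->left->max_free, entry->right->max_free)
 *
 * with absent children contributing nothing, so the largest gap in any
 * subtree can be read off its root in O(1).
 */
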
/*
 *      vm_map_entry_splay:
 *
 *      The Sleator and Tarjan top-down splay algorithm with the
 *      following variation.  Max_free must be computed bottom-up, so
 *      on the downward pass, maintain the left and right spines in
 *      reverse order.  Then, make a second pass up each side to fix
 *      the pointers and compute max_free.  The time bound is O(log n)
 *      amortized.
 *
 *      The new root is the vm_map_entry containing "addr", or else an
 *      adjacent entry (lower or higher) if addr is not in the tree.
 *
 *      The map must be locked, and leaves it so.
 *
 *      Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
        vm_map_entry_t llist, rlist;
        vm_map_entry_t ltree, rtree;
        vm_map_entry_t y;

        /* Special case of empty tree. */
        if (root == NULL)
                return (root);

        /*
         * Pass One: Splay down the tree until we find addr or a NULL
         * pointer where addr would go.  llist and rlist are the two
         * sides in reverse order (bottom-up), with llist linked by
         * the right pointer and rlist linked by the left pointer in
         * the vm_map_entry.  Wait until Pass Two to set max_free on
         * the two spines.
         */
        llist = NULL;
        rlist = NULL;
        for (;;) {
                /* root is never NULL in here. */
                if (addr < root->start) {
                        y = root->left;
                        if (y == NULL)
                                break;
                        if (addr < y->start && y->left != NULL) {
                                /* Rotate right and put y on rlist. */
                                root->left = y->right;
                                y->right = root;
                                vm_map_entry_set_max_free(root);
                                root = y->left;
                                y->left = rlist;
                                rlist = y;
                        } else {
                                /* Put root on rlist. */
                                root->left = rlist;
                                rlist = root;
                                root = y;
                        }
                } else if (addr >= root->end) {
                        y = root->right;
                        if (y == NULL)
                                break;
                        if (addr >= y->end && y->right != NULL) {
                                /* Rotate left and put y on llist. */
                                root->right = y->left;
                                y->left = root;
                                vm_map_entry_set_max_free(root);
                                root = y->right;
                                y->right = llist;
                                llist = y;
                        } else {
                                /* Put root on llist. */
                                root->right = llist;
                                llist = root;
                                root = y;
                        }
                } else
                        break;
        }

        /*
         * Pass Two: Walk back up the two spines, flip the pointers
         * and set max_free.  The subtrees of the root go at the
         * bottom of llist and rlist.
         */
        ltree = root->left;
        while (llist != NULL) {
                y = llist->right;
                llist->right = ltree;
                vm_map_entry_set_max_free(llist);
                ltree = llist;
                llist = y;
        }
        rtree = root->right;
        while (rlist != NULL) {
                y = rlist->left;
                rlist->left = rtree;
                vm_map_entry_set_max_free(rlist);
                rtree = rlist;
                rlist = y;
        }

        /*
         * Final assembly: add ltree and rtree as subtrees of root.
         */
        root->left = ltree;
        root->right = rtree;
        vm_map_entry_set_max_free(root);

        return (root);
}

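/*
 * Illustrative example: splaying a tree whose root covers [20, 30) at
 * addr == 12 first descends left to the child covering [10, 15); since
 * 12 falls within that entry, the loop terminates and Pass Two rebuilds
 * the tree with [10, 15) as the new root and [20, 30) in its right
 * subtree.  A miss behaves similarly, ending at a neighboring entry.
 */
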
/*
 *      vm_map_entry_{un,}link:
 *
 *      Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
                  vm_map_entry_t after_where,
                  vm_map_entry_t entry)
{

        CTR4(KTR_VM,
            "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
            map->nentries, entry, after_where);
        VM_MAP_ASSERT_LOCKED(map);
        map->nentries++;
        entry->prev = after_where;
        entry->next = after_where->next;
        entry->next->prev = entry;
        after_where->next = entry;

        if (after_where != &map->header) {
                if (after_where != map->root)
                        vm_map_entry_splay(after_where->start, map->root);
                entry->right = after_where->right;
                entry->left = after_where;
                after_where->right = NULL;
                after_where->adj_free = entry->start - after_where->end;
                vm_map_entry_set_max_free(after_where);
        } else {
                entry->right = map->root;
                entry->left = NULL;
        }
        entry->adj_free = (entry->next == &map->header ? map->max_offset :
            entry->next->start) - entry->end;
        vm_map_entry_set_max_free(entry);
        map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
                    vm_map_entry_t entry)
{
        vm_map_entry_t next, prev, root;

        VM_MAP_ASSERT_LOCKED(map);
        if (entry != map->root)
                vm_map_entry_splay(entry->start, map->root);
        if (entry->left == NULL)
                root = entry->right;
        else {
                root = vm_map_entry_splay(entry->start, entry->left);
                root->right = entry->right;
                root->adj_free = (entry->next == &map->header ? map->max_offset :
                    entry->next->start) - root->end;
                vm_map_entry_set_max_free(root);
        }
        map->root = root;

        prev = entry->prev;
        next = entry->next;
        next->prev = prev;
        prev->next = next;
        map->nentries--;
        CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
            map->nentries, entry);
}

/*
 *      vm_map_entry_resize_free:
 *
 *      Recompute the amount of free space following a vm_map_entry
 *      and propagate that value up the tree.  Call this function after
 *      resizing a map entry in-place, that is, without a call to
 *      vm_map_entry_link() or _unlink().
 *
 *      The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

        /*
         * Using splay trees without parent pointers, propagating
         * max_free up the tree is done by moving the entry to the
         * root and making the change there.
         */
        if (entry != map->root)
                map->root = vm_map_entry_splay(entry->start, map->root);

        entry->adj_free = (entry->next == &map->header ? map->max_offset :
            entry->next->start) - entry->end;
        vm_map_entry_set_max_free(entry);
}

/*
 *      vm_map_lookup_entry:    [ internal use only ]
 *
 *      Finds the map entry containing (or
 *      immediately preceding) the specified address
 *      in the given map; the entry is returned
 *      in the "entry" parameter.  The boolean
 *      result indicates whether the address is
 *      actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
        vm_map_t map,
        vm_offset_t address,
        vm_map_entry_t *entry)  /* OUT */
{
        vm_map_entry_t cur;
        boolean_t locked;

        /*
         * If the map is empty, then the map entry immediately preceding
         * "address" is the map's header.
         */
        cur = map->root;
        if (cur == NULL)
                *entry = &map->header;
        else if (address >= cur->start && cur->end > address) {
                *entry = cur;
                return (TRUE);
        } else if ((locked = vm_map_locked(map)) ||
            sx_try_upgrade(&map->lock)) {
                /*
                 * Splay requires a write lock on the map.  However, it only
                 * restructures the binary search tree; it does not otherwise
                 * change the map.  Thus, the map's timestamp need not change
                 * on a temporary upgrade.
                 */
                map->root = cur = vm_map_entry_splay(address, cur);
                if (!locked)
                        sx_downgrade(&map->lock);

                /*
                 * If "address" is contained within a map entry, the new root
                 * is that map entry.  Otherwise, the new root is a map entry
                 * immediately before or after "address".
                 */
                if (address >= cur->start) {
                        *entry = cur;
                        if (cur->end > address)
                                return (TRUE);
                } else
                        *entry = cur->prev;
        } else
                /*
                 * Since the map is only locked for read access, perform a
                 * standard binary search tree lookup for "address".
                 */
                for (;;) {
                        if (address < cur->start) {
                                if (cur->left == NULL) {
                                        *entry = cur->prev;
                                        break;
                                }
                                cur = cur->left;
                        } else if (cur->end > address) {
                                *entry = cur;
                                return (TRUE);
                        } else {
                                if (cur->right == NULL) {
                                        *entry = cur;
                                        break;
                                }
                                cur = cur->right;
                        }
                }
        return (FALSE);
}

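/*
 * Typical usage (illustrative sketch):
 *
 *      vm_map_entry_t entry;
 *
 *      if (vm_map_lookup_entry(map, addr, &entry)) {
 *              (addr lies within [entry->start, entry->end))
 *      } else {
 *              (entry precedes addr; the first entry above addr,
 *               if any, is entry->next)
 *      }
 */
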
/*
 *      vm_map_insert:
 *
 *      Inserts the given whole VM object into the target
 *      map at the specified address range.  The object's
 *      size should match that of the address range.
 *
 *      Requires that the map be locked, and leaves it so.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
              vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
              int cow)
{
        vm_map_entry_t new_entry;
        vm_map_entry_t prev_entry;
        vm_map_entry_t temp_entry;
        vm_eflags_t protoeflags;
        struct ucred *cred;
        boolean_t charge_prev_obj;

        VM_MAP_ASSERT_LOCKED(map);

        /*
         * Check that the start and end points are not bogus.
         */
        if ((start < map->min_offset) || (end > map->max_offset) ||
            (start >= end))
                return (KERN_INVALID_ADDRESS);

        /*
         * Find the entry prior to the proposed starting address; if it's part
         * of an existing entry, this range is bogus.
         */
        if (vm_map_lookup_entry(map, start, &temp_entry))
                return (KERN_NO_SPACE);

        prev_entry = temp_entry;

        /*
         * Assert that the next entry doesn't overlap the end point.
         */
        if ((prev_entry->next != &map->header) &&
            (prev_entry->next->start < end))
                return (KERN_NO_SPACE);

        protoeflags = 0;
        charge_prev_obj = FALSE;

        if (cow & MAP_COPY_ON_WRITE)
                protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

        if (cow & MAP_NOFAULT) {
                protoeflags |= MAP_ENTRY_NOFAULT;

                KASSERT(object == NULL,
                        ("vm_map_insert: paradoxical MAP_NOFAULT request"));
        }
        if (cow & MAP_DISABLE_SYNCER)
                protoeflags |= MAP_ENTRY_NOSYNC;
        if (cow & MAP_DISABLE_COREDUMP)
                protoeflags |= MAP_ENTRY_NOCOREDUMP;

        cred = NULL;
        KASSERT((object != kmem_object && object != kernel_object) ||
            ((object == kmem_object || object == kernel_object) &&
                !(protoeflags & MAP_ENTRY_NEEDS_COPY)),
            ("kmem or kernel object and cow"));
        if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
                goto charged;
        if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
            ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
                if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
                        return (KERN_RESOURCE_SHORTAGE);
                KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
                    object->cred == NULL,
                    ("OVERCOMMIT: vm_map_insert o %p", object));
                cred = curthread->td_ucred;
                crhold(cred);
                if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
                        charge_prev_obj = TRUE;
        }

charged:
        /* Expand the kernel pmap, if necessary. */
        if (map == kernel_map && end > kernel_vm_end)
                pmap_growkernel(end);
        if (object != NULL) {
                /*
                 * OBJ_ONEMAPPING must be cleared unless this mapping
                 * is trivially proven to be the only mapping for any
                 * of the object's pages.  (Object granularity
                 * reference counting is insufficient to recognize
                 * aliases with precision.)
                 */
                VM_OBJECT_LOCK(object);
                if (object->ref_count > 1 || object->shadow_count != 0)
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);
                VM_OBJECT_UNLOCK(object);
        }
        else if ((prev_entry != &map->header) &&
                 (prev_entry->eflags == protoeflags) &&
                 (prev_entry->end == start) &&
                 (prev_entry->wired_count == 0) &&
                 (prev_entry->cred == cred ||
                  (prev_entry->object.vm_object != NULL &&
                   (prev_entry->object.vm_object->cred == cred))) &&
                   vm_object_coalesce(prev_entry->object.vm_object,
                       prev_entry->offset,
                       (vm_size_t)(prev_entry->end - prev_entry->start),
                       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
                /*
                 * We were able to extend the object.  Determine if we
                 * can extend the previous map entry to include the
                 * new range as well.
                 */
                if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
                    (prev_entry->protection == prot) &&
                    (prev_entry->max_protection == max)) {
                        map->size += (end - prev_entry->end);
                        prev_entry->end = end;
                        vm_map_entry_resize_free(map, prev_entry);
                        vm_map_simplify_entry(map, prev_entry);
                        if (cred != NULL)
                                crfree(cred);
                        return (KERN_SUCCESS);
                }

                /*
                 * If we can extend the object but cannot extend the
                 * map entry, we have to create a new map entry.  We
                 * must bump the ref count on the extended object to
                 * account for it.  object may be NULL.
                 */
                object = prev_entry->object.vm_object;
                offset = prev_entry->offset +
                        (prev_entry->end - prev_entry->start);
                vm_object_reference(object);
                if (cred != NULL && object != NULL && object->cred != NULL &&
                    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
                        /* Object already accounts for this uid. */
                        crfree(cred);
                        cred = NULL;
                }
        }

        /*
         * NOTE: if conditionals fail, object can be NULL here.  This occurs
         * in things like the buffer map where we manage kva but do not manage
         * backing objects.
         */

        /*
         * Create a new entry
         */
        new_entry = vm_map_entry_create(map);
        new_entry->start = start;
        new_entry->end = end;
        new_entry->cred = NULL;

        new_entry->eflags = protoeflags;
        new_entry->object.vm_object = object;
        new_entry->offset = offset;
        new_entry->avail_ssize = 0;

        new_entry->inheritance = VM_INHERIT_DEFAULT;
        new_entry->protection = prot;
        new_entry->max_protection = max;
        new_entry->wired_count = 0;

        KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
            ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
        new_entry->cred = cred;

        /*
         * Insert the new entry into the list
         */
        vm_map_entry_link(map, prev_entry, new_entry);
        map->size += new_entry->end - new_entry->start;

        /*
         * It may be possible to merge the new entry with the next and/or
         * previous entries.  However, due to MAP_STACK_* being a hack, a
         * panic can result from merging such entries.
         */
        if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
                vm_map_simplify_entry(map, new_entry);

        if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
                vm_map_pmap_enter(map, start, prot,
                                    object, OFF_TO_IDX(offset), end - start,
                                    cow & MAP_PREFAULT_PARTIAL);
        }

        return (KERN_SUCCESS);
}

/*
 *      vm_map_findspace:
 *
 *      Find the first fit (lowest VM address) for "length" free bytes
 *      beginning at address >= start in the given map.
 *
 *      In a vm_map_entry, "adj_free" is the amount of free space
 *      adjacent (higher address) to this entry, and "max_free" is the
 *      maximum amount of contiguous free space in its subtree.  This
 *      allows finding a free region in one path down the tree, so
 *      O(log n) amortized with splay trees.
 *
 *      The map must be locked, and leaves it so.
 *
 *      Returns: 0 on success, and starting address in *addr,
 *               1 if insufficient space.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)  /* OUT */
{
        vm_map_entry_t entry;
        vm_offset_t st;

        /*
         * Request must fit within min/max VM address and must avoid
         * address wrap.
         */
        if (start < map->min_offset)
                start = map->min_offset;
        if (start + length > map->max_offset || start + length < start)
                return (1);

        /* Empty tree means wide open address space. */
        if (map->root == NULL) {
                *addr = start;
                return (0);
        }

        /*
         * After splay, if start comes before root node, then there
         * must be a gap from start to the root.
         */
        map->root = vm_map_entry_splay(start, map->root);
        if (start + length <= map->root->start) {
                *addr = start;
                return (0);
        }

        /*
         * Root is the last node that might begin its gap before
         * start, and this is the last comparison where address
         * wrap might be a problem.
         */
        st = (start > map->root->end) ? start : map->root->end;
        if (length <= map->root->end + map->root->adj_free - st) {
                *addr = st;
                return (0);
        }

        /* With max_free, can immediately tell if no solution. */
        entry = map->root->right;
        if (entry == NULL || length > entry->max_free)
                return (1);

        /*
         * Search the right subtree in the order: left subtree, root,
         * right subtree (first fit).  The previous splay implies that
         * all regions in the right subtree have addresses > start.
         */
        while (entry != NULL) {
                if (entry->left != NULL && entry->left->max_free >= length)
                        entry = entry->left;
                else if (entry->adj_free >= length) {
                        *addr = entry->end;
                        return (0);
                } else
                        entry = entry->right;
        }

        /* Can't get here, so panic if we do. */
        panic("vm_map_findspace: max_free corrupt");
}

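/*
 * Worked example (illustrative): with entries at [0x1000, 0x2000) and
 * [0x5000, 0x6000), a call with start == 0x1000 and length == 0x3000
 * splays [0x1000, 0x2000) to the root, whose adj_free is then 0x3000;
 * the gap after it fits, so *addr is set to 0x2000.
 */
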
int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
        vm_offset_t end;
        int result;

        end = start + length;
        vm_map_lock(map);
        VM_MAP_RANGE_CHECK(map, start, end);
        (void) vm_map_delete(map, start, end);
        result = vm_map_insert(map, object, offset, start, end, prot,
            max, cow);
        vm_map_unlock(map);
        return (result);
}

/*
 *      vm_map_find finds an unallocated region in the target address
 *      map with the given length.  The search is defined to be
 *      first-fit from the specified address; the region found is
 *      returned in the same parameter.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
            vm_offset_t *addr,  /* IN/OUT */
            vm_size_t length, int find_space, vm_prot_t prot,
            vm_prot_t max, int cow)
{
        vm_offset_t start;
        int result;

        start = *addr;
        vm_map_lock(map);
        do {
                if (find_space != VMFS_NO_SPACE) {
                        if (vm_map_findspace(map, start, length, addr)) {
                                vm_map_unlock(map);
                                return (KERN_NO_SPACE);
                        }
                        switch (find_space) {
                        case VMFS_ALIGNED_SPACE:
                                pmap_align_superpage(object, offset, addr,
                                    length);
                                break;
#ifdef VMFS_TLB_ALIGNED_SPACE
                        case VMFS_TLB_ALIGNED_SPACE:
                                pmap_align_tlb(addr);
                                break;
#endif
                        default:
                                break;
                        }

                        start = *addr;
                }
                result = vm_map_insert(map, object, offset, start, start +
                    length, prot, max, cow);
        } while (result == KERN_NO_SPACE && (find_space == VMFS_ALIGNED_SPACE
#ifdef VMFS_TLB_ALIGNED_SPACE
            || find_space == VMFS_TLB_ALIGNED_SPACE
#endif
            ));
        vm_map_unlock(map);
        return (result);
}

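/*
 * Typical usage (illustrative sketch; VMFS_ANY_SPACE requests a plain
 * first-fit search with no extra alignment, and "min_search_addr" is a
 * hypothetical caller-chosen lower bound):
 *
 *      vm_offset_t addr = min_search_addr;
 *
 *      if (vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
 *          VM_PROT_ALL, VM_PROT_ALL, 0) == KERN_SUCCESS)
 *              (addr now holds the start of the new mapping)
 */
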
/*
 *      vm_map_simplify_entry:
 *
 *      Simplify the given map entry by merging with either neighbor.  This
 *      routine also has the ability to merge with both neighbors.
 *
 *      The map must be locked.
 *
 *      This routine guarantees that the passed entry remains valid (though
 *      possibly extended).  When merging, this routine may delete one or
 *      both neighbors.
 */
1480 void
1481 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1482 {
1483         vm_map_entry_t next, prev;
1484         vm_size_t prevsize, esize;
1485
1486         if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1487                 return;
1488
1489         prev = entry->prev;
1490         if (prev != &map->header) {
1491                 prevsize = prev->end - prev->start;
1492                 if ( (prev->end == entry->start) &&
1493                      (prev->object.vm_object == entry->object.vm_object) &&
1494                      (!prev->object.vm_object ||
1495                         (prev->offset + prevsize == entry->offset)) &&
1496                      (prev->eflags == entry->eflags) &&
1497                      (prev->protection == entry->protection) &&
1498                      (prev->max_protection == entry->max_protection) &&
1499                      (prev->inheritance == entry->inheritance) &&
1500                      (prev->wired_count == entry->wired_count) &&
1501                      (prev->cred == entry->cred)) {
1502                         vm_map_entry_unlink(map, prev);
1503                         entry->start = prev->start;
1504                         entry->offset = prev->offset;
1505                         if (entry->prev != &map->header)
1506                                 vm_map_entry_resize_free(map, entry->prev);
1507
1508                         /*
1509                          * If the backing object is a vnode object,
1510                          * vm_object_deallocate() calls vrele().
1511                          * However, vrele() does not lock the vnode
1512                          * because the vnode has additional
1513                          * references.  Thus, the map lock can be kept
1514                          * without causing a lock-order reversal with
1515                          * the vnode lock.
1516                          */
1517                         if (prev->object.vm_object)
1518                                 vm_object_deallocate(prev->object.vm_object);
1519                         if (prev->cred != NULL)
1520                                 crfree(prev->cred);
1521                         vm_map_entry_dispose(map, prev);
1522                 }
1523         }
1524
1525         next = entry->next;
1526         if (next != &map->header) {
1527                 esize = entry->end - entry->start;
1528                 if ((entry->end == next->start) &&
1529                     (next->object.vm_object == entry->object.vm_object) &&
1530                      (!entry->object.vm_object ||
1531                         (entry->offset + esize == next->offset)) &&
1532                     (next->eflags == entry->eflags) &&
1533                     (next->protection == entry->protection) &&
1534                     (next->max_protection == entry->max_protection) &&
1535                     (next->inheritance == entry->inheritance) &&
1536                     (next->wired_count == entry->wired_count) &&
1537                     (next->cred == entry->cred)) {
1538                         vm_map_entry_unlink(map, next);
1539                         entry->end = next->end;
1540                         vm_map_entry_resize_free(map, entry);
1541
1542                         /*
1543                          * See comment above.
1544                          */
1545                         if (next->object.vm_object)
1546                                 vm_object_deallocate(next->object.vm_object);
1547                         if (next->cred != NULL)
1548                                 crfree(next->cred);
1549                         vm_map_entry_dispose(map, next);
1550                 }
1551         }
1552 }
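
/*
 * Editor's note: an illustrative sketch, not part of the original
 * file.  Two neighbors merge only when they are virtually contiguous,
 * reference the same object at contiguous offsets, and agree on every
 * attribute compared above, e.g.:
 *
 *      [0x1000,0x2000) obj A, off 0x0000, prot RW \
 *      [0x2000,0x3000) obj A, off 0x1000, prot RW  > [0x1000,0x3000)
 *
 * A differing wired_count, inheritance, or cred keeps them apart.
 */
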
1553 /*
1554  *      vm_map_clip_start:      [ internal use only ]
1555  *
1556  *      Asserts that the given entry begins at or after
1557  *      the specified address; if necessary,
1558  *      it splits the entry into two.
1559  */
1560 #define vm_map_clip_start(map, entry, startaddr) \
1561 { \
1562         if (startaddr > entry->start) \
1563                 _vm_map_clip_start(map, entry, startaddr); \
1564 }
1565
1566 /*
1567  *      This routine is called only when it is known that
1568  *      the entry must be split.
1569  */
1570 static void
1571 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1572 {
1573         vm_map_entry_t new_entry;
1574
1575         VM_MAP_ASSERT_LOCKED(map);
1576
1577         /*
1578          * Split off the front portion -- note that we must insert the new
1579          * entry BEFORE this one, so that this entry has the specified
1580          * starting address.
1581          */
1582         vm_map_simplify_entry(map, entry);
1583
1584         /*
1585          * If there is no object backing this entry, we might as well create
1586          * one now.  If we defer it, an object can get created after the map
1587          * is clipped, and individual objects will be created for the split-up
1588          * map.  This is a bit of a hack, but is also about the best place to
1589          * put this improvement.
1590          */
1591         if (entry->object.vm_object == NULL && !map->system_map) {
1592                 vm_object_t object;
1593                 object = vm_object_allocate(OBJT_DEFAULT,
1594                                 atop(entry->end - entry->start));
1595                 entry->object.vm_object = object;
1596                 entry->offset = 0;
1597                 if (entry->cred != NULL) {
1598                         object->cred = entry->cred;
1599                         object->charge = entry->end - entry->start;
1600                         entry->cred = NULL;
1601                 }
1602         } else if (entry->object.vm_object != NULL &&
1603                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1604                    entry->cred != NULL) {
1605                 VM_OBJECT_LOCK(entry->object.vm_object);
1606                 KASSERT(entry->object.vm_object->cred == NULL,
1607                     ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1608                 entry->object.vm_object->cred = entry->cred;
1609                 entry->object.vm_object->charge = entry->end - entry->start;
1610                 VM_OBJECT_UNLOCK(entry->object.vm_object);
1611                 entry->cred = NULL;
1612         }
1613
1614         new_entry = vm_map_entry_create(map);
1615         *new_entry = *entry;
1616
1617         new_entry->end = start;
1618         entry->offset += (start - entry->start);
1619         entry->start = start;
1620         if (new_entry->cred != NULL)
1621                 crhold(entry->cred);    /* same ucred as new_entry after the copy */
1622
1623         vm_map_entry_link(map, entry->prev, new_entry);
1624
1625         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1626                 vm_object_reference(new_entry->object.vm_object);
1627         }
1628 }
1629
1630 /*
1631  *      vm_map_clip_end:        [ internal use only ]
1632  *
1633  *      Asserts that the given entry ends at or before
1634  *      the specified address; if necessary,
1635  *      it splits the entry into two.
1636  */
1637 #define vm_map_clip_end(map, entry, endaddr) \
1638 { \
1639         if ((endaddr) < (entry->end)) \
1640                 _vm_map_clip_end((map), (entry), (endaddr)); \
1641 }
1642
1643 /*
1644  *      This routine is called only when it is known that
1645  *      the entry must be split.
1646  */
1647 static void
1648 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1649 {
1650         vm_map_entry_t new_entry;
1651
1652         VM_MAP_ASSERT_LOCKED(map);
1653
1654         /*
1655          * If there is no object backing this entry, we might as well create
1656          * one now.  If we defer it, an object can get created after the map
1657          * is clipped, and individual objects will be created for the split-up
1658          * map.  This is a bit of a hack, but is also about the best place to
1659          * put this improvement.
1660          */
1661         if (entry->object.vm_object == NULL && !map->system_map) {
1662                 vm_object_t object;
1663                 object = vm_object_allocate(OBJT_DEFAULT,
1664                                 atop(entry->end - entry->start));
1665                 entry->object.vm_object = object;
1666                 entry->offset = 0;
1667                 if (entry->cred != NULL) {
1668                         object->cred = entry->cred;
1669                         object->charge = entry->end - entry->start;
1670                         entry->cred = NULL;
1671                 }
1672         } else if (entry->object.vm_object != NULL &&
1673                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1674                    entry->cred != NULL) {
1675                 VM_OBJECT_LOCK(entry->object.vm_object);
1676                 KASSERT(entry->object.vm_object->cred == NULL,
1677                     ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1678                 entry->object.vm_object->cred = entry->cred;
1679                 entry->object.vm_object->charge = entry->end - entry->start;
1680                 VM_OBJECT_UNLOCK(entry->object.vm_object);
1681                 entry->cred = NULL;
1682         }
1683
1684         /*
1685          * Create a new entry and insert it AFTER the specified entry
1686          */
1687         new_entry = vm_map_entry_create(map);
1688         *new_entry = *entry;
1689
1690         new_entry->start = entry->end = end;
1691         new_entry->offset += (end - entry->start);
1692         if (new_entry->cred != NULL)
1693                 crhold(entry->cred);    /* same ucred as new_entry after the copy */
1694
1695         vm_map_entry_link(map, entry, new_entry);
1696
1697         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1698                 vm_object_reference(new_entry->object.vm_object);
1699         }
1700 }
1701
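/*
 * Editor's note: an illustrative sketch, not part of the original
 * file, of the two clip operations on an entry spanning [A, B):
 *
 *      vm_map_clip_start(map, e, S):   [A,B) -> new [A,S), e [S,B)
 *      vm_map_clip_end(map, e, E):     [A,B) -> e [A,E), new [E,B)
 *
 * Range operations clip at both boundaries first so that attribute
 * changes never leak outside [start, end); vm_map_simplify_entry()
 * re-merges the pieces once their attributes match again.
 */
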
1702 /*
1703  *      vm_map_submap:          [ kernel use only ]
1704  *
1705  *      Mark the given range as handled by a subordinate map.
1706  *
1707  *      This range must have been created with vm_map_find,
1708  *      and no other operations may have been performed on this
1709  *      range prior to calling vm_map_submap.
1710  *
1711  *      Only a limited number of operations can be performed
1712  *      within this range after calling vm_map_submap:
1713  *              vm_fault
1714  *      [Don't try vm_map_copy!]
1715  *
1716  *      To remove a submapping, one must first remove the
1717  *      range from the superior map, and then destroy the
1718  *      submap (if desired).  [Better yet, don't try it.]
1719  */
1720 int
1721 vm_map_submap(
1722         vm_map_t map,
1723         vm_offset_t start,
1724         vm_offset_t end,
1725         vm_map_t submap)
1726 {
1727         vm_map_entry_t entry;
1728         int result = KERN_INVALID_ARGUMENT;
1729
1730         vm_map_lock(map);
1731
1732         VM_MAP_RANGE_CHECK(map, start, end);
1733
1734         if (vm_map_lookup_entry(map, start, &entry)) {
1735                 vm_map_clip_start(map, entry, start);
1736         } else
1737                 entry = entry->next;
1738
1739         vm_map_clip_end(map, entry, end);
1740
1741         if ((entry->start == start) && (entry->end == end) &&
1742             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1743             (entry->object.vm_object == NULL)) {
1744                 entry->object.sub_map = submap;
1745                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1746                 result = KERN_SUCCESS;
1747         }
1748         vm_map_unlock(map);
1749
1750         return (result);
1751 }
1752
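/*
 * Editor's note: a hedged sketch, not part of the original file.  A
 * submap is typically installed by reserving an object-less range in
 * the parent map and then marking it with vm_map_submap(); the helper
 * name and the VM_MAP_EXAMPLES guard are hypothetical.
 */
#ifdef VM_MAP_EXAMPLES
static int
example_install_submap(vm_map_t parent, vm_map_t submap, vm_size_t size,
    vm_offset_t *minp)
{
        int rv;

        /*
         * A NULL object leaves the range unbacked and non-COW, as
         * vm_map_submap() requires.
         */
        rv = vm_map_find(parent, NULL, 0, minp, size, VMFS_ANY_SPACE,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        if (rv != KERN_SUCCESS)
                return (rv);
        return (vm_map_submap(parent, *minp, *minp + size, submap));
}
#endif
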
1753 /*
1754  * The maximum number of pages to map
1755  */
1756 #define MAX_INIT_PT     96
1757
1758 /*
1759  *      vm_map_pmap_enter:
1760  *
1761  *      Preload read-only mappings for the given object's resident pages into
1762  *      the given map.  This eliminates the soft faults on process startup and
1763  *      immediately after an mmap(2).  Because these are speculative mappings,
1764  *      cached pages are not reactivated and mapped.
1765  */
1766 void
1767 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1768     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1769 {
1770         vm_offset_t start;
1771         vm_page_t p, p_start;
1772         vm_pindex_t psize, tmpidx;
1773
1774         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1775                 return;
1776         VM_OBJECT_LOCK(object);
1777         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1778                 pmap_object_init_pt(map->pmap, addr, object, pindex, size);
1779                 goto unlock_return;
1780         }
1781
1782         psize = atop(size);
1783
1784         if ((flags & MAP_PREFAULT_PARTIAL) && psize > MAX_INIT_PT &&
1785             object->resident_page_count > MAX_INIT_PT)
1786                 goto unlock_return;
1787
1788         if (psize + pindex > object->size) {
1789                 if (object->size < pindex)
1790                         goto unlock_return;
1791                 psize = object->size - pindex;
1792         }
1793
1794         start = 0;
1795         p_start = NULL;
1796
1797         p = vm_page_find_least(object, pindex);
1798         /*
1799          * Assert: the variable p is either (1) the page with the
1800          * least pindex greater than or equal to the parameter pindex
1801          * or (2) NULL.
1802          */
1803         for (;
1804              p != NULL && (tmpidx = p->pindex - pindex) < psize;
1805              p = TAILQ_NEXT(p, listq)) {
1806                 /*
1807                  * Don't allow madvise to blow away our really
1808                  * free pages by allocating pv entries.
1809                  */
1810                 if ((flags & MAP_PREFAULT_MADVISE) &&
1811                     cnt.v_free_count < cnt.v_free_reserved) {
1812                         psize = tmpidx;
1813                         break;
1814                 }
1815                 if (p->valid == VM_PAGE_BITS_ALL) {
1816                         if (p_start == NULL) {
1817                                 start = addr + ptoa(tmpidx);
1818                                 p_start = p;
1819                         }
1820                 } else if (p_start != NULL) {
1821                         pmap_enter_object(map->pmap, start, addr +
1822                             ptoa(tmpidx), p_start, prot);
1823                         p_start = NULL;
1824                 }
1825         }
1826         if (p_start != NULL)
1827                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1828                     p_start, prot);
1829 unlock_return:
1830         VM_OBJECT_UNLOCK(object);
1831 }
1832
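/*
 * Editor's note: an illustrative sketch, not part of the original
 * file.  The loop above batches calls to pmap_enter_object(): p_start
 * remembers the first fully valid resident page of the current run,
 * and the run is flushed when a partially valid page ends it:
 *
 *      pages:  V V V p V V     (V = fully valid, p = partially valid)
 *      calls:  [--run--] [run]
 *
 * so at most one pmap call is made per run of fully valid pages.
 */
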
1833 /*
1834  *      vm_map_protect:
1835  *
1836  *      Sets the protection of the specified address
1837  *      region in the target map.  If "set_max" is
1838  *      specified, the maximum protection is to be set;
1839  *      otherwise, only the current protection is affected.
1840  */
1841 int
1842 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1843                vm_prot_t new_prot, boolean_t set_max)
1844 {
1845         vm_map_entry_t current, entry;
1846         vm_object_t obj;
1847         struct ucred *cred;
1848         vm_prot_t old_prot;
1849
1850         vm_map_lock(map);
1851
1852         VM_MAP_RANGE_CHECK(map, start, end);
1853
1854         if (vm_map_lookup_entry(map, start, &entry)) {
1855                 vm_map_clip_start(map, entry, start);
1856         } else {
1857                 entry = entry->next;
1858         }
1859
1860         /*
1861          * Make a first pass to check for protection violations.
1862          */
1863         current = entry;
1864         while ((current != &map->header) && (current->start < end)) {
1865                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1866                         vm_map_unlock(map);
1867                         return (KERN_INVALID_ARGUMENT);
1868                 }
1869                 if ((new_prot & current->max_protection) != new_prot) {
1870                         vm_map_unlock(map);
1871                         return (KERN_PROTECTION_FAILURE);
1872                 }
1873                 current = current->next;
1874         }
1875
1876
1877         /*
1878          * Do an accounting pass for private read-only mappings that
1879          * will now become copy-on-write due to the newly allowed
1880          * write access (e.g., a debugger setting a text breakpoint).
1881          */
1882         for (current = entry; (current != &map->header) &&
1883              (current->start < end); current = current->next) {
1884
1885                 vm_map_clip_end(map, current, end);
1886
1887                 if (set_max ||
1888                     ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1889                     ENTRY_CHARGED(current)) {
1890                         continue;
1891                 }
1892
1893                 cred = curthread->td_ucred;
1894                 obj = current->object.vm_object;
1895
1896                 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1897                         if (!swap_reserve(current->end - current->start)) {
1898                                 vm_map_unlock(map);
1899                                 return (KERN_RESOURCE_SHORTAGE);
1900                         }
1901                         crhold(cred);
1902                         current->cred = cred;
1903                         continue;
1904                 }
1905
1906                 VM_OBJECT_LOCK(obj);
1907                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1908                         VM_OBJECT_UNLOCK(obj);
1909                         continue;
1910                 }
1911
1912                 /*
1913                  * Charge for the whole object allocation now, since
1914                  * we cannot distinguish between non-charged and
1915                  * charged clipped mapping of the same object later.
1916                  */
1917                 KASSERT(obj->charge == 0,
1918                     ("vm_map_protect: object %p overcharged\n", obj));
1919                 if (!swap_reserve(ptoa(obj->size))) {
1920                         VM_OBJECT_UNLOCK(obj);
1921                         vm_map_unlock(map);
1922                         return (KERN_RESOURCE_SHORTAGE);
1923                 }
1924
1925                 crhold(cred);
1926                 obj->cred = cred;
1927                 obj->charge = ptoa(obj->size);
1928                 VM_OBJECT_UNLOCK(obj);
1929         }
1930
1931         /*
1932          * Go back and fix up protections. [Note that clipping is not
1933          * necessary the second time.]
1934          */
1935         current = entry;
1936         while ((current != &map->header) && (current->start < end)) {
1937                 old_prot = current->protection;
1938
1939                 if (set_max)
1940                         current->protection =
1941                             (current->max_protection = new_prot) &
1942                             old_prot;
1943                 else
1944                         current->protection = new_prot;
1945
1946                 if ((current->eflags & (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED))
1947                      == (MAP_ENTRY_COW | MAP_ENTRY_USER_WIRED) &&
1948                     (current->protection & VM_PROT_WRITE) != 0 &&
1949                     (old_prot & VM_PROT_WRITE) == 0) {
1950                         vm_fault_copy_entry(map, map, current, current, NULL);
1951                 }
1952
1953                 /*
1954                  * When restricting access, update the physical map.  Worry
1955                  * about copy-on-write here.
1956                  */
1957                 if ((old_prot & ~current->protection) != 0) {
1958 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1959                                                         VM_PROT_ALL)
1960                         pmap_protect(map->pmap, current->start,
1961                             current->end,
1962                             current->protection & MASK(current));
1963 #undef  MASK
1964                 }
1965                 vm_map_simplify_entry(map, current);
1966                 current = current->next;
1967         }
1968         vm_map_unlock(map);
1969         return (KERN_SUCCESS);
1970 }
1971
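/*
 * Editor's note: a hedged sketch, not part of the original file,
 * showing the mprotect(2)-style use of vm_map_protect().  The helper
 * name and the VM_MAP_EXAMPLES guard are hypothetical, and the errno
 * mapping is simplified.
 */
#ifdef VM_MAP_EXAMPLES
static int
example_mprotect(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot)
{

        switch (vm_map_protect(map, trunc_page(addr),
            round_page(addr + len), prot, FALSE)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}
#endif
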
1972 /*
1973  *      vm_map_madvise:
1974  *
1975  *      This routine traverses a process's map, handling the madvise
1976  *      system call.  Advisories are classified as either those affecting
1977  *      the vm_map_entry structure or those affecting the underlying
1978  *      objects.
1979  */
1980 int
1981 vm_map_madvise(
1982         vm_map_t map,
1983         vm_offset_t start,
1984         vm_offset_t end,
1985         int behav)
1986 {
1987         vm_map_entry_t current, entry;
1988         int modify_map = 0;
1989
1990         /*
1991          * Some madvise calls directly modify the vm_map_entry, in which case
1992          * we need to use an exclusive lock on the map and we need to perform
1993          * various clipping operations.  Otherwise we only need a read-lock
1994          * on the map.
1995          */
1996         switch (behav) {
1997         case MADV_NORMAL:
1998         case MADV_SEQUENTIAL:
1999         case MADV_RANDOM:
2000         case MADV_NOSYNC:
2001         case MADV_AUTOSYNC:
2002         case MADV_NOCORE:
2003         case MADV_CORE:
2004                 modify_map = 1;
2005                 vm_map_lock(map);
2006                 break;
2007         case MADV_WILLNEED:
2008         case MADV_DONTNEED:
2009         case MADV_FREE:
2010                 vm_map_lock_read(map);
2011                 break;
2012         default:
2013                 return (KERN_INVALID_ARGUMENT);
2014         }
2015
2016         /*
2017          * Locate starting entry and clip if necessary.
2018          */
2019         VM_MAP_RANGE_CHECK(map, start, end);
2020
2021         if (vm_map_lookup_entry(map, start, &entry)) {
2022                 if (modify_map)
2023                         vm_map_clip_start(map, entry, start);
2024         } else {
2025                 entry = entry->next;
2026         }
2027
2028         if (modify_map) {
2029                 /*
2030                  * madvise behaviors that are implemented in the vm_map_entry.
2031                  *
2032                  * We clip the vm_map_entry so that behavioral changes are
2033                  * limited to the specified address range.
2034                  */
2035                 for (current = entry;
2036                      (current != &map->header) && (current->start < end);
2037                      current = current->next
2038                 ) {
2039                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2040                                 continue;
2041
2042                         vm_map_clip_end(map, current, end);
2043
2044                         switch (behav) {
2045                         case MADV_NORMAL:
2046                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2047                                 break;
2048                         case MADV_SEQUENTIAL:
2049                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2050                                 break;
2051                         case MADV_RANDOM:
2052                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2053                                 break;
2054                         case MADV_NOSYNC:
2055                                 current->eflags |= MAP_ENTRY_NOSYNC;
2056                                 break;
2057                         case MADV_AUTOSYNC:
2058                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
2059                                 break;
2060                         case MADV_NOCORE:
2061                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
2062                                 break;
2063                         case MADV_CORE:
2064                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2065                                 break;
2066                         default:
2067                                 break;
2068                         }
2069                         vm_map_simplify_entry(map, current);
2070                 }
2071                 vm_map_unlock(map);
2072         } else {
2073                 vm_pindex_t pindex;
2074                 int count;
2075
2076                 /*
2077                  * madvise behaviors that are implemented in the underlying
2078                  * vm_object.
2079                  *
2080                  * Since we don't clip the vm_map_entry, we have to clip
2081                  * the vm_object pindex and count.
2082                  */
2083                 for (current = entry;
2084                      (current != &map->header) && (current->start < end);
2085                      current = current->next
2086                 ) {
2087                         vm_offset_t useStart;
2088
2089                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2090                                 continue;
2091
2092                         pindex = OFF_TO_IDX(current->offset);
2093                         count = atop(current->end - current->start);
2094                         useStart = current->start;
2095
2096                         if (current->start < start) {
2097                                 pindex += atop(start - current->start);
2098                                 count -= atop(start - current->start);
2099                                 useStart = start;
2100                         }
2101                         if (current->end > end)
2102                                 count -= atop(current->end - end);
2103
2104                         if (count <= 0)
2105                                 continue;
2106
2107                         vm_object_madvise(current->object.vm_object,
2108                                           pindex, count, behav);
2109                         if (behav == MADV_WILLNEED) {
2110                                 vm_map_pmap_enter(map,
2111                                     useStart,
2112                                     current->protection,
2113                                     current->object.vm_object,
2114                                     pindex,
2115                                     (count << PAGE_SHIFT),
2116                                     MAP_PREFAULT_MADVISE
2117                                 );
2118                         }
2119                 }
2120                 vm_map_unlock_read(map);
2121         }
2122         return (0);
2123 }
2124
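/*
 * Editor's note: a summary sketch, not part of the original file, of
 * the locking split above:
 *
 *      entry advice (exclusive map lock, entries clipped):
 *              MADV_NORMAL, MADV_SEQUENTIAL, MADV_RANDOM,
 *              MADV_NOSYNC, MADV_AUTOSYNC, MADV_NOCORE, MADV_CORE
 *      object advice (read lock, pindex/count clipped instead):
 *              MADV_WILLNEED, MADV_DONTNEED, MADV_FREE
 */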
2125
2126 /*
2127  *      vm_map_inherit:
2128  *
2129  *      Sets the inheritance of the specified address
2130  *      range in the target map.  Inheritance
2131  *      affects how the map will be shared with
2132  *      child maps at the time of vmspace_fork.
2133  */
2134 int
2135 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2136                vm_inherit_t new_inheritance)
2137 {
2138         vm_map_entry_t entry;
2139         vm_map_entry_t temp_entry;
2140
2141         switch (new_inheritance) {
2142         case VM_INHERIT_NONE:
2143         case VM_INHERIT_COPY:
2144         case VM_INHERIT_SHARE:
2145                 break;
2146         default:
2147                 return (KERN_INVALID_ARGUMENT);
2148         }
2149         vm_map_lock(map);
2150         VM_MAP_RANGE_CHECK(map, start, end);
2151         if (vm_map_lookup_entry(map, start, &temp_entry)) {
2152                 entry = temp_entry;
2153                 vm_map_clip_start(map, entry, start);
2154         } else
2155                 entry = temp_entry->next;
2156         while ((entry != &map->header) && (entry->start < end)) {
2157                 vm_map_clip_end(map, entry, end);
2158                 entry->inheritance = new_inheritance;
2159                 vm_map_simplify_entry(map, entry);
2160                 entry = entry->next;
2161         }
2162         vm_map_unlock(map);
2163         return (KERN_SUCCESS);
2164 }
2165
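/*
 * Editor's note: a hedged sketch, not part of the original file.
 * minherit(2) reduces to a single call, e.g. to share a range with
 * future children:
 *
 *      (void)vm_map_inherit(&p->p_vmspace->vm_map, start, end,
 *          VM_INHERIT_SHARE);
 *
 * vmspace_fork() then shares, copies, or omits each entry according
 * to its inheritance value.
 */
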
2166 /*
2167  *      vm_map_unwire:
2168  *
2169  *      Implements both kernel and user unwiring.
2170  */
2171 int
2172 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2173     int flags)
2174 {
2175         vm_map_entry_t entry, first_entry, tmp_entry;
2176         vm_offset_t saved_start;
2177         unsigned int last_timestamp;
2178         int rv;
2179         boolean_t need_wakeup, result, user_unwire;
2180
2181         user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2182         vm_map_lock(map);
2183         VM_MAP_RANGE_CHECK(map, start, end);
2184         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2185                 if (flags & VM_MAP_WIRE_HOLESOK)
2186                         first_entry = first_entry->next;
2187                 else {
2188                         vm_map_unlock(map);
2189                         return (KERN_INVALID_ADDRESS);
2190                 }
2191         }
2192         last_timestamp = map->timestamp;
2193         entry = first_entry;
2194         while (entry != &map->header && entry->start < end) {
2195                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2196                         /*
2197                          * We have not yet clipped the entry.
2198                          */
2199                         saved_start = (start >= entry->start) ? start :
2200                             entry->start;
2201                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2202                         if (vm_map_unlock_and_wait(map, 0)) {
2203                                 /*
2204                                  * Allow interruption of user unwiring?
2205                                  */
2206                         }
2207                         vm_map_lock(map);
2208                         if (last_timestamp+1 != map->timestamp) {
2209                                 /*
2210                                  * Look again for the entry because the map was
2211                                  * modified while it was unlocked.
2212                                  * Specifically, the entry may have been
2213                                  * clipped, merged, or deleted.
2214                                  */
2215                                 if (!vm_map_lookup_entry(map, saved_start,
2216                                     &tmp_entry)) {
2217                                         if (flags & VM_MAP_WIRE_HOLESOK)
2218                                                 tmp_entry = tmp_entry->next;
2219                                         else {
2220                                                 if (saved_start == start) {
2221                                                         /*
2222                                                          * first_entry has been deleted.
2223                                                          */
2224                                                         vm_map_unlock(map);
2225                                                         return (KERN_INVALID_ADDRESS);
2226                                                 }
2227                                                 end = saved_start;
2228                                                 rv = KERN_INVALID_ADDRESS;
2229                                                 goto done;
2230                                         }
2231                                 }
2232                                 if (entry == first_entry)
2233                                         first_entry = tmp_entry;
2234                                 else
2235                                         first_entry = NULL;
2236                                 entry = tmp_entry;
2237                         }
2238                         last_timestamp = map->timestamp;
2239                         continue;
2240                 }
2241                 vm_map_clip_start(map, entry, start);
2242                 vm_map_clip_end(map, entry, end);
2243                 /*
2244                  * Mark the entry in case the map lock is released.  (See
2245                  * above.)
2246                  */
2247                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2248                 /*
2249                  * Check the map for holes in the specified region.
2250                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2251                  */
2252                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2253                     (entry->end < end && (entry->next == &map->header ||
2254                     entry->next->start > entry->end))) {
2255                         end = entry->end;
2256                         rv = KERN_INVALID_ADDRESS;
2257                         goto done;
2258                 }
2259                 /*
2260                  * If system unwiring, require that the entry is system wired.
2261                  */
2262                 if (!user_unwire &&
2263                     vm_map_entry_system_wired_count(entry) == 0) {
2264                         end = entry->end;
2265                         rv = KERN_INVALID_ARGUMENT;
2266                         goto done;
2267                 }
2268                 entry = entry->next;
2269         }
2270         rv = KERN_SUCCESS;
2271 done:
2272         need_wakeup = FALSE;
2273         if (first_entry == NULL) {
2274                 result = vm_map_lookup_entry(map, start, &first_entry);
2275                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2276                         first_entry = first_entry->next;
2277                 else
2278                         KASSERT(result, ("vm_map_unwire: lookup failed"));
2279         }
2280         entry = first_entry;
2281         while (entry != &map->header && entry->start < end) {
2282                 if (rv == KERN_SUCCESS && (!user_unwire ||
2283                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2284                         if (user_unwire)
2285                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2286                         entry->wired_count--;
2287                         if (entry->wired_count == 0) {
2288                                 /*
2289                                  * Retain the map lock.
2290                                  */
2291                                 vm_fault_unwire(map, entry->start, entry->end,
2292                                     entry->object.vm_object != NULL &&
2293                                     (entry->object.vm_object->type == OBJT_DEVICE ||
2294                                     entry->object.vm_object->type == OBJT_SG));
2295                         }
2296                 }
2297                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
2298                         ("vm_map_unwire: in-transition flag missing"));
2299                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2300                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2301                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2302                         need_wakeup = TRUE;
2303                 }
2304                 vm_map_simplify_entry(map, entry);
2305                 entry = entry->next;
2306         }
2307         vm_map_unlock(map);
2308         if (need_wakeup)
2309                 vm_map_wakeup(map);
2310         return (rv);
2311 }
2312
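/*
 * Editor's note: a hedged sketch, not part of the original file,
 * showing the munlock(2)-style use of vm_map_unwire().  The helper
 * name and the VM_MAP_EXAMPLES guard are hypothetical.
 * VM_MAP_WIRE_NOHOLES turns an unmapped gap in the range into an
 * error instead of something to skip.
 */
#ifdef VM_MAP_EXAMPLES
static int
example_munlock(vm_map_t map, vm_offset_t addr, vm_size_t len)
{

        if (vm_map_unwire(map, trunc_page(addr), round_page(addr + len),
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES) != KERN_SUCCESS)
                return (ENOMEM);
        return (0);
}
#endif
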
2313 /*
2314  *      vm_map_wire:
2315  *
2316  *      Implements both kernel and user wiring.
2317  */
2318 int
2319 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2320     int flags)
2321 {
2322         vm_map_entry_t entry, first_entry, tmp_entry;
2323         vm_offset_t saved_end, saved_start;
2324         unsigned int last_timestamp;
2325         int rv;
2326         boolean_t fictitious, need_wakeup, result, user_wire;
2327         vm_prot_t prot;
2328
2329         prot = 0;
2330         if (flags & VM_MAP_WIRE_WRITE)
2331                 prot |= VM_PROT_WRITE;
2332         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2333         vm_map_lock(map);
2334         VM_MAP_RANGE_CHECK(map, start, end);
2335         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2336                 if (flags & VM_MAP_WIRE_HOLESOK)
2337                         first_entry = first_entry->next;
2338                 else {
2339                         vm_map_unlock(map);
2340                         return (KERN_INVALID_ADDRESS);
2341                 }
2342         }
2343         last_timestamp = map->timestamp;
2344         entry = first_entry;
2345         while (entry != &map->header && entry->start < end) {
2346                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2347                         /*
2348                          * We have not yet clipped the entry.
2349                          */
2350                         saved_start = (start >= entry->start) ? start :
2351                             entry->start;
2352                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2353                         if (vm_map_unlock_and_wait(map, 0)) {
2354                                 /*
2355                                  * Allow interruption of user wiring?
2356                                  */
2357                         }
2358                         vm_map_lock(map);
2359                         if (last_timestamp + 1 != map->timestamp) {
2360                                 /*
2361                                  * Look again for the entry because the map was
2362                                  * modified while it was unlocked.
2363                                  * Specifically, the entry may have been
2364                                  * clipped, merged, or deleted.
2365                                  */
2366                                 if (!vm_map_lookup_entry(map, saved_start,
2367                                     &tmp_entry)) {
2368                                         if (flags & VM_MAP_WIRE_HOLESOK)
2369                                                 tmp_entry = tmp_entry->next;
2370                                         else {
2371                                                 if (saved_start == start) {
2372                                                         /*
2373                                                          * first_entry has been deleted.
2374                                                          */
2375                                                         vm_map_unlock(map);
2376                                                         return (KERN_INVALID_ADDRESS);
2377                                                 }
2378                                                 end = saved_start;
2379                                                 rv = KERN_INVALID_ADDRESS;
2380                                                 goto done;
2381                                         }
2382                                 }
2383                                 if (entry == first_entry)
2384                                         first_entry = tmp_entry;
2385                                 else
2386                                         first_entry = NULL;
2387                                 entry = tmp_entry;
2388                         }
2389                         last_timestamp = map->timestamp;
2390                         continue;
2391                 }
2392                 vm_map_clip_start(map, entry, start);
2393                 vm_map_clip_end(map, entry, end);
2394                 /*
2395                  * Mark the entry in case the map lock is released.  (See
2396                  * above.)
2397                  */
2398                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2399                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2400                     || (entry->protection & prot) != prot) {
2401                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2402                         if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2403                                 end = entry->end;
2404                                 rv = KERN_INVALID_ADDRESS;
2405                                 goto done;
2406                         }
2407                         goto next_entry;
2408                 }
2409                 if (entry->wired_count == 0) {
2410                         entry->wired_count++;
2411                         saved_start = entry->start;
2412                         saved_end = entry->end;
2413                         fictitious = entry->object.vm_object != NULL &&
2414                             (entry->object.vm_object->type == OBJT_DEVICE ||
2415                             entry->object.vm_object->type == OBJT_SG);
2416                         /*
2417                          * Release the map lock, relying on the in-transition
2418                          * mark.  Mark the map busy for fork.
2419                          */
2420                         vm_map_busy(map);
2421                         vm_map_unlock(map);
2422                         rv = vm_fault_wire(map, saved_start, saved_end,
2423                             fictitious);
2424                         vm_map_lock(map);
2425                         vm_map_unbusy(map);
2426                         if (last_timestamp + 1 != map->timestamp) {
2427                                 /*
2428                                  * Look again for the entry because the map was
2429                                  * modified while it was unlocked.  The entry
2430                                  * may have been clipped, but NOT merged or
2431                                  * deleted.
2432                                  */
2433                                 result = vm_map_lookup_entry(map, saved_start,
2434                                     &tmp_entry);
2435                                 KASSERT(result, ("vm_map_wire: lookup failed"));
2436                                 if (entry == first_entry)
2437                                         first_entry = tmp_entry;
2438                                 else
2439                                         first_entry = NULL;
2440                                 entry = tmp_entry;
2441                                 while (entry->end < saved_end) {
2442                                         if (rv != KERN_SUCCESS) {
2443                                                 KASSERT(entry->wired_count == 1,
2444                                                     ("vm_map_wire: bad count"));
2445                                                 entry->wired_count = -1;
2446                                         }
2447                                         entry = entry->next;
2448                                 }
2449                         }
2450                         last_timestamp = map->timestamp;
2451                         if (rv != KERN_SUCCESS) {
2452                                 KASSERT(entry->wired_count == 1,
2453                                     ("vm_map_wire: bad count"));
2454                                 /*
2455                                  * Assign an out-of-range value to represent
2456                                  * the failure to wire this entry.
2457                                  */
2458                                 entry->wired_count = -1;
2459                                 end = entry->end;
2460                                 goto done;
2461                         }
2462                 } else if (!user_wire ||
2463                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2464                         entry->wired_count++;
2465                 }
2466                 /*
2467                  * Check the map for holes in the specified region.
2468                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2469                  */
2470         next_entry:
2471                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2472                     (entry->end < end && (entry->next == &map->header ||
2473                     entry->next->start > entry->end))) {
2474                         end = entry->end;
2475                         rv = KERN_INVALID_ADDRESS;
2476                         goto done;
2477                 }
2478                 entry = entry->next;
2479         }
2480         rv = KERN_SUCCESS;
2481 done:
2482         need_wakeup = FALSE;
2483         if (first_entry == NULL) {
2484                 result = vm_map_lookup_entry(map, start, &first_entry);
2485                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2486                         first_entry = first_entry->next;
2487                 else
2488                         KASSERT(result, ("vm_map_wire: lookup failed"));
2489         }
2490         entry = first_entry;
2491         while (entry != &map->header && entry->start < end) {
2492                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2493                         goto next_entry_done;
2494                 if (rv == KERN_SUCCESS) {
2495                         if (user_wire)
2496                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
2497                 } else if (entry->wired_count == -1) {
2498                         /*
2499                          * Wiring failed on this entry.  Thus, unwiring is
2500                          * unnecessary.
2501                          */
2502                         entry->wired_count = 0;
2503                 } else {
2504                         if (!user_wire ||
2505                             (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
2506                                 entry->wired_count--;
2507                         if (entry->wired_count == 0) {
2508                                 /*
2509                                  * Retain the map lock.
2510                                  */
2511                                 vm_fault_unwire(map, entry->start, entry->end,
2512                                     entry->object.vm_object != NULL &&
2513                                     (entry->object.vm_object->type == OBJT_DEVICE ||
2514                                     entry->object.vm_object->type == OBJT_SG));
2515                         }
2516                 }
2517         next_entry_done:
2518                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
2519                         ("vm_map_wire: in-transition flag missing"));
2520                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION|MAP_ENTRY_WIRE_SKIPPED);
2521                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2522                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2523                         need_wakeup = TRUE;
2524                 }
2525                 vm_map_simplify_entry(map, entry);
2526                 entry = entry->next;
2527         }
2528         vm_map_unlock(map);
2529         if (need_wakeup)
2530                 vm_map_wakeup(map);
2531         return (rv);
2532 }
2533
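/*
 * Editor's note: a hedged sketch, not part of the original file,
 * showing the mlock(2)-style use of vm_map_wire().  The helper name
 * and the VM_MAP_EXAMPLES guard are hypothetical.  Adding
 * VM_MAP_WIRE_WRITE to the flags would additionally demand write
 * permission on every entry in the range.
 */
#ifdef VM_MAP_EXAMPLES
static int
example_mlock(vm_map_t map, vm_offset_t addr, vm_size_t len)
{

        if (vm_map_wire(map, trunc_page(addr), round_page(addr + len),
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES) != KERN_SUCCESS)
                return (ENOMEM);
        return (0);
}
#endif
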
2534 /*
2535  * vm_map_sync
2536  *
2537  * Push any dirty cached pages in the address range to their pager.
2538  * If syncio is TRUE, dirty pages are written synchronously.
2539  * If invalidate is TRUE, any cached pages are freed as well.
2540  *
2541  * If the size of the region from start to end is zero, we are
2542  * supposed to flush all modified pages within the region containing
2543  * start.  Unfortunately, a region can be split or coalesced with
2544  * neighboring regions, making it difficult to determine what the
2545  * original region was.  Therefore, we approximate this requirement by
2546  * flushing the current region containing start.
2547  *
2548  * Returns an error if any part of the specified range is not mapped.
2549  */
2550 int
2551 vm_map_sync(
2552         vm_map_t map,
2553         vm_offset_t start,
2554         vm_offset_t end,
2555         boolean_t syncio,
2556         boolean_t invalidate)
2557 {
2558         vm_map_entry_t current;
2559         vm_map_entry_t entry;
2560         vm_size_t size;
2561         vm_object_t object;
2562         vm_ooffset_t offset;
2563         unsigned int last_timestamp;
2564
2565         vm_map_lock_read(map);
2566         VM_MAP_RANGE_CHECK(map, start, end);
2567         if (!vm_map_lookup_entry(map, start, &entry)) {
2568                 vm_map_unlock_read(map);
2569                 return (KERN_INVALID_ADDRESS);
2570         } else if (start == end) {
2571                 start = entry->start;
2572                 end = entry->end;
2573         }
2574         /*
2575          * Make a first pass to check for user-wired memory and holes.
2576          */
2577         for (current = entry; current != &map->header && current->start < end;
2578             current = current->next) {
2579                 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2580                         vm_map_unlock_read(map);
2581                         return (KERN_INVALID_ARGUMENT);
2582                 }
2583                 if (end > current->end &&
2584                     (current->next == &map->header ||
2585                         current->end != current->next->start)) {
2586                         vm_map_unlock_read(map);
2587                         return (KERN_INVALID_ADDRESS);
2588                 }
2589         }
2590
2591         if (invalidate)
2592                 pmap_remove(map->pmap, start, end);
2593
2594         /*
2595          * Make a second pass, cleaning/uncaching pages from the indicated
2596          * objects as we go.
2597          */
2598         for (current = entry; current != &map->header && current->start < end;) {
2599                 offset = current->offset + (start - current->start);
2600                 size = (end <= current->end ? end : current->end) - start;
2601                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2602                         vm_map_t smap;
2603                         vm_map_entry_t tentry;
2604                         vm_size_t tsize;
2605
2606                         smap = current->object.sub_map;
2607                         vm_map_lock_read(smap);
2608                         (void) vm_map_lookup_entry(smap, offset, &tentry);
2609                         tsize = tentry->end - offset;
2610                         if (tsize < size)
2611                                 size = tsize;
2612                         object = tentry->object.vm_object;
2613                         offset = tentry->offset + (offset - tentry->start);
2614                         vm_map_unlock_read(smap);
2615                 } else {
2616                         object = current->object.vm_object;
2617                 }
2618                 vm_object_reference(object);
2619                 last_timestamp = map->timestamp;
2620                 vm_map_unlock_read(map);
2621                 vm_object_sync(object, offset, size, syncio, invalidate);
2622                 start += size;
2623                 vm_object_deallocate(object);
2624                 vm_map_lock_read(map);
2625                 if (last_timestamp == map->timestamp ||
2626                     !vm_map_lookup_entry(map, start, &current))
2627                         current = current->next;
2628         }
2629
2630         vm_map_unlock_read(map);
2631         return (KERN_SUCCESS);
2632 }
2633
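/*
 * Editor's note: a hedged sketch, not part of the original file,
 * showing the msync(2)-style use of vm_map_sync().  The helper name
 * and the VM_MAP_EXAMPLES guard are hypothetical and the errno
 * mapping is simplified: MS_SYNC selects synchronous writes, and
 * MS_INVALIDATE frees the cached pages as well.
 */
#ifdef VM_MAP_EXAMPLES
static int
example_msync(vm_map_t map, vm_offset_t addr, vm_size_t len, int flags)
{

        if (vm_map_sync(map, trunc_page(addr), round_page(addr + len),
            (flags & MS_SYNC) != 0, (flags & MS_INVALIDATE) != 0) !=
            KERN_SUCCESS)
                return (EINVAL);
        return (0);
}
#endif
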
2634 /*
2635  *      vm_map_entry_unwire:    [ internal use only ]
2636  *
2637  *      Make the region specified by this entry pageable.
2638  *
2639  *      The map in question should be locked.
2640  *      [This is the reason for this routine's existence.]
2641  */
2642 static void
2643 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2644 {
2645         vm_fault_unwire(map, entry->start, entry->end,
2646             entry->object.vm_object != NULL &&
2647             (entry->object.vm_object->type == OBJT_DEVICE ||
2648             entry->object.vm_object->type == OBJT_SG));
2649         entry->wired_count = 0;
2650 }
2651
2652 static void
2653 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2654 {
2655
2656         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2657                 vm_object_deallocate(entry->object.vm_object);
2658         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2659 }
2660
2661 /*
2662  *      vm_map_entry_delete:    [ internal use only ]
2663  *
2664  *      Deallocate the given entry from the target map.
2665  */
2666 static void
2667 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2668 {
2669         vm_object_t object;
2670         vm_pindex_t offidxstart, offidxend, count, size1;
2671         vm_ooffset_t size;
2672
2673         vm_map_entry_unlink(map, entry);
2674         object = entry->object.vm_object;
2675         size = entry->end - entry->start;
2676         map->size -= size;
2677
2678         if (entry->cred != NULL) {
2679                 swap_release_by_cred(size, entry->cred);
2680                 crfree(entry->cred);
2681         }
2682
2683         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2684             (object != NULL)) {
2685                 KASSERT(entry->cred == NULL || object->cred == NULL ||
2686                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2687                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2688                 count = OFF_TO_IDX(size);
2689                 offidxstart = OFF_TO_IDX(entry->offset);
2690                 offidxend = offidxstart + count;
2691                 VM_OBJECT_LOCK(object);
2692                 if (object->ref_count != 1 &&
2693                     ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2694                     object == kernel_object || object == kmem_object)) {
2695                         vm_object_collapse(object);
2696                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2697                         if (object->type == OBJT_SWAP)
2698                                 swap_pager_freespace(object, offidxstart, count);
2699                         if (offidxend >= object->size &&
2700                             offidxstart < object->size) {
2701                                 size1 = object->size;
2702                                 object->size = offidxstart;
2703                                 if (object->cred != NULL) {
2704                                         size1 -= object->size;
2705                                         KASSERT(object->charge >= ptoa(size1),
2706                                             ("vm_map_entry_delete: object->charge < 0"));
2707                                         swap_release_by_cred(ptoa(size1), object->cred);
2708                                         object->charge -= ptoa(size1);
2709                                 }
2710                         }
2711                 }
2712                 VM_OBJECT_UNLOCK(object);
2713         } else
2714                 entry->object.vm_object = NULL;
2715         if (map->system_map)
2716                 vm_map_entry_deallocate(entry, TRUE);
2717         else {
2718                 entry->next = curthread->td_map_def_user;
2719                 curthread->td_map_def_user = entry;
2720         }
2721 }
2722
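/*
 * Editor's note: a clarifying note, not part of the original file.
 * For user maps the entry is not freed here: it is queued on the
 * thread's td_map_def_user list and disposed of later, once the map
 * lock has been released, so that object deallocation (which may
 * acquire vnode or object locks) does not run with the map locked.
 * System-map entries are freed immediately.
 */
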
2723 /*
2724  *      vm_map_delete:  [ internal use only ]
2725  *
2726  *      Deallocates the given address range from the target
2727  *      map.
2728  */
2729 int
2730 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2731 {
2732         vm_map_entry_t entry;
2733         vm_map_entry_t first_entry;
2734
2735         VM_MAP_ASSERT_LOCKED(map);
2736
2737         /*
2738          * Find the start of the region, and clip it
2739          */
2740         if (!vm_map_lookup_entry(map, start, &first_entry))
2741                 entry = first_entry->next;
2742         else {
2743                 entry = first_entry;
2744                 vm_map_clip_start(map, entry, start);
2745         }
2746
2747         /*
2748          * Step through all entries in this region
2749          */
2750         while ((entry != &map->header) && (entry->start < end)) {
2751                 vm_map_entry_t next;
2752
2753                 /*
2754                  * Wait for wiring or unwiring of an entry to complete.
2755                  * Also wait for any system wirings to disappear on
2756                  * user maps.
2757                  */
2758                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2759                     (vm_map_pmap(map) != kernel_pmap &&
2760                     vm_map_entry_system_wired_count(entry) != 0)) {
2761                         unsigned int last_timestamp;
2762                         vm_offset_t saved_start;
2763                         vm_map_entry_t tmp_entry;
2764
2765                         saved_start = entry->start;
2766                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2767                         last_timestamp = map->timestamp;
2768                         (void) vm_map_unlock_and_wait(map, 0);
2769                         vm_map_lock(map);
2770                         if (last_timestamp + 1 != map->timestamp) {
2771                                 /*
2772                                  * Look again for the entry because the map was
2773                                  * modified while it was unlocked.
2774                                  * Specifically, the entry may have been
2775                                  * clipped, merged, or deleted.
2776                                  */
2777                                 if (!vm_map_lookup_entry(map, saved_start,
2778                                                          &tmp_entry))
2779                                         entry = tmp_entry->next;
2780                                 else {
2781                                         entry = tmp_entry;
2782                                         vm_map_clip_start(map, entry,
2783                                                           saved_start);
2784                                 }
2785                         }
2786                         continue;
2787                 }
2788                 vm_map_clip_end(map, entry, end);
2789
2790                 next = entry->next;
2791
2792                 /*
2793                  * Unwire before removing addresses from the pmap; otherwise,
2794                  * unwiring will put the entries back in the pmap.
2795                  */
2796                 if (entry->wired_count != 0) {
2797                         vm_map_entry_unwire(map, entry);
2798                 }
2799
2800                 pmap_remove(map->pmap, entry->start, entry->end);
2801
2802                 /*
2803                  * Delete the entry only after removing all pmap
2804                  * entries pointing to its pages.  (Otherwise, its
2805                  * page frames may be reallocated, and any modify bits
2806                  * will be set in the wrong object!)
2807                  */
2808                 vm_map_entry_delete(map, entry);
2809                 entry = next;
2810         }
2811         return (KERN_SUCCESS);
2812 }
2813
2814 /*
2815  *      vm_map_remove:
2816  *
2817  *      Remove the given address range from the target map.
2818  *      This is the exported form of vm_map_delete.
2819  */
2820 int
2821 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2822 {
2823         int result;
2824
2825         vm_map_lock(map);
2826         VM_MAP_RANGE_CHECK(map, start, end);
2827         result = vm_map_delete(map, start, end);
2828         vm_map_unlock(map);
2829         return (result);
2830 }
2831
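/*
 * Illustrative sketch (not part of this file): callers normally use the
 * exported wrapper above rather than vm_map_delete(), because the wrapper
 * takes and drops the map lock itself.  The bounds "addr" and "addr + size"
 * below are hypothetical values supplied by the caller.
 *
 *	vm_map_t map = &curproc->p_vmspace->vm_map;
 *
 *	if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
 *		return (EINVAL);
 */
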
2832 /*
2833  *      vm_map_check_protection:
2834  *
2835  *      Assert that the target map allows the specified privilege on the
2836  *      entire address region given.  The entire region must be allocated.
2837  *
2838  *      WARNING!  This code does not and should not check whether the
2839  *      contents of the region are accessible.  For example, a smaller file
2840  *      might be mapped into a larger address space.
2841  *
2842  *      NOTE!  This code is also called by munmap().
2843  *
2844  *      The map must be locked.  A read lock is sufficient.
2845  */
2846 boolean_t
2847 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2848                         vm_prot_t protection)
2849 {
2850         vm_map_entry_t entry;
2851         vm_map_entry_t tmp_entry;
2852
2853         if (!vm_map_lookup_entry(map, start, &tmp_entry))
2854                 return (FALSE);
2855         entry = tmp_entry;
2856
2857         while (start < end) {
2858                 if (entry == &map->header)
2859                         return (FALSE);
2860                 /*
2861                  * No holes allowed!
2862                  */
2863                 if (start < entry->start)
2864                         return (FALSE);
2865                 /*
2866                  * Check protection associated with entry.
2867                  */
2868                 if ((entry->protection & protection) != protection)
2869                         return (FALSE);
2870                 /* go to next entry */
2871                 start = entry->end;
2872                 entry = entry->next;
2873         }
2874         return (TRUE);
2875 }
2876
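/*
 * Illustrative sketch: since a read lock suffices for the check above, a
 * hypothetical caller could verify a range before operating on it; "map",
 * "start", and "end" are placeholders.
 *
 *	boolean_t ok;
 *
 *	vm_map_lock_read(map);
 *	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
 *	vm_map_unlock_read(map);
 *	if (!ok)
 *		return (KERN_PROTECTION_FAILURE);
 */
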
2877 /*
2878  *      vm_map_copy_entry:
2879  *
2880  *      Copies the contents of the source entry to the destination
2881  *      entry.  The entries *must* be aligned properly.
2882  */
2883 static void
2884 vm_map_copy_entry(
2885         vm_map_t src_map,
2886         vm_map_t dst_map,
2887         vm_map_entry_t src_entry,
2888         vm_map_entry_t dst_entry,
2889         vm_ooffset_t *fork_charge)
2890 {
2891         vm_object_t src_object;
2892         vm_offset_t size;
2893         struct ucred *cred;
2894         int charged;
2895
2896         VM_MAP_ASSERT_LOCKED(dst_map);
2897
2898         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2899                 return;
2900
2901         if (src_entry->wired_count == 0) {
2902
2903                 /*
2904                  * If the source entry is marked needs_copy, it is already
2905                  * write-protected.
2906                  */
2907                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2908                         pmap_protect(src_map->pmap,
2909                             src_entry->start,
2910                             src_entry->end,
2911                             src_entry->protection & ~VM_PROT_WRITE);
2912                 }
2913
2914                 /*
2915                  * Make a copy of the object.
2916                  */
2917                 size = src_entry->end - src_entry->start;
2918                 if ((src_object = src_entry->object.vm_object) != NULL) {
2919                         VM_OBJECT_LOCK(src_object);
2920                         charged = ENTRY_CHARGED(src_entry);
2921                         if ((src_object->handle == NULL) &&
2922                                 (src_object->type == OBJT_DEFAULT ||
2923                                  src_object->type == OBJT_SWAP)) {
2924                                 vm_object_collapse(src_object);
2925                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2926                                         vm_object_split(src_entry);
2927                                         src_object = src_entry->object.vm_object;
2928                                 }
2929                         }
2930                         vm_object_reference_locked(src_object);
2931                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2932                         if (src_entry->cred != NULL &&
2933                             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
2934                                 KASSERT(src_object->cred == NULL,
2935                                     ("OVERCOMMIT: vm_map_copy_entry: cred %p",
2936                                      src_object));
2937                                 src_object->cred = src_entry->cred;
2938                                 src_object->charge = size;
2939                         }
2940                         VM_OBJECT_UNLOCK(src_object);
2941                         dst_entry->object.vm_object = src_object;
2942                         if (charged) {
2943                                 cred = curthread->td_ucred;
2944                                 crhold(cred);
2945                                 dst_entry->cred = cred;
2946                                 *fork_charge += size;
2947                                 if (!(src_entry->eflags &
2948                                       MAP_ENTRY_NEEDS_COPY)) {
2949                                         crhold(cred);
2950                                         src_entry->cred = cred;
2951                                         *fork_charge += size;
2952                                 }
2953                         }
2954                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2955                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2956                         dst_entry->offset = src_entry->offset;
2957                 } else {
2958                         dst_entry->object.vm_object = NULL;
2959                         dst_entry->offset = 0;
2960                         if (src_entry->cred != NULL) {
2961                                 dst_entry->cred = curthread->td_ucred;
2962                                 crhold(dst_entry->cred);
2963                                 *fork_charge += size;
2964                         }
2965                 }
2966
2967                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2968                     dst_entry->end - dst_entry->start, src_entry->start);
2969         } else {
2970                 /*
2971                  * Of course, wired down pages can't be set copy-on-write.
2972                  * Cause wired pages to be copied into the new map by
2973                  * simulating faults (the new pages are pageable)
2974                  */
2975                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
2976                     fork_charge);
2977         }
2978 }
2979
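/*
 * After the unwired branch above, both entries reference the same object
 * and carry the copy-on-write flags, so the first write fault in either
 * map shadows the object.  A minimal sketch of the resulting invariant
 * ("entry" is hypothetical):
 *
 *	KASSERT((entry->eflags & (MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY)) ==
 *	    (MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY),
 *	    ("entry %p lost its COW marking", entry));
 */
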
2980 /*
2981  * vmspace_map_entry_forked:
2982  * Update the newly-forked vmspace each time a map entry is inherited
2983  * or copied.  The values for vm_dsize and vm_tsize are approximate
2984  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
2985  */
2986 static void
2987 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
2988     vm_map_entry_t entry)
2989 {
2990         vm_size_t entrysize;
2991         vm_offset_t newend;
2992
2993         entrysize = entry->end - entry->start;
2994         vm2->vm_map.size += entrysize;
2995         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
2996                 vm2->vm_ssize += btoc(entrysize);
2997         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
2998             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
2999                 newend = MIN(entry->end,
3000                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3001                 vm2->vm_dsize += btoc(newend - entry->start);
3002         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3003             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3004                 newend = MIN(entry->end,
3005                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3006                 vm2->vm_tsize += btoc(newend - entry->start);
3007         }
3008 }
3009
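/*
 * Worked example (illustrative): assume vm1's data segment covers 16 pages
 * starting at vm_daddr and that "entry" spans pages 12 through 19 of that
 * range.  newend is then clamped to vm_daddr + ctob(16), so only the four
 * in-segment pages are counted and vm2->vm_dsize grows by
 * btoc(4 * PAGE_SIZE) == 4.
 */
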
3010 /*
3011  * vmspace_fork:
3012  * Create a new process vmspace structure and vm_map
3013  * based on those of an existing process.  The new map
3014  * is based on the old map, according to the inheritance
3015  * values on the regions in that map.
3016  *
3017  * XXX It might be worth coalescing the entries added to the new vmspace.
3018  *
3019  * The source map must not be locked.
3020  */
3021 struct vmspace *
3022 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3023 {
3024         struct vmspace *vm2;
3025         vm_map_t old_map = &vm1->vm_map;
3026         vm_map_t new_map;
3027         vm_map_entry_t old_entry;
3028         vm_map_entry_t new_entry;
3029         vm_object_t object;
3030         int locked;
3031
3032         vm_map_lock(old_map);
3033         if (old_map->busy)
3034                 vm_map_wait_busy(old_map);
3035         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3036         if (vm2 == NULL)
3037                 goto unlock_and_return;
3038         vm2->vm_taddr = vm1->vm_taddr;
3039         vm2->vm_daddr = vm1->vm_daddr;
3040         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3041         new_map = &vm2->vm_map; /* XXX */
3042         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3043         KASSERT(locked, ("vmspace_fork: lock failed"));
3044         new_map->timestamp = 1;
3045
3046         old_entry = old_map->header.next;
3047
3048         while (old_entry != &old_map->header) {
3049                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3050                         panic("vm_map_fork: encountered a submap");
3051
3052                 switch (old_entry->inheritance) {
3053                 case VM_INHERIT_NONE:
3054                         break;
3055
3056                 case VM_INHERIT_SHARE:
3057                         /*
3058                          * Clone the entry, creating the shared object if necessary.
3059                          */
3060                         object = old_entry->object.vm_object;
3061                         if (object == NULL) {
3062                                 object = vm_object_allocate(OBJT_DEFAULT,
3063                                         atop(old_entry->end - old_entry->start));
3064                                 old_entry->object.vm_object = object;
3065                                 old_entry->offset = 0;
3066                                 if (old_entry->cred != NULL) {
3067                                         object->cred = old_entry->cred;
3068                                         object->charge = old_entry->end -
3069                                             old_entry->start;
3070                                         old_entry->cred = NULL;
3071                                 }
3072                         }
3073
3074                         /*
3075                          * Add the reference before calling vm_object_shadow
3076                          * to ensure that a shadow object is created.
3077                          */
3078                         vm_object_reference(object);
3079                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3080                                 vm_object_shadow(&old_entry->object.vm_object,
3081                                     &old_entry->offset,
3082                                     old_entry->end - old_entry->start);
3083                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3084                                 /* Transfer the second reference too. */
3085                                 vm_object_reference(
3086                                     old_entry->object.vm_object);
3087
3088                                 /*
3089                                  * As in vm_map_simplify_entry(), the
3090                                  * vnode lock will not be acquired in
3091                                  * this call to vm_object_deallocate().
3092                                  */
3093                                 vm_object_deallocate(object);
3094                                 object = old_entry->object.vm_object;
3095                         }
3096                         VM_OBJECT_LOCK(object);
3097                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
3098                         if (old_entry->cred != NULL) {
3099                                 KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3100                                 object->cred = old_entry->cred;
3101                                 object->charge = old_entry->end - old_entry->start;
3102                                 old_entry->cred = NULL;
3103                         }
3104                         VM_OBJECT_UNLOCK(object);
3105
3106                         /*
3107                          * Clone the entry, referencing the shared object.
3108                          */
3109                         new_entry = vm_map_entry_create(new_map);
3110                         *new_entry = *old_entry;
3111                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3112                             MAP_ENTRY_IN_TRANSITION);
3113                         new_entry->wired_count = 0;
3114
3115                         /*
3116                          * Insert the entry into the new map -- we know we're
3117                          * inserting at the end of the new map.
3118                          */
3119                         vm_map_entry_link(new_map, new_map->header.prev,
3120                             new_entry);
3121                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3122
3123                         /*
3124                          * Update the physical map
3125                          */
3126                         pmap_copy(new_map->pmap, old_map->pmap,
3127                             new_entry->start,
3128                             (old_entry->end - old_entry->start),
3129                             old_entry->start);
3130                         break;
3131
3132                 case VM_INHERIT_COPY:
3133                         /*
3134                          * Clone the entry and link into the map.
3135                          */
3136                         new_entry = vm_map_entry_create(new_map);
3137                         *new_entry = *old_entry;
3138                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3139                             MAP_ENTRY_IN_TRANSITION);
3140                         new_entry->wired_count = 0;
3141                         new_entry->object.vm_object = NULL;
3142                         new_entry->cred = NULL;
3143                         vm_map_entry_link(new_map, new_map->header.prev,
3144                             new_entry);
3145                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3146                         vm_map_copy_entry(old_map, new_map, old_entry,
3147                             new_entry, fork_charge);
3148                         break;
3149                 }
3150                 old_entry = old_entry->next;
3151         }
3152 unlock_and_return:
3153         vm_map_unlock(old_map);
3154         if (vm2 != NULL)
3155                 vm_map_unlock(new_map);
3156
3157         return (vm2);
3158 }
3159
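/*
 * Illustrative sketch (patterned on vmspace_unshare() below): a caller
 * forks the vmspace and then reserves the accumulated copy-on-write charge
 * against a credential; "p" is a hypothetical process pointer.
 *
 *	vm_ooffset_t fork_charge = 0;
 *	struct vmspace *vm2;
 *
 *	vm2 = vmspace_fork(p->p_vmspace, &fork_charge);
 *	if (vm2 == NULL)
 *		return (ENOMEM);
 *	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
 *		vmspace_free(vm2);
 *		return (ENOMEM);
 *	}
 */
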
3160 int
3161 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3162     vm_prot_t prot, vm_prot_t max, int cow)
3163 {
3164         vm_map_entry_t new_entry, prev_entry;
3165         vm_offset_t bot, top;
3166         vm_size_t init_ssize;
3167         int orient, rv;
3168         rlim_t vmemlim;
3169
3170         /*
3171          * The stack orientation is piggybacked with the cow argument.
3172          * Extract it into orient and mask the cow argument so that we
3173          * don't pass it around further.
3174          * NOTE: We explicitly allow bi-directional stacks.
3175          */
3176         orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3177         cow &= ~orient;
3178         KASSERT(orient != 0, ("No stack grow direction"));
3179
3180         if (addrbos < vm_map_min(map) ||
3181             addrbos > vm_map_max(map) ||
3182             addrbos + max_ssize < addrbos)
3183                 return (KERN_NO_SPACE);
3184
3185         init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz;
3186
3187         PROC_LOCK(curthread->td_proc);
3188         vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
3189         PROC_UNLOCK(curthread->td_proc);
3190
3191         vm_map_lock(map);
3192
3193         /* If addr is already mapped, no go */
3194         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3195                 vm_map_unlock(map);
3196                 return (KERN_NO_SPACE);
3197         }
3198
3199         /* If we would blow our VMEM resource limit, no go */
3200         if (map->size + init_ssize > vmemlim) {
3201                 vm_map_unlock(map);
3202                 return (KERN_NO_SPACE);
3203         }
3204
3205         /*
3206          * If we can't accommodate max_ssize in the current mapping, no go.
3207          * However, we need to be aware that subsequent user mappings might
3208          * map into the space we have reserved for stack, and currently this
3209          * space is not protected.
3210          *
3211          * Hopefully we will at least detect this condition when we try to
3212          * grow the stack.
3213          */
3214         if ((prev_entry->next != &map->header) &&
3215             (prev_entry->next->start < addrbos + max_ssize)) {
3216                 vm_map_unlock(map);
3217                 return (KERN_NO_SPACE);
3218         }
3219
3220         /*
3221          * We initially map a stack of only init_ssize.  We will grow as
3222          * needed later.  Depending on the orientation of the stack (i.e.
3223          * the grow direction) we either map at the top of the range, the
3224          * bottom of the range or in the middle.
3225          *
3226          * Note: we would normally expect prot and max to be VM_PROT_ALL,
3227          * and cow to be 0.  Possibly we should eliminate these as input
3228          * parameters, and just pass these values here in the insert call.
3229          */
3230         if (orient == MAP_STACK_GROWS_DOWN)
3231                 bot = addrbos + max_ssize - init_ssize;
3232         else if (orient == MAP_STACK_GROWS_UP)
3233                 bot = addrbos;
3234         else
3235                 bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3236         top = bot + init_ssize;
3237         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3238
3239         /* Now set the avail_ssize amount. */
3240         if (rv == KERN_SUCCESS) {
3241                 if (prev_entry != &map->header)
3242                         vm_map_clip_end(map, prev_entry, bot);
3243                 new_entry = prev_entry->next;
3244                 if (new_entry->end != top || new_entry->start != bot)
3245                         panic("Bad entry start/end for new stack entry");
3246
3247                 new_entry->avail_ssize = max_ssize - init_ssize;
3248                 if (orient & MAP_STACK_GROWS_DOWN)
3249                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3250                 if (orient & MAP_STACK_GROWS_UP)
3251                         new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3252         }
3253
3254         vm_map_unlock(map);
3255         return (rv);
3256 }
3257
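/*
 * Illustrative sketch: the grow direction rides in the cow argument, so a
 * hypothetical caller mapping a downward-growing stack would write ("map",
 * "bos", and "sz" are placeholders):
 *
 *	rv = vm_map_stack(map, bos, sz, VM_PROT_ALL, VM_PROT_ALL,
 *	    MAP_STACK_GROWS_DOWN);
 */
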
3258 static int stack_guard_page = 0;
3259 TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
3260 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
3261     &stack_guard_page, 0,
3262     "Insert stack guard page ahead of the growable segments.");
3263
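/*
 * The knob can be set as a loader tunable or, being CTLFLAG_RW, changed at
 * run time (illustrative):
 *
 *	security.bsd.stack_guard_page=1		(in loader.conf)
 *	sysctl security.bsd.stack_guard_page=1	(from a shell)
 */
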
3264 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3265  * desired address is already mapped, or if we successfully grow
3266  * the stack.  Also returns KERN_SUCCESS if addr is outside the
3267  * stack range (this is strange, but preserves compatibility with
3268  * the grow function in vm_machdep.c).
3269  */
3270 int
3271 vm_map_growstack(struct proc *p, vm_offset_t addr)
3272 {
3273         vm_map_entry_t next_entry, prev_entry;
3274         vm_map_entry_t new_entry, stack_entry;
3275         struct vmspace *vm = p->p_vmspace;
3276         vm_map_t map = &vm->vm_map;
3277         vm_offset_t end;
3278         size_t grow_amount, max_grow;
3279         rlim_t stacklim, vmemlim;
3280         int is_procstack, rv;
3281         struct ucred *cred;
3282
3283 Retry:
3284         PROC_LOCK(p);
3285         stacklim = lim_cur(p, RLIMIT_STACK);
3286         vmemlim = lim_cur(p, RLIMIT_VMEM);
3287         PROC_UNLOCK(p);
3288
3289         vm_map_lock_read(map);
3290
3291         /* If addr is already in the entry range, no need to grow. */
3292         if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3293                 vm_map_unlock_read(map);
3294                 return (KERN_SUCCESS);
3295         }
3296
3297         next_entry = prev_entry->next;
3298         if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3299                 /*
3300                  * This entry does not grow upwards. Since the address lies
3301                  * beyond this entry, the next entry (if one exists) has to
3302                  * be a downward growable entry. The entry list header is
3303                  * never a growable entry, so it suffices to check the flags.
3304                  */
3305                 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3306                         vm_map_unlock_read(map);
3307                         return (KERN_SUCCESS);
3308                 }
3309                 stack_entry = next_entry;
3310         } else {
3311                 /*
3312                  * This entry grows upward. If the next entry does not at
3313                  * least grow downwards, this is the entry we need to grow.
3314  *      Otherwise, we have two possible choices and we have to
3315                  * select one.
3316                  */
3317                 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3318                         /*
3319                          * We have two choices; grow the entry closest to
3320                          * the address to minimize the amount of growth.
3321                          */
3322                         if (addr - prev_entry->end <= next_entry->start - addr)
3323                                 stack_entry = prev_entry;
3324                         else
3325                                 stack_entry = next_entry;
3326                 } else
3327                         stack_entry = prev_entry;
3328         }
3329
3330         if (stack_entry == next_entry) {
3331                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("entry does not grow down"));
3332                 KASSERT(addr < stack_entry->start, ("addr not below stack entry"));
3333                 end = (prev_entry != &map->header) ? prev_entry->end :
3334                     stack_entry->start - stack_entry->avail_ssize;
3335                 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3336                 max_grow = stack_entry->start - end;
3337         } else {
3338                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("entry does not grow up"));
3339                 KASSERT(addr >= stack_entry->end, ("addr not above stack entry"));
3340                 end = (next_entry != &map->header) ? next_entry->start :
3341                     stack_entry->end + stack_entry->avail_ssize;
3342                 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3343                 max_grow = end - stack_entry->end;
3344         }
3345
3346         if (grow_amount > stack_entry->avail_ssize) {
3347                 vm_map_unlock_read(map);
3348                 return (KERN_NO_SPACE);
3349         }
3350
3351         /*
3352  *      If there is no longer enough space between the entries, fail and
3353  *      adjust the available space.  Note: this should only happen if the
3354          * user has mapped into the stack area after the stack was created,
3355          * and is probably an error.
3356          *
3357          * This also effectively destroys any guard page the user might have
3358          * intended by limiting the stack size.
3359          */
3360         if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3361                 if (vm_map_lock_upgrade(map))
3362                         goto Retry;
3363
3364                 stack_entry->avail_ssize = max_grow;
3365
3366                 vm_map_unlock(map);
3367                 return (KERN_NO_SPACE);
3368         }
3369
3370         is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
3371
3372         /*
3373          * If this is the main process stack, see if we're over the stack
3374          * limit.
3375          */
3376         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3377                 vm_map_unlock_read(map);
3378                 return (KERN_NO_SPACE);
3379         }
3380
3381         /* Round up the grow amount to a multiple of sgrowsiz. */
3382         grow_amount = roundup(grow_amount, sgrowsiz);
3383         if (grow_amount > stack_entry->avail_ssize)
3384                 grow_amount = stack_entry->avail_ssize;
3385         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3386                 grow_amount = trunc_page((vm_size_t)stacklim) -
3387                     ctob(vm->vm_ssize);
3388         }
3389
3390         /* If we would blow our VMEM resource limit, no go */
3391         if (map->size + grow_amount > vmemlim) {
3392                 vm_map_unlock_read(map);
3393                 return (KERN_NO_SPACE);
3394         }
3395
3396         if (vm_map_lock_upgrade(map))
3397                 goto Retry;
3398
3399         if (stack_entry == next_entry) {
3400                 /*
3401                  * Growing downward.
3402                  */
3403                 /* Get the preliminary new entry start value */
3404                 addr = stack_entry->start - grow_amount;
3405
3406                 /*
3407                  * If this puts us into the previous entry, cut back our
3408                  * growth to the available space. Also, see the note above.
3409                  */
3410                 if (addr < end) {
3411                         stack_entry->avail_ssize = max_grow;
3412                         addr = end;
3413                         if (stack_guard_page)
3414                                 addr += PAGE_SIZE;
3415                 }
3416
3417                 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3418                     p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
3419
3420                 /* Adjust the available stack space by the amount we grew. */
3421                 if (rv == KERN_SUCCESS) {
3422                         if (prev_entry != &map->header)
3423                                 vm_map_clip_end(map, prev_entry, addr);
3424                         new_entry = prev_entry->next;
3425                         KASSERT(new_entry == stack_entry->prev, ("new entry does not precede stack entry"));
3426                         KASSERT(new_entry->end == stack_entry->start, ("new entry not adjacent to stack entry"));
3427                         KASSERT(new_entry->start == addr, ("new entry does not start at addr"));
3428                         grow_amount = new_entry->end - new_entry->start;
3429                         new_entry->avail_ssize = stack_entry->avail_ssize -
3430                             grow_amount;
3431                         stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3432                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3433                 }
3434         } else {
3435                 /*
3436                  * Growing upward.
3437                  */
3438                 addr = stack_entry->end + grow_amount;
3439
3440                 /*
3441                  * If this puts us into the next entry, cut back our growth
3442                  * to the available space. Also, see the note above.
3443                  */
3444                 if (addr > end) {
3445                         stack_entry->avail_ssize = end - stack_entry->end;
3446                         addr = end;
3447                         if (stack_guard_page)
3448                                 addr -= PAGE_SIZE;
3449                 }
3450
3451                 grow_amount = addr - stack_entry->end;
3452                 cred = stack_entry->cred;
3453                 if (cred == NULL && stack_entry->object.vm_object != NULL)
3454                         cred = stack_entry->object.vm_object->cred;
3455                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3456                         rv = KERN_NO_SPACE;
3457                 /* Grow the underlying object if applicable. */
3458                 else if (stack_entry->object.vm_object == NULL ||
3459                          vm_object_coalesce(stack_entry->object.vm_object,
3460                          stack_entry->offset,
3461                          (vm_size_t)(stack_entry->end - stack_entry->start),
3462                          (vm_size_t)grow_amount, cred != NULL)) {
3463                         map->size += (addr - stack_entry->end);
3464                         /* Update the current entry. */
3465                         stack_entry->end = addr;
3466                         stack_entry->avail_ssize -= grow_amount;
3467                         vm_map_entry_resize_free(map, stack_entry);
3468                         rv = KERN_SUCCESS;
3469
3470                         if (next_entry != &map->header)
3471                                 vm_map_clip_start(map, next_entry, addr);
3472                 } else
3473                         rv = KERN_FAILURE;
3474         }
3475
3476         if (rv == KERN_SUCCESS && is_procstack)
3477                 vm->vm_ssize += btoc(grow_amount);
3478
3479         vm_map_unlock(map);
3480
3481         /*
3482          * Heed the MAP_WIREFUTURE flag if it was set for this process.
3483          */
3484         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3485                 vm_map_wire(map,
3486                     (stack_entry == next_entry) ? addr : addr - grow_amount,
3487                     (stack_entry == next_entry) ? stack_entry->start : addr,
3488                     (p->p_flag & P_SYSTEM)
3489                     ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3490                     : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3491         }
3492
3493         return (rv);
3494 }
3495
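/*
 * Illustrative sketch (modeled on the fault path, not copied from it):
 * when an address lookup fails below a stack, the handler asks the stack
 * to grow toward the faulting address and retries once.
 *
 *	if (vm_map_growstack(curproc, vaddr) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 *	... retry the failed vm_map_lookup() ...
 */
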
3496 /*
3497  * Unshare the specified VM space for exec.  If other processes are
3498  * mapped to it, then create a new one.  The new vmspace starts out empty.
3499  */
3500 int
3501 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3502 {
3503         struct vmspace *oldvmspace = p->p_vmspace;
3504         struct vmspace *newvmspace;
3505
3506         newvmspace = vmspace_alloc(minuser, maxuser);
3507         if (newvmspace == NULL)
3508                 return (ENOMEM);
3509         newvmspace->vm_swrss = oldvmspace->vm_swrss;
3510         /*
3511          * This code is written like this for prototype purposes.  The
3512          * goal is to avoid running down the vmspace here, but let the
3513  * other processes that are still using the vmspace finally
3514          * run it down.  Even though there is little or no chance of blocking
3515          * here, it is a good idea to keep this form for future mods.
3516          */
3517         PROC_VMSPACE_LOCK(p);
3518         p->p_vmspace = newvmspace;
3519         PROC_VMSPACE_UNLOCK(p);
3520         if (p == curthread->td_proc)
3521                 pmap_activate(curthread);
3522         vmspace_free(oldvmspace);
3523         return (0);
3524 }
3525
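/*
 * Illustrative sketch (patterned on the exec path; the sysentvec field
 * names are assumptions): an image activator swaps in a fresh address
 * space before building the new image.
 *
 *	error = vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
 *	if (error)
 *		return (error);
 *	map = &p->p_vmspace->vm_map;
 */
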
3526 /*
3527  * Unshare the specified VM space for forcing COW.  This
3528  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3529  */
3530 int
3531 vmspace_unshare(struct proc *p)
3532 {
3533         struct vmspace *oldvmspace = p->p_vmspace;
3534         struct vmspace *newvmspace;
3535         vm_ooffset_t fork_charge;
3536
3537         if (oldvmspace->vm_refcnt == 1)
3538                 return (0);
3539         fork_charge = 0;
3540         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3541         if (newvmspace == NULL)
3542                 return (ENOMEM);
3543         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3544                 vmspace_free(newvmspace);
3545                 return (ENOMEM);
3546         }
3547         PROC_VMSPACE_LOCK(p);
3548         p->p_vmspace = newvmspace;
3549         PROC_VMSPACE_UNLOCK(p);
3550         if (p == curthread->td_proc)
3551                 pmap_activate(curthread);
3552         vmspace_free(oldvmspace);
3553         return (0);
3554 }
3555
3556 /*
3557  *      vm_map_lookup:
3558  *
3559  *      Finds the VM object, offset, and
3560  *      protection for a given virtual address in the
3561  *      specified map, assuming a page fault of the
3562  *      type specified.
3563  *
3564  *      Leaves the map in question locked for read; return
3565  *      values are guaranteed until a vm_map_lookup_done
3566  *      call is performed.  Note that the map argument
3567  *      is in/out; the returned map must be used in
3568  *      the call to vm_map_lookup_done.
3569  *
3570  *      A handle (out_entry) is returned for use in
3571  *      vm_map_lookup_done, to make that fast.
3572  *
3573  *      If a lookup is requested with "write protection"
3574  *      specified, the map may be changed to perform virtual
3575  *      copying operations, although the data referenced will
3576  *      remain the same.
3577  */
3578 int
3579 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3580               vm_offset_t vaddr,
3581               vm_prot_t fault_typea,
3582               vm_map_entry_t *out_entry,        /* OUT */
3583               vm_object_t *object,              /* OUT */
3584               vm_pindex_t *pindex,              /* OUT */
3585               vm_prot_t *out_prot,              /* OUT */
3586               boolean_t *wired)                 /* OUT */
3587 {
3588         vm_map_entry_t entry;
3589         vm_map_t map = *var_map;
3590         vm_prot_t prot;
3591         vm_prot_t fault_type = fault_typea;
3592         vm_object_t eobject;
3593         vm_size_t size;
3594         struct ucred *cred;
3595
3596 RetryLookup:;
3597
3598         vm_map_lock_read(map);
3599
3600         /*
3601          * Lookup the faulting address.
3602          */
3603         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3604                 vm_map_unlock_read(map);
3605                 return (KERN_INVALID_ADDRESS);
3606         }
3607
3608         entry = *out_entry;
3609
3610         /*
3611          * Handle submaps.
3612          */
3613         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3614                 vm_map_t old_map = map;
3615
3616                 *var_map = map = entry->object.sub_map;
3617                 vm_map_unlock_read(old_map);
3618                 goto RetryLookup;
3619         }
3620
3621         /*
3622          * Check whether this task is allowed to have this page.
3623          */
3624         prot = entry->protection;
3625         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3626         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3627                 vm_map_unlock_read(map);
3628                 return (KERN_PROTECTION_FAILURE);
3629         }
3630         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3631             (entry->eflags & MAP_ENTRY_COW) &&
3632             (fault_type & VM_PROT_WRITE)) {
3633                 vm_map_unlock_read(map);
3634                 return (KERN_PROTECTION_FAILURE);
3635         }
3636
3637         /*
3638          * If this page is not pageable, we have to get it for all possible
3639          * accesses.
3640          */
3641         *wired = (entry->wired_count != 0);
3642         if (*wired)
3643                 fault_type = entry->protection;
3644         size = entry->end - entry->start;
3645         /*
3646          * If the entry was copy-on-write, handle it in one of two ways.
3647          */
3648         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3649                 /*
3650                  * If we want to write the page, we may as well handle that
3651                  * now since we've got the map locked.
3652                  *
3653                  * If we don't need to write the page, we just demote the
3654                  * permissions allowed.
3655                  */
3656                 if ((fault_type & VM_PROT_WRITE) != 0 ||
3657                     (fault_typea & VM_PROT_COPY) != 0) {
3658                         /*
3659                          * Make a new object, and place it in the object
3660                          * chain.  Note that no new references have appeared
3661                          * -- one just moved from the map to the new
3662                          * object.
3663                          */
3664                         if (vm_map_lock_upgrade(map))
3665                                 goto RetryLookup;
3666
3667                         if (entry->cred == NULL) {
3668                                 /*
3669                                  * The debugger owner is charged for
3670                                  * the memory.
3671                                  */
3672                                 cred = curthread->td_ucred;
3673                                 crhold(cred);
3674                                 if (!swap_reserve_by_cred(size, cred)) {
3675                                         crfree(cred);
3676                                         vm_map_unlock(map);
3677                                         return (KERN_RESOURCE_SHORTAGE);
3678                                 }
3679                                 entry->cred = cred;
3680                         }
3681                         vm_object_shadow(&entry->object.vm_object,
3682                             &entry->offset, size);
3683                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3684                         eobject = entry->object.vm_object;
3685                         if (eobject->cred != NULL) {
3686                                 /*
3687                                  * The object was not shadowed.
3688                                  */
3689                                 swap_release_by_cred(size, entry->cred);
3690                                 crfree(entry->cred);
3691                                 entry->cred = NULL;
3692                         } else if (entry->cred != NULL) {
3693                                 VM_OBJECT_LOCK(eobject);
3694                                 eobject->cred = entry->cred;
3695                                 eobject->charge = size;
3696                                 VM_OBJECT_UNLOCK(eobject);
3697                                 entry->cred = NULL;
3698                         }
3699
3700                         vm_map_lock_downgrade(map);
3701                 } else {
3702                         /*
3703                          * We're attempting to read a copy-on-write page --
3704                          * don't allow writes.
3705                          */
3706                         prot &= ~VM_PROT_WRITE;
3707                 }
3708         }
3709
3710         /*
3711          * Create an object if necessary.
3712          */
3713         if (entry->object.vm_object == NULL &&
3714             !map->system_map) {
3715                 if (vm_map_lock_upgrade(map))
3716                         goto RetryLookup;
3717                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3718                     atop(size));
3719                 entry->offset = 0;
3720                 if (entry->cred != NULL) {
3721                         VM_OBJECT_LOCK(entry->object.vm_object);
3722                         entry->object.vm_object->cred = entry->cred;
3723                         entry->object.vm_object->charge = size;
3724                         VM_OBJECT_UNLOCK(entry->object.vm_object);
3725                         entry->cred = NULL;
3726                 }
3727                 vm_map_lock_downgrade(map);
3728         }
3729
3730         /*
3731          * Return the object/offset from this entry.  If the entry was
3732          * copy-on-write or empty, it has been fixed up.
3733          */
3734         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3735         *object = entry->object.vm_object;
3736
3737         *out_prot = prot;
3738         return (KERN_SUCCESS);
3739 }
3740
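/*
 * Illustrative sketch: the results above are only stable until
 * vm_map_lookup_done() is called, so a hypothetical caller brackets its
 * use of the returned object ("map" and "vaddr" are placeholders):
 *
 *	result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (result != KERN_SUCCESS)
 *		return (result);
 *	... fault in the page at (object, pindex) ...
 *	vm_map_lookup_done(map, entry);
 */
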
3741 /*
3742  *      vm_map_lookup_locked:
3743  *
3744  *      Lookup the faulting address.  A version of vm_map_lookup that returns 
3745  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
3746  */
3747 int
3748 vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
3749                      vm_offset_t vaddr,
3750                      vm_prot_t fault_typea,
3751                      vm_map_entry_t *out_entry, /* OUT */
3752                      vm_object_t *object,       /* OUT */
3753                      vm_pindex_t *pindex,       /* OUT */
3754                      vm_prot_t *out_prot,       /* OUT */
3755                      boolean_t *wired)          /* OUT */
3756 {
3757         vm_map_entry_t entry;
3758         vm_map_t map = *var_map;
3759         vm_prot_t prot;
3760         vm_prot_t fault_type = fault_typea;
3761
3762         /*
3763          * Lookup the faulting address.
3764          */
3765         if (!vm_map_lookup_entry(map, vaddr, out_entry))
3766                 return (KERN_INVALID_ADDRESS);
3767
3768         entry = *out_entry;
3769
3770         /*
3771          * Fail if the entry refers to a submap.
3772          */
3773         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3774                 return (KERN_FAILURE);
3775
3776         /*
3777          * Check whether this task is allowed to have this page.
3778          */
3779         prot = entry->protection;
3780         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
3781         if ((fault_type & prot) != fault_type)
3782                 return (KERN_PROTECTION_FAILURE);
3783         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3784             (entry->eflags & MAP_ENTRY_COW) &&
3785             (fault_type & VM_PROT_WRITE))
3786                 return (KERN_PROTECTION_FAILURE);
3787
3788         /*
3789          * If this page is not pageable, we have to get it for all possible
3790          * accesses.
3791          */
3792         *wired = (entry->wired_count != 0);
3793         if (*wired)
3794                 fault_type = entry->protection;
3795
3796         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3797                 /*
3798                  * Fail if the entry was copy-on-write for a write fault.
3799                  */
3800                 if (fault_type & VM_PROT_WRITE)
3801                         return (KERN_FAILURE);
3802                 /*
3803                  * We're attempting to read a copy-on-write page --
3804                  * don't allow writes.
3805                  */
3806                 prot &= ~VM_PROT_WRITE;
3807         }
3808
3809         /*
3810          * Fail if an object should be created.
3811          */
3812         if (entry->object.vm_object == NULL && !map->system_map)
3813                 return (KERN_FAILURE);
3814
3815         /*
3816          * Return the object/offset from this entry.  If the entry was
3817          * copy-on-write or empty, it has been fixed up.
3818          */
3819         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3820         *object = entry->object.vm_object;
3821
3822         *out_prot = prot;
3823         return (KERN_SUCCESS);
3824 }
3825
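/*
 * Illustrative sketch: a caller that cannot sleep tries this variant first
 * and falls back to the blocking vm_map_lookup() only after dropping its
 * own locks (hypothetical control flow):
 *
 *	if (vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &entry,
 *	    &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
 *		... drop locks, then retry with vm_map_lookup() ...
 *	}
 */
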
3826 /*
3827  *      vm_map_lookup_done:
3828  *
3829  *      Releases locks acquired by a vm_map_lookup
3830  *      (according to the handle returned by that lookup).
3831  */
3832 void
3833 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
3834 {
3835         /*
3836          * Unlock the main-level map
3837          */
3838         vm_map_unlock_read(map);
3839 }
3840
3841 #include "opt_ddb.h"
3842 #ifdef DDB
3843 #include <sys/kernel.h>
3844
3845 #include <ddb/ddb.h>
3846
3847 /*
3848  *      vm_map_print:   [ debug ]
3849  */
3850 DB_SHOW_COMMAND(map, vm_map_print)
3851 {
3852         static int nlines;
3853         /* XXX convert args. */
3854         vm_map_t map = (vm_map_t)addr;
3855         boolean_t full = have_addr;
3856
3857         vm_map_entry_t entry;
3858
3859         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3860             (void *)map,
3861             (void *)map->pmap, map->nentries, map->timestamp);
3862         nlines++;
3863
3864         if (!full && db_indent)
3865                 return;
3866
3867         db_indent += 2;
3868         for (entry = map->header.next; entry != &map->header;
3869             entry = entry->next) {
3870                 db_iprintf("map entry %p: start=%p, end=%p\n",
3871                     (void *)entry, (void *)entry->start, (void *)entry->end);
3872                 nlines++;
3873                 {
3874                         static char *inheritance_name[4] =
3875                         {"share", "copy", "none", "donate_copy"};
3876
3877                         db_iprintf(" prot=%x/%x/%s",
3878                             entry->protection,
3879                             entry->max_protection,
3880                             inheritance_name[(int)(unsigned char)entry->inheritance]);
3881                         if (entry->wired_count != 0)
3882                                 db_printf(", wired");
3883                 }
3884                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3885                         db_printf(", share=%p, offset=0x%jx\n",
3886                             (void *)entry->object.sub_map,
3887                             (uintmax_t)entry->offset);
3888                         nlines++;
3889                         if ((entry->prev == &map->header) ||
3890                             (entry->prev->object.sub_map !=
3891                                 entry->object.sub_map)) {
3892                                 db_indent += 2;
3893                                 vm_map_print((db_expr_t)(intptr_t)
3894                                              entry->object.sub_map,
3895                                              full, 0, (char *)0);
3896                                 db_indent -= 2;
3897                         }
3898                 } else {
3899                         if (entry->cred != NULL)
3900                                 db_printf(", ruid %d", entry->cred->cr_ruid);
3901                         db_printf(", object=%p, offset=0x%jx",
3902                             (void *)entry->object.vm_object,
3903                             (uintmax_t)entry->offset);
3904                         if (entry->object.vm_object && entry->object.vm_object->cred)
3905                                 db_printf(", obj ruid %d charge %jx",
3906                                     entry->object.vm_object->cred->cr_ruid,
3907                                     (uintmax_t)entry->object.vm_object->charge);
3908                         if (entry->eflags & MAP_ENTRY_COW)
3909                                 db_printf(", copy (%s)",
3910                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3911                         db_printf("\n");
3912                         nlines++;
3913
3914                         if ((entry->prev == &map->header) ||
3915                             (entry->prev->object.vm_object !=
3916                                 entry->object.vm_object)) {
3917                                 db_indent += 2;
3918                                 vm_object_print((db_expr_t)(intptr_t)
3919                                                 entry->object.vm_object,
3920                                                 full, 0, (char *)0);
3921                                 nlines += 4;
3922                                 db_indent -= 2;
3923                         }
3924                 }
3925         }
3926         db_indent -= 2;
3927         if (db_indent == 0)
3928                 nlines = 0;
3929 }
3930
3931
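/*
 * Example ddb(4) usage (illustrative): "show map" requires a map address,
 * and supplying one also selects the full per-entry listing; "show
 * procvm" below defaults to the current process.
 *
 *	db> show map 0xc1234560
 *	db> show procvm
 */
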
3932 DB_SHOW_COMMAND(procvm, procvm)
3933 {
3934         struct proc *p;
3935
3936         if (have_addr) {
3937                 p = (struct proc *) addr;
3938         } else {
3939                 p = curproc;
3940         }
3941
3942         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3943             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3944             (void *)vmspace_pmap(p->p_vmspace));
3945
3946         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3947 }
3948
3949 #endif /* DDB */