/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *      Virtual memory maps provide for the mapping, protection,
 *      and sharing of virtual memory objects.  In addition,
 *      this module provides for an efficient virtual copy of
 *      memory from one map to another.
 *
 *      Synchronization is required prior to most operations.
 *
 *      Maps consist of an ordered doubly-linked list of simple
 *      entries; a self-adjusting binary search tree of these
 *      entries is used to speed up lookups.
 *
 *      Since portions of maps are specified by start/end addresses,
 *      which may not align with existing map entries, all
 *      routines merely "clip" entries to these start/end values.
 *      [That is, an entry is split into two, bordering at a
 *      start or end value.]  Note that these clippings may not
 *      always be necessary (as the two resulting entries are then
 *      not changed); however, the clipping is done for convenience.
 *
 *      As mentioned above, virtual copy operations are performed
 *      by copying VM object references from one map to
 *      another, and then marking both regions as copy-on-write.
 */
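
/*
 * [Editorial illustration, not from the original sources.]  Clipping an
 * entry covering [0x1000, 0x5000) at address 0x3000 yields two adjacent
 * entries that together cover the same range:
 *
 *      before:  [0x1000 .............................. 0x5000)
 *      after:   [0x1000 ...... 0x3000) [0x3000 ...... 0x5000)
 *
 * Both halves reference the same backing object; only start, end, and
 * offset differ, so a subsequent operation on [0x3000, 0x5000) can
 * modify the second entry alone.
 */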

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define PROC_VMSPACE_LOCK(p) do { } while (0)
#define PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *      VM_MAP_RANGE_CHECK:     [ internal use only ]
 *
 *      Asserts that the starting and ending region
 *      addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)             \
                {                                       \
                if (start < vm_map_min(map))            \
                        start = vm_map_min(map);        \
                if (end > vm_map_max(map))              \
                        end = vm_map_max(map);          \
                if (start > end)                        \
                        start = end;                    \
                }
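
/*
 * [Editorial usage sketch.]  Callers such as vm_map_fixed() below clamp
 * a caller-supplied range with this macro before operating on it:
 *
 *      vm_map_lock(map);
 *      VM_MAP_RANGE_CHECK(map, start, end);
 *      ... operate on [start, end) ...
 *      vm_map_unlock(map);
 *
 * Note that the macro modifies "start" and "end" in place and may
 * produce an empty range (start == end).
 */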

/*
 *      vm_map_startup:
 *
 *      Initialize the vm_map module.  Must be called before
 *      any other vm_map routines.
 *
 *      Map and entry structures are allocated from the general
 *      purpose memory pool with some exceptions:
 *
 *      - The kernel map and kmem submap are allocated statically.
 *      - Kernel map entries are allocated out of a static pool.
 *
 *      These restrictions are necessary since malloc() uses the
 *      maps and requires map entries.
 */

void
vm_map_startup(void)
{
        mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
        mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
            vm_map_zdtor,
#else
            NULL,
#endif
            vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        uma_prealloc(mapzone, MAX_KMAP);
        kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
        mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
            vmspace_zdtor,
#else
            NULL,
#endif
            vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm->vm_map.pmap = NULL;
        (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
        PMAP_LOCK_INIT(vmspace_pmap(vm));
        return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        memset(map, 0, sizeof(*map));
        mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
        sx_init(&map->lock, "vm map (user)");
        return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        KASSERT(map->nentries == 0,
            ("map %p nentries == %d on free.",
            map, map->nentries));
        KASSERT(map->size == 0,
            ("map %p size == %lu on free.",
            map, (unsigned long)map->size));
}
#endif  /* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
        struct vmspace *vm;

        vm = uma_zalloc(vmspace_zone, M_WAITOK);

        KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));

        if (pinit == NULL)
                pinit = &pmap_pinit;

        if (!pinit(vmspace_pmap(vm))) {
                uma_zfree(vmspace_zone, vm);
                return (NULL);
        }
        CTR1(KTR_VM, "vmspace_alloc: %p", vm);
        _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
        vm->vm_refcnt = 1;
        vm->vm_shm = NULL;
        vm->vm_swrss = 0;
        vm->vm_tsize = 0;
        vm->vm_dsize = 0;
        vm->vm_ssize = 0;
        vm->vm_taddr = 0;
        vm->vm_daddr = 0;
        vm->vm_maxsaddr = 0;
        return (vm);
}
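
/*
 * [Editorial usage sketch.]  A typical caller allocates a vmspace with
 * the default pmap initializer and drops its reference with
 * vmspace_free():
 *
 *      struct vmspace *vm;
 *
 *      vm = vmspace_alloc(minaddr, maxaddr, NULL);
 *      if (vm == NULL)
 *              return (ENOMEM);
 *      ...
 *      vmspace_free(vm);
 *
 * "minaddr" and "maxaddr" are placeholders for the map bounds; passing
 * NULL for "pinit" selects pmap_pinit(), per the comment above.
 */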

#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

        PROC_LOCK(p);
        racct_set(p, RACCT_DATA, 0);
        racct_set(p, RACCT_STACK, 0);
        racct_set(p, RACCT_RSS, 0);
        racct_set(p, RACCT_MEMLOCK, 0);
        racct_set(p, RACCT_VMEM, 0);
        PROC_UNLOCK(p);
}
#endif

static inline void
vmspace_dofree(struct vmspace *vm)
{

        CTR1(KTR_VM, "vmspace_free: %p", vm);

        /*
         * Make sure any SysV shm is freed, it might not have been in
         * exit1().
         */
        shmexit(vm);

        /*
         * Lock the map, to wait out all other references to it.
         * Delete all of the mappings and pages they hold, then call
         * the pmap module to reclaim anything left.
         */
        (void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
            vm->vm_map.max_offset);

        pmap_release(vmspace_pmap(vm));
        vm->vm_map.pmap = NULL;
        uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "vmspace_free() called with non-sleepable lock held");

        if (vm->vm_refcnt == 0)
                panic("vmspace_free: attempt to free already freed vmspace");

        if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
                vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
        struct vmspace *vm;

        PROC_VMSPACE_LOCK(p);
        vm = p->p_vmspace;
        p->p_vmspace = NULL;
        PROC_VMSPACE_UNLOCK(p);
        KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
        vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
        int refcnt;
        struct vmspace *vm;
        struct proc *p;

        /*
         * Release user portion of address space.
         * This releases references to vnodes,
         * which could cause I/O if the file has been unlinked.
         * Need to do this early enough that we can still sleep.
         *
         * The last exiting process to reach this point releases as
         * much of the environment as it can. vmspace_dofree() is the
         * slower fallback in case another process had a temporary
         * reference to the vmspace.
         */

        p = td->td_proc;
        vm = p->p_vmspace;
        atomic_add_int(&vmspace0.vm_refcnt, 1);
        do {
                refcnt = vm->vm_refcnt;
                if (refcnt > 1 && p->p_vmspace != &vmspace0) {
                        /* Switch now since other proc might free vmspace */
                        PROC_VMSPACE_LOCK(p);
                        p->p_vmspace = &vmspace0;
                        PROC_VMSPACE_UNLOCK(p);
                        pmap_activate(td);
                }
        } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
        if (refcnt == 1) {
                if (p->p_vmspace != vm) {
                        /* vmspace not yet freed, switch back */
                        PROC_VMSPACE_LOCK(p);
                        p->p_vmspace = vm;
                        PROC_VMSPACE_UNLOCK(p);
                        pmap_activate(td);
                }
                pmap_remove_pages(vmspace_pmap(vm));
                /* Switch now since this proc will free vmspace */
                PROC_VMSPACE_LOCK(p);
                p->p_vmspace = &vmspace0;
                PROC_VMSPACE_UNLOCK(p);
                pmap_activate(td);
                vmspace_dofree(vm);
        }
#ifdef RACCT
        if (racct_enable)
                vmspace_container_reset(p);
#endif
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
        struct vmspace *vm;
        int refcnt;

        PROC_VMSPACE_LOCK(p);
        vm = p->p_vmspace;
        if (vm == NULL) {
                PROC_VMSPACE_UNLOCK(p);
                return (NULL);
        }
        do {
                refcnt = vm->vm_refcnt;
                if (refcnt <= 0) {      /* Avoid 0->1 transition */
                        PROC_VMSPACE_UNLOCK(p);
                        return (NULL);
                }
        } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
        if (vm != p->p_vmspace) {
                PROC_VMSPACE_UNLOCK(p);
                vmspace_free(vm);
                return (NULL);
        }
        PROC_VMSPACE_UNLOCK(p);
        return (vm);
}
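
/*
 * [Editorial usage sketch.]  A consumer inspecting another process's
 * address space pairs the acquire with vmspace_free() once done:
 *
 *      struct vmspace *vm;
 *
 *      vm = vmspace_acquire_ref(p);
 *      if (vm == NULL)
 *              return (ESRCH);
 *      ... read vm->vm_map under the map lock ...
 *      vmspace_free(vm);
 *
 * The re-check of p->p_vmspace above guards against the process
 * swapping its vmspace between the load and the reference bump.
 */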

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_lock_flags_(&map->system_mtx, 0, file, line);
        else
                sx_xlock_(&map->lock, file, line);
        map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
        struct thread *td;
        vm_map_entry_t entry, next;
        vm_object_t object;

        td = curthread;
        entry = td->td_map_def_user;
        td->td_map_def_user = NULL;
        while (entry != NULL) {
                next = entry->next;
                if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
                        /*
                         * Decrement the object's writemappings and
                         * possibly the vnode's v_writecount.
                         */
                        KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
                            ("Submap with writecount"));
                        object = entry->object.vm_object;
                        KASSERT(object != NULL, ("No object for writecount"));
                        vnode_pager_release_writecount(object, entry->start,
                            entry->end);
                }
                vm_map_entry_deallocate(entry, FALSE);
                entry = next;
        }
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_unlock_flags_(&map->system_mtx, 0, file, line);
        else {
                sx_xunlock_(&map->lock, file, line);
                vm_map_process_deferred();
        }
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_lock_flags_(&map->system_mtx, 0, file, line);
        else
                sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_unlock_flags_(&map->system_mtx, 0, file, line);
        else {
                sx_sunlock_(&map->lock, file, line);
                vm_map_process_deferred();
        }
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
        int error;

        error = map->system_map ?
            !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
            !sx_try_xlock_(&map->lock, file, line);
        if (error == 0)
                map->timestamp++;
        return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
        int error;

        error = map->system_map ?
            !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
            !sx_try_slock_(&map->lock, file, line);
        return (error == 0);
}

/*
 *      _vm_map_lock_upgrade:   [ internal use only ]
 *
 *      Tries to upgrade a read (shared) lock on the specified map to a write
 *      (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *      non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *      returned without a read or write lock held.
 *
 *      Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
        unsigned int last_timestamp;

        if (map->system_map) {
                mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
        } else {
                if (!sx_try_upgrade_(&map->lock, file, line)) {
                        last_timestamp = map->timestamp;
                        sx_sunlock_(&map->lock, file, line);
                        vm_map_process_deferred();
                        /*
                         * If the map's timestamp does not change while the
                         * map is unlocked, then the upgrade succeeds.
                         */
                        sx_xlock_(&map->lock, file, line);
                        if (last_timestamp != map->timestamp) {
                                sx_xunlock_(&map->lock, file, line);
                                return (1);
                        }
                }
        }
        map->timestamp++;
        return (0);
}
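
/*
 * [Editorial usage sketch.]  Because a failed upgrade leaves the map
 * unlocked, callers must revalidate any state derived under the read
 * lock:
 *
 *      vm_map_lock_read(map);
 *      ... find entry ...
 *      if (vm_map_lock_upgrade(map) != 0) {
 *              vm_map_lock(map);
 *              ... re-lookup the entry; the map may have changed ...
 *      }
 *      ... modify the map ...
 *      vm_map_unlock(map);
 */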

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

        if (map->system_map) {
                mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
        } else
                sx_downgrade_(&map->lock, file, line);
}

/*
 *      vm_map_locked:
 *
 *      Returns a non-zero value if the caller holds a write (exclusive) lock
 *      on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

        if (map->system_map)
                return (mtx_owned(&map->system_mtx));
        else
                return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
        else
                sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *      _vm_map_unlock_and_wait:
 *
 *      Atomically releases the lock on the specified map and puts the calling
 *      thread to sleep.  The calling thread will remain asleep until either
 *      vm_map_wakeup() is performed on the map or the specified timeout is
 *      exceeded.
 *
 *      WARNING!  This function does not perform deferred deallocations of
 *      objects and map entries.  Therefore, the calling thread is expected to
 *      reacquire the map lock after reawakening and later perform an ordinary
 *      unlock operation, such as vm_map_unlock(), before completing its
 *      operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

        mtx_lock(&map_sleep_mtx);
        if (map->system_map)
                mtx_unlock_flags_(&map->system_mtx, 0, file, line);
        else
                sx_xunlock_(&map->lock, file, line);
        return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
            timo));
}
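
/*
 * [Editorial usage sketch.]  The canonical pattern re-locks the map
 * after waking and finishes with an ordinary unlock, so that deferred
 * deallocations are eventually processed:
 *
 *      while (... condition on the map ...) {
 *              map->needs_wakeup = TRUE;
 *              vm_map_unlock_and_wait(map, 0);
 *              vm_map_lock(map);
 *      }
 *      vm_map_unlock(map);
 */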

/*
 *      vm_map_wakeup:
 *
 *      Awaken any threads that have slept on the map using
 *      vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

        /*
         * Acquire and release map_sleep_mtx to prevent a wakeup()
         * from being performed (and lost) between the map unlock
         * and the msleep() in _vm_map_unlock_and_wait().
         */
        mtx_lock(&map_sleep_mtx);
        mtx_unlock(&map_sleep_mtx);
        wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        KASSERT(map->busy, ("vm_map_unbusy: not busy"));
        if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
                vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
                wakeup(&map->busy);
        }
}

void
vm_map_wait_busy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        while (map->busy) {
                vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
                if (map->system_map)
                        msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
                else
                        sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
        }
        map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
        return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *      vm_map_create:
 *
 *      Creates and returns a new empty VM map with
 *      the given physical map structure, and having
 *      the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
        vm_map_t result;

        result = uma_zalloc(mapzone, M_WAITOK);
        CTR1(KTR_VM, "vm_map_create: %p", result);
        _vm_map_init(result, pmap, min, max);
        return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

        map->header.next = map->header.prev = &map->header;
        map->needs_wakeup = FALSE;
        map->system_map = 0;
        map->pmap = pmap;
        map->min_offset = min;
        map->max_offset = max;
        map->flags = 0;
        map->root = NULL;
        map->timestamp = 0;
        map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

        _vm_map_init(map, pmap, min, max);
        mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
        sx_init(&map->lock, "user map");
}

/*
 *      vm_map_entry_dispose:   [ internal use only ]
 *
 *      Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
        uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *      vm_map_entry_create:    [ internal use only ]
 *
 *      Allocates a VM map entry for insertion.
 *      No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
        vm_map_entry_t new_entry;

        if (map->system_map)
                new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
        else
                new_entry = uma_zalloc(mapentzone, M_WAITOK);
        if (new_entry == NULL)
                panic("vm_map_entry_create: kernel resources exhausted");
        return (new_entry);
}

/*
 *      vm_map_entry_set_behavior:
 *
 *      Set the expected access behavior, either normal, random, or
 *      sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
        entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
            (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *      vm_map_entry_set_max_free:
 *
 *      Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

        entry->max_free = entry->adj_free;
        if (entry->left != NULL && entry->left->max_free > entry->max_free)
                entry->max_free = entry->left->max_free;
        if (entry->right != NULL && entry->right->max_free > entry->max_free)
                entry->max_free = entry->right->max_free;
}
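
/*
 * [Editorial worked example.]  If an entry has adj_free == 4 pages, a
 * left child with max_free == 16 pages, and a right child with
 * max_free == 8 pages, the entry's max_free becomes 16: the largest
 * contiguous gap anywhere in its subtree.  Maintaining this value
 * bottom-up is what lets vm_map_findspace() skip whole subtrees.
 */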

/*
 *      vm_map_entry_splay:
 *
 *      The Sleator and Tarjan top-down splay algorithm with the
 *      following variation.  Max_free must be computed bottom-up, so
 *      on the downward pass, maintain the left and right spines in
 *      reverse order.  Then, make a second pass up each side to fix
 *      the pointers and compute max_free.  The time bound is O(log n)
 *      amortized.
 *
 *      The new root is the vm_map_entry containing "addr", or else an
 *      adjacent entry (lower or higher) if addr is not in the tree.
 *
 *      The map must be locked, and leaves it so.
 *
 *      Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
        vm_map_entry_t llist, rlist;
        vm_map_entry_t ltree, rtree;
        vm_map_entry_t y;

        /* Special case of empty tree. */
        if (root == NULL)
                return (root);

        /*
         * Pass One: Splay down the tree until we find addr or a NULL
         * pointer where addr would go.  llist and rlist are the two
         * sides in reverse order (bottom-up), with llist linked by
         * the right pointer and rlist linked by the left pointer in
         * the vm_map_entry.  Wait until Pass Two to set max_free on
         * the two spines.
         */
        llist = NULL;
        rlist = NULL;
        for (;;) {
                /* root is never NULL in here. */
                if (addr < root->start) {
                        y = root->left;
                        if (y == NULL)
                                break;
                        if (addr < y->start && y->left != NULL) {
                                /* Rotate right and put y on rlist. */
                                root->left = y->right;
                                y->right = root;
                                vm_map_entry_set_max_free(root);
                                root = y->left;
                                y->left = rlist;
                                rlist = y;
                        } else {
                                /* Put root on rlist. */
                                root->left = rlist;
                                rlist = root;
                                root = y;
                        }
                } else if (addr >= root->end) {
                        y = root->right;
                        if (y == NULL)
                                break;
                        if (addr >= y->end && y->right != NULL) {
                                /* Rotate left and put y on llist. */
                                root->right = y->left;
                                y->left = root;
                                vm_map_entry_set_max_free(root);
                                root = y->right;
                                y->right = llist;
                                llist = y;
                        } else {
                                /* Put root on llist. */
                                root->right = llist;
                                llist = root;
                                root = y;
                        }
                } else
                        break;
        }

        /*
         * Pass Two: Walk back up the two spines, flip the pointers
         * and set max_free.  The subtrees of the root go at the
         * bottom of llist and rlist.
         */
        ltree = root->left;
        while (llist != NULL) {
                y = llist->right;
                llist->right = ltree;
                vm_map_entry_set_max_free(llist);
                ltree = llist;
                llist = y;
        }
        rtree = root->right;
        while (rlist != NULL) {
                y = rlist->left;
                rlist->left = rtree;
                vm_map_entry_set_max_free(rlist);
                rtree = rlist;
                rlist = y;
        }

        /*
         * Final assembly: add ltree and rtree as subtrees of root.
         */
        root->left = ltree;
        root->right = rtree;
        vm_map_entry_set_max_free(root);

        return (root);
}
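
/*
 * [Editorial note.]  For example, splaying for address 0x7000 in a tree
 * holding [0x1000, 0x2000) and [0x8000, 0x9000) returns one of those
 * two entries as the new root (the nearest entry below or above
 * 0x7000), never NULL, and max_free is recomputed for every node
 * touched on the way back up.
 */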

/*
 *      vm_map_entry_{un,}link:
 *
 *      Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
                  vm_map_entry_t after_where,
                  vm_map_entry_t entry)
{

        CTR4(KTR_VM,
            "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
            map->nentries, entry, after_where);
        VM_MAP_ASSERT_LOCKED(map);
        KASSERT(after_where == &map->header ||
            after_where->end <= entry->start,
            ("vm_map_entry_link: prev end %jx new start %jx overlap",
            (uintmax_t)after_where->end, (uintmax_t)entry->start));
        KASSERT(after_where->next == &map->header ||
            entry->end <= after_where->next->start,
            ("vm_map_entry_link: new end %jx next start %jx overlap",
            (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

        map->nentries++;
        entry->prev = after_where;
        entry->next = after_where->next;
        entry->next->prev = entry;
        after_where->next = entry;

        if (after_where != &map->header) {
                if (after_where != map->root)
                        vm_map_entry_splay(after_where->start, map->root);
                entry->right = after_where->right;
                entry->left = after_where;
                after_where->right = NULL;
                after_where->adj_free = entry->start - after_where->end;
                vm_map_entry_set_max_free(after_where);
        } else {
                entry->right = map->root;
                entry->left = NULL;
        }
        entry->adj_free = (entry->next == &map->header ? map->max_offset :
            entry->next->start) - entry->end;
        vm_map_entry_set_max_free(entry);
        map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
                    vm_map_entry_t entry)
{
        vm_map_entry_t next, prev, root;

        VM_MAP_ASSERT_LOCKED(map);
        if (entry != map->root)
                vm_map_entry_splay(entry->start, map->root);
        if (entry->left == NULL)
                root = entry->right;
        else {
                root = vm_map_entry_splay(entry->start, entry->left);
                root->right = entry->right;
                root->adj_free = (entry->next == &map->header ? map->max_offset :
                    entry->next->start) - root->end;
                vm_map_entry_set_max_free(root);
        }
        map->root = root;

        prev = entry->prev;
        next = entry->next;
        next->prev = prev;
        prev->next = next;
        map->nentries--;
        CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
            map->nentries, entry);
}

/*
 *      vm_map_entry_resize_free:
 *
 *      Recompute the amount of free space following a vm_map_entry
 *      and propagate that value up the tree.  Call this function after
 *      resizing a map entry in-place, that is, without a call to
 *      vm_map_entry_link() or _unlink().
 *
 *      The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

        /*
         * Using splay trees without parent pointers, propagating
         * max_free up the tree is done by moving the entry to the
         * root and making the change there.
         */
        if (entry != map->root)
                map->root = vm_map_entry_splay(entry->start, map->root);

        entry->adj_free = (entry->next == &map->header ? map->max_offset :
            entry->next->start) - entry->end;
        vm_map_entry_set_max_free(entry);
}

/*
 *      vm_map_lookup_entry:    [ internal use only ]
 *
 *      Finds the map entry containing (or
 *      immediately preceding) the specified address
 *      in the given map; the entry is returned
 *      in the "entry" parameter.  The boolean
 *      result indicates whether the address is
 *      actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
        vm_map_t map,
        vm_offset_t address,
        vm_map_entry_t *entry)  /* OUT */
{
        vm_map_entry_t cur;
        boolean_t locked;

        /*
         * If the map is empty, then the map entry immediately preceding
         * "address" is the map's header.
         */
        cur = map->root;
        if (cur == NULL)
                *entry = &map->header;
        else if (address >= cur->start && cur->end > address) {
                *entry = cur;
                return (TRUE);
        } else if ((locked = vm_map_locked(map)) ||
            sx_try_upgrade(&map->lock)) {
                /*
                 * Splay requires a write lock on the map.  However, it only
                 * restructures the binary search tree; it does not otherwise
                 * change the map.  Thus, the map's timestamp need not change
                 * on a temporary upgrade.
                 */
                map->root = cur = vm_map_entry_splay(address, cur);
                if (!locked)
                        sx_downgrade(&map->lock);

                /*
                 * If "address" is contained within a map entry, the new root
                 * is that map entry.  Otherwise, the new root is a map entry
                 * immediately before or after "address".
                 */
                if (address >= cur->start) {
                        *entry = cur;
                        if (cur->end > address)
                                return (TRUE);
                } else
                        *entry = cur->prev;
        } else
                /*
                 * Since the map is only locked for read access, perform a
                 * standard binary search tree lookup for "address".
                 */
                for (;;) {
                        if (address < cur->start) {
                                if (cur->left == NULL) {
                                        *entry = cur->prev;
                                        break;
                                }
                                cur = cur->left;
                        } else if (cur->end > address) {
                                *entry = cur;
                                return (TRUE);
                        } else {
                                if (cur->right == NULL) {
                                        *entry = cur;
                                        break;
                                }
                                cur = cur->right;
                        }
                }
        return (FALSE);
}
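
/*
 * [Editorial usage sketch, map at least read-locked.]
 *
 *      vm_map_entry_t entry;
 *
 *      if (vm_map_lookup_entry(map, addr, &entry)) {
 *              ... addr lies within [entry->start, entry->end) ...
 *      } else {
 *              ... entry precedes addr (possibly &map->header); the
 *              first entry at or above addr is entry->next ...
 *      }
 */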

/*
 *      vm_map_insert:
 *
 *      Inserts the given whole VM object into the target
 *      map at the specified address range.  The object's
 *      size should match that of the address range.
 *
 *      Requires that the map be locked, and leaves it so.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
        vm_map_entry_t new_entry, prev_entry, temp_entry;
        struct ucred *cred;
        vm_eflags_t protoeflags;
        vm_inherit_t inheritance;

        VM_MAP_ASSERT_LOCKED(map);
        KASSERT((object != kmem_object && object != kernel_object) ||
            (cow & MAP_COPY_ON_WRITE) == 0,
            ("vm_map_insert: kmem or kernel object and COW"));
        KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
            ("vm_map_insert: paradoxical MAP_NOFAULT request"));

        /*
         * Check that the start and end points are not bogus.
         */
        if (start < map->min_offset || end > map->max_offset || start >= end)
                return (KERN_INVALID_ADDRESS);

        /*
         * Find the entry prior to the proposed starting address; if it's part
         * of an existing entry, this range is bogus.
         */
        if (vm_map_lookup_entry(map, start, &temp_entry))
                return (KERN_NO_SPACE);

        prev_entry = temp_entry;

        /*
         * Assert that the next entry doesn't overlap the end point.
         */
        if (prev_entry->next != &map->header && prev_entry->next->start < end)
                return (KERN_NO_SPACE);

        protoeflags = 0;
        if (cow & MAP_COPY_ON_WRITE)
                protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
        if (cow & MAP_NOFAULT)
                protoeflags |= MAP_ENTRY_NOFAULT;
        if (cow & MAP_DISABLE_SYNCER)
                protoeflags |= MAP_ENTRY_NOSYNC;
        if (cow & MAP_DISABLE_COREDUMP)
                protoeflags |= MAP_ENTRY_NOCOREDUMP;
        if (cow & MAP_STACK_GROWS_DOWN)
                protoeflags |= MAP_ENTRY_GROWS_DOWN;
        if (cow & MAP_STACK_GROWS_UP)
                protoeflags |= MAP_ENTRY_GROWS_UP;
        if (cow & MAP_VN_WRITECOUNT)
                protoeflags |= MAP_ENTRY_VN_WRITECNT;
        if (cow & MAP_INHERIT_SHARE)
                inheritance = VM_INHERIT_SHARE;
        else
                inheritance = VM_INHERIT_DEFAULT;

        cred = NULL;
        if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
                goto charged;
        if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
            ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
                if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
                        return (KERN_RESOURCE_SHORTAGE);
                KASSERT(object == NULL ||
                    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
                    object->cred == NULL,
                    ("overcommit: vm_map_insert o %p", object));
                cred = curthread->td_ucred;
        }

charged:
        /* Expand the kernel pmap, if necessary. */
        if (map == kernel_map && end > kernel_vm_end)
                pmap_growkernel(end);
        if (object != NULL) {
                /*
                 * OBJ_ONEMAPPING must be cleared unless this mapping
                 * is trivially proven to be the only mapping for any
                 * of the object's pages.  (Object granularity
                 * reference counting is insufficient to recognize
                 * aliases with precision.)
                 */
                VM_OBJECT_WLOCK(object);
                if (object->ref_count > 1 || object->shadow_count != 0)
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);
                VM_OBJECT_WUNLOCK(object);
        } else if (prev_entry != &map->header &&
            prev_entry->eflags == protoeflags &&
            (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
            prev_entry->end == start && prev_entry->wired_count == 0 &&
            (prev_entry->cred == cred ||
            (prev_entry->object.vm_object != NULL &&
            prev_entry->object.vm_object->cred == cred)) &&
            vm_object_coalesce(prev_entry->object.vm_object,
            prev_entry->offset,
            (vm_size_t)(prev_entry->end - prev_entry->start),
            (vm_size_t)(end - prev_entry->end), cred != NULL &&
            (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
                /*
                 * We were able to extend the object.  Determine if we
                 * can extend the previous map entry to include the
                 * new range as well.
                 */
                if (prev_entry->inheritance == inheritance &&
                    prev_entry->protection == prot &&
                    prev_entry->max_protection == max) {
                        map->size += end - prev_entry->end;
                        prev_entry->end = end;
                        vm_map_entry_resize_free(map, prev_entry);
                        vm_map_simplify_entry(map, prev_entry);
                        return (KERN_SUCCESS);
                }

                /*
                 * If we can extend the object but cannot extend the
                 * map entry, we have to create a new map entry.  We
                 * must bump the ref count on the extended object to
                 * account for it.  object may be NULL.
                 */
                object = prev_entry->object.vm_object;
                offset = prev_entry->offset +
                    (prev_entry->end - prev_entry->start);
                vm_object_reference(object);
                if (cred != NULL && object != NULL && object->cred != NULL &&
                    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
                        /* Object already accounts for this uid. */
                        cred = NULL;
                }
        }
        if (cred != NULL)
                crhold(cred);

        /*
         * Create a new entry
         */
        new_entry = vm_map_entry_create(map);
        new_entry->start = start;
        new_entry->end = end;
        new_entry->cred = NULL;

        new_entry->eflags = protoeflags;
        new_entry->object.vm_object = object;
        new_entry->offset = offset;
        new_entry->avail_ssize = 0;

        new_entry->inheritance = inheritance;
        new_entry->protection = prot;
        new_entry->max_protection = max;
        new_entry->wired_count = 0;
        new_entry->wiring_thread = NULL;
        new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
        new_entry->next_read = OFF_TO_IDX(offset);

        KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
            ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
        new_entry->cred = cred;

        /*
         * Insert the new entry into the list
         */
        vm_map_entry_link(map, prev_entry, new_entry);
        map->size += new_entry->end - new_entry->start;

        /*
         * Try to coalesce the new entry with both the previous and next
         * entries in the list.  Previously, we only attempted to coalesce
         * with the previous entry when object is NULL.  Here, we handle the
         * other cases, which are less common.
         */
        vm_map_simplify_entry(map, new_entry);

        if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
                vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
                    end - start, cow & MAP_PREFAULT_PARTIAL);
        }

        return (KERN_SUCCESS);
}
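
/*
 * [Editorial usage sketch.]  A caller mapping an already-referenced
 * object at a pre-validated range:
 *
 *      vm_object_reference(object);    -- caller supplies the ref
 *      vm_map_lock(map);
 *      rv = vm_map_insert(map, object, 0, start, end,
 *          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *      vm_map_unlock(map);
 *      if (rv != KERN_SUCCESS)
 *              vm_object_deallocate(object);
 */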

/*
 *      vm_map_findspace:
 *
 *      Find the first fit (lowest VM address) for "length" free bytes
 *      beginning at address >= start in the given map.
 *
 *      In a vm_map_entry, "adj_free" is the amount of free space
 *      adjacent (higher address) to this entry, and "max_free" is the
 *      maximum amount of contiguous free space in its subtree.  This
 *      allows finding a free region in one path down the tree, so
 *      O(log n) amortized with splay trees.
 *
 *      The map must be locked, and leaves it so.
 *
 *      Returns: 0 on success, and starting address in *addr,
 *               1 if insufficient space.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)  /* OUT */
{
        vm_map_entry_t entry;
        vm_offset_t st;

        /*
         * Request must fit within min/max VM address and must avoid
         * address wrap.
         */
        if (start < map->min_offset)
                start = map->min_offset;
        if (start + length > map->max_offset || start + length < start)
                return (1);

        /* Empty tree means wide open address space. */
        if (map->root == NULL) {
                *addr = start;
                return (0);
        }

        /*
         * After splay, if start comes before root node, then there
         * must be a gap from start to the root.
         */
        map->root = vm_map_entry_splay(start, map->root);
        if (start + length <= map->root->start) {
                *addr = start;
                return (0);
        }

        /*
         * Root is the last node that might begin its gap before
         * start, and this is the last comparison where address
         * wrap might be a problem.
         */
        st = (start > map->root->end) ? start : map->root->end;
        if (length <= map->root->end + map->root->adj_free - st) {
                *addr = st;
                return (0);
        }

        /* With max_free, can immediately tell if no solution. */
        entry = map->root->right;
        if (entry == NULL || length > entry->max_free)
                return (1);

        /*
         * Search the right subtree in the order: left subtree, root,
         * right subtree (first fit).  The previous splay implies that
         * all regions in the right subtree have addresses > start.
         */
        while (entry != NULL) {
                if (entry->left != NULL && entry->left->max_free >= length)
                        entry = entry->left;
                else if (entry->adj_free >= length) {
                        *addr = entry->end;
                        return (0);
                } else
                        entry = entry->right;
        }

        /* Can't get here, so panic if we do. */
        panic("vm_map_findspace: max_free corrupt");
}
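
/*
 * [Editorial worked example.]  With a root entry ending at 0x2000 and
 * the next entry starting at 0x6000, root->adj_free == 0x4000.  A
 * request for length 0x3000 from start 0x1000 computes
 * st = max(start, root->end) = 0x2000; since
 * 0x3000 <= 0x2000 + 0x4000 - 0x2000, it succeeds with *addr = 0x2000.
 * Otherwise the search descends the right subtree, pruning any subtree
 * whose max_free is smaller than the request.
 */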
1394
1395 int
1396 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1397     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1398     vm_prot_t max, int cow)
1399 {
1400         vm_offset_t end;
1401         int result;
1402
1403         end = start + length;
1404         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1405             object == NULL,
1406             ("vm_map_fixed: non-NULL backing object for stack"));
1407         vm_map_lock(map);
1408         VM_MAP_RANGE_CHECK(map, start, end);
1409         if ((cow & MAP_CHECK_EXCL) == 0)
1410                 vm_map_delete(map, start, end);
1411         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1412                 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1413                     prot, max, cow);
1414         } else {
1415                 result = vm_map_insert(map, object, offset, start, end,
1416                     prot, max, cow);
1417         }
1418         vm_map_unlock(map);
1419         return (result);
1420 }
1421
1422 /*
1423  *      vm_map_find finds an unallocated region in the target address
1424  *      map with the given length.  The search is defined to be
1425  *      first-fit from the specified address; the region found is
1426  *      returned in the same parameter.
1427  *
1428  *      If object is non-NULL, ref count must be bumped by caller
1429  *      prior to making call to account for the new entry.
1430  */
1431 int
1432 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1433             vm_offset_t *addr,  /* IN/OUT */
1434             vm_size_t length, vm_offset_t max_addr, int find_space,
1435             vm_prot_t prot, vm_prot_t max, int cow)
1436 {
1437         vm_offset_t alignment, initial_addr, start;
1438         int result;
1439
1440         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1441             object == NULL,
1442             ("vm_map_find: non-NULL backing object for stack"));
1443         if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1444             (object->flags & OBJ_COLORED) == 0))
1445                 find_space = VMFS_ANY_SPACE;
1446         if (find_space >> 8 != 0) {
1447                 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1448                 alignment = (vm_offset_t)1 << (find_space >> 8);
1449         } else
1450                 alignment = 0;
1451         initial_addr = *addr;
1452 again:
1453         start = initial_addr;
1454         vm_map_lock(map);
1455         do {
1456                 if (find_space != VMFS_NO_SPACE) {
1457                         if (vm_map_findspace(map, start, length, addr) ||
1458                             (max_addr != 0 && *addr + length > max_addr)) {
1459                                 vm_map_unlock(map);
1460                                 if (find_space == VMFS_OPTIMAL_SPACE) {
1461                                         find_space = VMFS_ANY_SPACE;
1462                                         goto again;
1463                                 }
1464                                 return (KERN_NO_SPACE);
1465                         }
1466                         switch (find_space) {
1467                         case VMFS_SUPER_SPACE:
1468                         case VMFS_OPTIMAL_SPACE:
1469                                 pmap_align_superpage(object, offset, addr,
1470                                     length);
1471                                 break;
1472                         case VMFS_ANY_SPACE:
1473                                 break;
1474                         default:
1475                                 if ((*addr & (alignment - 1)) != 0) {
1476                                         *addr &= ~(alignment - 1);
1477                                         *addr += alignment;
1478                                 }
1479                                 break;
1480                         }
1481
1482                         start = *addr;
1483                 }
1484                 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1485                         result = vm_map_stack_locked(map, start, length,
1486                             sgrowsiz, prot, max, cow);
1487                 } else {
1488                         result = vm_map_insert(map, object, offset, start,
1489                             start + length, prot, max, cow);
1490                 }
1491         } while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
1492             find_space != VMFS_ANY_SPACE);
1493         vm_map_unlock(map);
1494         return (result);
1495 }
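
/*
 * Example (editor's sketch, not from the original source): a typical
 * caller reserving anonymous space.  "obj" and "size" are illustrative
 * names; per the contract above, the caller donates a reference for a
 * non-NULL object and takes it back if no entry was created.  With
 * max_addr == 0 the search is bounded only by the map; an alignment
 * can be encoded in find_space via VMFS_ALIGNED_SPACE().
 *
 *	vm_offset_t addr;
 *	int rv;
 *
 *	addr = vm_map_min(map);
 *	vm_object_reference(obj);
 *	rv = vm_map_find(map, obj, 0, &addr, size, 0, VMFS_OPTIMAL_SPACE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(obj);
 */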
1496
1497 /*
1498  *      vm_map_simplify_entry:
1499  *
1500  *      Simplify the given map entry by merging with either neighbor.  This
1501  *      routine also has the ability to merge with both neighbors.
1502  *
1503  *      The map must be locked.
1504  *
1505  *      This routine guarantees that the passed entry remains valid (though
1506  *      possibly extended).  When merging, this routine may delete one or
1507  *      both neighbors.
1508  */
1509 void
1510 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1511 {
1512         vm_map_entry_t next, prev;
1513         vm_size_t prevsize, esize;
1514
1515         if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
1516             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
1517                 return;
1518
1519         prev = entry->prev;
1520         if (prev != &map->header) {
1521                 prevsize = prev->end - prev->start;
1522                 if ((prev->end == entry->start) &&
1523                     (prev->object.vm_object == entry->object.vm_object) &&
1524                     (!prev->object.vm_object ||
1525                      (prev->offset + prevsize == entry->offset)) &&
1526                     (prev->eflags == entry->eflags) &&
1527                     (prev->protection == entry->protection) &&
1528                     (prev->max_protection == entry->max_protection) &&
1529                     (prev->inheritance == entry->inheritance) &&
1530                     (prev->wired_count == entry->wired_count) &&
1531                     (prev->cred == entry->cred)) {
1532                         vm_map_entry_unlink(map, prev);
1533                         entry->start = prev->start;
1534                         entry->offset = prev->offset;
1535                         if (entry->prev != &map->header)
1536                                 vm_map_entry_resize_free(map, entry->prev);
1537
1538                         /*
1539                          * If the backing object is a vnode object,
1540                          * vm_object_deallocate() calls vrele().
1541                          * However, vrele() does not lock the vnode
1542                          * because the vnode has additional
1543                          * references.  Thus, the map lock can be kept
1544                          * without causing a lock-order reversal with
1545                          * the vnode lock.
1546                          *
1547                          * Since we count the number of virtual page
1548                          * mappings in object->un_pager.vnp.writemappings,
1549                          * the writemappings value should not be adjusted
1550                          * when the entry is disposed of.
1551                          */
1552                         if (prev->object.vm_object)
1553                                 vm_object_deallocate(prev->object.vm_object);
1554                         if (prev->cred != NULL)
1555                                 crfree(prev->cred);
1556                         vm_map_entry_dispose(map, prev);
1557                 }
1558         }
1559
1560         next = entry->next;
1561         if (next != &map->header) {
1562                 esize = entry->end - entry->start;
1563                 if ((entry->end == next->start) &&
1564                     (next->object.vm_object == entry->object.vm_object) &&
1565                     (!entry->object.vm_object ||
1566                      (entry->offset + esize == next->offset)) &&
1567                     (next->eflags == entry->eflags) &&
1568                     (next->protection == entry->protection) &&
1569                     (next->max_protection == entry->max_protection) &&
1570                     (next->inheritance == entry->inheritance) &&
1571                     (next->wired_count == entry->wired_count) &&
1572                     (next->cred == entry->cred)) {
1573                         vm_map_entry_unlink(map, next);
1574                         entry->end = next->end;
1575                         vm_map_entry_resize_free(map, entry);
1576
1577                         /*
1578                          * See comment above.
1579                          */
1580                         if (next->object.vm_object)
1581                                 vm_object_deallocate(next->object.vm_object);
1582                         if (next->cred != NULL)
1583                                 crfree(next->cred);
1584                         vm_map_entry_dispose(map, next);
1585                 }
1586         }
1587 }
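
/*
 * Illustration (editor's note): two anonymous entries [A, B) and
 * [B, C) collapse into one entry [A, C) when they share a backing
 * object with contiguous offsets (prev->offset + prevsize ==
 * entry->offset) and every attribute compared above matches; the
 * absorbed neighbor is unlinked and disposed of.
 */
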
1588 /*
1589  *      vm_map_clip_start:      [ internal use only ]
1590  *
1591  *      Asserts that the given entry begins at or after
1592  *      the specified address; if necessary,
1593  *      it splits the entry into two.
1594  */
1595 #define vm_map_clip_start(map, entry, startaddr) \
1596 { \
1597         if (startaddr > entry->start) \
1598                 _vm_map_clip_start(map, entry, startaddr); \
1599 }
1600
1601 /*
1602  *      This routine is called only when it is known that
1603  *      the entry must be split.
1604  */
1605 static void
1606 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1607 {
1608         vm_map_entry_t new_entry;
1609
1610         VM_MAP_ASSERT_LOCKED(map);
1611
1612         /*
1613          * Split off the front portion -- note that we must insert the new
1614          * entry BEFORE this one, so that this entry has the specified
1615          * starting address.
1616          */
1617         vm_map_simplify_entry(map, entry);
1618
1619         /*
1620          * If there is no object backing this entry, we might as well create
1621          * one now.  If we defer it, an object can get created after the map
1622          * is clipped, and individual objects will be created for the split-up
1623          * map.  This is a bit of a hack, but is also about the best place to
1624          * put this improvement.
1625          */
1626         if (entry->object.vm_object == NULL && !map->system_map) {
1627                 vm_object_t object;
1628                 object = vm_object_allocate(OBJT_DEFAULT,
1629                                 atop(entry->end - entry->start));
1630                 entry->object.vm_object = object;
1631                 entry->offset = 0;
1632                 if (entry->cred != NULL) {
1633                         object->cred = entry->cred;
1634                         object->charge = entry->end - entry->start;
1635                         entry->cred = NULL;
1636                 }
1637         } else if (entry->object.vm_object != NULL &&
1638                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1639                    entry->cred != NULL) {
1640                 VM_OBJECT_WLOCK(entry->object.vm_object);
1641                 KASSERT(entry->object.vm_object->cred == NULL,
1642                     ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1643                 entry->object.vm_object->cred = entry->cred;
1644                 entry->object.vm_object->charge = entry->end - entry->start;
1645                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
1646                 entry->cred = NULL;
1647         }
1648
1649         new_entry = vm_map_entry_create(map);
1650         *new_entry = *entry;
1651
1652         new_entry->end = start;
1653         entry->offset += (start - entry->start);
1654         entry->start = start;
1655         if (new_entry->cred != NULL)
1656                 crhold(entry->cred);
1657
1658         vm_map_entry_link(map, entry->prev, new_entry);
1659
1660         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1661                 vm_object_reference(new_entry->object.vm_object);
1662                 /*
1663                  * The object->un_pager.vnp.writemappings for the
1664                  * object of a MAP_ENTRY_VN_WRITECNT entry shall be
1665                  * kept as is here.  The virtual pages are
1666                  * redistributed among the clipped entries, so the sum
1667                  * is left the same.
1668                  */
1669         }
1670 }
1671
1672 /*
1673  *      vm_map_clip_end:        [ internal use only ]
1674  *
1675  *      Asserts that the given entry ends at or before
1676  *      the specified address; if necessary,
1677  *      it splits the entry into two.
1678  */
1679 #define vm_map_clip_end(map, entry, endaddr) \
1680 { \
1681         if ((endaddr) < (entry->end)) \
1682                 _vm_map_clip_end((map), (entry), (endaddr)); \
1683 }
1684
1685 /*
1686  *      This routine is called only when it is known that
1687  *      the entry must be split.
1688  */
1689 static void
1690 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1691 {
1692         vm_map_entry_t new_entry;
1693
1694         VM_MAP_ASSERT_LOCKED(map);
1695
1696         /*
1697          * If there is no object backing this entry, we might as well create
1698          * one now.  If we defer it, an object can get created after the map
1699          * is clipped, and individual objects will be created for the split-up
1700          * map.  This is a bit of a hack, but is also about the best place to
1701          * put this improvement.
1702          */
1703         if (entry->object.vm_object == NULL && !map->system_map) {
1704                 vm_object_t object;
1705                 object = vm_object_allocate(OBJT_DEFAULT,
1706                                 atop(entry->end - entry->start));
1707                 entry->object.vm_object = object;
1708                 entry->offset = 0;
1709                 if (entry->cred != NULL) {
1710                         object->cred = entry->cred;
1711                         object->charge = entry->end - entry->start;
1712                         entry->cred = NULL;
1713                 }
1714         } else if (entry->object.vm_object != NULL &&
1715                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1716                    entry->cred != NULL) {
1717                 VM_OBJECT_WLOCK(entry->object.vm_object);
1718                 KASSERT(entry->object.vm_object->cred == NULL,
1719                     ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1720                 entry->object.vm_object->cred = entry->cred;
1721                 entry->object.vm_object->charge = entry->end - entry->start;
1722                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
1723                 entry->cred = NULL;
1724         }
1725
1726         /*
1727          * Create a new entry and insert it AFTER the specified entry
1728          */
1729         new_entry = vm_map_entry_create(map);
1730         *new_entry = *entry;
1731
1732         new_entry->start = entry->end = end;
1733         new_entry->offset += (end - entry->start);
1734         if (new_entry->cred != NULL)
1735                 crhold(entry->cred);
1736
1737         vm_map_entry_link(map, entry, new_entry);
1738
1739         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1740                 vm_object_reference(new_entry->object.vm_object);
1741         }
1742 }
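
/*
 * Usage sketch (editor's note): the range operations in this file
 * bracket their work with the two clip macros, as vm_map_inherit()
 * below does:
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 *	while (entry != &map->header && entry->start < end) {
 *		vm_map_clip_end(map, entry, end);
 *		...
 *		entry = entry->next;
 *	}
 */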
1743
1744 /*
1745  *      vm_map_submap:          [ kernel use only ]
1746  *
1747  *      Mark the given range as handled by a subordinate map.
1748  *
1749  *      This range must have been created with vm_map_find,
1750  *      and no other operations may have been performed on this
1751  *      range prior to calling vm_map_submap.
1752  *
1753  *      Only a limited number of operations can be performed
1754  *      within this range after calling vm_map_submap:
1755  *              vm_fault
1756  *      [Don't try vm_map_copy!]
1757  *
1758  *      To remove a submapping, one must first remove the
1759  *      range from the superior map, and then destroy the
1760  *      submap (if desired).  [Better yet, don't try it.]
1761  */
1762 int
1763 vm_map_submap(
1764         vm_map_t map,
1765         vm_offset_t start,
1766         vm_offset_t end,
1767         vm_map_t submap)
1768 {
1769         vm_map_entry_t entry;
1770         int result = KERN_INVALID_ARGUMENT;
1771
1772         vm_map_lock(map);
1773
1774         VM_MAP_RANGE_CHECK(map, start, end);
1775
1776         if (vm_map_lookup_entry(map, start, &entry)) {
1777                 vm_map_clip_start(map, entry, start);
1778         } else
1779                 entry = entry->next;
1780
1781         vm_map_clip_end(map, entry, end);
1782
1783         if ((entry->start == start) && (entry->end == end) &&
1784             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1785             (entry->object.vm_object == NULL)) {
1786                 entry->object.sub_map = submap;
1787                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1788                 result = KERN_SUCCESS;
1789         }
1790         vm_map_unlock(map);
1791
1792         return (result);
1793 }
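
/*
 * Example (hedged sketch, modeled on kmem_suballoc()): carve a range
 * out of a parent map and install a subordinate map over it.
 *
 *	rv = vm_map_find(parent, NULL, 0, &min, size, 0, VMFS_ANY_SPACE,
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_NO_CHARGE);
 *	submap = vm_map_create(vm_map_pmap(parent), min, min + size);
 *	rv = vm_map_submap(parent, min, min + size, submap);
 */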
1794
1795 /*
1796  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
1797  */
1798 #define MAX_INIT_PT     96
1799
1800 /*
1801  *      vm_map_pmap_enter:
1802  *
1803  *      Preload the specified map's pmap with mappings to the specified
1804  *      object's memory-resident pages.  No further physical pages are
1805  *      allocated, and no further virtual pages are retrieved from secondary
1806  *      storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
1807  *      limited number of page mappings are created at the low-end of the
1808  *      specified address range.  (For this purpose, a superpage mapping
1809  *      counts as one page mapping.)  Otherwise, all resident pages within
1810  *      the specified address range are mapped.  Because these mappings are
1811  *      being created speculatively, cached pages are not reactivated and
1812  *      mapped.
1813  */
1814 void
1815 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1816     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1817 {
1818         vm_offset_t start;
1819         vm_page_t p, p_start;
1820         vm_pindex_t mask, psize, threshold, tmpidx;
1821
1822         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1823                 return;
1824         VM_OBJECT_RLOCK(object);
1825         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1826                 VM_OBJECT_RUNLOCK(object);
1827                 VM_OBJECT_WLOCK(object);
1828                 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1829                         pmap_object_init_pt(map->pmap, addr, object, pindex,
1830                             size);
1831                         VM_OBJECT_WUNLOCK(object);
1832                         return;
1833                 }
1834                 VM_OBJECT_LOCK_DOWNGRADE(object);
1835         }
1836
1837         psize = atop(size);
1838         if (psize + pindex > object->size) {
1839                 if (object->size < pindex) {
1840                         VM_OBJECT_RUNLOCK(object);
1841                         return;
1842                 }
1843                 psize = object->size - pindex;
1844         }
1845
1846         start = 0;
1847         p_start = NULL;
1848         threshold = MAX_INIT_PT;
1849
1850         p = vm_page_find_least(object, pindex);
1851         /*
1852          * Assert: the variable p is either (1) the page with the
1853          * least pindex greater than or equal to the parameter pindex
1854          * or (2) NULL.
1855          */
1856         for (;
1857              p != NULL && (tmpidx = p->pindex - pindex) < psize;
1858              p = TAILQ_NEXT(p, listq)) {
1859                 /*
1860                  * Don't allow a madvise call to exhaust the free-page
1861                  * reserve by allocating pv entries.
1862                  */
1863                 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
1864                     cnt.v_free_count < cnt.v_free_reserved) ||
1865                     ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
1866                     tmpidx >= threshold)) {
1867                         psize = tmpidx;
1868                         break;
1869                 }
1870                 if (p->valid == VM_PAGE_BITS_ALL) {
1871                         if (p_start == NULL) {
1872                                 start = addr + ptoa(tmpidx);
1873                                 p_start = p;
1874                         }
1875                         /* Jump ahead if a superpage mapping is possible. */
1876                         if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
1877                             (pagesizes[p->psind] - 1)) == 0) {
1878                                 mask = atop(pagesizes[p->psind]) - 1;
1879                                 if (tmpidx + mask < psize &&
1880                                     vm_page_ps_is_valid(p)) {
1881                                         p += mask;
1882                                         threshold += mask;
1883                                 }
1884                         }
1885                 } else if (p_start != NULL) {
1886                         pmap_enter_object(map->pmap, start, addr +
1887                             ptoa(tmpidx), p_start, prot);
1888                         p_start = NULL;
1889                 }
1890         }
1891         if (p_start != NULL)
1892                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1893                     p_start, prot);
1894         VM_OBJECT_RUNLOCK(object);
1895 }
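
/*
 * Worked example (editor's note, assuming amd64's 2 MB superpages on
 * 4 KB base pages): in the superpage jump above, psind 1 yields
 * mask = atop(2 MB) - 1 = 511.  "p += mask" plus the loop's
 * TAILQ_NEXT() advances the scan past the entire superpage, and
 * "threshold += mask" makes the superpage count as a single page
 * mapping against the MAP_PREFAULT_PARTIAL limit.
 */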
1896
1897 /*
1898  *      vm_map_protect:
1899  *
1900  *      Sets the protection of the specified address
1901  *      region in the target map.  If "set_max" is
1902  *      specified, the maximum protection is to be set;
1903  *      otherwise, only the current protection is affected.
1904  */
1905 int
1906 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1907                vm_prot_t new_prot, boolean_t set_max)
1908 {
1909         vm_map_entry_t current, entry;
1910         vm_object_t obj;
1911         struct ucred *cred;
1912         vm_prot_t old_prot;
1913
1914         if (start == end)
1915                 return (KERN_SUCCESS);
1916
1917         vm_map_lock(map);
1918
1919         VM_MAP_RANGE_CHECK(map, start, end);
1920
1921         if (vm_map_lookup_entry(map, start, &entry)) {
1922                 vm_map_clip_start(map, entry, start);
1923         } else {
1924                 entry = entry->next;
1925         }
1926
1927         /*
1928          * Make a first pass to check for protection violations.
1929          */
1930         current = entry;
1931         while ((current != &map->header) && (current->start < end)) {
1932                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1933                         vm_map_unlock(map);
1934                         return (KERN_INVALID_ARGUMENT);
1935                 }
1936                 if ((new_prot & current->max_protection) != new_prot) {
1937                         vm_map_unlock(map);
1938                         return (KERN_PROTECTION_FAILURE);
1939                 }
1940                 current = current->next;
1941         }
1942
1943
1944         /*
1945          * Do an accounting pass for private read-only mappings that
1946          * now will do cow due to allowed write (e.g. debugger sets
1947          * breakpoint on text segment)
1948          */
1949         for (current = entry; (current != &map->header) &&
1950              (current->start < end); current = current->next) {
1951
1952                 vm_map_clip_end(map, current, end);
1953
1954                 if (set_max ||
1955                     ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1956                     ENTRY_CHARGED(current)) {
1957                         continue;
1958                 }
1959
1960                 cred = curthread->td_ucred;
1961                 obj = current->object.vm_object;
1962
1963                 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1964                         if (!swap_reserve(current->end - current->start)) {
1965                                 vm_map_unlock(map);
1966                                 return (KERN_RESOURCE_SHORTAGE);
1967                         }
1968                         crhold(cred);
1969                         current->cred = cred;
1970                         continue;
1971                 }
1972
1973                 VM_OBJECT_WLOCK(obj);
1974                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1975                         VM_OBJECT_WUNLOCK(obj);
1976                         continue;
1977                 }
1978
1979                 /*
1980                  * Charge for the whole object allocation now, since
1981                  * we cannot distinguish between non-charged and
1982                  * charged clipped mapping of the same object later.
1983                  */
1984                 KASSERT(obj->charge == 0,
1985                     ("vm_map_protect: object %p overcharged (entry %p)",
1986                     obj, current));
1987                 if (!swap_reserve(ptoa(obj->size))) {
1988                         VM_OBJECT_WUNLOCK(obj);
1989                         vm_map_unlock(map);
1990                         return (KERN_RESOURCE_SHORTAGE);
1991                 }
1992
1993                 crhold(cred);
1994                 obj->cred = cred;
1995                 obj->charge = ptoa(obj->size);
1996                 VM_OBJECT_WUNLOCK(obj);
1997         }
1998
1999         /*
2000          * Go back and fix up protections. [Note that clipping is not
2001          * necessary the second time.]
2002          */
2003         current = entry;
2004         while ((current != &map->header) && (current->start < end)) {
2005                 old_prot = current->protection;
2006
2007                 if (set_max)
2008                         current->protection =
2009                             (current->max_protection = new_prot) &
2010                             old_prot;
2011                 else
2012                         current->protection = new_prot;
2013
2014                 /*
2015                  * For user wired map entries, the normal lazy evaluation of
2016                  * write access upgrades through soft page faults is
2017                  * undesirable.  Instead, immediately copy any pages that are
2018                  * copy-on-write and enable write access in the physical map.
2019                  */
2020                 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2021                     (current->protection & VM_PROT_WRITE) != 0 &&
2022                     (old_prot & VM_PROT_WRITE) == 0)
2023                         vm_fault_copy_entry(map, map, current, current, NULL);
2024
2025                 /*
2026                  * When restricting access, update the physical map.  Worry
2027                  * about copy-on-write here.
2028                  */
2029                 if ((old_prot & ~current->protection) != 0) {
2030 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2031                                                         VM_PROT_ALL)
2032                         pmap_protect(map->pmap, current->start,
2033                             current->end,
2034                             current->protection & MASK(current));
2035 #undef  MASK
2036                 }
2037                 vm_map_simplify_entry(map, current);
2038                 current = current->next;
2039         }
2040         vm_map_unlock(map);
2041         return (KERN_SUCCESS);
2042 }
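
/*
 * Example (hedged): the mprotect(2) path reaches this function as
 * roughly
 *
 *	vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
 *	    addr + size, prot, FALSE);
 *
 * with set_max FALSE, so only the current protection is changed.
 */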
2043
2044 /*
2045  *      vm_map_madvise:
2046  *
2047  *      This routine traverses a process's map handling the madvise
2048  *      system call.  Advisories are classified as either those affecting
2049  *      the vm_map_entry structure, or those affecting the underlying
2050  *      objects.
2051  */
2052 int
2053 vm_map_madvise(
2054         vm_map_t map,
2055         vm_offset_t start,
2056         vm_offset_t end,
2057         int behav)
2058 {
2059         vm_map_entry_t current, entry;
2060         int modify_map = 0;
2061
2062         /*
2063          * Some madvise calls directly modify the vm_map_entry, in which case
2064          * we need to use an exclusive lock on the map and we need to perform
2065          * various clipping operations.  Otherwise we only need a read-lock
2066          * on the map.
2067          */
2068         switch(behav) {
2069         case MADV_NORMAL:
2070         case MADV_SEQUENTIAL:
2071         case MADV_RANDOM:
2072         case MADV_NOSYNC:
2073         case MADV_AUTOSYNC:
2074         case MADV_NOCORE:
2075         case MADV_CORE:
2076                 if (start == end)
2077                         return (KERN_SUCCESS);
2078                 modify_map = 1;
2079                 vm_map_lock(map);
2080                 break;
2081         case MADV_WILLNEED:
2082         case MADV_DONTNEED:
2083         case MADV_FREE:
2084                 if (start == end)
2085                         return (KERN_SUCCESS);
2086                 vm_map_lock_read(map);
2087                 break;
2088         default:
2089                 return (KERN_INVALID_ARGUMENT);
2090         }
2091
2092         /*
2093          * Locate starting entry and clip if necessary.
2094          */
2095         VM_MAP_RANGE_CHECK(map, start, end);
2096
2097         if (vm_map_lookup_entry(map, start, &entry)) {
2098                 if (modify_map)
2099                         vm_map_clip_start(map, entry, start);
2100         } else {
2101                 entry = entry->next;
2102         }
2103
2104         if (modify_map) {
2105                 /*
2106                  * madvise behaviors that are implemented in the vm_map_entry.
2107                  *
2108                  * We clip the vm_map_entry so that behavioral changes are
2109                  * limited to the specified address range.
2110                  */
2111                 for (current = entry;
2112                      (current != &map->header) && (current->start < end);
2113                      current = current->next
2114                 ) {
2115                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2116                                 continue;
2117
2118                         vm_map_clip_end(map, current, end);
2119
2120                         switch (behav) {
2121                         case MADV_NORMAL:
2122                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2123                                 break;
2124                         case MADV_SEQUENTIAL:
2125                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2126                                 break;
2127                         case MADV_RANDOM:
2128                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2129                                 break;
2130                         case MADV_NOSYNC:
2131                                 current->eflags |= MAP_ENTRY_NOSYNC;
2132                                 break;
2133                         case MADV_AUTOSYNC:
2134                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
2135                                 break;
2136                         case MADV_NOCORE:
2137                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
2138                                 break;
2139                         case MADV_CORE:
2140                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2141                                 break;
2142                         default:
2143                                 break;
2144                         }
2145                         vm_map_simplify_entry(map, current);
2146                 }
2147                 vm_map_unlock(map);
2148         } else {
2149                 vm_pindex_t pstart, pend;
2150
2151                 /*
2152                  * madvise behaviors that are implemented in the underlying
2153                  * vm_object.
2154                  *
2155                  * Since we don't clip the vm_map_entry, we have to clip
2156                  * the vm_object pindex and count.
2157                  */
2158                 for (current = entry;
2159                      (current != &map->header) && (current->start < end);
2160                      current = current->next
2161                 ) {
2162                         vm_offset_t useEnd, useStart;
2163
2164                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2165                                 continue;
2166
2167                         pstart = OFF_TO_IDX(current->offset);
2168                         pend = pstart + atop(current->end - current->start);
2169                         useStart = current->start;
2170                         useEnd = current->end;
2171
2172                         if (current->start < start) {
2173                                 pstart += atop(start - current->start);
2174                                 useStart = start;
2175                         }
2176                         if (current->end > end) {
2177                                 pend -= atop(current->end - end);
2178                                 useEnd = end;
2179                         }
2180
2181                         if (pstart >= pend)
2182                                 continue;
2183
2184                         /*
2185                          * Perform the pmap_advise() before clearing
2186                          * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2187                          * concurrent pmap operation, such as pmap_remove(),
2188                          * could clear a reference in the pmap and set
2189                          * PGA_REFERENCED on the page before the pmap_advise()
2190                          * had completed.  Consequently, the page would appear
2191                          * referenced based upon an old reference that
2192                          * occurred before this pmap_advise() ran.
2193                          */
2194                         if (behav == MADV_DONTNEED || behav == MADV_FREE)
2195                                 pmap_advise(map->pmap, useStart, useEnd,
2196                                     behav);
2197
2198                         vm_object_madvise(current->object.vm_object, pstart,
2199                             pend, behav);
2200
2201                         /*
2202                          * Pre-populate paging structures in the
2203                          * WILLNEED case.  For wired entries, the
2204                          * paging structures are already populated.
2205                          */
2206                         if (behav == MADV_WILLNEED &&
2207                             current->wired_count == 0) {
2208                                 vm_map_pmap_enter(map,
2209                                     useStart,
2210                                     current->protection,
2211                                     current->object.vm_object,
2212                                     pstart,
2213                                     ptoa(pend - pstart),
2214                                     MAP_PREFAULT_MADVISE
2215                                 );
2216                         }
2217                 }
2218                 vm_map_unlock_read(map);
2219         }
2220         return (0);
2221 }
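
/*
 * Example (hedged): madvise(2) lands here as roughly
 *
 *	vm_map_madvise(&td->td_proc->p_vmspace->vm_map, start, end,
 *	    behav);
 *
 * where behav selects either the entry-modifying (exclusive lock) or
 * the object-advising (read lock) path above.
 */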
2222
2223
2224 /*
2225  *      vm_map_inherit:
2226  *
2227  *      Sets the inheritance of the specified address
2228  *      range in the target map.  Inheritance
2229  *      affects how the map will be shared with
2230  *      child maps at the time of vmspace_fork.
2231  */
2232 int
2233 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2234                vm_inherit_t new_inheritance)
2235 {
2236         vm_map_entry_t entry;
2237         vm_map_entry_t temp_entry;
2238
2239         switch (new_inheritance) {
2240         case VM_INHERIT_NONE:
2241         case VM_INHERIT_COPY:
2242         case VM_INHERIT_SHARE:
2243                 break;
2244         default:
2245                 return (KERN_INVALID_ARGUMENT);
2246         }
2247         if (start == end)
2248                 return (KERN_SUCCESS);
2249         vm_map_lock(map);
2250         VM_MAP_RANGE_CHECK(map, start, end);
2251         if (vm_map_lookup_entry(map, start, &temp_entry)) {
2252                 entry = temp_entry;
2253                 vm_map_clip_start(map, entry, start);
2254         } else
2255                 entry = temp_entry->next;
2256         while ((entry != &map->header) && (entry->start < end)) {
2257                 vm_map_clip_end(map, entry, end);
2258                 entry->inheritance = new_inheritance;
2259                 vm_map_simplify_entry(map, entry);
2260                 entry = entry->next;
2261         }
2262         vm_map_unlock(map);
2263         return (KERN_SUCCESS);
2264 }
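
/*
 * Example (hedged): minherit(2) reduces to roughly
 *
 *	vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
 *	    addr + size, inherit);
 */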
2265
2266 /*
2267  *      vm_map_unwire:
2268  *
2269  *      Implements both kernel and user unwiring.
2270  */
2271 int
2272 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2273     int flags)
2274 {
2275         vm_map_entry_t entry, first_entry, tmp_entry;
2276         vm_offset_t saved_start;
2277         unsigned int last_timestamp;
2278         int rv;
2279         boolean_t need_wakeup, result, user_unwire;
2280
2281         if (start == end)
2282                 return (KERN_SUCCESS);
2283         user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2284         vm_map_lock(map);
2285         VM_MAP_RANGE_CHECK(map, start, end);
2286         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2287                 if (flags & VM_MAP_WIRE_HOLESOK)
2288                         first_entry = first_entry->next;
2289                 else {
2290                         vm_map_unlock(map);
2291                         return (KERN_INVALID_ADDRESS);
2292                 }
2293         }
2294         last_timestamp = map->timestamp;
2295         entry = first_entry;
2296         while (entry != &map->header && entry->start < end) {
2297                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2298                         /*
2299                          * We have not yet clipped the entry.
2300                          */
2301                         saved_start = (start >= entry->start) ? start :
2302                             entry->start;
2303                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2304                         if (vm_map_unlock_and_wait(map, 0)) {
2305                                 /*
2306                                  * Allow interruption of user unwiring?
2307                                  */
2308                         }
2309                         vm_map_lock(map);
2310                         if (last_timestamp+1 != map->timestamp) {
2311                                 /*
2312                                  * Look again for the entry because the map was
2313                                  * modified while it was unlocked.
2314                                  * Specifically, the entry may have been
2315                                  * clipped, merged, or deleted.
2316                                  */
2317                                 if (!vm_map_lookup_entry(map, saved_start,
2318                                     &tmp_entry)) {
2319                                         if (flags & VM_MAP_WIRE_HOLESOK)
2320                                                 tmp_entry = tmp_entry->next;
2321                                         else {
2322                                                 if (saved_start == start) {
2323                                                         /*
2324                                                          * first_entry has been deleted.
2325                                                          */
2326                                                         vm_map_unlock(map);
2327                                                         return (KERN_INVALID_ADDRESS);
2328                                                 }
2329                                                 end = saved_start;
2330                                                 rv = KERN_INVALID_ADDRESS;
2331                                                 goto done;
2332                                         }
2333                                 }
2334                                 if (entry == first_entry)
2335                                         first_entry = tmp_entry;
2336                                 else
2337                                         first_entry = NULL;
2338                                 entry = tmp_entry;
2339                         }
2340                         last_timestamp = map->timestamp;
2341                         continue;
2342                 }
2343                 vm_map_clip_start(map, entry, start);
2344                 vm_map_clip_end(map, entry, end);
2345                 /*
2346                  * Mark the entry in case the map lock is released.  (See
2347                  * above.)
2348                  */
2349                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2350                     entry->wiring_thread == NULL,
2351                     ("owned map entry %p", entry));
2352                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2353                 entry->wiring_thread = curthread;
2354                 /*
2355                  * Check the map for holes in the specified region.
2356                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2357                  */
2358                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2359                     (entry->end < end && (entry->next == &map->header ||
2360                     entry->next->start > entry->end))) {
2361                         end = entry->end;
2362                         rv = KERN_INVALID_ADDRESS;
2363                         goto done;
2364                 }
2365                 /*
2366                  * If system unwiring, require that the entry is system wired.
2367                  */
2368                 if (!user_unwire &&
2369                     vm_map_entry_system_wired_count(entry) == 0) {
2370                         end = entry->end;
2371                         rv = KERN_INVALID_ARGUMENT;
2372                         goto done;
2373                 }
2374                 entry = entry->next;
2375         }
2376         rv = KERN_SUCCESS;
2377 done:
2378         need_wakeup = FALSE;
2379         if (first_entry == NULL) {
2380                 result = vm_map_lookup_entry(map, start, &first_entry);
2381                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2382                         first_entry = first_entry->next;
2383                 else
2384                         KASSERT(result, ("vm_map_unwire: lookup failed"));
2385         }
2386         for (entry = first_entry; entry != &map->header && entry->start < end;
2387             entry = entry->next) {
2388                 /*
2389                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
2390                  * space in the unwired region could have been mapped
2391                  * while the map lock was dropped for draining
2392                  * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2393                  * could be simultaneously wiring this new mapping
2394                  * entry.  Detect these cases and skip any entries
2395                  * marked as in transition by us.
2396                  */
2397                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2398                     entry->wiring_thread != curthread) {
2399                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2400                             ("vm_map_unwire: !HOLESOK and new/changed entry"));
2401                         continue;
2402                 }
2403
2404                 if (rv == KERN_SUCCESS && (!user_unwire ||
2405                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2406                         if (user_unwire)
2407                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2408                         if (entry->wired_count == 1)
2409                                 vm_map_entry_unwire(map, entry);
2410                         else
2411                                 entry->wired_count--;
2412                 }
2413                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2414                     ("vm_map_unwire: in-transition flag missing %p", entry));
2415                 KASSERT(entry->wiring_thread == curthread,
2416                     ("vm_map_unwire: alien wire %p", entry));
2417                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2418                 entry->wiring_thread = NULL;
2419                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2420                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2421                         need_wakeup = TRUE;
2422                 }
2423                 vm_map_simplify_entry(map, entry);
2424         }
2425         vm_map_unlock(map);
2426         if (need_wakeup)
2427                 vm_map_wakeup(map);
2428         return (rv);
2429 }
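
/*
 * Example (hedged): munlock(2) unwires user pages as roughly
 *
 *	vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */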
2430
2431 /*
2432  *      vm_map_wire_entry_failure:
2433  *
2434  *      Handle a wiring failure on the given entry.
2435  *
2436  *      The map should be locked.
2437  */
2438 static void
2439 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
2440     vm_offset_t failed_addr)
2441 {
2442
2443         VM_MAP_ASSERT_LOCKED(map);
2444         KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
2445             entry->wired_count == 1,
2446             ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
2447         KASSERT(failed_addr < entry->end,
2448             ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
2449
2450         /*
2451          * If any pages at the start of this entry were successfully wired,
2452          * then unwire them.
2453          */
2454         if (failed_addr > entry->start) {
2455                 pmap_unwire(map->pmap, entry->start, failed_addr);
2456                 vm_object_unwire(entry->object.vm_object, entry->offset,
2457                     failed_addr - entry->start, PQ_ACTIVE);
2458         }
2459
2460         /*
2461          * Assign an out-of-range value to represent the failure to wire this
2462          * entry.
2463          */
2464         entry->wired_count = -1;
2465 }
2466
2467 /*
2468  *      vm_map_wire:
2469  *
2470  *      Implements both kernel and user wiring.
2471  */
2472 int
2473 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2474     int flags)
2475 {
2476         vm_map_entry_t entry, first_entry, tmp_entry;
2477         vm_offset_t faddr, saved_end, saved_start;
2478         unsigned int last_timestamp;
2479         int rv;
2480         boolean_t need_wakeup, result, user_wire;
2481         vm_prot_t prot;
2482
2483         if (start == end)
2484                 return (KERN_SUCCESS);
2485         prot = 0;
2486         if (flags & VM_MAP_WIRE_WRITE)
2487                 prot |= VM_PROT_WRITE;
2488         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2489         vm_map_lock(map);
2490         VM_MAP_RANGE_CHECK(map, start, end);
2491         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2492                 if (flags & VM_MAP_WIRE_HOLESOK)
2493                         first_entry = first_entry->next;
2494                 else {
2495                         vm_map_unlock(map);
2496                         return (KERN_INVALID_ADDRESS);
2497                 }
2498         }
2499         last_timestamp = map->timestamp;
2500         entry = first_entry;
2501         while (entry != &map->header && entry->start < end) {
2502                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2503                         /*
2504                          * We have not yet clipped the entry.
2505                          */
2506                         saved_start = (start >= entry->start) ? start :
2507                             entry->start;
2508                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2509                         if (vm_map_unlock_and_wait(map, 0)) {
2510                                 /*
2511                                  * Allow interruption of user wiring?
2512                                  */
2513                         }
2514                         vm_map_lock(map);
2515                         if (last_timestamp + 1 != map->timestamp) {
2516                                 /*
2517                                  * Look again for the entry because the map was
2518                                  * modified while it was unlocked.
2519                                  * Specifically, the entry may have been
2520                                  * clipped, merged, or deleted.
2521                                  */
2522                                 if (!vm_map_lookup_entry(map, saved_start,
2523                                     &tmp_entry)) {
2524                                         if (flags & VM_MAP_WIRE_HOLESOK)
2525                                                 tmp_entry = tmp_entry->next;
2526                                         else {
2527                                                 if (saved_start == start) {
2528                                                         /*
2529                                                          * first_entry has been deleted.
2530                                                          */
2531                                                         vm_map_unlock(map);
2532                                                         return (KERN_INVALID_ADDRESS);
2533                                                 }
2534                                                 end = saved_start;
2535                                                 rv = KERN_INVALID_ADDRESS;
2536                                                 goto done;
2537                                         }
2538                                 }
2539                                 if (entry == first_entry)
2540                                         first_entry = tmp_entry;
2541                                 else
2542                                         first_entry = NULL;
2543                                 entry = tmp_entry;
2544                         }
2545                         last_timestamp = map->timestamp;
2546                         continue;
2547                 }
2548                 vm_map_clip_start(map, entry, start);
2549                 vm_map_clip_end(map, entry, end);
2550                 /*
2551                  * Mark the entry in case the map lock is released.  (See
2552                  * above.)
2553                  */
2554                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2555                     entry->wiring_thread == NULL,
2556                     ("owned map entry %p", entry));
2557                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2558                 entry->wiring_thread = curthread;
2559                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2560                     || (entry->protection & prot) != prot) {
2561                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2562                         if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2563                                 end = entry->end;
2564                                 rv = KERN_INVALID_ADDRESS;
2565                                 goto done;
2566                         }
2567                         goto next_entry;
2568                 }
2569                 if (entry->wired_count == 0) {
2570                         entry->wired_count++;
2571                         saved_start = entry->start;
2572                         saved_end = entry->end;
2573
2574                         /*
2575                          * Release the map lock, relying on the in-transition
2576                          * mark.  Mark the map busy for fork.
2577                          */
2578                         vm_map_busy(map);
2579                         vm_map_unlock(map);
2580
2581                         faddr = saved_start;
2582                         do {
2583                                 /*
2584                                  * Simulate a fault to get the page and enter
2585                                  * it into the physical map.
2586                                  */
2587                                 if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
2588                                     VM_FAULT_WIRE)) != KERN_SUCCESS)
2589                                         break;
2590                         } while ((faddr += PAGE_SIZE) < saved_end);
2591                         vm_map_lock(map);
2592                         vm_map_unbusy(map);
2593                         if (last_timestamp + 1 != map->timestamp) {
2594                                 /*
2595                                  * Look again for the entry because the map was
2596                                  * modified while it was unlocked.  The entry
2597                                  * may have been clipped, but NOT merged or
2598                                  * deleted.
2599                                  */
2600                                 result = vm_map_lookup_entry(map, saved_start,
2601                                     &tmp_entry);
2602                                 KASSERT(result, ("vm_map_wire: lookup failed"));
2603                                 if (entry == first_entry)
2604                                         first_entry = tmp_entry;
2605                                 else
2606                                         first_entry = NULL;
2607                                 entry = tmp_entry;
2608                                 while (entry->end < saved_end) {
2609                                         /*
2610                                          * In case of failure, handle entries
2611                                          * that were not fully wired here;
2612                                          * fully wired entries are handled
2613                                          * later.
2614                                          */
2615                                         if (rv != KERN_SUCCESS &&
2616                                             faddr < entry->end)
2617                                                 vm_map_wire_entry_failure(map,
2618                                                     entry, faddr);
2619                                         entry = entry->next;
2620                                 }
2621                         }
2622                         last_timestamp = map->timestamp;
2623                         if (rv != KERN_SUCCESS) {
2624                                 vm_map_wire_entry_failure(map, entry, faddr);
2625                                 end = entry->end;
2626                                 goto done;
2627                         }
2628                 } else if (!user_wire ||
2629                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2630                         entry->wired_count++;
2631                 }
2632                 /*
2633                  * Check the map for holes in the specified region.
2634                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2635                  */
2636         next_entry:
2637                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2638                     (entry->end < end && (entry->next == &map->header ||
2639                     entry->next->start > entry->end))) {
2640                         end = entry->end;
2641                         rv = KERN_INVALID_ADDRESS;
2642                         goto done;
2643                 }
2644                 entry = entry->next;
2645         }
2646         rv = KERN_SUCCESS;
2647 done:
2648         need_wakeup = FALSE;
2649         if (first_entry == NULL) {
2650                 result = vm_map_lookup_entry(map, start, &first_entry);
2651                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2652                         first_entry = first_entry->next;
2653                 else
2654                         KASSERT(result, ("vm_map_wire: lookup failed"));
2655         }
2656         for (entry = first_entry; entry != &map->header && entry->start < end;
2657             entry = entry->next) {
2658                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2659                         goto next_entry_done;
2660
2661                 /*
2662                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
2663                  * space in the unwired region could have been mapped
2664                  * while the map lock was dropped for faulting in the
2665                  * pages or draining MAP_ENTRY_IN_TRANSITION.
2666                  * Moreover, another thread could be simultaneously
2667                  * wiring this new mapping entry.  Detect these cases
2668                  * and skip any entries marked as in transition by us.
2669                  */
2670                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2671                     entry->wiring_thread != curthread) {
2672                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2673                             ("vm_map_wire: !HOLESOK and new/changed entry"));
2674                         continue;
2675                 }
2676
2677                 if (rv == KERN_SUCCESS) {
2678                         if (user_wire)
2679                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
2680                 } else if (entry->wired_count == -1) {
2681                         /*
2682                          * Wiring failed on this entry.  Thus, unwiring is
2683                          * unnecessary.
2684                          */
2685                         entry->wired_count = 0;
2686                 } else if (!user_wire ||
2687                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2688                         /*
2689                          * Undo the wiring.  Wiring succeeded on this entry
2690                          * but failed on a later entry.  
2691                          */
2692                         if (entry->wired_count == 1)
2693                                 vm_map_entry_unwire(map, entry);
2694                         else
2695                                 entry->wired_count--;
2696                 }
2697         next_entry_done:
2698                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2699                     ("vm_map_wire: in-transition flag missing %p", entry));
2700                 KASSERT(entry->wiring_thread == curthread,
2701                     ("vm_map_wire: alien wire %p", entry));
2702                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2703                     MAP_ENTRY_WIRE_SKIPPED);
2704                 entry->wiring_thread = NULL;
2705                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2706                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2707                         need_wakeup = TRUE;
2708                 }
2709                 vm_map_simplify_entry(map, entry);
2710         }
2711         vm_map_unlock(map);
2712         if (need_wakeup)
2713                 vm_map_wakeup(map);
2714         return (rv);
2715 }
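
/*
 * Example (hedged): mlock(2) wires user pages as roughly
 *
 *	vm_map_wire(&td->td_proc->p_vmspace->vm_map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */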
2716
2717 /*
2718  * vm_map_sync
2719  *
2720  * Push any dirty cached pages in the address range to their pager.
2721  * If syncio is TRUE, dirty pages are written synchronously.
2722  * If invalidate is TRUE, any cached pages are freed as well.
2723  *
2724  * If the size of the region from start to end is zero, we are
2725  * supposed to flush all modified pages within the region containing
2726  * start.  Unfortunately, a region can be split or coalesced with
2727  * neighboring regions, making it difficult to determine what the
2728  * original region was.  Therefore, we approximate this requirement by
2729  * flushing the current region containing start.
2730  *
2731  * Returns an error if any part of the specified range is not mapped.
2732  */
2733 int
2734 vm_map_sync(
2735         vm_map_t map,
2736         vm_offset_t start,
2737         vm_offset_t end,
2738         boolean_t syncio,
2739         boolean_t invalidate)
2740 {
2741         vm_map_entry_t current;
2742         vm_map_entry_t entry;
2743         vm_size_t size;
2744         vm_object_t object;
2745         vm_ooffset_t offset;
2746         unsigned int last_timestamp;
2747         boolean_t failed;
2748
2749         vm_map_lock_read(map);
2750         VM_MAP_RANGE_CHECK(map, start, end);
2751         if (!vm_map_lookup_entry(map, start, &entry)) {
2752                 vm_map_unlock_read(map);
2753                 return (KERN_INVALID_ADDRESS);
2754         } else if (start == end) {
2755                 start = entry->start;
2756                 end = entry->end;
2757         }
2758         /*
2759          * Make a first pass to check for user-wired memory and holes.
2760          */
2761         for (current = entry; current != &map->header && current->start < end;
2762             current = current->next) {
2763                 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2764                         vm_map_unlock_read(map);
2765                         return (KERN_INVALID_ARGUMENT);
2766                 }
2767                 if (end > current->end &&
2768                     (current->next == &map->header ||
2769                         current->end != current->next->start)) {
2770                         vm_map_unlock_read(map);
2771                         return (KERN_INVALID_ADDRESS);
2772                 }
2773         }
2774
2775         if (invalidate)
2776                 pmap_remove(map->pmap, start, end);
2777         failed = FALSE;
2778
2779         /*
2780          * Make a second pass, cleaning/uncaching pages from the indicated
2781          * objects as we go.
2782          */
2783         for (current = entry; current != &map->header && current->start < end;) {
2784                 offset = current->offset + (start - current->start);
2785                 size = (end <= current->end ? end : current->end) - start;
2786                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2787                         vm_map_t smap;
2788                         vm_map_entry_t tentry;
2789                         vm_size_t tsize;
2790
2791                         smap = current->object.sub_map;
2792                         vm_map_lock_read(smap);
2793                         (void) vm_map_lookup_entry(smap, offset, &tentry);
2794                         tsize = tentry->end - offset;
2795                         if (tsize < size)
2796                                 size = tsize;
2797                         object = tentry->object.vm_object;
2798                         offset = tentry->offset + (offset - tentry->start);
2799                         vm_map_unlock_read(smap);
2800                 } else {
2801                         object = current->object.vm_object;
2802                 }
2803                 vm_object_reference(object);
2804                 last_timestamp = map->timestamp;
2805                 vm_map_unlock_read(map);
2806                 if (!vm_object_sync(object, offset, size, syncio, invalidate))
2807                         failed = TRUE;
2808                 start += size;
2809                 vm_object_deallocate(object);
2810                 vm_map_lock_read(map);
2811                 if (last_timestamp == map->timestamp ||
2812                     !vm_map_lookup_entry(map, start, &current))
2813                         current = current->next;
2814         }
2815
2816         vm_map_unlock_read(map);
2817         return (failed ? KERN_FAILURE : KERN_SUCCESS);
2818 }
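
/*
 * Illustrative sketch (editor's addition, not part of this file): one
 * plausible way an msync(2)-style caller could drive vm_map_sync()
 * above and translate its KERN_* results to errno values.  The helper
 * name is hypothetical and the "#if 0" keeps it out of any build.
 */
#if 0
static int
example_flush_range(vm_map_t map, vm_offset_t addr, vm_size_t len)
{

        /* The caller must not hold the map lock; vm_map_sync() takes it. */
        switch (vm_map_sync(map, trunc_page(addr), round_page(addr + len),
            TRUE /* syncio */, FALSE /* invalidate */)) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
                return (ENOMEM);        /* part of the range is unmapped */
        case KERN_INVALID_ARGUMENT:
                return (EBUSY);         /* invalidating user-wired pages */
        default:
                return (EIO);           /* a pager write failed */
        }
}
#endif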
2819
2820 /*
2821  *      vm_map_entry_unwire:    [ internal use only ]
2822  *
2823  *      Make the region specified by this entry pageable.
2824  *
2825  *      The map in question should be locked.
2826  *      [This is the reason for this routine's existence.]
2827  */
2828 static void
2829 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2830 {
2831
2832         VM_MAP_ASSERT_LOCKED(map);
2833         KASSERT(entry->wired_count > 0,
2834             ("vm_map_entry_unwire: entry %p isn't wired", entry));
2835         pmap_unwire(map->pmap, entry->start, entry->end);
2836         vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
2837             entry->start, PQ_ACTIVE);
2838         entry->wired_count = 0;
2839 }
2840
2841 static void
2842 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2843 {
2844
2845         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2846                 vm_object_deallocate(entry->object.vm_object);
2847         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2848 }
2849
2850 /*
2851  *      vm_map_entry_delete:    [ internal use only ]
2852  *
2853  *      Deallocate the given entry from the target map.
2854  */
2855 static void
2856 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2857 {
2858         vm_object_t object;
2859         vm_pindex_t offidxstart, offidxend, count, size1;
2860         vm_ooffset_t size;
2861
2862         vm_map_entry_unlink(map, entry);
2863         object = entry->object.vm_object;
2864         size = entry->end - entry->start;
2865         map->size -= size;
2866
2867         if (entry->cred != NULL) {
2868                 swap_release_by_cred(size, entry->cred);
2869                 crfree(entry->cred);
2870         }
2871
2872         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2873             (object != NULL)) {
2874                 KASSERT(entry->cred == NULL || object->cred == NULL ||
2875                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2876                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2877                 count = OFF_TO_IDX(size);
2878                 offidxstart = OFF_TO_IDX(entry->offset);
2879                 offidxend = offidxstart + count;
2880                 VM_OBJECT_WLOCK(object);
2881                 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
2882                     OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2883                     object == kernel_object || object == kmem_object)) {
2884                         vm_object_collapse(object);
2885
2886                         /*
2887                          * The option OBJPR_NOTMAPPED can be passed here
2888                          * because vm_map_delete() already performed
2889                          * pmap_remove() on the only mapping to this range
2890                          * of pages.
2891                          */
2892                         vm_object_page_remove(object, offidxstart, offidxend,
2893                             OBJPR_NOTMAPPED);
2894                         if (object->type == OBJT_SWAP)
2895                                 swap_pager_freespace(object, offidxstart,
2896                                     count);
2897                         if (offidxend >= object->size &&
2898                             offidxstart < object->size) {
2899                                 size1 = object->size;
2900                                 object->size = offidxstart;
2901                                 if (object->cred != NULL) {
2902                                         size1 -= object->size;
2903                                         KASSERT(object->charge >= ptoa(size1),
2904                                             ("object %p charge < 0", object));
2905                                         swap_release_by_cred(ptoa(size1),
2906                                             object->cred);
2907                                         object->charge -= ptoa(size1);
2908                                 }
2909                         }
2910                 }
2911                 VM_OBJECT_WUNLOCK(object);
2912         } else
2913                 entry->object.vm_object = NULL;
2914         if (map->system_map)
2915                 vm_map_entry_deallocate(entry, TRUE);
2916         else {
2917                 entry->next = curthread->td_map_def_user;
2918                 curthread->td_map_def_user = entry;
2919         }
2920 }
2921
2922 /*
2923  *      vm_map_delete:  [ internal use only ]
2924  *
2925  *      Deallocates the given address range from the target
2926  *      map.
2927  */
2928 int
2929 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2930 {
2931         vm_map_entry_t entry;
2932         vm_map_entry_t first_entry;
2933
2934         VM_MAP_ASSERT_LOCKED(map);
2935         if (start == end)
2936                 return (KERN_SUCCESS);
2937
2938         /*
2939          * Find the start of the region, and clip it
2940          */
2941         if (!vm_map_lookup_entry(map, start, &first_entry))
2942                 entry = first_entry->next;
2943         else {
2944                 entry = first_entry;
2945                 vm_map_clip_start(map, entry, start);
2946         }
2947
2948         /*
2949          * Step through all entries in this region
2950          */
2951         while ((entry != &map->header) && (entry->start < end)) {
2952                 vm_map_entry_t next;
2953
2954                 /*
2955                  * Wait for wiring or unwiring of an entry to complete.
2956                  * Also wait for any system wirings to disappear on
2957                  * user maps.
2958                  */
2959                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2960                     (vm_map_pmap(map) != kernel_pmap &&
2961                     vm_map_entry_system_wired_count(entry) != 0)) {
2962                         unsigned int last_timestamp;
2963                         vm_offset_t saved_start;
2964                         vm_map_entry_t tmp_entry;
2965
2966                         saved_start = entry->start;
2967                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2968                         last_timestamp = map->timestamp;
2969                         (void) vm_map_unlock_and_wait(map, 0);
2970                         vm_map_lock(map);
2971                         if (last_timestamp + 1 != map->timestamp) {
2972                                 /*
2973                                  * Look again for the entry because the map was
2974                                  * modified while it was unlocked.
2975                                  * Specifically, the entry may have been
2976                                  * clipped, merged, or deleted.
2977                                  */
2978                                 if (!vm_map_lookup_entry(map, saved_start,
2979                                                          &tmp_entry))
2980                                         entry = tmp_entry->next;
2981                                 else {
2982                                         entry = tmp_entry;
2983                                         vm_map_clip_start(map, entry,
2984                                                           saved_start);
2985                                 }
2986                         }
2987                         continue;
2988                 }
2989                 vm_map_clip_end(map, entry, end);
2990
2991                 next = entry->next;
2992
2993                 /*
2994                  * Unwire before removing addresses from the pmap; otherwise,
2995                  * unwiring will put the entries back in the pmap.
2996                  */
2997                 if (entry->wired_count != 0) {
2998                         vm_map_entry_unwire(map, entry);
2999                 }
3000
3001                 pmap_remove(map->pmap, entry->start, entry->end);
3002
3003                 /*
3004                  * Delete the entry only after removing all pmap
3005                  * entries pointing to its pages.  (Otherwise, its
3006                  * page frames may be reallocated, and any modify bits
3007                  * will be set in the wrong object!)
3008                  */
3009                 vm_map_entry_delete(map, entry);
3010                 entry = next;
3011         }
3012         return (KERN_SUCCESS);
3013 }
3014
3015 /*
3016  *      vm_map_remove:
3017  *
3018  *      Remove the given address range from the target map.
3019  *      This is the exported form of vm_map_delete.
3020  */
3021 int
3022 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3023 {
3024         int result;
3025
3026         vm_map_lock(map);
3027         VM_MAP_RANGE_CHECK(map, start, end);
3028         result = vm_map_delete(map, start, end);
3029         vm_map_unlock(map);
3030         return (result);
3031 }
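
/*
 * Illustrative sketch (editor's addition): because vm_map_remove()
 * handles the locking and the range check itself, an unmap path can
 * stay this small.  Hypothetical helper, disabled with "#if 0".
 */
#if 0
static int
example_unmap(vm_map_t map, vm_offset_t addr, vm_size_t len)
{

        /* The map must be unlocked here; vm_map_remove() write-locks it. */
        return (vm_map_remove(map, trunc_page(addr),
            round_page(addr + len)));
}
#endif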
3032
3033 /*
3034  *      vm_map_check_protection:
3035  *
3036  *      Assert that the target map allows the specified privilege on the
3037  *      entire address region given.  The entire region must be allocated.
3038  *
3039  *      WARNING!  This code does not and should not check whether the
3040  *      contents of the region are accessible.  For example, a smaller
3041  *      file might be mapped into a larger address space.
3042  *
3043  *      NOTE!  This code is also called by munmap().
3044  *
3045  *      The map must be locked.  A read lock is sufficient.
3046  */
3047 boolean_t
3048 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3049                         vm_prot_t protection)
3050 {
3051         vm_map_entry_t entry;
3052         vm_map_entry_t tmp_entry;
3053
3054         if (!vm_map_lookup_entry(map, start, &tmp_entry))
3055                 return (FALSE);
3056         entry = tmp_entry;
3057
3058         while (start < end) {
3059                 if (entry == &map->header)
3060                         return (FALSE);
3061                 /*
3062                  * No holes allowed!
3063                  */
3064                 if (start < entry->start)
3065                         return (FALSE);
3066                 /*
3067                  * Check protection associated with entry.
3068                  */
3069                 if ((entry->protection & protection) != protection)
3070                         return (FALSE);
3071                 /* go to next entry */
3072                 start = entry->end;
3073                 entry = entry->next;
3074         }
3075         return (TRUE);
3076 }
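
/*
 * Illustrative sketch (editor's addition): checking that a range is
 * fully mapped and readable, taking the read lock that the function
 * above requires.  Hypothetical helper, disabled with "#if 0".
 */
#if 0
static boolean_t
example_range_readable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        boolean_t ok;

        vm_map_lock_read(map);          /* a read lock is sufficient */
        ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
        vm_map_unlock_read(map);
        return (ok);
}
#endif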
3077
3078 /*
3079  *      vm_map_copy_entry:
3080  *
3081  *      Copies the contents of the source entry to the destination
3082  *      entry.  The entries *must* be aligned properly.
3083  */
3084 static void
3085 vm_map_copy_entry(
3086         vm_map_t src_map,
3087         vm_map_t dst_map,
3088         vm_map_entry_t src_entry,
3089         vm_map_entry_t dst_entry,
3090         vm_ooffset_t *fork_charge)
3091 {
3092         vm_object_t src_object;
3093         vm_map_entry_t fake_entry;
3094         vm_offset_t size;
3095         struct ucred *cred;
3096         int charged;
3097
3098         VM_MAP_ASSERT_LOCKED(dst_map);
3099
3100         if ((dst_entry->eflags | src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3101                 return;
3102
3103         if (src_entry->wired_count == 0 ||
3104             (src_entry->protection & VM_PROT_WRITE) == 0) {
3105                 /*
3106                  * If the source entry is marked needs_copy, it is already
3107                  * write-protected.
3108                  */
3109                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3110                     (src_entry->protection & VM_PROT_WRITE) != 0) {
3111                         pmap_protect(src_map->pmap,
3112                             src_entry->start,
3113                             src_entry->end,
3114                             src_entry->protection & ~VM_PROT_WRITE);
3115                 }
3116
3117                 /*
3118                  * Make a copy of the object.
3119                  */
3120                 size = src_entry->end - src_entry->start;
3121                 if ((src_object = src_entry->object.vm_object) != NULL) {
3122                         VM_OBJECT_WLOCK(src_object);
3123                         charged = ENTRY_CHARGED(src_entry);
3124                         if (src_object->handle == NULL &&
3125                             (src_object->type == OBJT_DEFAULT ||
3126                             src_object->type == OBJT_SWAP)) {
3127                                 vm_object_collapse(src_object);
3128                                 if ((src_object->flags & (OBJ_NOSPLIT |
3129                                     OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3130                                         vm_object_split(src_entry);
3131                                         src_object =
3132                                             src_entry->object.vm_object;
3133                                 }
3134                         }
3135                         vm_object_reference_locked(src_object);
3136                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3137                         if (src_entry->cred != NULL &&
3138                             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3139                                 KASSERT(src_object->cred == NULL,
3140                                     ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3141                                      src_object));
3142                                 src_object->cred = src_entry->cred;
3143                                 src_object->charge = size;
3144                         }
3145                         VM_OBJECT_WUNLOCK(src_object);
3146                         dst_entry->object.vm_object = src_object;
3147                         if (charged) {
3148                                 cred = curthread->td_ucred;
3149                                 crhold(cred);
3150                                 dst_entry->cred = cred;
3151                                 *fork_charge += size;
3152                                 if (!(src_entry->eflags &
3153                                       MAP_ENTRY_NEEDS_COPY)) {
3154                                         crhold(cred);
3155                                         src_entry->cred = cred;
3156                                         *fork_charge += size;
3157                                 }
3158                         }
3159                         src_entry->eflags |= MAP_ENTRY_COW |
3160                             MAP_ENTRY_NEEDS_COPY;
3161                         dst_entry->eflags |= MAP_ENTRY_COW |
3162                             MAP_ENTRY_NEEDS_COPY;
3163                         dst_entry->offset = src_entry->offset;
3164                         if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3165                                 /*
3166                                  * MAP_ENTRY_VN_WRITECNT cannot
3167                                  * indicate write reference from
3168                                  * src_entry, since the entry is
3169                                  * marked as needs copy.  Allocate a
3170                                  * fake entry that is used to
3171                                  * decrement object->un_pager.vnp.writecount
3172                                  * at the appropriate time.  Attach
3173                                  * fake_entry to the deferred list.
3174                                  */
3175                                 fake_entry = vm_map_entry_create(dst_map);
3176                                 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3177                                 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3178                                 vm_object_reference(src_object);
3179                                 fake_entry->object.vm_object = src_object;
3180                                 fake_entry->start = src_entry->start;
3181                                 fake_entry->end = src_entry->end;
3182                                 fake_entry->next = curthread->td_map_def_user;
3183                                 curthread->td_map_def_user = fake_entry;
3184                         }
3185                 } else {
3186                         dst_entry->object.vm_object = NULL;
3187                         dst_entry->offset = 0;
3188                         if (src_entry->cred != NULL) {
3189                                 dst_entry->cred = curthread->td_ucred;
3190                                 crhold(dst_entry->cred);
3191                                 *fork_charge += size;
3192                         }
3193                 }
3194
3195                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3196                     dst_entry->end - dst_entry->start, src_entry->start);
3197         } else {
3198                 /*
3199                  * We don't want to make writeable wired pages copy-on-write.
3200                  * Immediately copy these pages into the new map by simulating
3201                  * page faults.  The new pages are pageable.
3202                  */
3203                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3204                     fork_charge);
3205         }
3206 }
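
/*
 * Editor's note: the branch structure above reduces to one predicate.
 * Sketch only; the helper name is hypothetical.
 */
#if 0
static boolean_t
example_can_share_cow(vm_map_entry_t src_entry)
{

        /*
         * Copy-on-write sharing is used unless the source is wired and
         * writable; in that case the pages are copied eagerly through
         * vm_fault_copy_entry().
         */
        return (src_entry->wired_count == 0 ||
            (src_entry->protection & VM_PROT_WRITE) == 0);
}
#endif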
3207
3208 /*
3209  * vmspace_map_entry_forked:
3210  * Update the newly-forked vmspace each time a map entry is inherited
3211  * or copied.  The values for vm_dsize and vm_tsize are approximate
3212  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3213  */
3214 static void
3215 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3216     vm_map_entry_t entry)
3217 {
3218         vm_size_t entrysize;
3219         vm_offset_t newend;
3220
3221         entrysize = entry->end - entry->start;
3222         vm2->vm_map.size += entrysize;
3223         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3224                 vm2->vm_ssize += btoc(entrysize);
3225         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3226             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3227                 newend = MIN(entry->end,
3228                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3229                 vm2->vm_dsize += btoc(newend - entry->start);
3230         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3231             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3232                 newend = MIN(entry->end,
3233                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3234                 vm2->vm_tsize += btoc(newend - entry->start);
3235         }
3236 }
3237
3238 /*
3239  * vmspace_fork:
3240  * Create a new process vmspace structure and vm_map
3241  * based on those of an existing process.  The new map
3242  * is based on the old map, according to the inheritance
3243  * values on the regions in that map.
3244  *
3245  * XXX It might be worth coalescing the entries added to the new vmspace.
3246  *
3247  * The source map must not be locked.
3248  */
3249 struct vmspace *
3250 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3251 {
3252         struct vmspace *vm2;
3253         vm_map_t new_map, old_map;
3254         vm_map_entry_t new_entry, old_entry;
3255         vm_object_t object;
3256         int locked;
3257
3258         old_map = &vm1->vm_map;
3259         /* Copy immutable fields of vm1 to vm2. */
3260         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
3261         if (vm2 == NULL)
3262                 return (NULL);
3263         vm2->vm_taddr = vm1->vm_taddr;
3264         vm2->vm_daddr = vm1->vm_daddr;
3265         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3266         vm_map_lock(old_map);
3267         if (old_map->busy)
3268                 vm_map_wait_busy(old_map);
3269         new_map = &vm2->vm_map;
3270         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3271         KASSERT(locked, ("vmspace_fork: lock failed"));
3272
3273         old_entry = old_map->header.next;
3274
3275         while (old_entry != &old_map->header) {
3276                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3277                         panic("vmspace_fork: encountered a submap");
3278
3279                 switch (old_entry->inheritance) {
3280                 case VM_INHERIT_NONE:
3281                         break;
3282
3283                 case VM_INHERIT_SHARE:
3284                         /*
3285                          * Clone the entry, creating the shared object if necessary.
3286                          */
3287                         object = old_entry->object.vm_object;
3288                         if (object == NULL) {
3289                                 object = vm_object_allocate(OBJT_DEFAULT,
3290                                         atop(old_entry->end - old_entry->start));
3291                                 old_entry->object.vm_object = object;
3292                                 old_entry->offset = 0;
3293                                 if (old_entry->cred != NULL) {
3294                                         object->cred = old_entry->cred;
3295                                         object->charge = old_entry->end -
3296                                             old_entry->start;
3297                                         old_entry->cred = NULL;
3298                                 }
3299                         }
3300
3301                         /*
3302                          * Add the reference before calling vm_object_shadow
3303                          * to ensure that a shadow object is created.
3304                          */
3305                         vm_object_reference(object);
3306                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3307                                 vm_object_shadow(&old_entry->object.vm_object,
3308                                     &old_entry->offset,
3309                                     old_entry->end - old_entry->start);
3310                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3311                                 /* Transfer the second reference too. */
3312                                 vm_object_reference(
3313                                     old_entry->object.vm_object);
3314
3315                                 /*
3316                                  * As in vm_map_simplify_entry(), the
3317                                  * vnode lock will not be acquired in
3318                                  * this call to vm_object_deallocate().
3319                                  */
3320                                 vm_object_deallocate(object);
3321                                 object = old_entry->object.vm_object;
3322                         }
3323                         VM_OBJECT_WLOCK(object);
3324                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
3325                         if (old_entry->cred != NULL) {
3326                                 KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3327                                 object->cred = old_entry->cred;
3328                                 object->charge = old_entry->end - old_entry->start;
3329                                 old_entry->cred = NULL;
3330                         }
3331
3332                         /*
3333                          * Assert the correct state of the vnode
3334                          * v_writecount while the object is locked, to
3335                          * not relock it later for the assertion
3336                          * correctness.
3337                          */
3338                         if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3339                             object->type == OBJT_VNODE) {
3340                                 KASSERT(((struct vnode *)object->handle)->
3341                                     v_writecount > 0,
3342                                     ("vmspace_fork: v_writecount %p", object));
3343                                 KASSERT(object->un_pager.vnp.writemappings > 0,
3344                                     ("vmspace_fork: vnp.writecount %p",
3345                                     object));
3346                         }
3347                         VM_OBJECT_WUNLOCK(object);
3348
3349                         /*
3350                          * Clone the entry, referencing the shared object.
3351                          */
3352                         new_entry = vm_map_entry_create(new_map);
3353                         *new_entry = *old_entry;
3354                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3355                             MAP_ENTRY_IN_TRANSITION);
3356                         new_entry->wiring_thread = NULL;
3357                         new_entry->wired_count = 0;
3358                         if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3359                                 vnode_pager_update_writecount(object,
3360                                     new_entry->start, new_entry->end);
3361                         }
3362
3363                         /*
3364                          * Insert the entry into the new map -- we know we're
3365                          * inserting at the end of the new map.
3366                          */
3367                         vm_map_entry_link(new_map, new_map->header.prev,
3368                             new_entry);
3369                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3370
3371                         /*
3372                          * Update the physical map
3373                          */
3374                         pmap_copy(new_map->pmap, old_map->pmap,
3375                             new_entry->start,
3376                             (old_entry->end - old_entry->start),
3377                             old_entry->start);
3378                         break;
3379
3380                 case VM_INHERIT_COPY:
3381                         /*
3382                          * Clone the entry and link into the map.
3383                          */
3384                         new_entry = vm_map_entry_create(new_map);
3385                         *new_entry = *old_entry;
3386                         /*
3387                          * Copied entry is COW over the old object.
3388                          */
3389                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3390                             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3391                         new_entry->wiring_thread = NULL;
3392                         new_entry->wired_count = 0;
3393                         new_entry->object.vm_object = NULL;
3394                         new_entry->cred = NULL;
3395                         vm_map_entry_link(new_map, new_map->header.prev,
3396                             new_entry);
3397                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3398                         vm_map_copy_entry(old_map, new_map, old_entry,
3399                             new_entry, fork_charge);
3400                         break;
3401                 }
3402                 old_entry = old_entry->next;
3403         }
3404         /*
3405          * Use inlined vm_map_unlock() to postpone handling the deferred
3406          * map entries, which cannot be done until both old_map and
3407          * new_map locks are released.
3408          */
3409         sx_xunlock(&old_map->lock);
3410         sx_xunlock(&new_map->lock);
3411         vm_map_process_deferred();
3412
3413         return (vm2);
3414 }
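
/*
 * Illustrative sketch (editor's addition) of the call pattern used by
 * fork-like paths; compare vmspace_unshare() below, which does exactly
 * this.  Hypothetical helper, disabled with "#if 0".
 */
#if 0
static struct vmspace *
example_fork_vmspace(struct proc *p1)
{
        struct vmspace *vm2;
        vm_ooffset_t fork_charge;

        fork_charge = 0;
        vm2 = vmspace_fork(p1->p_vmspace, &fork_charge);
        if (vm2 == NULL)
                return (NULL);
        /* Reserve swap for the copied mappings, or undo the fork. */
        if (!swap_reserve_by_cred(fork_charge, p1->p_ucred)) {
                vmspace_free(vm2);
                return (NULL);
        }
        return (vm2);
}
#endif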
3415
3416 int
3417 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3418     vm_prot_t prot, vm_prot_t max, int cow)
3419 {
3420         vm_size_t growsize, init_ssize;
3421         rlim_t lmemlim, vmemlim;
3422         int rv;
3423
3424         growsize = sgrowsiz;
3425         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3426         vm_map_lock(map);
3427         PROC_LOCK(curproc);
3428         lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
3429         vmemlim = lim_cur(curproc, RLIMIT_VMEM);
3430         PROC_UNLOCK(curproc);
3431         if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3432                 if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
3433                         rv = KERN_NO_SPACE;
3434                         goto out;
3435                 }
3436         }
3437         /* If we would blow our VMEM resource limit, no go */
3438         if (map->size + init_ssize > vmemlim) {
3439                 rv = KERN_NO_SPACE;
3440                 goto out;
3441         }
3442         rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
3443             max, cow);
3444 out:
3445         vm_map_unlock(map);
3446         return (rv);
3447 }
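
/*
 * Illustrative sketch (editor's addition): a typical caller reserves
 * the range [bos, bos + maxssize) for a downward-growing stack; only
 * the top part (at most sgrowsiz bytes) is mapped initially.
 * Hypothetical helper, disabled with "#if 0".
 */
#if 0
static int
example_map_stack(vm_map_t map, vm_offset_t bos, vm_size_t maxssize)
{

        return (vm_map_stack(map, bos, maxssize, VM_PROT_ALL, VM_PROT_ALL,
            MAP_STACK_GROWS_DOWN));
}
#endif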
3448
3449 static int
3450 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3451     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
3452 {
3453         vm_map_entry_t new_entry, prev_entry;
3454         vm_offset_t bot, top;
3455         vm_size_t init_ssize;
3456         int orient, rv;
3457
3458         /*
3459          * The stack orientation is piggybacked with the cow argument.
3460          * Extract it into orient and mask the cow argument so that we
3461          * don't pass it around further.
3462          * NOTE: We explicitly allow bi-directional stacks.
3463          */
3464         orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
3465         KASSERT(orient != 0, ("No stack grow direction"));
3466
3467         if (addrbos < vm_map_min(map) ||
3468             addrbos > vm_map_max(map) ||
3469             addrbos + max_ssize < addrbos)
3470                 return (KERN_NO_SPACE);
3471
3472         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3473
3474         /* If addr is already mapped, no go */
3475         if (vm_map_lookup_entry(map, addrbos, &prev_entry))
3476                 return (KERN_NO_SPACE);
3477
3478         /*
3479          * If we can't accommodate max_ssize in the current mapping, no go.
3480          * However, we need to be aware that subsequent user mappings might
3481          * map into the space we have reserved for stack, and currently this
3482          * space is not protected.
3483          *
3484          * Hopefully we will at least detect this condition when we try to
3485          * grow the stack.
3486          */
3487         if ((prev_entry->next != &map->header) &&
3488             (prev_entry->next->start < addrbos + max_ssize))
3489                 return (KERN_NO_SPACE);
3490
3491         /*
3492          * We initially map a stack of only init_ssize.  We will grow as
3493          * needed later.  Depending on the orientation of the stack (i.e.
3494          * the grow direction) we either map at the top of the range, the
3495          * bottom of the range or in the middle.
3496          *
3497          * Note: we would normally expect prot and max to be VM_PROT_ALL,
3498          * and cow to be 0.  Possibly we should eliminate these as input
3499          * parameters, and just pass these values here in the insert call.
3500          */
3501         if (orient == MAP_STACK_GROWS_DOWN)
3502                 bot = addrbos + max_ssize - init_ssize;
3503         else if (orient == MAP_STACK_GROWS_UP)
3504                 bot = addrbos;
3505         else
3506                 bot = round_page(addrbos + max_ssize / 2 - init_ssize / 2);
3507         top = bot + init_ssize;
3508         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3509
3510         /* Now set the avail_ssize amount. */
3511         if (rv == KERN_SUCCESS) {
3512                 new_entry = prev_entry->next;
3513                 if (new_entry->end != top || new_entry->start != bot)
3514                         panic("Bad entry start/end for new stack entry");
3515
3516                 new_entry->avail_ssize = max_ssize - init_ssize;
3517                 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
3518                     (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
3519                     ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
3520                 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
3521                     (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
3522                     ("new entry lacks MAP_ENTRY_GROWS_UP"));
3523         }
3524
3525         return (rv);
3526 }
3527
3528 static int stack_guard_page = 0;
3529 TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
3530 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
3531     &stack_guard_page, 0,
3532     "Insert stack guard page ahead of the growable segments.");
3533
3534 /*
3535  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3536  * desired address is already mapped, or if we successfully grow the
3537  * stack, and also if addr is outside the stack range (strange, but it
3538  * preserves compatibility with the grow function in vm_machdep.c).
3539  */
3540 int
3541 vm_map_growstack(struct proc *p, vm_offset_t addr)
3542 {
3543         vm_map_entry_t next_entry, prev_entry;
3544         vm_map_entry_t new_entry, stack_entry;
3545         struct vmspace *vm = p->p_vmspace;
3546         vm_map_t map = &vm->vm_map;
3547         vm_offset_t end;
3548         vm_size_t growsize;
3549         size_t grow_amount, max_grow;
3550         rlim_t lmemlim, stacklim, vmemlim;
3551         int is_procstack, rv;
3552         struct ucred *cred;
3553 #ifdef notyet
3554         uint64_t limit;
3555 #endif
3556 #ifdef RACCT
3557         int error;
3558 #endif
3559
3560 Retry:
3561         PROC_LOCK(p);
3562         lmemlim = lim_cur(p, RLIMIT_MEMLOCK);
3563         stacklim = lim_cur(p, RLIMIT_STACK);
3564         vmemlim = lim_cur(p, RLIMIT_VMEM);
3565         PROC_UNLOCK(p);
3566
3567         vm_map_lock_read(map);
3568
3569         /* If addr is already in the entry range, no need to grow. */
3570         if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3571                 vm_map_unlock_read(map);
3572                 return (KERN_SUCCESS);
3573         }
3574
3575         next_entry = prev_entry->next;
3576         if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3577                 /*
3578                  * This entry does not grow upwards. Since the address lies
3579                  * beyond this entry, the next entry (if one exists) has to
3580                  * be a downward growable entry. The entry list header is
3581                  * never a growable entry, so it suffices to check the flags.
3582                  */
3583                 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3584                         vm_map_unlock_read(map);
3585                         return (KERN_SUCCESS);
3586                 }
3587                 stack_entry = next_entry;
3588         } else {
3589                 /*
3590                  * This entry grows upward. If the next entry does not at
3591                  * least grow downwards, this is the entry we need to grow;
3592                  * otherwise, we have two possible choices and we have to
3593                  * select one.
3594                  */
3595                 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3596                         /*
3597                          * We have two choices: grow the entry closest to
3598                          * the address to minimize the amount of growth.
3599                          */
3600                         if (addr - prev_entry->end <= next_entry->start - addr)
3601                                 stack_entry = prev_entry;
3602                         else
3603                                 stack_entry = next_entry;
3604                 } else
3605                         stack_entry = prev_entry;
3606         }
3607
3608         if (stack_entry == next_entry) {
3609                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("not GROWS_DOWN"));
3610                 KASSERT(addr < stack_entry->start, ("addr above GROWS_DOWN entry"));
3611                 end = (prev_entry != &map->header) ? prev_entry->end :
3612                     stack_entry->start - stack_entry->avail_ssize;
3613                 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3614                 max_grow = stack_entry->start - end;
3615         } else {
3616                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("not GROWS_UP"));
3617                 KASSERT(addr >= stack_entry->end, ("addr below GROWS_UP entry"));
3618                 end = (next_entry != &map->header) ? next_entry->start :
3619                     stack_entry->end + stack_entry->avail_ssize;
3620                 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3621                 max_grow = end - stack_entry->end;
3622         }
3623
3624         if (grow_amount > stack_entry->avail_ssize) {
3625                 vm_map_unlock_read(map);
3626                 return (KERN_NO_SPACE);
3627         }
3628
3629         /*
3630          * If there is no longer enough space between the entries, no go;
3631          * adjust the available space.  Note: this should only happen if the
3632          * user has mapped into the stack area after the stack was created,
3633          * and is probably an error.
3634          *
3635          * This also effectively destroys any guard page the user might have
3636          * intended by limiting the stack size.
3637          */
3638         if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3639                 if (vm_map_lock_upgrade(map))
3640                         goto Retry;
3641
3642                 stack_entry->avail_ssize = max_grow;
3643
3644                 vm_map_unlock(map);
3645                 return (KERN_NO_SPACE);
3646         }
3647
3648         is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr &&
3649             addr < (vm_offset_t)p->p_sysent->sv_usrstack) ? 1 : 0;
3650
3651         /*
3652          * If this is the main process stack, see if we're over the stack
3653          * limit.
3654          */
3655         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3656                 vm_map_unlock_read(map);
3657                 return (KERN_NO_SPACE);
3658         }
3659 #ifdef RACCT
3660         if (racct_enable) {
3661                 PROC_LOCK(p);
3662                 if (is_procstack && racct_set(p, RACCT_STACK,
3663                     ctob(vm->vm_ssize) + grow_amount)) {
3664                         PROC_UNLOCK(p);
3665                         vm_map_unlock_read(map);
3666                         return (KERN_NO_SPACE);
3667                 }
3668                 PROC_UNLOCK(p);
3669         }
3670 #endif
3671
3672         /* Round up the grow amount to a multiple of sgrowsiz. */
3673         growsize = sgrowsiz;
3674         grow_amount = roundup(grow_amount, growsize);
3675         if (grow_amount > stack_entry->avail_ssize)
3676                 grow_amount = stack_entry->avail_ssize;
3677         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3678                 grow_amount = trunc_page((vm_size_t)stacklim) -
3679                     ctob(vm->vm_ssize);
3680         }
3681 #ifdef notyet
3682         PROC_LOCK(p);
3683         limit = racct_get_available(p, RACCT_STACK);
3684         PROC_UNLOCK(p);
3685         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3686                 grow_amount = limit - ctob(vm->vm_ssize);
3687 #endif
3688         if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3689                 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3690                         vm_map_unlock_read(map);
3691                         rv = KERN_NO_SPACE;
3692                         goto out;
3693                 }
3694 #ifdef RACCT
3695                 if (racct_enable) {
3696                         PROC_LOCK(p);
3697                         if (racct_set(p, RACCT_MEMLOCK,
3698                             ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3699                                 PROC_UNLOCK(p);
3700                                 vm_map_unlock_read(map);
3701                                 rv = KERN_NO_SPACE;
3702                                 goto out;
3703                         }
3704                         PROC_UNLOCK(p);
3705                 }
3706 #endif
3707         }
3708         /* If we would blow our VMEM resource limit, no go */
3709         if (map->size + grow_amount > vmemlim) {
3710                 vm_map_unlock_read(map);
3711                 rv = KERN_NO_SPACE;
3712                 goto out;
3713         }
3714 #ifdef RACCT
3715         if (racct_enable) {
3716                 PROC_LOCK(p);
3717                 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3718                         PROC_UNLOCK(p);
3719                         vm_map_unlock_read(map);
3720                         rv = KERN_NO_SPACE;
3721                         goto out;
3722                 }
3723                 PROC_UNLOCK(p);
3724         }
3725 #endif
3726
3727         if (vm_map_lock_upgrade(map))
3728                 goto Retry;
3729
3730         if (stack_entry == next_entry) {
3731                 /*
3732                  * Growing downward.
3733                  */
3734                 /* Get the preliminary new entry start value */
3735                 addr = stack_entry->start - grow_amount;
3736
3737                 /*
3738                  * If this puts us into the previous entry, cut back our
3739                  * growth to the available space. Also, see the note above.
3740                  */
3741                 if (addr < end) {
3742                         stack_entry->avail_ssize = max_grow;
3743                         addr = end;
3744                         if (stack_guard_page)
3745                                 addr += PAGE_SIZE;
3746                 }
3747
3748                 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3749                     next_entry->protection, next_entry->max_protection,
3750                     MAP_STACK_GROWS_DOWN);
3751
3752                 /* Adjust the available stack space by the amount we grew. */
3753                 if (rv == KERN_SUCCESS) {
3754                         new_entry = prev_entry->next;
3755                         KASSERT(new_entry == stack_entry->prev, ("new entry not prev"));
3756                         KASSERT(new_entry->end == stack_entry->start, ("end mismatch"));
3757                         KASSERT(new_entry->start == addr, ("start mismatch"));
3758                         KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) !=
3759                             0, ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
3760                         grow_amount = new_entry->end - new_entry->start;
3761                         new_entry->avail_ssize = stack_entry->avail_ssize -
3762                             grow_amount;
3763                         stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3764                 }
3765         } else {
3766                 /*
3767                  * Growing upward.
3768                  */
3769                 addr = stack_entry->end + grow_amount;
3770
3771                 /*
3772                  * If this puts us into the next entry, cut back our growth
3773                  * to the available space. Also, see the note above.
3774                  */
3775                 if (addr > end) {
3776                         stack_entry->avail_ssize = end - stack_entry->end;
3777                         addr = end;
3778                         if (stack_guard_page)
3779                                 addr -= PAGE_SIZE;
3780                 }
3781
3782                 grow_amount = addr - stack_entry->end;
3783                 cred = stack_entry->cred;
3784                 if (cred == NULL && stack_entry->object.vm_object != NULL)
3785                         cred = stack_entry->object.vm_object->cred;
3786                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3787                         rv = KERN_NO_SPACE;
3788                 /* Grow the underlying object if applicable. */
3789                 else if (stack_entry->object.vm_object == NULL ||
3790                     vm_object_coalesce(stack_entry->object.vm_object,
3791                     stack_entry->offset,
3792                     (vm_size_t)(stack_entry->end - stack_entry->start),
3793                     (vm_size_t)grow_amount, cred != NULL)) {
3794                         map->size += (addr - stack_entry->end);
3795                         /* Update the current entry. */
3796                         stack_entry->end = addr;
3797                         stack_entry->avail_ssize -= grow_amount;
3798                         vm_map_entry_resize_free(map, stack_entry);
3799                         rv = KERN_SUCCESS;
3800
3801                         if (next_entry != &map->header)
3802                                 vm_map_clip_start(map, next_entry, addr);
3803                 } else
3804                         rv = KERN_FAILURE;
3805         }
3806
3807         if (rv == KERN_SUCCESS && is_procstack)
3808                 vm->vm_ssize += btoc(grow_amount);
3809
3810         vm_map_unlock(map);
3811
3812         /*
3813          * Heed the MAP_WIREFUTURE flag if it was set for this process.
3814          */
3815         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3816                 vm_map_wire(map,
3817                     (stack_entry == next_entry) ? addr : addr - grow_amount,
3818                     (stack_entry == next_entry) ? stack_entry->start : addr,
3819                     (p->p_flag & P_SYSTEM)
3820                     ? VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES
3821                     : VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
3822         }
3823
3824 out:
3825 #ifdef RACCT
3826         if (racct_enable && rv != KERN_SUCCESS) {
3827                 PROC_LOCK(p);
3828                 error = racct_set(p, RACCT_VMEM, map->size);
3829                 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3830                 if (!old_mlock) {
3831                         error = racct_set(p, RACCT_MEMLOCK,
3832                             ptoa(pmap_wired_count(map->pmap)));
3833                         KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3834                 }
3835                 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3836                 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3837                 PROC_UNLOCK(p);
3838         }
3839 #endif
3840
3841         return (rv);
3842 }
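
/*
 * Illustrative sketch (editor's addition): how a page-fault path might
 * probe for stack growth before retrying the fault.  Hypothetical
 * helper; the real callers live in the trap and vm_fault glue.
 */
#if 0
static int
example_grow_stack_for_fault(struct proc *p, vm_offset_t fault_addr)
{

        /*
         * KERN_SUCCESS only promises that fault_addr is now (or already
         * was) mapped, or lies outside any stack range; the caller must
         * still retry the fault itself.
         */
        return (vm_map_growstack(p, fault_addr));
}
#endif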
3843
3844 /*
3845  * Unshare the specified VM space for exec.  If other processes share
3846  * it, then create a new one.  The new vmspace has no mappings.
3847  */
3848 int
3849 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3850 {
3851         struct vmspace *oldvmspace = p->p_vmspace;
3852         struct vmspace *newvmspace;
3853
3854         KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3855             ("vmspace_exec recursed"));
3856         newvmspace = vmspace_alloc(minuser, maxuser, NULL);
3857         if (newvmspace == NULL)
3858                 return (ENOMEM);
3859         newvmspace->vm_swrss = oldvmspace->vm_swrss;
3860         /*
3861          * This code is written like this for prototype purposes.  The
3862          * goal is to avoid running down the vmspace here, but let the
3863          * other processes that are still using the vmspace finally
3864          * run it down.  Even though there is little or no chance of blocking
3865          * here, it is a good idea to keep this form for future mods.
3866          */
3867         PROC_VMSPACE_LOCK(p);
3868         p->p_vmspace = newvmspace;
3869         PROC_VMSPACE_UNLOCK(p);
3870         if (p == curthread->td_proc)
3871                 pmap_activate(curthread);
3872         curthread->td_pflags |= TDP_EXECVMSPC;
3873         return (0);
3874 }
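
/*
 * Illustrative sketch (editor's addition) of the exec-time call; cf.
 * exec_new_vmspace().  The TDP_EXECVMSPC flag set above tells the exec
 * code to free the old vmspace once the switch is complete.
 */
#if 0
static int
example_exec_vmspace(struct proc *p, struct sysentvec *sv)
{

        return (vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser));
}
#endif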
3875
3876 /*
3877  * Unshare the specified VM space for forcing COW.  This
3878  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3879  */
3880 int
3881 vmspace_unshare(struct proc *p)
3882 {
3883         struct vmspace *oldvmspace = p->p_vmspace;
3884         struct vmspace *newvmspace;
3885         vm_ooffset_t fork_charge;
3886
3887         if (oldvmspace->vm_refcnt == 1)
3888                 return (0);
3889         fork_charge = 0;
3890         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3891         if (newvmspace == NULL)
3892                 return (ENOMEM);
3893         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3894                 vmspace_free(newvmspace);
3895                 return (ENOMEM);
3896         }
3897         PROC_VMSPACE_LOCK(p);
3898         p->p_vmspace = newvmspace;
3899         PROC_VMSPACE_UNLOCK(p);
3900         if (p == curthread->td_proc)
3901                 pmap_activate(curthread);
3902         vmspace_free(oldvmspace);
3903         return (0);
3904 }
3905
3906 /*
3907  *      vm_map_lookup:
3908  *
3909  *      Finds the VM object, offset, and
3910  *      protection for a given virtual address in the
3911  *      specified map, assuming a page fault of the
3912  *      type specified.
3913  *
3914  *      Leaves the map in question locked for read; return
3915  *      values are guaranteed until a vm_map_lookup_done
3916  *      call is performed.  Note that the map argument
3917  *      is in/out; the returned map must be used in
3918  *      the call to vm_map_lookup_done.
3919  *
3920  *      A handle (out_entry) is returned for use in
3921  *      vm_map_lookup_done, to make that fast.
3922  *
3923  *      If a lookup is requested with "write protection"
3924  *      specified, the map may be changed to perform virtual
3925  *      copying operations, although the data referenced will
3926  *      remain the same.
3927  */
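
/*
 * Illustrative sketch (editor's addition) of the lookup protocol
 * described above: the map stays read-locked from vm_map_lookup()
 * until vm_map_lookup_done().  Hypothetical helper, disabled with
 * "#if 0".
 */
#if 0
static int
example_lookup(vm_map_t map, vm_offset_t va)
{
        vm_map_entry_t entry;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        int rv;

        rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
            &pindex, &prot, &wired);
        if (rv != KERN_SUCCESS)
                return (rv);
        /* ... use object and pindex while the read lock is held ... */
        vm_map_lookup_done(map, entry);         /* drops the read lock */
        return (KERN_SUCCESS);
}
#endif
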
3928 int
3929 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3930               vm_offset_t vaddr,
3931               vm_prot_t fault_typea,
3932               vm_map_entry_t *out_entry,        /* OUT */
3933               vm_object_t *object,              /* OUT */
3934               vm_pindex_t *pindex,              /* OUT */
3935               vm_prot_t *out_prot,              /* OUT */
3936               boolean_t *wired)                 /* OUT */
3937 {
3938         vm_map_entry_t entry;
3939         vm_map_t map = *var_map;
3940         vm_prot_t prot;
3941         vm_prot_t fault_type = fault_typea;
3942         vm_object_t eobject;
3943         vm_size_t size;
3944         struct ucred *cred;
3945
3946 RetryLookup:;
3947
3948         vm_map_lock_read(map);
3949
3950         /*
3951          * Lookup the faulting address.
3952          */
3953         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3954                 vm_map_unlock_read(map);
3955                 return (KERN_INVALID_ADDRESS);
3956         }
3957
3958         entry = *out_entry;
3959
3960         /*
3961          * Handle submaps.
3962          */
3963         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3964                 vm_map_t old_map = map;
3965
3966                 *var_map = map = entry->object.sub_map;
3967                 vm_map_unlock_read(old_map);
3968                 goto RetryLookup;
3969         }
3970
3971         /*
3972          * Check whether this task is allowed to have this page.
3973          */
3974         prot = entry->protection;
3975         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3976         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3977                 vm_map_unlock_read(map);
3978                 return (KERN_PROTECTION_FAILURE);
3979         }
3980         KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
3981             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
3982             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
3983             ("entry %p flags %x", entry, entry->eflags));
3984         if ((fault_typea & VM_PROT_COPY) != 0 &&
3985             (entry->max_protection & VM_PROT_WRITE) == 0 &&
3986             (entry->eflags & MAP_ENTRY_COW) == 0) {
3987                 vm_map_unlock_read(map);
3988                 return (KERN_PROTECTION_FAILURE);
3989         }
3990
3991         /*
3992          * If this page is not pageable, we have to get it for all possible
3993          * accesses.
3994          */
3995         *wired = (entry->wired_count != 0);
3996         if (*wired)
3997                 fault_type = entry->protection;
3998         size = entry->end - entry->start;
3999         /*
4000          * If the entry was copy-on-write, shadow it now or demote access:
4001          */
        if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                /*
                 * If we want to write the page, we may as well handle that
                 * now since we've got the map locked.
                 *
                 * If we don't need to write the page, we just demote the
                 * permissions allowed.
                 */
                if ((fault_type & VM_PROT_WRITE) != 0 ||
                    (fault_typea & VM_PROT_COPY) != 0) {
                        /*
                         * Make a new object, and place it in the object
                         * chain.  Note that no new references have appeared
                         * -- one just moved from the map to the new
                         * object.
                         */
                        if (vm_map_lock_upgrade(map))
                                goto RetryLookup;

                        if (entry->cred == NULL) {
                                /*
                                 * The entry has not been charged yet, so
                                 * the current thread's credential (e.g.
                                 * the debugger's, for a forced copy via
                                 * VM_PROT_COPY) is charged for the
                                 * memory.
                                 */
                                cred = curthread->td_ucred;
                                crhold(cred);
                                if (!swap_reserve_by_cred(size, cred)) {
                                        crfree(cred);
                                        vm_map_unlock(map);
                                        return (KERN_RESOURCE_SHORTAGE);
                                }
                                entry->cred = cred;
                        }
                        vm_object_shadow(&entry->object.vm_object,
                            &entry->offset, size);
                        entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
                        eobject = entry->object.vm_object;
                        if (eobject->cred != NULL) {
                                /*
                                 * The object was not shadowed: it still
                                 * carries its own charge, so release the
                                 * reservation made above.
                                 */
                                swap_release_by_cred(size, entry->cred);
                                crfree(entry->cred);
                                entry->cred = NULL;
                        } else if (entry->cred != NULL) {
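                                /*
                                 * Hand the entry's charge over to the
                                 * newly created shadow object.
                                 */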
                                VM_OBJECT_WLOCK(eobject);
                                eobject->cred = entry->cred;
                                eobject->charge = size;
                                VM_OBJECT_WUNLOCK(eobject);
                                entry->cred = NULL;
                        }

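                        /*
                         * Drop back to a read lock; the map is left
                         * read-locked on successful return.
                         */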
                        vm_map_lock_downgrade(map);
                } else {
                        /*
                         * We're attempting to read a copy-on-write page --
                         * don't allow writes.
                         */
                        prot &= ~VM_PROT_WRITE;
                }
        }

        /*
         * Create an object if necessary.
         */
        if (entry->object.vm_object == NULL &&
            !map->system_map) {
                if (vm_map_lock_upgrade(map))
                        goto RetryLookup;
                entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
                    atop(size));
                entry->offset = 0;
                if (entry->cred != NULL) {
                        VM_OBJECT_WLOCK(entry->object.vm_object);
                        entry->object.vm_object->cred = entry->cred;
                        entry->object.vm_object->charge = size;
                        VM_OBJECT_WUNLOCK(entry->object.vm_object);
                        entry->cred = NULL;
                }
                vm_map_lock_downgrade(map);
        }

        /*
         * Return the object/offset from this entry.  If the entry was
         * copy-on-write or empty, it has been fixed up.
         */
        *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
        *object = entry->object.vm_object;

        *out_prot = prot;
        return (KERN_SUCCESS);
}
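
/*
 * Sketch of the expected calling pattern for vm_map_lookup() and
 * vm_map_lookup_done().  This is illustrative only; the variable names
 * and error handling are hypothetical, loosely modeled on what a
 * page-fault handler does, and are not a verbatim excerpt from any
 * caller:
 *
 *      vm_map_t map = &curproc->p_vmspace->vm_map;
 *      vm_map_entry_t entry;
 *      vm_object_t object;
 *      vm_pindex_t pindex;
 *      vm_prot_t prot;
 *      boolean_t wired;
 *
 *      if (vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
 *          &pindex, &prot, &wired) != KERN_SUCCESS)
 *              return (KERN_FAILURE);
 *      (... access the page at pindex in object; the map stays
 *      read-locked until the lookup is finished ...)
 *      vm_map_lookup_done(map, entry);
 *
 * Note that the lookup may have replaced map with a submap, so the
 * updated map, not the original one, must be passed to
 * vm_map_lookup_done().
 */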

/*
 *      vm_map_lookup_locked:
 *
 *      Lookup the faulting address.  A version of vm_map_lookup that returns
 *      KERN_FAILURE instead of blocking on map lock or memory allocation.
 */
int
vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
                     vm_offset_t vaddr,
                     vm_prot_t fault_typea,
                     vm_map_entry_t *out_entry, /* OUT */
                     vm_object_t *object,       /* OUT */
                     vm_pindex_t *pindex,       /* OUT */
                     vm_prot_t *out_prot,       /* OUT */
                     boolean_t *wired)          /* OUT */
{
        vm_map_entry_t entry;
        vm_map_t map = *var_map;
        vm_prot_t prot;
        vm_prot_t fault_type = fault_typea;

        /*
         * Lookup the faulting address.
         */
        if (!vm_map_lookup_entry(map, vaddr, out_entry))
                return (KERN_INVALID_ADDRESS);

        entry = *out_entry;

        /*
         * Fail if the entry refers to a submap.
         */
        if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
                return (KERN_FAILURE);

        /*
         * Check whether this task is allowed to have this page.
         */
        prot = entry->protection;
        fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
        if ((fault_type & prot) != fault_type)
                return (KERN_PROTECTION_FAILURE);

        /*
         * If this page is not pageable, we have to get it for all possible
         * accesses.
         */
        *wired = (entry->wired_count != 0);
        if (*wired)
                fault_type = entry->protection;

        if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                /*
                 * Fail if the entry was copy-on-write for a write fault.
                 */
                if (fault_type & VM_PROT_WRITE)
                        return (KERN_FAILURE);
                /*
                 * We're attempting to read a copy-on-write page --
                 * don't allow writes.
                 */
                prot &= ~VM_PROT_WRITE;
        }

        /*
         * Fail if an object should be created.
         */
        if (entry->object.vm_object == NULL && !map->system_map)
                return (KERN_FAILURE);

        /*
         * Return the object/offset from this entry.  If the entry was
         * copy-on-write or empty, it has been fixed up.
         */
        *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
        *object = entry->object.vm_object;

        *out_prot = prot;
        return (KERN_SUCCESS);
}
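
/*
 * Illustrative sketch only: a caller that already holds the map read
 * lock can try the non-blocking lookup first and fall back to the
 * blocking vm_map_lookup() on KERN_FAILURE.  The variable names are
 * hypothetical and this pattern is not taken from an actual caller:
 *
 *      rv = vm_map_lookup_locked(&map, vaddr, fault_type, &entry,
 *          &object, &pindex, &prot, &wired);
 *      if (rv == KERN_FAILURE) {
 *              vm_map_unlock_read(map);
 *              rv = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *                  &object, &pindex, &prot, &wired);
 *      }
 */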

/*
 *      vm_map_lookup_done:
 *
 *      Releases locks acquired by a vm_map_lookup
 *      (according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
{
        /*
         * Unlock the main-level map.
         */
        vm_map_unlock_read(map);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

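/*
 * Print a summary of the given map and, recursively, of its submaps
 * and backing VM objects, for the DDB commands defined below.
 */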
static void
vm_map_print(vm_map_t map)
{
        vm_map_entry_t entry;

        db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
            (void *)map,
            (void *)map->pmap, map->nentries, map->timestamp);

        db_indent += 2;
        for (entry = map->header.next; entry != &map->header;
            entry = entry->next) {
                db_iprintf("map entry %p: start=%p, end=%p\n",
                    (void *)entry, (void *)entry->start, (void *)entry->end);
                {
                        static char *inheritance_name[4] =
                        {"share", "copy", "none", "donate_copy"};

                        db_iprintf(" prot=%x/%x/%s",
                            entry->protection,
                            entry->max_protection,
                            inheritance_name[(int)(unsigned char)entry->inheritance]);
                        if (entry->wired_count != 0)
                                db_printf(", wired");
                }
                if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
                        db_printf(", share=%p, offset=0x%jx\n",
                            (void *)entry->object.sub_map,
                            (uintmax_t)entry->offset);
                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.sub_map !=
                                entry->object.sub_map)) {
                                db_indent += 2;
                                vm_map_print((vm_map_t)entry->object.sub_map);
                                db_indent -= 2;
                        }
                } else {
                        if (entry->cred != NULL)
                                db_printf(", ruid %d", entry->cred->cr_ruid);
                        db_printf(", object=%p, offset=0x%jx",
                            (void *)entry->object.vm_object,
                            (uintmax_t)entry->offset);
                        if (entry->object.vm_object && entry->object.vm_object->cred)
                                db_printf(", obj ruid %d charge %jx",
                                    entry->object.vm_object->cred->cr_ruid,
                                    (uintmax_t)entry->object.vm_object->charge);
                        if (entry->eflags & MAP_ENTRY_COW)
                                db_printf(", copy (%s)",
                                    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
                        db_printf("\n");

                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.vm_object !=
                                entry->object.vm_object)) {
                                db_indent += 2;
                                vm_object_print((db_expr_t)(intptr_t)
                                                entry->object.vm_object,
                                                0, 0, (char *)0);
                                db_indent -= 2;
                        }
                }
        }
        db_indent -= 2;
}

DB_SHOW_COMMAND(map, map)
{

        if (!have_addr) {
                db_printf("usage: show map <addr>\n");
                return;
        }
        vm_map_print((vm_map_t)addr);
}

DB_SHOW_COMMAND(procvm, procvm)
{
        struct proc *p;

        if (have_addr) {
                p = db_lookup_proc(addr);
        } else {
                p = curproc;
        }

        db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
            (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
            (void *)vmspace_pmap(p->p_vmspace));

        vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
}
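
/*
 * Example invocations from the DDB prompt (the address and pid below
 * are illustrative values, not taken from a real session):
 *
 *      db> show map 0xfffff80002be0000
 *      db> show procvm                 (dumps curproc)
 *      db> show procvm 42              (argument resolved by
 *                                       db_lookup_proc(), as a pid or
 *                                       a struct proc pointer)
 */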

#endif /* DDB */