/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *      Virtual memory maps provide for the mapping, protection,
 *      and sharing of virtual memory objects.  In addition,
 *      this module provides for an efficient virtual copy of
 *      memory from one map to another.
 *
 *      Synchronization is required prior to most operations.
 *
 *      Maps consist of an ordered doubly-linked list of simple
 *      entries; a self-adjusting binary search tree of these
 *      entries is used to speed up lookups.
 *
 *      Since portions of maps are specified by start/end addresses,
 *      which may not align with existing map entries, all
 *      routines merely "clip" entries to these start/end values.
 *      [That is, an entry is split into two, bordering at a
 *      start or end value.]  Note that these clippings may not
 *      always be necessary (as the two resulting entries are then
 *      not changed); however, the clipping is done for convenience.
 *
 *      As mentioned above, virtual copy operations are performed
 *      by copying VM object references from one map to
 *      another, and then marking both regions as copy-on-write.
 */
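
/*
 * Editor's illustration (not part of the original file, compiled out):
 * a minimal sketch of walking the ordered entry list described above.
 * The vm_map interfaces used are real; the function is hypothetical.
 */
#if 0
static void
example_map_walk(vm_map_t map)
{
        vm_map_entry_t entry;

        vm_map_lock_read(map);
        for (entry = map->header.next; entry != &map->header;
            entry = entry->next)
                printf("entry [%#jx, %#jx)\n", (uintmax_t)entry->start,
                    (uintmax_t)entry->end);
        vm_map_unlock_read(map);
}
#endif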

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define PROC_VMSPACE_LOCK(p) do { } while (0)
#define PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *      VM_MAP_RANGE_CHECK:     [ internal use only ]
 *
 *      Asserts that the starting and ending region
 *      addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)             \
                {                                       \
                if (start < vm_map_min(map))            \
                        start = vm_map_min(map);        \
                if (end > vm_map_max(map))              \
                        end = vm_map_max(map);          \
                if (start > end)                        \
                        start = end;                    \
                }
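
/*
 * Editor's worked example: for a map spanning [0x1000, 0x8000),
 * VM_MAP_RANGE_CHECK() clamps start = 0x500 up to 0x1000 and
 * end = 0x9000 down to 0x8000; if the clamped start still exceeds
 * end, the range collapses to the empty range [end, end).
 */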

/*
 *      vm_map_startup:
 *
 *      Initialize the vm_map module.  Must be called before
 *      any other vm_map routines.
 *
 *      Map and entry structures are allocated from the general
 *      purpose memory pool with some exceptions:
 *
 *      - The kernel map and kmem submap are allocated statically.
 *      - Kernel map entries are allocated out of a static pool.
 *
 *      These restrictions are necessary since malloc() uses the
 *      maps and requires map entries.
 */

void
vm_map_startup(void)
{
        mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
        mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
            vm_map_zdtor,
#else
            NULL,
#endif
            vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        uma_prealloc(mapzone, MAX_KMAP);
        kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
        mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
            vmspace_zdtor,
#else
            NULL,
#endif
            vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm->vm_map.pmap = NULL;
        (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
        PMAP_LOCK_INIT(vmspace_pmap(vm));
        return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        memset(map, 0, sizeof(*map));
        mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
        sx_init(&map->lock, "vm map (user)");
        return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        KASSERT(map->nentries == 0,
            ("map %p nentries == %d on free.",
            map, map->nentries));
        KASSERT(map->size == 0,
            ("map %p size == %lu on free.",
            map, (unsigned long)map->size));
}
#endif  /* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
        struct vmspace *vm;

        vm = uma_zalloc(vmspace_zone, M_WAITOK);

        KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));

        if (pinit == NULL)
                pinit = &pmap_pinit;

        if (!pinit(vmspace_pmap(vm))) {
                uma_zfree(vmspace_zone, vm);
                return (NULL);
        }
        CTR1(KTR_VM, "vmspace_alloc: %p", vm);
        _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
        vm->vm_refcnt = 1;
        vm->vm_shm = NULL;
        vm->vm_swrss = 0;
        vm->vm_tsize = 0;
        vm->vm_dsize = 0;
        vm->vm_ssize = 0;
        vm->vm_taddr = 0;
        vm->vm_daddr = 0;
        vm->vm_maxsaddr = 0;
        return (vm);
}
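
/*
 * Editor's usage sketch (hypothetical caller, compiled out): allocate a
 * vmspace with the default pmap initializer and drop the reference when
 * done.  A NULL return means pmap_pinit() failed.  The address bounds
 * are illustrative machine constants.
 */
#if 0
static void
example_vmspace_alloc(void)
{
        struct vmspace *vm;

        vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, NULL);
        if (vm != NULL) {
                /* ... use vm->vm_map ... */
                vmspace_free(vm);
        }
}
#endif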

static void
vmspace_container_reset(struct proc *p)
{

#ifdef RACCT
        PROC_LOCK(p);
        racct_set(p, RACCT_DATA, 0);
        racct_set(p, RACCT_STACK, 0);
        racct_set(p, RACCT_RSS, 0);
        racct_set(p, RACCT_MEMLOCK, 0);
        racct_set(p, RACCT_VMEM, 0);
        PROC_UNLOCK(p);
#endif
}

static inline void
vmspace_dofree(struct vmspace *vm)
{

        CTR1(KTR_VM, "vmspace_free: %p", vm);

        /*
         * Make sure any SysV shm is freed, it might not have been in
         * exit1().
         */
        shmexit(vm);

        /*
         * Lock the map, to wait out all other references to it.
         * Delete all of the mappings and pages they hold, then call
         * the pmap module to reclaim anything left.
         */
        (void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
            vm->vm_map.max_offset);

        pmap_release(vmspace_pmap(vm));
        vm->vm_map.pmap = NULL;
        uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

        if (vm->vm_refcnt == 0)
                panic("vmspace_free: attempt to free already freed vmspace");

        if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
                vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
        struct vmspace *vm;

        PROC_VMSPACE_LOCK(p);
        vm = p->p_vmspace;
        p->p_vmspace = NULL;
        PROC_VMSPACE_UNLOCK(p);
        KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
        vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
        int refcnt;
        struct vmspace *vm;
        struct proc *p;

        /*
         * Release user portion of address space.
         * This releases references to vnodes,
         * which could cause I/O if the file has been unlinked.
         * Need to do this early enough that we can still sleep.
         *
         * The last exiting process to reach this point releases as
         * much of the environment as it can. vmspace_dofree() is the
         * slower fallback in case another process had a temporary
         * reference to the vmspace.
         */

        p = td->td_proc;
        vm = p->p_vmspace;
        atomic_add_int(&vmspace0.vm_refcnt, 1);
        do {
                refcnt = vm->vm_refcnt;
                if (refcnt > 1 && p->p_vmspace != &vmspace0) {
                        /* Switch now since other proc might free vmspace */
                        PROC_VMSPACE_LOCK(p);
                        p->p_vmspace = &vmspace0;
                        PROC_VMSPACE_UNLOCK(p);
                        pmap_activate(td);
                }
        } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
        if (refcnt == 1) {
                if (p->p_vmspace != vm) {
                        /* vmspace not yet freed, switch back */
                        PROC_VMSPACE_LOCK(p);
                        p->p_vmspace = vm;
                        PROC_VMSPACE_UNLOCK(p);
                        pmap_activate(td);
                }
                pmap_remove_pages(vmspace_pmap(vm));
                /* Switch now since this proc will free vmspace */
                PROC_VMSPACE_LOCK(p);
                p->p_vmspace = &vmspace0;
                PROC_VMSPACE_UNLOCK(p);
                pmap_activate(td);
                vmspace_dofree(vm);
        }
        vmspace_container_reset(p);
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
        struct vmspace *vm;
        int refcnt;

        PROC_VMSPACE_LOCK(p);
        vm = p->p_vmspace;
        if (vm == NULL) {
                PROC_VMSPACE_UNLOCK(p);
                return (NULL);
        }
        do {
                refcnt = vm->vm_refcnt;
                if (refcnt <= 0) {      /* Avoid 0->1 transition */
                        PROC_VMSPACE_UNLOCK(p);
                        return (NULL);
                }
        } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
        if (vm != p->p_vmspace) {
                PROC_VMSPACE_UNLOCK(p);
                vmspace_free(vm);
                return (NULL);
        }
        PROC_VMSPACE_UNLOCK(p);
        return (vm);
}
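
/*
 * Editor's sketch of the canonical acquire/use/release pattern
 * (hypothetical caller, compiled out): the extra reference keeps the
 * vmspace alive even if the owning process exits meanwhile.
 */
#if 0
static void
example_vmspace_ref(struct proc *p)
{
        struct vmspace *vm;

        vm = vmspace_acquire_ref(p);
        if (vm != NULL) {
                /* ... inspect vm->vm_map independently of p ... */
                vmspace_free(vm);
        }
}
#endif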

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_lock_flags_(&map->system_mtx, 0, file, line);
        else
                sx_xlock_(&map->lock, file, line);
        map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
        struct thread *td;
        vm_map_entry_t entry, next;
        vm_object_t object;

        td = curthread;
        entry = td->td_map_def_user;
        td->td_map_def_user = NULL;
        while (entry != NULL) {
                next = entry->next;
                if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
                        /*
                         * Decrement the object's writemappings and
                         * possibly the vnode's v_writecount.
                         */
                        KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
                            ("Submap with writecount"));
                        object = entry->object.vm_object;
                        KASSERT(object != NULL, ("No object for writecount"));
                        vnode_pager_release_writecount(object, entry->start,
                            entry->end);
                }
                vm_map_entry_deallocate(entry, FALSE);
                entry = next;
        }
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_unlock_flags_(&map->system_mtx, 0, file, line);
        else {
                sx_xunlock_(&map->lock, file, line);
                vm_map_process_deferred();
        }
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_lock_flags_(&map->system_mtx, 0, file, line);
        else
                sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_unlock_flags_(&map->system_mtx, 0, file, line);
        else {
                sx_sunlock_(&map->lock, file, line);
                vm_map_process_deferred();
        }
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
        int error;

        error = map->system_map ?
            !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
            !sx_try_xlock_(&map->lock, file, line);
        if (error == 0)
                map->timestamp++;
        return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
        int error;

        error = map->system_map ?
            !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
            !sx_try_slock_(&map->lock, file, line);
        return (error == 0);
}

/*
 *      _vm_map_lock_upgrade:   [ internal use only ]
 *
 *      Tries to upgrade a read (shared) lock on the specified map to a write
 *      (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *      non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *      returned without a read or write lock held.
 *
 *      Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
        unsigned int last_timestamp;

        if (map->system_map) {
                mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
        } else {
                if (!sx_try_upgrade_(&map->lock, file, line)) {
                        last_timestamp = map->timestamp;
                        sx_sunlock_(&map->lock, file, line);
                        vm_map_process_deferred();
                        /*
                         * If the map's timestamp does not change while the
                         * map is unlocked, then the upgrade succeeds.
                         */
                        sx_xlock_(&map->lock, file, line);
                        if (last_timestamp != map->timestamp) {
                                sx_xunlock_(&map->lock, file, line);
                                return (1);
                        }
                }
        }
        map->timestamp++;
        return (0);
}
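
/*
 * Editor's sketch of the caller-side retry idiom (compiled out): a
 * failed upgrade drops the lock entirely, so any state derived from the
 * map must be revalidated by redoing the lookup.
 */
#if 0
static void
example_upgrade_retry(vm_map_t map)
{

        for (;;) {
                vm_map_lock_read(map);
                /* ... look up the entry of interest ... */
                if (vm_map_lock_upgrade(map) == 0)
                        break;          /* write lock held, state valid */
                /* upgrade failed and the lock was lost; redo the lookup */
        }
        /* ... modify the map ... */
        vm_map_unlock(map);
}
#endif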

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

        if (map->system_map) {
                mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
        } else
                sx_downgrade_(&map->lock, file, line);
}

/*
 *      vm_map_locked:
 *
 *      Returns a non-zero value if the caller holds a write (exclusive) lock
 *      on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

        if (map->system_map)
                return (mtx_owned(&map->system_mtx));
        else
                return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
        else
                sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *      _vm_map_unlock_and_wait:
 *
 *      Atomically releases the lock on the specified map and puts the calling
 *      thread to sleep.  The calling thread will remain asleep until either
 *      vm_map_wakeup() is performed on the map or the specified timeout is
 *      exceeded.
 *
 *      WARNING!  This function does not perform deferred deallocations of
 *      objects and map entries.  Therefore, the calling thread is expected to
 *      reacquire the map lock after reawakening and later perform an ordinary
 *      unlock operation, such as vm_map_unlock(), before completing its
 *      operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

        mtx_lock(&map_sleep_mtx);
        if (map->system_map)
                mtx_unlock_flags_(&map->system_mtx, 0, file, line);
        else
                sx_xunlock_(&map->lock, file, line);
        return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
            timo));
}
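
/*
 * Editor's sketch of the sleep/retry idiom built on this primitive
 * (compiled out): the map lock is retaken after waking, and a normal
 * unlock later performs the deferred cleanup the wait path skipped.
 * "in_transition" is a stand-in for the caller's actual predicate.
 */
#if 0
static void
example_wait_retry(vm_map_t map, boolean_t in_transition)
{

        vm_map_lock(map);
        while (in_transition) {
                map->needs_wakeup = TRUE;
                (void)vm_map_unlock_and_wait(map, 0);
                vm_map_lock(map);
        }
        vm_map_unlock(map);
}
#endif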

/*
 *      vm_map_wakeup:
 *
 *      Awaken any threads that have slept on the map using
 *      vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

        /*
         * Acquire and release map_sleep_mtx to prevent a wakeup()
         * from being performed (and lost) between the map unlock
         * and the msleep() in _vm_map_unlock_and_wait().
         */
        mtx_lock(&map_sleep_mtx);
        mtx_unlock(&map_sleep_mtx);
        wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        KASSERT(map->busy, ("vm_map_unbusy: not busy"));
        if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
                vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
                wakeup(&map->busy);
        }
}

void
vm_map_wait_busy(vm_map_t map)
{

        VM_MAP_ASSERT_LOCKED(map);
        while (map->busy) {
                vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
                if (map->system_map)
                        msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
                else
                        sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
        }
        map->timestamp++;
}
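
/*
 * Editor's sketch (compiled out): a thread marks the map busy before
 * dropping the lock so that threads calling vm_map_wait_busy() first
 * will block until the window closes.
 */
#if 0
static void
example_busy_window(vm_map_t map)
{

        vm_map_lock(map);
        vm_map_busy(map);
        vm_map_unlock(map);
        /* ... window where vm_map_wait_busy() callers are held off ... */
        vm_map_lock(map);
        vm_map_unbusy(map);
        vm_map_unlock(map);
}
#endif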

long
vmspace_resident_count(struct vmspace *vmspace)
{
        return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *      vm_map_create:
 *
 *      Creates and returns a new empty VM map with
 *      the given physical map structure, and having
 *      the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
        vm_map_t result;

        result = uma_zalloc(mapzone, M_WAITOK);
        CTR1(KTR_VM, "vm_map_create: %p", result);
        _vm_map_init(result, pmap, min, max);
        return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

        map->header.next = map->header.prev = &map->header;
        map->needs_wakeup = FALSE;
        map->system_map = 0;
        map->pmap = pmap;
        map->min_offset = min;
        map->max_offset = max;
        map->flags = 0;
        map->root = NULL;
        map->timestamp = 0;
        map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

        _vm_map_init(map, pmap, min, max);
        mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
        sx_init(&map->lock, "user map");
}

/*
 *      vm_map_entry_dispose:   [ internal use only ]
 *
 *      Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
        uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *      vm_map_entry_create:    [ internal use only ]
 *
 *      Allocates a VM map entry for insertion.
 *      No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
        vm_map_entry_t new_entry;

        if (map->system_map)
                new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
        else
                new_entry = uma_zalloc(mapentzone, M_WAITOK);
        if (new_entry == NULL)
                panic("vm_map_entry_create: kernel resources exhausted");
        return (new_entry);
}

/*
 *      vm_map_entry_set_behavior:
 *
 *      Set the expected access behavior, either normal, random, or
 *      sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
        entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
            (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *      vm_map_entry_set_max_free:
 *
 *      Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

        entry->max_free = entry->adj_free;
        if (entry->left != NULL && entry->left->max_free > entry->max_free)
                entry->max_free = entry->left->max_free;
        if (entry->right != NULL && entry->right->max_free > entry->max_free)
                entry->max_free = entry->right->max_free;
}
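
/*
 * Editor's worked example: if an entry has adj_free = 2 pages, a left
 * subtree whose max_free is 8 pages, and a right subtree whose max_free
 * is 4 pages, then its max_free becomes 8 pages, the largest gap
 * anywhere in the subtree rooted at this entry.  This is the invariant
 * that lets vm_map_findspace() prune whole subtrees.
 */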

/*
 *      vm_map_entry_splay:
 *
 *      The Sleator and Tarjan top-down splay algorithm with the
 *      following variation.  Max_free must be computed bottom-up, so
 *      on the downward pass, maintain the left and right spines in
 *      reverse order.  Then, make a second pass up each side to fix
 *      the pointers and compute max_free.  The time bound is O(log n)
 *      amortized.
 *
 *      The new root is the vm_map_entry containing "addr", or else an
 *      adjacent entry (lower or higher) if addr is not in the tree.
 *
 *      The map must be locked, and leaves it so.
 *
 *      Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
        vm_map_entry_t llist, rlist;
        vm_map_entry_t ltree, rtree;
        vm_map_entry_t y;

        /* Special case of empty tree. */
        if (root == NULL)
                return (root);

        /*
         * Pass One: Splay down the tree until we find addr or a NULL
         * pointer where addr would go.  llist and rlist are the two
         * sides in reverse order (bottom-up), with llist linked by
         * the right pointer and rlist linked by the left pointer in
         * the vm_map_entry.  Wait until Pass Two to set max_free on
         * the two spines.
         */
        llist = NULL;
        rlist = NULL;
        for (;;) {
                /* root is never NULL in here. */
                if (addr < root->start) {
                        y = root->left;
                        if (y == NULL)
                                break;
                        if (addr < y->start && y->left != NULL) {
                                /* Rotate right and put y on rlist. */
                                root->left = y->right;
                                y->right = root;
                                vm_map_entry_set_max_free(root);
                                root = y->left;
                                y->left = rlist;
                                rlist = y;
                        } else {
                                /* Put root on rlist. */
                                root->left = rlist;
                                rlist = root;
                                root = y;
                        }
                } else if (addr >= root->end) {
                        y = root->right;
                        if (y == NULL)
                                break;
                        if (addr >= y->end && y->right != NULL) {
                                /* Rotate left and put y on llist. */
                                root->right = y->left;
                                y->left = root;
                                vm_map_entry_set_max_free(root);
                                root = y->right;
                                y->right = llist;
                                llist = y;
                        } else {
                                /* Put root on llist. */
                                root->right = llist;
                                llist = root;
                                root = y;
                        }
                } else
                        break;
        }

        /*
         * Pass Two: Walk back up the two spines, flip the pointers
         * and set max_free.  The subtrees of the root go at the
         * bottom of llist and rlist.
         */
        ltree = root->left;
        while (llist != NULL) {
                y = llist->right;
                llist->right = ltree;
                vm_map_entry_set_max_free(llist);
                ltree = llist;
                llist = y;
        }
        rtree = root->right;
        while (rlist != NULL) {
                y = rlist->left;
                rlist->left = rtree;
                vm_map_entry_set_max_free(rlist);
                rtree = rlist;
                rlist = y;
        }

        /*
         * Final assembly: add ltree and rtree as subtrees of root.
         */
        root->left = ltree;
        root->right = rtree;
        vm_map_entry_set_max_free(root);

        return (root);
}

/*
 *      vm_map_entry_{un,}link:
 *
 *      Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
                  vm_map_entry_t after_where,
                  vm_map_entry_t entry)
{

        CTR4(KTR_VM,
            "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
            map->nentries, entry, after_where);
        VM_MAP_ASSERT_LOCKED(map);
        KASSERT(after_where == &map->header ||
            after_where->end <= entry->start,
            ("vm_map_entry_link: prev end %jx new start %jx overlap",
            (uintmax_t)after_where->end, (uintmax_t)entry->start));
        KASSERT(after_where->next == &map->header ||
            entry->end <= after_where->next->start,
            ("vm_map_entry_link: new end %jx next start %jx overlap",
            (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

        map->nentries++;
        entry->prev = after_where;
        entry->next = after_where->next;
        entry->next->prev = entry;
        after_where->next = entry;

        if (after_where != &map->header) {
                if (after_where != map->root)
                        vm_map_entry_splay(after_where->start, map->root);
                entry->right = after_where->right;
                entry->left = after_where;
                after_where->right = NULL;
                after_where->adj_free = entry->start - after_where->end;
                vm_map_entry_set_max_free(after_where);
        } else {
                entry->right = map->root;
                entry->left = NULL;
        }
        entry->adj_free = (entry->next == &map->header ? map->max_offset :
            entry->next->start) - entry->end;
        vm_map_entry_set_max_free(entry);
        map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
                    vm_map_entry_t entry)
{
        vm_map_entry_t next, prev, root;

        VM_MAP_ASSERT_LOCKED(map);
        if (entry != map->root)
                vm_map_entry_splay(entry->start, map->root);
        if (entry->left == NULL)
                root = entry->right;
        else {
                root = vm_map_entry_splay(entry->start, entry->left);
                root->right = entry->right;
                root->adj_free = (entry->next == &map->header ? map->max_offset :
                    entry->next->start) - root->end;
                vm_map_entry_set_max_free(root);
        }
        map->root = root;

        prev = entry->prev;
        next = entry->next;
        next->prev = prev;
        prev->next = next;
        map->nentries--;
        CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
            map->nentries, entry);
}

/*
 *      vm_map_entry_resize_free:
 *
 *      Recompute the amount of free space following a vm_map_entry
 *      and propagate that value up the tree.  Call this function after
 *      resizing a map entry in-place, that is, without a call to
 *      vm_map_entry_link() or _unlink().
 *
 *      The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

        /*
         * Using splay trees without parent pointers, propagating
         * max_free up the tree is done by moving the entry to the
         * root and making the change there.
         */
        if (entry != map->root)
                map->root = vm_map_entry_splay(entry->start, map->root);

        entry->adj_free = (entry->next == &map->header ? map->max_offset :
            entry->next->start) - entry->end;
        vm_map_entry_set_max_free(entry);
}

/*
 *      vm_map_lookup_entry:    [ internal use only ]
 *
 *      Finds the map entry containing (or
 *      immediately preceding) the specified address
 *      in the given map; the entry is returned
 *      in the "entry" parameter.  The boolean
 *      result indicates whether the address is
 *      actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
        vm_map_t map,
        vm_offset_t address,
        vm_map_entry_t *entry)  /* OUT */
{
        vm_map_entry_t cur;
        boolean_t locked;

        /*
         * If the map is empty, then the map entry immediately preceding
         * "address" is the map's header.
         */
        cur = map->root;
        if (cur == NULL)
                *entry = &map->header;
        else if (address >= cur->start && cur->end > address) {
                *entry = cur;
                return (TRUE);
        } else if ((locked = vm_map_locked(map)) ||
            sx_try_upgrade(&map->lock)) {
                /*
                 * Splay requires a write lock on the map.  However, it only
                 * restructures the binary search tree; it does not otherwise
                 * change the map.  Thus, the map's timestamp need not change
                 * on a temporary upgrade.
                 */
                map->root = cur = vm_map_entry_splay(address, cur);
                if (!locked)
                        sx_downgrade(&map->lock);

                /*
                 * If "address" is contained within a map entry, the new root
                 * is that map entry.  Otherwise, the new root is a map entry
                 * immediately before or after "address".
                 */
                if (address >= cur->start) {
                        *entry = cur;
                        if (cur->end > address)
                                return (TRUE);
                } else
                        *entry = cur->prev;
        } else
                /*
                 * Since the map is only locked for read access, perform a
                 * standard binary search tree lookup for "address".
                 */
                for (;;) {
                        if (address < cur->start) {
                                if (cur->left == NULL) {
                                        *entry = cur->prev;
                                        break;
                                }
                                cur = cur->left;
                        } else if (cur->end > address) {
                                *entry = cur;
                                return (TRUE);
                        } else {
                                if (cur->right == NULL) {
                                        *entry = cur;
                                        break;
                                }
                                cur = cur->right;
                        }
                }
        return (FALSE);
}
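
/*
 * Editor's usage sketch (compiled out): callers hold the map lock and
 * use the boolean result to distinguish a hit from a predecessor.
 */
#if 0
static void
example_lookup(vm_map_t map, vm_offset_t addr)
{
        vm_map_entry_t entry;

        vm_map_lock_read(map);
        if (vm_map_lookup_entry(map, addr, &entry)) {
                /* addr lies inside [entry->start, entry->end) */
        } else {
                /* entry precedes addr; entry->next is the successor */
        }
        vm_map_unlock_read(map);
}
#endif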

/*
 *      vm_map_insert:
 *
 *      Inserts the given whole VM object into the target
 *      map at the specified address range.  The object's
 *      size should match that of the address range.
 *
 *      Requires that the map be locked, and leaves it so.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
              vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
              int cow)
{
        vm_map_entry_t new_entry;
        vm_map_entry_t prev_entry;
        vm_map_entry_t temp_entry;
        vm_eflags_t protoeflags;
        struct ucred *cred;
        vm_inherit_t inheritance;
        boolean_t charge_prev_obj;

        VM_MAP_ASSERT_LOCKED(map);

        /*
         * Check that the start and end points are not bogus.
         */
        if ((start < map->min_offset) || (end > map->max_offset) ||
            (start >= end))
                return (KERN_INVALID_ADDRESS);

        /*
         * Find the entry prior to the proposed starting address; if it's part
         * of an existing entry, this range is bogus.
         */
        if (vm_map_lookup_entry(map, start, &temp_entry))
                return (KERN_NO_SPACE);

        prev_entry = temp_entry;

        /*
         * Assert that the next entry doesn't overlap the end point.
         */
        if ((prev_entry->next != &map->header) &&
            (prev_entry->next->start < end))
                return (KERN_NO_SPACE);

        protoeflags = 0;
        charge_prev_obj = FALSE;

        if (cow & MAP_COPY_ON_WRITE)
                protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

        if (cow & MAP_NOFAULT) {
                protoeflags |= MAP_ENTRY_NOFAULT;

                KASSERT(object == NULL,
                        ("vm_map_insert: paradoxical MAP_NOFAULT request"));
        }
        if (cow & MAP_DISABLE_SYNCER)
                protoeflags |= MAP_ENTRY_NOSYNC;
        if (cow & MAP_DISABLE_COREDUMP)
                protoeflags |= MAP_ENTRY_NOCOREDUMP;
        if (cow & MAP_VN_WRITECOUNT)
                protoeflags |= MAP_ENTRY_VN_WRITECNT;
        if (cow & MAP_INHERIT_SHARE)
                inheritance = VM_INHERIT_SHARE;
        else
                inheritance = VM_INHERIT_DEFAULT;

        cred = NULL;
        KASSERT((object != kmem_object && object != kernel_object) ||
            ((object == kmem_object || object == kernel_object) &&
                !(protoeflags & MAP_ENTRY_NEEDS_COPY)),
            ("kmem or kernel object and cow"));
        if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
                goto charged;
        if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
            ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
                if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
                        return (KERN_RESOURCE_SHORTAGE);
                KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
                    object->cred == NULL,
                    ("OVERCOMMIT: vm_map_insert o %p", object));
                cred = curthread->td_ucred;
                crhold(cred);
                if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
                        charge_prev_obj = TRUE;
        }

charged:
        /* Expand the kernel pmap, if necessary. */
        if (map == kernel_map && end > kernel_vm_end)
                pmap_growkernel(end);
        if (object != NULL) {
                /*
                 * OBJ_ONEMAPPING must be cleared unless this mapping
                 * is trivially proven to be the only mapping for any
                 * of the object's pages.  (Object granularity
                 * reference counting is insufficient to recognize
                 * aliases with precision.)
                 */
                VM_OBJECT_WLOCK(object);
                if (object->ref_count > 1 || object->shadow_count != 0)
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);
                VM_OBJECT_WUNLOCK(object);
        }
        else if ((prev_entry != &map->header) &&
                 (prev_entry->eflags == protoeflags) &&
                 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
                 (prev_entry->end == start) &&
                 (prev_entry->wired_count == 0) &&
                 (prev_entry->cred == cred ||
                  (prev_entry->object.vm_object != NULL &&
                   (prev_entry->object.vm_object->cred == cred))) &&
                   vm_object_coalesce(prev_entry->object.vm_object,
                       prev_entry->offset,
                       (vm_size_t)(prev_entry->end - prev_entry->start),
                       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
                /*
                 * We were able to extend the object.  Determine if we
                 * can extend the previous map entry to include the
                 * new range as well.
                 */
                if ((prev_entry->inheritance == inheritance) &&
                    (prev_entry->protection == prot) &&
                    (prev_entry->max_protection == max)) {
                        map->size += (end - prev_entry->end);
                        prev_entry->end = end;
                        vm_map_entry_resize_free(map, prev_entry);
                        vm_map_simplify_entry(map, prev_entry);
                        if (cred != NULL)
                                crfree(cred);
                        return (KERN_SUCCESS);
                }

                /*
                 * If we can extend the object but cannot extend the
                 * map entry, we have to create a new map entry.  We
                 * must bump the ref count on the extended object to
                 * account for it.  object may be NULL.
                 */
                object = prev_entry->object.vm_object;
                offset = prev_entry->offset +
                        (prev_entry->end - prev_entry->start);
                vm_object_reference(object);
                if (cred != NULL && object != NULL && object->cred != NULL &&
                    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
                        /* Object already accounts for this uid. */
                        crfree(cred);
                        cred = NULL;
                }
        }

        /*
         * NOTE: if conditionals fail, object can be NULL here.  This occurs
         * in things like the buffer map where we manage kva but do not manage
         * backing objects.
         */

        /*
         * Create a new entry
         */
        new_entry = vm_map_entry_create(map);
        new_entry->start = start;
        new_entry->end = end;
        new_entry->cred = NULL;

        new_entry->eflags = protoeflags;
        new_entry->object.vm_object = object;
        new_entry->offset = offset;
        new_entry->avail_ssize = 0;

        new_entry->inheritance = inheritance;
        new_entry->protection = prot;
        new_entry->max_protection = max;
        new_entry->wired_count = 0;
        new_entry->wiring_thread = NULL;
        new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
        new_entry->next_read = OFF_TO_IDX(offset);

        KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
            ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
        new_entry->cred = cred;

        /*
         * Insert the new entry into the list
         */
        vm_map_entry_link(map, prev_entry, new_entry);
        map->size += new_entry->end - new_entry->start;

        /*
         * It may be possible to merge the new entry with the next and/or
         * previous entries.  However, due to MAP_STACK_* being a hack, a
         * panic can result from merging such entries.
         */
        if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
                vm_map_simplify_entry(map, new_entry);

        if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
                vm_map_pmap_enter(map, start, prot,
                                    object, OFF_TO_IDX(offset), end - start,
                                    cow & MAP_PREFAULT_PARTIAL);
        }

        return (KERN_SUCCESS);
}
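
/*
 * Editor's usage sketch (compiled out, hypothetical values): the caller
 * takes the object reference before inserting and holds the map write
 * lock across the call, dropping the reference again on failure.
 */
#if 0
static int
example_insert(vm_map_t map, vm_object_t object, vm_offset_t start,
    vm_size_t size)
{
        int rv;

        vm_object_reference(object);
        vm_map_lock(map);
        rv = vm_map_insert(map, object, 0, start, start + size,
            VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, MAP_COPY_ON_WRITE);
        vm_map_unlock(map);
        if (rv != KERN_SUCCESS)
                vm_object_deallocate(object);
        return (rv);
}
#endif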

/*
 *      vm_map_findspace:
 *
 *      Find the first fit (lowest VM address) for "length" free bytes
 *      beginning at address >= start in the given map.
 *
 *      In a vm_map_entry, "adj_free" is the amount of free space
 *      adjacent (higher address) to this entry, and "max_free" is the
 *      maximum amount of contiguous free space in its subtree.  This
 *      allows finding a free region in one path down the tree, so
 *      O(log n) amortized with splay trees.
 *
 *      The map must be locked, and leaves it so.
 *
 *      Returns: 0 on success, and starting address in *addr,
 *               1 if insufficient space.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)  /* OUT */
{
        vm_map_entry_t entry;
        vm_offset_t st;

        /*
         * Request must fit within min/max VM address and must avoid
         * address wrap.
         */
        if (start < map->min_offset)
                start = map->min_offset;
        if (start + length > map->max_offset || start + length < start)
                return (1);

        /* Empty tree means wide open address space. */
        if (map->root == NULL) {
                *addr = start;
                return (0);
        }

        /*
         * After splay, if start comes before root node, then there
         * must be a gap from start to the root.
         */
        map->root = vm_map_entry_splay(start, map->root);
        if (start + length <= map->root->start) {
                *addr = start;
                return (0);
        }

        /*
         * Root is the last node that might begin its gap before
         * start, and this is the last comparison where address
         * wrap might be a problem.
         */
        st = (start > map->root->end) ? start : map->root->end;
        if (length <= map->root->end + map->root->adj_free - st) {
                *addr = st;
                return (0);
        }

        /* With max_free, can immediately tell if no solution. */
        entry = map->root->right;
        if (entry == NULL || length > entry->max_free)
                return (1);

        /*
         * Search the right subtree in the order: left subtree, root,
         * right subtree (first fit).  The previous splay implies that
         * all regions in the right subtree have addresses > start.
         */
        while (entry != NULL) {
                if (entry->left != NULL && entry->left->max_free >= length)
                        entry = entry->left;
                else if (entry->adj_free >= length) {
                        *addr = entry->end;
                        return (0);
                } else
                        entry = entry->right;
        }

        /* Can't get here, so panic if we do. */
        panic("vm_map_findspace: max_free corrupt");
}
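
/*
 * Editor's sketch (compiled out): vm_map_findspace() composes with
 * vm_map_insert() under a single hold of the write lock, which is in
 * essence what vm_map_fixed() and vm_map_find() below wrap up.
 */
#if 0
static int
example_find_and_insert(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_prot_t prot, vm_prot_t max)
{
        vm_offset_t addr;
        int rv;

        vm_map_lock(map);
        if (vm_map_findspace(map, start, length, &addr) == 0)
                rv = vm_map_insert(map, NULL, 0, addr, addr + length,
                    prot, max, 0);
        else
                rv = KERN_NO_SPACE;
        vm_map_unlock(map);
        return (rv);
}
#endif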
1407
1408 int
1409 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1410     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1411     vm_prot_t max, int cow)
1412 {
1413         vm_offset_t end;
1414         int result;
1415
1416         end = start + length;
1417         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1418             object == NULL,
1419             ("vm_map_fixed: non-NULL backing object for stack"));
1420         vm_map_lock(map);
1421         VM_MAP_RANGE_CHECK(map, start, end);
1422         if ((cow & MAP_CHECK_EXCL) == 0)
1423                 vm_map_delete(map, start, end);
1424         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1425                 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1426                     prot, max, cow);
1427         } else {
1428                 result = vm_map_insert(map, object, offset, start, end,
1429                     prot, max, cow);
1430         }
1431         vm_map_unlock(map);
1432         return (result);
1433 }
1434
1435 /*
1436  *      vm_map_find finds an unallocated region in the target address
1437  *      map with the given length.  The search is defined to be
1438  *      first-fit from the specified address; the region found is
1439  *      returned in the same parameter.
1440  *
1441  *      If object is non-NULL, ref count must be bumped by caller
1442  *      prior to making call to account for the new entry.
1443  */
1444 int
1445 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1446             vm_offset_t *addr,  /* IN/OUT */
1447             vm_size_t length, vm_offset_t max_addr, int find_space,
1448             vm_prot_t prot, vm_prot_t max, int cow)
1449 {
1450         vm_offset_t alignment, initial_addr, start;
1451         int result;
1452
1453         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1454             object == NULL,
1455             ("vm_map_find: non-NULL backing object for stack"));
1456         if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1457             (object->flags & OBJ_COLORED) == 0))
1458                 find_space = VMFS_ANY_SPACE;
1459         if (find_space >> 8 != 0) {
1460                 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1461                 alignment = (vm_offset_t)1 << (find_space >> 8);
1462         } else
1463                 alignment = 0;
1464         initial_addr = *addr;
1465 again:
1466         start = initial_addr;
1467         vm_map_lock(map);
1468         do {
1469                 if (find_space != VMFS_NO_SPACE) {
1470                         if (vm_map_findspace(map, start, length, addr) ||
1471                             (max_addr != 0 && *addr + length > max_addr)) {
1472                                 vm_map_unlock(map);
1473                                 if (find_space == VMFS_OPTIMAL_SPACE) {
1474                                         find_space = VMFS_ANY_SPACE;
1475                                         goto again;
1476                                 }
1477                                 return (KERN_NO_SPACE);
1478                         }
1479                         switch (find_space) {
1480                         case VMFS_SUPER_SPACE:
1481                         case VMFS_OPTIMAL_SPACE:
1482                                 pmap_align_superpage(object, offset, addr,
1483                                     length);
1484                                 break;
1485                         case VMFS_ANY_SPACE:
1486                                 break;
1487                         default:
1488                                 if ((*addr & (alignment - 1)) != 0) {
1489                                         *addr &= ~(alignment - 1);
1490                                         *addr += alignment;
1491                                 }
1492                                 break;
1493                         }
1494
1495                         start = *addr;
1496                 }
1497                 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1498                         result = vm_map_stack_locked(map, start, length,
1499                             sgrowsiz, prot, max, cow);
1500                 } else {
1501                         result = vm_map_insert(map, object, offset, start,
1502                             start + length, prot, max, cow);
1503                 }
1504         } while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
1505             find_space != VMFS_ANY_SPACE);
1506         vm_map_unlock(map);
1507         return (result);
1508 }
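
/*
 * Example (an illustrative sketch, not part of this file): a caller
 * that wants any free range for an anonymous mapping, starting the
 * first-fit search at the bottom of the map, would look roughly like:
 *
 *	vm_offset_t addr;
 *	int rv;
 *
 *	addr = vm_map_min(map);
 *	rv = vm_map_find(map, NULL, 0, &addr, size, 0, VMFS_ANY_SPACE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *
 * On success, addr holds the start of the new mapping; otherwise rv
 * carries a KERN_* error for the caller to translate.
 */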
1509
1510 /*
1511  *      vm_map_simplify_entry:
1512  *
1513  *      Simplify the given map entry by merging with either neighbor.  This
1514  *      routine also has the ability to merge with both neighbors.
1515  *
1516  *      The map must be locked.
1517  *
1518  *      This routine guarantees that the passed entry remains valid (though
1519  *      possibly extended).  When merging, this routine may delete one or
1520  *      both neighbors.
1521  */
1522 void
1523 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1524 {
1525         vm_map_entry_t next, prev;
1526         vm_size_t prevsize, esize;
1527
1528         if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1529                 return;
1530
1531         prev = entry->prev;
1532         if (prev != &map->header) {
1533                 prevsize = prev->end - prev->start;
1534                 if ( (prev->end == entry->start) &&
1535                      (prev->object.vm_object == entry->object.vm_object) &&
1536                      (!prev->object.vm_object ||
1537                         (prev->offset + prevsize == entry->offset)) &&
1538                      (prev->eflags == entry->eflags) &&
1539                      (prev->protection == entry->protection) &&
1540                      (prev->max_protection == entry->max_protection) &&
1541                      (prev->inheritance == entry->inheritance) &&
1542                      (prev->wired_count == entry->wired_count) &&
1543                      (prev->cred == entry->cred)) {
1544                         vm_map_entry_unlink(map, prev);
1545                         entry->start = prev->start;
1546                         entry->offset = prev->offset;
1547                         if (entry->prev != &map->header)
1548                                 vm_map_entry_resize_free(map, entry->prev);
1549
1550                         /*
1551                          * If the backing object is a vnode object,
1552                          * vm_object_deallocate() calls vrele().
1553                          * However, vrele() does not lock the vnode
1554                          * because the vnode has additional
1555                          * references.  Thus, the map lock can be kept
1556                          * without causing a lock-order reversal with
1557                          * the vnode lock.
1558                          *
1559                          * Since we count the number of virtual page
1560                          * mappings in object->un_pager.vnp.writemappings,
1561                          * the writemappings value should not be adjusted
1562                          * when the entry is disposed of.
1563                          */
1564                         if (prev->object.vm_object)
1565                                 vm_object_deallocate(prev->object.vm_object);
1566                         if (prev->cred != NULL)
1567                                 crfree(prev->cred);
1568                         vm_map_entry_dispose(map, prev);
1569                 }
1570         }
1571
1572         next = entry->next;
1573         if (next != &map->header) {
1574                 esize = entry->end - entry->start;
1575                 if ((entry->end == next->start) &&
1576                     (next->object.vm_object == entry->object.vm_object) &&
1577                      (!entry->object.vm_object ||
1578                         (entry->offset + esize == next->offset)) &&
1579                     (next->eflags == entry->eflags) &&
1580                     (next->protection == entry->protection) &&
1581                     (next->max_protection == entry->max_protection) &&
1582                     (next->inheritance == entry->inheritance) &&
1583                     (next->wired_count == entry->wired_count) &&
1584                     (next->cred == entry->cred)) {
1585                         vm_map_entry_unlink(map, next);
1586                         entry->end = next->end;
1587                         vm_map_entry_resize_free(map, entry);
1588
1589                         /*
1590                          * See comment above.
1591                          */
1592                         if (next->object.vm_object)
1593                                 vm_object_deallocate(next->object.vm_object);
1594                         if (next->cred != NULL)
1595                                 crfree(next->cred);
1596                         vm_map_entry_dispose(map, next);
1597                 }
1598         }
1599 }
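
/*
 * Illustrative sketch (not part of this file): callers that change an
 * entry's attributes under the map lock re-run the merge afterwards,
 * in the pattern used by vm_map_inherit() below:
 *
 *	vm_map_lock(map);
 *	entry->inheritance = new_inheritance;
 *	vm_map_simplify_entry(map, entry);
 *	vm_map_unlock(map);
 */
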
1600 /*
1601  *      vm_map_clip_start:      [ internal use only ]
1602  *
1603  *      Asserts that the given entry begins at or after
1604  *      the specified address; if necessary,
1605  *      it splits the entry into two.
1606  */
1607 #define vm_map_clip_start(map, entry, startaddr) \
1608 { \
1609         if ((startaddr) > (entry)->start) \
1610                 _vm_map_clip_start((map), (entry), (startaddr)); \
1611 }
1612
1613 /*
1614  *      This routine is called only when it is known that
1615  *      the entry must be split.
1616  */
1617 static void
1618 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1619 {
1620         vm_map_entry_t new_entry;
1621
1622         VM_MAP_ASSERT_LOCKED(map);
1623
1624         /*
1625          * Split off the front portion -- note that we must insert the new
1626          * entry BEFORE this one, so that this entry has the specified
1627          * starting address.
1628          */
1629         vm_map_simplify_entry(map, entry);
1630
1631         /*
1632          * If there is no object backing this entry, we might as well create
1633          * one now.  If we defer it, an object can get created after the map
1634          * is clipped, and individual objects will be created for the split-up
1635          * map.  This is a bit of a hack, but is also about the best place to
1636          * put this improvement.
1637          */
1638         if (entry->object.vm_object == NULL && !map->system_map) {
1639                 vm_object_t object;
1640                 object = vm_object_allocate(OBJT_DEFAULT,
1641                                 atop(entry->end - entry->start));
1642                 entry->object.vm_object = object;
1643                 entry->offset = 0;
1644                 if (entry->cred != NULL) {
1645                         object->cred = entry->cred;
1646                         object->charge = entry->end - entry->start;
1647                         entry->cred = NULL;
1648                 }
1649         } else if (entry->object.vm_object != NULL &&
1650                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1651                    entry->cred != NULL) {
1652                 VM_OBJECT_WLOCK(entry->object.vm_object);
1653                 KASSERT(entry->object.vm_object->cred == NULL,
1654                     ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1655                 entry->object.vm_object->cred = entry->cred;
1656                 entry->object.vm_object->charge = entry->end - entry->start;
1657                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
1658                 entry->cred = NULL;
1659         }
1660
1661         new_entry = vm_map_entry_create(map);
1662         *new_entry = *entry;
1663
1664         new_entry->end = start;
1665         entry->offset += (start - entry->start);
1666         entry->start = start;
1667         if (new_entry->cred != NULL)
1668                 crhold(entry->cred);
1669
1670         vm_map_entry_link(map, entry->prev, new_entry);
1671
1672         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1673                 vm_object_reference(new_entry->object.vm_object);
1674                 /*
1675                  * The object->un_pager.vnp.writemappings for the
1676                  * object of MAP_ENTRY_VN_WRITECNT type entry shall be
1677                  * kept as is here.  The virtual pages are
1678                  * re-distributed among the clipped entries, so the sum is
1679                  * left the same.
1680                  */
1681         }
1682 }
1683
1684 /*
1685  *      vm_map_clip_end:        [ internal use only ]
1686  *
1687  *      Asserts that the given entry ends at or before
1688  *      the specified address; if necessary,
1689  *      it splits the entry into two.
1690  */
1691 #define vm_map_clip_end(map, entry, endaddr) \
1692 { \
1693         if ((endaddr) < (entry->end)) \
1694                 _vm_map_clip_end((map), (entry), (endaddr)); \
1695 }
1696
1697 /*
1698  *      This routine is called only when it is known that
1699  *      the entry must be split.
1700  */
1701 static void
1702 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1703 {
1704         vm_map_entry_t new_entry;
1705
1706         VM_MAP_ASSERT_LOCKED(map);
1707
1708         /*
1709          * If there is no object backing this entry, we might as well create
1710          * one now.  If we defer it, an object can get created after the map
1711          * is clipped, and individual objects will be created for the split-up
1712          * map.  This is a bit of a hack, but is also about the best place to
1713          * put this improvement.
1714          */
1715         if (entry->object.vm_object == NULL && !map->system_map) {
1716                 vm_object_t object;
1717                 object = vm_object_allocate(OBJT_DEFAULT,
1718                                 atop(entry->end - entry->start));
1719                 entry->object.vm_object = object;
1720                 entry->offset = 0;
1721                 if (entry->cred != NULL) {
1722                         object->cred = entry->cred;
1723                         object->charge = entry->end - entry->start;
1724                         entry->cred = NULL;
1725                 }
1726         } else if (entry->object.vm_object != NULL &&
1727                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1728                    entry->cred != NULL) {
1729                 VM_OBJECT_WLOCK(entry->object.vm_object);
1730                 KASSERT(entry->object.vm_object->cred == NULL,
1731                     ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1732                 entry->object.vm_object->cred = entry->cred;
1733                 entry->object.vm_object->charge = entry->end - entry->start;
1734                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
1735                 entry->cred = NULL;
1736         }
1737
1738         /*
1739          * Create a new entry and insert it AFTER the specified entry
1740          */
1741         new_entry = vm_map_entry_create(map);
1742         *new_entry = *entry;
1743
1744         new_entry->start = entry->end = end;
1745         new_entry->offset += (end - entry->start);
1746         if (new_entry->cred != NULL)
1747                 crhold(entry->cred);
1748
1749         vm_map_entry_link(map, entry, new_entry);
1750
1751         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1752                 vm_object_reference(new_entry->object.vm_object);
1753         }
1754 }
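
/*
 * Illustrative sketch (not part of this file): the canonical pattern
 * for restricting an operation to [start, end) with the clip macros,
 * as used by vm_map_submap() and vm_map_protect() below:
 *
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 *	...
 *	vm_map_clip_end(map, entry, end);
 */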
1755
1756 /*
1757  *      vm_map_submap:          [ kernel use only ]
1758  *
1759  *      Mark the given range as handled by a subordinate map.
1760  *
1761  *      This range must have been created with vm_map_find,
1762  *      and no other operations may have been performed on this
1763  *      range prior to calling vm_map_submap.
1764  *
1765  *      Only a limited number of operations can be performed
1766  *      within this range after calling vm_map_submap:
1767  *              vm_fault
1768  *      [Don't try vm_map_copy!]
1769  *
1770  *      To remove a submapping, one must first remove the
1771  *      range from the superior map, and then destroy the
1772  *      submap (if desired).  [Better yet, don't try it.]
1773  */
1774 int
1775 vm_map_submap(
1776         vm_map_t map,
1777         vm_offset_t start,
1778         vm_offset_t end,
1779         vm_map_t submap)
1780 {
1781         vm_map_entry_t entry;
1782         int result = KERN_INVALID_ARGUMENT;
1783
1784         vm_map_lock(map);
1785
1786         VM_MAP_RANGE_CHECK(map, start, end);
1787
1788         if (vm_map_lookup_entry(map, start, &entry)) {
1789                 vm_map_clip_start(map, entry, start);
1790         } else
1791                 entry = entry->next;
1792
1793         vm_map_clip_end(map, entry, end);
1794
1795         if ((entry->start == start) && (entry->end == end) &&
1796             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1797             (entry->object.vm_object == NULL)) {
1798                 entry->object.sub_map = submap;
1799                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1800                 result = KERN_SUCCESS;
1801         }
1802         vm_map_unlock(map);
1803
1804         return (result);
1805 }
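
/*
 * Example (a sketch under stated assumptions; the usual path is the
 * kmem_suballoc() wrapper): a kernel submap is created by reserving a
 * range in the parent map, building a map over it, and marking the
 * range as a submap:
 *
 *	(void)vm_map_find(parent, NULL, 0, &base, size, 0,
 *	    VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
 *	submap = vm_map_create(vm_map_pmap(parent), base, base + size);
 *	(void)vm_map_submap(parent, base, base + size, submap);
 */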
1806
1807 /*
1808  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
1809  */
1810 #define MAX_INIT_PT     96
1811
1812 /*
1813  *      vm_map_pmap_enter:
1814  *
1815  *      Preload the specified map's pmap with mappings to the specified
1816  *      object's memory-resident pages.  No further physical pages are
1817  *      allocated, and no further virtual pages are retrieved from secondary
1818  *      storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
1819  *      limited number of page mappings are created at the low-end of the
1820  *      specified address range.  (For this purpose, a superpage mapping
1821  *      counts as one page mapping.)  Otherwise, all resident pages within
1822  *      the specified address range are mapped.  Because these mappings are
1823  *      being created speculatively, cached pages are not reactivated and
1824  *      mapped.
1825  */
1826 void
1827 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1828     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1829 {
1830         vm_offset_t start;
1831         vm_page_t p, p_start;
1832         vm_pindex_t mask, psize, threshold, tmpidx;
1833
1834         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1835                 return;
1836         VM_OBJECT_RLOCK(object);
1837         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1838                 VM_OBJECT_RUNLOCK(object);
1839                 VM_OBJECT_WLOCK(object);
1840                 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1841                         pmap_object_init_pt(map->pmap, addr, object, pindex,
1842                             size);
1843                         VM_OBJECT_WUNLOCK(object);
1844                         return;
1845                 }
1846                 VM_OBJECT_LOCK_DOWNGRADE(object);
1847         }
1848
1849         psize = atop(size);
1850         if (psize + pindex > object->size) {
1851                 if (object->size < pindex) {
1852                         VM_OBJECT_RUNLOCK(object);
1853                         return;
1854                 }
1855                 psize = object->size - pindex;
1856         }
1857
1858         start = 0;
1859         p_start = NULL;
1860         threshold = MAX_INIT_PT;
1861
1862         p = vm_page_find_least(object, pindex);
1863         /*
1864          * Assert: the variable p is either (1) the page with the
1865          * least pindex greater than or equal to the parameter pindex
1866          * or (2) NULL.
1867          */
1868         for (;
1869              p != NULL && (tmpidx = p->pindex - pindex) < psize;
1870              p = TAILQ_NEXT(p, listq)) {
1871                 /*
1872                  * Don't let a prefault triggered by madvise exhaust
1873                  * the free-page reserve by allocating pv entries.
1874                  */
1875                 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
1876                     cnt.v_free_count < cnt.v_free_reserved) ||
1877                     ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
1878                     tmpidx >= threshold)) {
1879                         psize = tmpidx;
1880                         break;
1881                 }
1882                 if (p->valid == VM_PAGE_BITS_ALL) {
1883                         if (p_start == NULL) {
1884                                 start = addr + ptoa(tmpidx);
1885                                 p_start = p;
1886                         }
1887                         /* Jump ahead if a superpage mapping is possible. */
1888                         if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
1889                             (pagesizes[p->psind] - 1)) == 0) {
1890                                 mask = atop(pagesizes[p->psind]) - 1;
1891                                 if (tmpidx + mask < psize &&
1892                                     vm_page_ps_is_valid(p)) {
1893                                         p += mask;
1894                                         threshold += mask;
1895                                 }
1896                         }
1897                 } else if (p_start != NULL) {
1898                         pmap_enter_object(map->pmap, start, addr +
1899                             ptoa(tmpidx), p_start, prot);
1900                         p_start = NULL;
1901                 }
1902         }
1903         if (p_start != NULL)
1904                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1905                     p_start, prot);
1906         VM_OBJECT_RUNLOCK(object);
1907 }
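
/*
 * Illustrative call (a sketch; the flags shown are assumptions based
 * on the comment above): preload at most MAX_INIT_PT page mappings
 * when speculatively prefaulting a newly created file mapping:
 *
 *	vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
 *	    end - start, MAP_PREFAULT_PARTIAL);
 */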
1908
1909 /*
1910  *      vm_map_protect:
1911  *
1912  *      Sets the protection of the specified address
1913  *      region in the target map.  If "set_max" is
1914  *      specified, the maximum protection is to be set;
1915  *      otherwise, only the current protection is affected.
1916  */
1917 int
1918 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1919                vm_prot_t new_prot, boolean_t set_max)
1920 {
1921         vm_map_entry_t current, entry;
1922         vm_object_t obj;
1923         struct ucred *cred;
1924         vm_prot_t old_prot;
1925
1926         if (start == end)
1927                 return (KERN_SUCCESS);
1928
1929         vm_map_lock(map);
1930
1931         VM_MAP_RANGE_CHECK(map, start, end);
1932
1933         if (vm_map_lookup_entry(map, start, &entry)) {
1934                 vm_map_clip_start(map, entry, start);
1935         } else {
1936                 entry = entry->next;
1937         }
1938
1939         /*
1940          * Make a first pass to check for protection violations.
1941          */
1942         current = entry;
1943         while ((current != &map->header) && (current->start < end)) {
1944                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1945                         vm_map_unlock(map);
1946                         return (KERN_INVALID_ARGUMENT);
1947                 }
1948                 if ((new_prot & current->max_protection) != new_prot) {
1949                         vm_map_unlock(map);
1950                         return (KERN_PROTECTION_FAILURE);
1951                 }
1952                 current = current->next;
1953         }
1954
1956         /*
1957          * Do an accounting pass for private read-only mappings that
1958          * will now do copy-on-write due to newly-allowed write access
1959          * (e.g., a debugger setting a breakpoint in the text segment).
1960          */
1961         for (current = entry; (current != &map->header) &&
1962              (current->start < end); current = current->next) {
1963
1964                 vm_map_clip_end(map, current, end);
1965
1966                 if (set_max ||
1967                     ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1968                     ENTRY_CHARGED(current)) {
1969                         continue;
1970                 }
1971
1972                 cred = curthread->td_ucred;
1973                 obj = current->object.vm_object;
1974
1975                 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1976                         if (!swap_reserve(current->end - current->start)) {
1977                                 vm_map_unlock(map);
1978                                 return (KERN_RESOURCE_SHORTAGE);
1979                         }
1980                         crhold(cred);
1981                         current->cred = cred;
1982                         continue;
1983                 }
1984
1985                 VM_OBJECT_WLOCK(obj);
1986                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1987                         VM_OBJECT_WUNLOCK(obj);
1988                         continue;
1989                 }
1990
1991                 /*
1992                  * Charge for the whole object allocation now, since
1993                  * we cannot distinguish between non-charged and
1994                  * charged clipped mapping of the same object later.
1995                  */
1996                 KASSERT(obj->charge == 0,
1997                     ("vm_map_protect: object %p overcharged (entry %p)",
1998                     obj, current));
1999                 if (!swap_reserve(ptoa(obj->size))) {
2000                         VM_OBJECT_WUNLOCK(obj);
2001                         vm_map_unlock(map);
2002                         return (KERN_RESOURCE_SHORTAGE);
2003                 }
2004
2005                 crhold(cred);
2006                 obj->cred = cred;
2007                 obj->charge = ptoa(obj->size);
2008                 VM_OBJECT_WUNLOCK(obj);
2009         }
2010
2011         /*
2012          * Go back and fix up protections. [Note that clipping is not
2013          * necessary the second time.]
2014          */
2015         current = entry;
2016         while ((current != &map->header) && (current->start < end)) {
2017                 old_prot = current->protection;
2018
2019                 if (set_max)
2020                         current->protection =
2021                             (current->max_protection = new_prot) &
2022                             old_prot;
2023                 else
2024                         current->protection = new_prot;
2025
2026                 /*
2027                  * For user wired map entries, the normal lazy evaluation of
2028                  * write access upgrades through soft page faults is
2029                  * undesirable.  Instead, immediately copy any pages that are
2030                  * copy-on-write and enable write access in the physical map.
2031                  */
2032                 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2033                     (current->protection & VM_PROT_WRITE) != 0 &&
2034                     (old_prot & VM_PROT_WRITE) == 0)
2035                         vm_fault_copy_entry(map, map, current, current, NULL);
2036
2037                 /*
2038                  * When restricting access, update the physical map.  Worry
2039                  * about copy-on-write here.
2040                  */
2041                 if ((old_prot & ~current->protection) != 0) {
2042 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2043                                                         VM_PROT_ALL)
2044                         pmap_protect(map->pmap, current->start,
2045                             current->end,
2046                             current->protection & MASK(current));
2047 #undef  MASK
2048                 }
2049                 vm_map_simplify_entry(map, current);
2050                 current = current->next;
2051         }
2052         vm_map_unlock(map);
2053         return (KERN_SUCCESS);
2054 }
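
/*
 * Example (an illustrative sketch, not a verbatim caller): mprotect(2)
 * is implemented in terms of this routine, roughly as:
 *
 *	rv = vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
 *	    addr + size, prot, FALSE);
 *
 * where td is the calling thread.
 */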
2055
2056 /*
2057  *      vm_map_madvise:
2058  *
2059  *      This routine traverses a process's map, handling the madvise
2060  *      system call.  Advisories are classified as either those affecting
2061  *      the vm_map_entry structure or those affecting the underlying
2062  *      objects.
2063  */
2064 int
2065 vm_map_madvise(
2066         vm_map_t map,
2067         vm_offset_t start,
2068         vm_offset_t end,
2069         int behav)
2070 {
2071         vm_map_entry_t current, entry;
2072         int modify_map = 0;
2073
2074         /*
2075          * Some madvise calls directly modify the vm_map_entry, in which case
2076          * we need to use an exclusive lock on the map and we need to perform
2077          * various clipping operations.  Otherwise we only need a read-lock
2078          * on the map.
2079          */
2080         switch (behav) {
2081         case MADV_NORMAL:
2082         case MADV_SEQUENTIAL:
2083         case MADV_RANDOM:
2084         case MADV_NOSYNC:
2085         case MADV_AUTOSYNC:
2086         case MADV_NOCORE:
2087         case MADV_CORE:
2088                 if (start == end)
2089                         return (KERN_SUCCESS);
2090                 modify_map = 1;
2091                 vm_map_lock(map);
2092                 break;
2093         case MADV_WILLNEED:
2094         case MADV_DONTNEED:
2095         case MADV_FREE:
2096                 if (start == end)
2097                         return (KERN_SUCCESS);
2098                 vm_map_lock_read(map);
2099                 break;
2100         default:
2101                 return (KERN_INVALID_ARGUMENT);
2102         }
2103
2104         /*
2105          * Locate starting entry and clip if necessary.
2106          */
2107         VM_MAP_RANGE_CHECK(map, start, end);
2108
2109         if (vm_map_lookup_entry(map, start, &entry)) {
2110                 if (modify_map)
2111                         vm_map_clip_start(map, entry, start);
2112         } else {
2113                 entry = entry->next;
2114         }
2115
2116         if (modify_map) {
2117                 /*
2118                  * madvise behaviors that are implemented in the vm_map_entry.
2119                  *
2120                  * We clip the vm_map_entry so that behavioral changes are
2121                  * limited to the specified address range.
2122                  */
2123                 for (current = entry;
2124                      (current != &map->header) && (current->start < end);
2125                      current = current->next
2126                 ) {
2127                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2128                                 continue;
2129
2130                         vm_map_clip_end(map, current, end);
2131
2132                         switch (behav) {
2133                         case MADV_NORMAL:
2134                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2135                                 break;
2136                         case MADV_SEQUENTIAL:
2137                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2138                                 break;
2139                         case MADV_RANDOM:
2140                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2141                                 break;
2142                         case MADV_NOSYNC:
2143                                 current->eflags |= MAP_ENTRY_NOSYNC;
2144                                 break;
2145                         case MADV_AUTOSYNC:
2146                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
2147                                 break;
2148                         case MADV_NOCORE:
2149                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
2150                                 break;
2151                         case MADV_CORE:
2152                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2153                                 break;
2154                         default:
2155                                 break;
2156                         }
2157                         vm_map_simplify_entry(map, current);
2158                 }
2159                 vm_map_unlock(map);
2160         } else {
2161                 vm_pindex_t pstart, pend;
2162
2163                 /*
2164                  * madvise behaviors that are implemented in the underlying
2165                  * vm_object.
2166                  *
2167                  * Since we don't clip the vm_map_entry, we have to clip
2168                  * the vm_object pindex and count.
2169                  */
2170                 for (current = entry;
2171                      (current != &map->header) && (current->start < end);
2172                      current = current->next
2173                 ) {
2174                         vm_offset_t useEnd, useStart;
2175
2176                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2177                                 continue;
2178
2179                         pstart = OFF_TO_IDX(current->offset);
2180                         pend = pstart + atop(current->end - current->start);
2181                         useStart = current->start;
2182                         useEnd = current->end;
2183
2184                         if (current->start < start) {
2185                                 pstart += atop(start - current->start);
2186                                 useStart = start;
2187                         }
2188                         if (current->end > end) {
2189                                 pend -= atop(current->end - end);
2190                                 useEnd = end;
2191                         }
2192
2193                         if (pstart >= pend)
2194                                 continue;
2195
2196                         /*
2197                          * Perform the pmap_advise() before clearing
2198                          * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2199                          * concurrent pmap operation, such as pmap_remove(),
2200                          * could clear a reference in the pmap and set
2201                          * PGA_REFERENCED on the page before the pmap_advise()
2202                          * had completed.  Consequently, the page would appear
2203                          * referenced based upon an old reference that
2204                          * occurred before this pmap_advise() ran.
2205                          */
2206                         if (behav == MADV_DONTNEED || behav == MADV_FREE)
2207                                 pmap_advise(map->pmap, useStart, useEnd,
2208                                     behav);
2209
2210                         vm_object_madvise(current->object.vm_object, pstart,
2211                             pend, behav);
2212
2213                         /*
2214                          * Pre-populate paging structures in the
2215                          * WILLNEED case.  For wired entries, the
2216                          * paging structures are already populated.
2217                          */
2218                         if (behav == MADV_WILLNEED &&
2219                             current->wired_count == 0) {
2220                                 vm_map_pmap_enter(map,
2221                                     useStart,
2222                                     current->protection,
2223                                     current->object.vm_object,
2224                                     pstart,
2225                                     ptoa(pend - pstart),
2226                                     MAP_PREFAULT_MADVISE
2227                                 );
2228                         }
2229                 }
2230                 vm_map_unlock_read(map);
2231         }
2232         return (0);
2233 }
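
/*
 * Example (an illustrative sketch): madvise(2) reduces to a single
 * call on the current process's map, e.g. for MADV_DONTNEED:
 *
 *	rv = vm_map_madvise(&td->td_proc->p_vmspace->vm_map, start,
 *	    end, MADV_DONTNEED);
 */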
2234
2236 /*
2237  *      vm_map_inherit:
2238  *
2239  *      Sets the inheritance of the specified address
2240  *      range in the target map.  Inheritance
2241  *      affects how the map will be shared with
2242  *      child maps at the time of vmspace_fork.
2243  */
2244 int
2245 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2246                vm_inherit_t new_inheritance)
2247 {
2248         vm_map_entry_t entry;
2249         vm_map_entry_t temp_entry;
2250
2251         switch (new_inheritance) {
2252         case VM_INHERIT_NONE:
2253         case VM_INHERIT_COPY:
2254         case VM_INHERIT_SHARE:
2255                 break;
2256         default:
2257                 return (KERN_INVALID_ARGUMENT);
2258         }
2259         if (start == end)
2260                 return (KERN_SUCCESS);
2261         vm_map_lock(map);
2262         VM_MAP_RANGE_CHECK(map, start, end);
2263         if (vm_map_lookup_entry(map, start, &temp_entry)) {
2264                 entry = temp_entry;
2265                 vm_map_clip_start(map, entry, start);
2266         } else
2267                 entry = temp_entry->next;
2268         while ((entry != &map->header) && (entry->start < end)) {
2269                 vm_map_clip_end(map, entry, end);
2270                 entry->inheritance = new_inheritance;
2271                 vm_map_simplify_entry(map, entry);
2272                 entry = entry->next;
2273         }
2274         vm_map_unlock(map);
2275         return (KERN_SUCCESS);
2276 }
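
/*
 * Example (an illustrative sketch): minherit(2) maps directly onto
 * this routine:
 *
 *	rv = vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
 *	    addr + size, (vm_inherit_t)inherit);
 */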
2277
2278 /*
2279  *      vm_map_unwire:
2280  *
2281  *      Implements both kernel and user unwiring.
2282  */
2283 int
2284 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2285     int flags)
2286 {
2287         vm_map_entry_t entry, first_entry, tmp_entry;
2288         vm_offset_t saved_start;
2289         unsigned int last_timestamp;
2290         int rv;
2291         boolean_t need_wakeup, result, user_unwire;
2292
2293         if (start == end)
2294                 return (KERN_SUCCESS);
2295         user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2296         vm_map_lock(map);
2297         VM_MAP_RANGE_CHECK(map, start, end);
2298         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2299                 if (flags & VM_MAP_WIRE_HOLESOK)
2300                         first_entry = first_entry->next;
2301                 else {
2302                         vm_map_unlock(map);
2303                         return (KERN_INVALID_ADDRESS);
2304                 }
2305         }
2306         last_timestamp = map->timestamp;
2307         entry = first_entry;
2308         while (entry != &map->header && entry->start < end) {
2309                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2310                         /*
2311                          * We have not yet clipped the entry.
2312                          */
2313                         saved_start = (start >= entry->start) ? start :
2314                             entry->start;
2315                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2316                         if (vm_map_unlock_and_wait(map, 0)) {
2317                                 /*
2318                                  * Allow interruption of user unwiring?
2319                                  */
2320                         }
2321                         vm_map_lock(map);
2322                         if (last_timestamp+1 != map->timestamp) {
2323                                 /*
2324                                  * Look again for the entry because the map was
2325                                  * modified while it was unlocked.
2326                                  * Specifically, the entry may have been
2327                                  * clipped, merged, or deleted.
2328                                  */
2329                                 if (!vm_map_lookup_entry(map, saved_start,
2330                                     &tmp_entry)) {
2331                                         if (flags & VM_MAP_WIRE_HOLESOK)
2332                                                 tmp_entry = tmp_entry->next;
2333                                         else {
2334                                                 if (saved_start == start) {
2335                                                         /*
2336                                                          * first_entry has been deleted.
2337                                                          */
2338                                                         vm_map_unlock(map);
2339                                                         return (KERN_INVALID_ADDRESS);
2340                                                 }
2341                                                 end = saved_start;
2342                                                 rv = KERN_INVALID_ADDRESS;
2343                                                 goto done;
2344                                         }
2345                                 }
2346                                 if (entry == first_entry)
2347                                         first_entry = tmp_entry;
2348                                 else
2349                                         first_entry = NULL;
2350                                 entry = tmp_entry;
2351                         }
2352                         last_timestamp = map->timestamp;
2353                         continue;
2354                 }
2355                 vm_map_clip_start(map, entry, start);
2356                 vm_map_clip_end(map, entry, end);
2357                 /*
2358                  * Mark the entry in case the map lock is released.  (See
2359                  * above.)
2360                  */
2361                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2362                     entry->wiring_thread == NULL,
2363                     ("owned map entry %p", entry));
2364                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2365                 entry->wiring_thread = curthread;
2366                 /*
2367                  * Check the map for holes in the specified region.
2368                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2369                  */
2370                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2371                     (entry->end < end && (entry->next == &map->header ||
2372                     entry->next->start > entry->end))) {
2373                         end = entry->end;
2374                         rv = KERN_INVALID_ADDRESS;
2375                         goto done;
2376                 }
2377                 /*
2378                  * If system unwiring, require that the entry is system wired.
2379                  */
2380                 if (!user_unwire &&
2381                     vm_map_entry_system_wired_count(entry) == 0) {
2382                         end = entry->end;
2383                         rv = KERN_INVALID_ARGUMENT;
2384                         goto done;
2385                 }
2386                 entry = entry->next;
2387         }
2388         rv = KERN_SUCCESS;
2389 done:
2390         need_wakeup = FALSE;
2391         if (first_entry == NULL) {
2392                 result = vm_map_lookup_entry(map, start, &first_entry);
2393                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2394                         first_entry = first_entry->next;
2395                 else
2396                         KASSERT(result, ("vm_map_unwire: lookup failed"));
2397         }
2398         for (entry = first_entry; entry != &map->header && entry->start < end;
2399             entry = entry->next) {
2400                 /*
2401                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
2402                  * space in the unwired region could have been mapped
2403                  * while the map lock was dropped for draining
2404                  * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2405                  * could be simultaneously wiring this new mapping
2406                  * entry.  Detect these cases and skip any entries
2407                  * marked as in transition by us.
2408                  */
2409                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2410                     entry->wiring_thread != curthread) {
2411                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2412                             ("vm_map_unwire: !HOLESOK and new/changed entry"));
2413                         continue;
2414                 }
2415
2416                 if (rv == KERN_SUCCESS && (!user_unwire ||
2417                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2418                         if (user_unwire)
2419                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2420                         if (entry->wired_count == 1)
2421                                 vm_map_entry_unwire(map, entry);
2422                         else
2423                                 entry->wired_count--;
2424                 }
2425                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2426                     ("vm_map_unwire: in-transition flag missing %p", entry));
2427                 KASSERT(entry->wiring_thread == curthread,
2428                     ("vm_map_unwire: alien wire %p", entry));
2429                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2430                 entry->wiring_thread = NULL;
2431                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2432                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2433                         need_wakeup = TRUE;
2434                 }
2435                 vm_map_simplify_entry(map, entry);
2436         }
2437         vm_map_unlock(map);
2438         if (need_wakeup)
2439                 vm_map_wakeup(map);
2440         return (rv);
2441 }
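
/*
 * Example (an illustrative sketch): munlock(2) performs a user unwire
 * of the page-rounded range and does not tolerate holes:
 *
 *	rv = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */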
2442
2443 /*
2444  *      vm_map_wire_entry_failure:
2445  *
2446  *      Handle a wiring failure on the given entry.
2447  *
2448  *      The map should be locked.
2449  */
2450 static void
2451 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
2452     vm_offset_t failed_addr)
2453 {
2454
2455         VM_MAP_ASSERT_LOCKED(map);
2456         KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
2457             entry->wired_count == 1,
2458             ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
2459         KASSERT(failed_addr < entry->end,
2460             ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
2461
2462         /*
2463          * If any pages at the start of this entry were successfully wired,
2464          * then unwire them.
2465          */
2466         if (failed_addr > entry->start) {
2467                 pmap_unwire(map->pmap, entry->start, failed_addr);
2468                 vm_object_unwire(entry->object.vm_object, entry->offset,
2469                     failed_addr - entry->start, PQ_ACTIVE);
2470         }
2471
2472         /*
2473          * Assign an out-of-range value to represent the failure to wire this
2474          * entry.
2475          */
2476         entry->wired_count = -1;
2477 }
2478
2479 /*
2480  *      vm_map_wire:
2481  *
2482  *      Implements both kernel and user wiring.
2483  */
2484 int
2485 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2486     int flags)
2487 {
2488         vm_map_entry_t entry, first_entry, tmp_entry;
2489         vm_offset_t faddr, saved_end, saved_start;
2490         unsigned int last_timestamp;
2491         int rv;
2492         boolean_t need_wakeup, result, user_wire;
2493         vm_prot_t prot;
2494
2495         if (start == end)
2496                 return (KERN_SUCCESS);
2497         prot = 0;
2498         if (flags & VM_MAP_WIRE_WRITE)
2499                 prot |= VM_PROT_WRITE;
2500         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2501         vm_map_lock(map);
2502         VM_MAP_RANGE_CHECK(map, start, end);
2503         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2504                 if (flags & VM_MAP_WIRE_HOLESOK)
2505                         first_entry = first_entry->next;
2506                 else {
2507                         vm_map_unlock(map);
2508                         return (KERN_INVALID_ADDRESS);
2509                 }
2510         }
2511         last_timestamp = map->timestamp;
2512         entry = first_entry;
2513         while (entry != &map->header && entry->start < end) {
2514                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2515                         /*
2516                          * We have not yet clipped the entry.
2517                          */
2518                         saved_start = (start >= entry->start) ? start :
2519                             entry->start;
2520                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2521                         if (vm_map_unlock_and_wait(map, 0)) {
2522                                 /*
2523                                  * Allow interruption of user wiring?
2524                                  */
2525                         }
2526                         vm_map_lock(map);
2527                         if (last_timestamp + 1 != map->timestamp) {
2528                                 /*
2529                                  * Look again for the entry because the map was
2530                                  * modified while it was unlocked.
2531                                  * Specifically, the entry may have been
2532                                  * clipped, merged, or deleted.
2533                                  */
2534                                 if (!vm_map_lookup_entry(map, saved_start,
2535                                     &tmp_entry)) {
2536                                         if (flags & VM_MAP_WIRE_HOLESOK)
2537                                                 tmp_entry = tmp_entry->next;
2538                                         else {
2539                                                 if (saved_start == start) {
2540                                                         /*
2541                                                          * first_entry has been deleted.
2542                                                          */
2543                                                         vm_map_unlock(map);
2544                                                         return (KERN_INVALID_ADDRESS);
2545                                                 }
2546                                                 end = saved_start;
2547                                                 rv = KERN_INVALID_ADDRESS;
2548                                                 goto done;
2549                                         }
2550                                 }
2551                                 if (entry == first_entry)
2552                                         first_entry = tmp_entry;
2553                                 else
2554                                         first_entry = NULL;
2555                                 entry = tmp_entry;
2556                         }
2557                         last_timestamp = map->timestamp;
2558                         continue;
2559                 }
2560                 vm_map_clip_start(map, entry, start);
2561                 vm_map_clip_end(map, entry, end);
2562                 /*
2563                  * Mark the entry in case the map lock is released.  (See
2564                  * above.)
2565                  */
2566                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2567                     entry->wiring_thread == NULL,
2568                     ("owned map entry %p", entry));
2569                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2570                 entry->wiring_thread = curthread;
2571                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2572                     || (entry->protection & prot) != prot) {
2573                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2574                         if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2575                                 end = entry->end;
2576                                 rv = KERN_INVALID_ADDRESS;
2577                                 goto done;
2578                         }
2579                         goto next_entry;
2580                 }
2581                 if (entry->wired_count == 0) {
2582                         entry->wired_count++;
2583                         saved_start = entry->start;
2584                         saved_end = entry->end;
2585
2586                         /*
2587                          * Release the map lock, relying on the in-transition
2588                          * mark.  Mark the map busy for fork.
2589                          */
2590                         vm_map_busy(map);
2591                         vm_map_unlock(map);
2592
2593                         faddr = saved_start;
2594                         do {
2595                                 /*
2596                                  * Simulate a fault to get the page and enter
2597                                  * it into the physical map.
2598                                  */
2599                                 if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
2600                                     VM_FAULT_CHANGE_WIRING)) != KERN_SUCCESS)
2601                                         break;
2602                         } while ((faddr += PAGE_SIZE) < saved_end);
2603                         vm_map_lock(map);
2604                         vm_map_unbusy(map);
2605                         if (last_timestamp + 1 != map->timestamp) {
2606                                 /*
2607                                  * Look again for the entry because the map was
2608                                  * modified while it was unlocked.  The entry
2609                                  * may have been clipped, but NOT merged or
2610                                  * deleted.
2611                                  */
2612                                 result = vm_map_lookup_entry(map, saved_start,
2613                                     &tmp_entry);
2614                                 KASSERT(result, ("vm_map_wire: lookup failed"));
2615                                 if (entry == first_entry)
2616                                         first_entry = tmp_entry;
2617                                 else
2618                                         first_entry = NULL;
2619                                 entry = tmp_entry;
2620                                 while (entry->end < saved_end) {
2621                                         /*
2622                                          * In case of failure, handle entries
2623                                          * that were not fully wired here;
2624                                          * fully wired entries are handled
2625                                          * later.
2626                                          */
2627                                         if (rv != KERN_SUCCESS &&
2628                                             faddr < entry->end)
2629                                                 vm_map_wire_entry_failure(map,
2630                                                     entry, faddr);
2631                                         entry = entry->next;
2632                                 }
2633                         }
2634                         last_timestamp = map->timestamp;
2635                         if (rv != KERN_SUCCESS) {
2636                                 vm_map_wire_entry_failure(map, entry, faddr);
2637                                 end = entry->end;
2638                                 goto done;
2639                         }
2640                 } else if (!user_wire ||
2641                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2642                         entry->wired_count++;
2643                 }
2644                 /*
2645                  * Check the map for holes in the specified region.
2646                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2647                  */
2648         next_entry:
2649                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2650                     (entry->end < end && (entry->next == &map->header ||
2651                     entry->next->start > entry->end))) {
2652                         end = entry->end;
2653                         rv = KERN_INVALID_ADDRESS;
2654                         goto done;
2655                 }
2656                 entry = entry->next;
2657         }
2658         rv = KERN_SUCCESS;
2659 done:
2660         need_wakeup = FALSE;
2661         if (first_entry == NULL) {
2662                 result = vm_map_lookup_entry(map, start, &first_entry);
2663                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2664                         first_entry = first_entry->next;
2665                 else
2666                         KASSERT(result, ("vm_map_wire: lookup failed"));
2667         }
2668         for (entry = first_entry; entry != &map->header && entry->start < end;
2669             entry = entry->next) {
2670                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2671                         goto next_entry_done;
2672
2673                 /*
2674                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
2675                  * space in the unwired region could have been mapped
2676                  * while the map lock was dropped for faulting in the
2677                  * pages or draining MAP_ENTRY_IN_TRANSITION.
2678                  * Moreover, another thread could be simultaneously
2679                  * wiring this new mapping entry.  Detect these cases
2680                  * and skip any entries marked as in transition by us.
2681                  */
2682                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2683                     entry->wiring_thread != curthread) {
2684                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2685                             ("vm_map_wire: !HOLESOK and new/changed entry"));
2686                         continue;
2687                 }
2688
2689                 if (rv == KERN_SUCCESS) {
2690                         if (user_wire)
2691                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
2692                 } else if (entry->wired_count == -1) {
2693                         /*
2694                          * Wiring failed on this entry.  Thus, unwiring is
2695                          * unnecessary.
2696                          */
2697                         entry->wired_count = 0;
2698                 } else if (!user_wire ||
2699                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2700                         /*
2701                          * Undo the wiring.  Wiring succeeded on this entry
2702                          * but failed on a later entry.  
2703                          */
2704                         if (entry->wired_count == 1)
2705                                 vm_map_entry_unwire(map, entry);
2706                         else
2707                                 entry->wired_count--;
2708                 }
2709         next_entry_done:
2710                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2711                     ("vm_map_wire: in-transition flag missing %p", entry));
2712                 KASSERT(entry->wiring_thread == curthread,
2713                     ("vm_map_wire: alien wire %p", entry));
2714                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2715                     MAP_ENTRY_WIRE_SKIPPED);
2716                 entry->wiring_thread = NULL;
2717                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2718                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2719                         need_wakeup = TRUE;
2720                 }
2721                 vm_map_simplify_entry(map, entry);
2722         }
2723         vm_map_unlock(map);
2724         if (need_wakeup)
2725                 vm_map_wakeup(map);
2726         return (rv);
2727 }
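
/*
 * Example (an illustrative sketch): mlock(2) is the user-wiring
 * caller of this routine:
 *
 *	rv = vm_map_wire(&td->td_proc->p_vmspace->vm_map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */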
2728
2729 /*
2730  * vm_map_sync
2731  *
2732  * Push any dirty cached pages in the address range to their pager.
2733  * If syncio is TRUE, dirty pages are written synchronously.
2734  * If invalidate is TRUE, any cached pages are freed as well.
2735  *
2736  * If the size of the region from start to end is zero, we are
2737  * supposed to flush all modified pages within the region containing
2738  * start.  Unfortunately, a region can be split or coalesced with
2739  * neighboring regions, making it difficult to determine what the
2740  * original region was.  Therefore, we approximate this requirement by
2741  * flushing the current region containing start.
2742  *
2743  * Returns an error if any part of the specified range is not mapped.
2744  */
2745 int
2746 vm_map_sync(
2747         vm_map_t map,
2748         vm_offset_t start,
2749         vm_offset_t end,
2750         boolean_t syncio,
2751         boolean_t invalidate)
2752 {
2753         vm_map_entry_t current;
2754         vm_map_entry_t entry;
2755         vm_size_t size;
2756         vm_object_t object;
2757         vm_ooffset_t offset;
2758         unsigned int last_timestamp;
2759         boolean_t failed;
2760
2761         vm_map_lock_read(map);
2762         VM_MAP_RANGE_CHECK(map, start, end);
2763         if (!vm_map_lookup_entry(map, start, &entry)) {
2764                 vm_map_unlock_read(map);
2765                 return (KERN_INVALID_ADDRESS);
2766         } else if (start == end) {
2767                 start = entry->start;
2768                 end = entry->end;
2769         }
2770         /*
2771          * Make a first pass to check for user-wired memory and holes.
2772          */
2773         for (current = entry; current != &map->header && current->start < end;
2774             current = current->next) {
2775                 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2776                         vm_map_unlock_read(map);
2777                         return (KERN_INVALID_ARGUMENT);
2778                 }
2779                 if (end > current->end &&
2780                     (current->next == &map->header ||
2781                         current->end != current->next->start)) {
2782                         vm_map_unlock_read(map);
2783                         return (KERN_INVALID_ADDRESS);
2784                 }
2785         }
2786
2787         if (invalidate)
2788                 pmap_remove(map->pmap, start, end);
2789         failed = FALSE;
2790
2791         /*
2792          * Make a second pass, cleaning/uncaching pages from the indicated
2793          * objects as we go.
2794          */
2795         for (current = entry; current != &map->header && current->start < end;) {
2796                 offset = current->offset + (start - current->start);
2797                 size = (end <= current->end ? end : current->end) - start;
2798                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2799                         vm_map_t smap;
2800                         vm_map_entry_t tentry;
2801                         vm_size_t tsize;
2802
2803                         smap = current->object.sub_map;
2804                         vm_map_lock_read(smap);
2805                         (void) vm_map_lookup_entry(smap, offset, &tentry);
2806                         tsize = tentry->end - offset;
2807                         if (tsize < size)
2808                                 size = tsize;
2809                         object = tentry->object.vm_object;
2810                         offset = tentry->offset + (offset - tentry->start);
2811                         vm_map_unlock_read(smap);
2812                 } else {
2813                         object = current->object.vm_object;
2814                 }
2815                 vm_object_reference(object);
2816                 last_timestamp = map->timestamp;
2817                 vm_map_unlock_read(map);
2818                 if (!vm_object_sync(object, offset, size, syncio, invalidate))
2819                         failed = TRUE;
2820                 start += size;
2821                 vm_object_deallocate(object);
2822                 vm_map_lock_read(map);
2823                 if (last_timestamp == map->timestamp ||
2824                     !vm_map_lookup_entry(map, start, &current))
2825                         current = current->next;
2826         }
2827
2828         vm_map_unlock_read(map);
2829         return (failed ? KERN_FAILURE : KERN_SUCCESS);
2830 }
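
/*
 * A minimal usage sketch: how an msync(2)-style caller could drive
 * vm_map_sync() above.  The wrapper name and signature are hypothetical;
 * MS_SYNC and MS_INVALIDATE are the standard <sys/mman.h> flags.
 */
#if 0
static int
example_msync(vm_map_t map, vm_offset_t addr, vm_size_t len, int flags)
{

	/*
	 * Write dirty pages synchronously when MS_SYNC is set, and also
	 * free cached pages when MS_INVALIDATE is set.
	 */
	return (vm_map_sync(map, addr, addr + len,
	    (flags & MS_SYNC) != 0, (flags & MS_INVALIDATE) != 0));
}
#endif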
2831
2832 /*
2833  *      vm_map_entry_unwire:    [ internal use only ]
2834  *
2835  *      Make the region specified by this entry pageable.
2836  *
2837  *      The map in question should be locked.
2838  *      [This is the reason for this routine's existence.]
2839  */
2840 static void
2841 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2842 {
2843
2844         VM_MAP_ASSERT_LOCKED(map);
2845         KASSERT(entry->wired_count > 0,
2846             ("vm_map_entry_unwire: entry %p isn't wired", entry));
2847         pmap_unwire(map->pmap, entry->start, entry->end);
2848         vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
2849             entry->start, PQ_ACTIVE);
2850         entry->wired_count = 0;
2851 }
2852
2853 static void
2854 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2855 {
2856
2857         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2858                 vm_object_deallocate(entry->object.vm_object);
2859         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2860 }
2861
2862 /*
2863  *      vm_map_entry_delete:    [ internal use only ]
2864  *
2865  *      Deallocate the given entry from the target map.
2866  */
2867 static void
2868 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2869 {
2870         vm_object_t object;
2871         vm_pindex_t offidxstart, offidxend, count, size1;
2872         vm_ooffset_t size;
2873
2874         vm_map_entry_unlink(map, entry);
2875         object = entry->object.vm_object;
2876         size = entry->end - entry->start;
2877         map->size -= size;
2878
2879         if (entry->cred != NULL) {
2880                 swap_release_by_cred(size, entry->cred);
2881                 crfree(entry->cred);
2882         }
2883
2884         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2885             (object != NULL)) {
2886                 KASSERT(entry->cred == NULL || object->cred == NULL ||
2887                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2888                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2889                 count = OFF_TO_IDX(size);
2890                 offidxstart = OFF_TO_IDX(entry->offset);
2891                 offidxend = offidxstart + count;
2892                 VM_OBJECT_WLOCK(object);
2893                 if (object->ref_count != 1 &&
2894                     ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2895                     object == kernel_object || object == kmem_object)) {
2896                         vm_object_collapse(object);
2897
2898                         /*
2899                          * The option OBJPR_NOTMAPPED can be passed here
2900                          * because vm_map_delete() already performed
2901                          * pmap_remove() on the only mapping to this range
2902                          * of pages. 
2903                          */
2904                         vm_object_page_remove(object, offidxstart, offidxend,
2905                             OBJPR_NOTMAPPED);
2906                         if (object->type == OBJT_SWAP)
2907                                 swap_pager_freespace(object, offidxstart, count);
2908                         if (offidxend >= object->size &&
2909                             offidxstart < object->size) {
2910                                 size1 = object->size;
2911                                 object->size = offidxstart;
2912                                 if (object->cred != NULL) {
2913                                         size1 -= object->size;
2914                                         KASSERT(object->charge >= ptoa(size1),
2915                                             ("vm_map_entry_delete: object->charge < 0"));
2916                                         swap_release_by_cred(ptoa(size1), object->cred);
2917                                         object->charge -= ptoa(size1);
2918                                 }
2919                         }
2920                 }
2921                 VM_OBJECT_WUNLOCK(object);
2922         } else
2923                 entry->object.vm_object = NULL;
2924         if (map->system_map)
2925                 vm_map_entry_deallocate(entry, TRUE);
2926         else {
2927                 entry->next = curthread->td_map_def_user;
2928                 curthread->td_map_def_user = entry;
2929         }
2930 }
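
/*
 * A minimal sketch of the deferral above: on user maps the deleted
 * entries are queued on curthread->td_map_def_user and are only freed
 * once the map lock is dropped.  The wrapper below is hypothetical;
 * vm_map_unlock() drains the deferred list via vm_map_process_deferred()
 * (see the inlined unlock at the end of vmspace_fork() below).
 */
#if 0
static void
example_delete_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, start, end);
	/* Dropping the lock frees the entries queued by the delete. */
	vm_map_unlock(map);
}
#endif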
2931
2932 /*
2933  *      vm_map_delete:  [ internal use only ]
2934  *
2935  *      Deallocates the given address range from the target
2936  *      map.
2937  */
2938 int
2939 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2940 {
2941         vm_map_entry_t entry;
2942         vm_map_entry_t first_entry;
2943
2944         VM_MAP_ASSERT_LOCKED(map);
2945         if (start == end)
2946                 return (KERN_SUCCESS);
2947
2948         /*
2949          * Find the start of the region, and clip it
2950          */
2951         if (!vm_map_lookup_entry(map, start, &first_entry))
2952                 entry = first_entry->next;
2953         else {
2954                 entry = first_entry;
2955                 vm_map_clip_start(map, entry, start);
2956         }
2957
2958         /*
2959          * Step through all entries in this region
2960          */
2961         while ((entry != &map->header) && (entry->start < end)) {
2962                 vm_map_entry_t next;
2963
2964                 /*
2965                  * Wait for wiring or unwiring of an entry to complete.
2966                  * Also wait for any system wirings to disappear on
2967                  * user maps.
2968                  */
2969                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2970                     (vm_map_pmap(map) != kernel_pmap &&
2971                     vm_map_entry_system_wired_count(entry) != 0)) {
2972                         unsigned int last_timestamp;
2973                         vm_offset_t saved_start;
2974                         vm_map_entry_t tmp_entry;
2975
2976                         saved_start = entry->start;
2977                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2978                         last_timestamp = map->timestamp;
2979                         (void) vm_map_unlock_and_wait(map, 0);
2980                         vm_map_lock(map);
2981                         if (last_timestamp + 1 != map->timestamp) {
2982                                 /*
2983                                  * Look again for the entry because the map was
2984                                  * modified while it was unlocked.
2985                                  * Specifically, the entry may have been
2986                                  * clipped, merged, or deleted.
2987                                  */
2988                                 if (!vm_map_lookup_entry(map, saved_start,
2989                                                          &tmp_entry))
2990                                         entry = tmp_entry->next;
2991                                 else {
2992                                         entry = tmp_entry;
2993                                         vm_map_clip_start(map, entry,
2994                                                           saved_start);
2995                                 }
2996                         }
2997                         continue;
2998                 }
2999                 vm_map_clip_end(map, entry, end);
3000
3001                 next = entry->next;
3002
3003                 /*
3004                  * Unwire before removing addresses from the pmap; otherwise,
3005                  * unwiring will put the entries back in the pmap.
3006                  */
3007                 if (entry->wired_count != 0) {
3008                         vm_map_entry_unwire(map, entry);
3009                 }
3010
3011                 pmap_remove(map->pmap, entry->start, entry->end);
3012
3013                 /*
3014                  * Delete the entry only after removing all pmap
3015                  * entries pointing to its pages.  (Otherwise, its
3016                  * page frames may be reallocated, and any modify bits
3017                  * will be set in the wrong object!)
3018                  */
3019                 vm_map_entry_delete(map, entry);
3020                 entry = next;
3021         }
3022         return (KERN_SUCCESS);
3023 }
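
/*
 * A minimal sketch of the sleep/revalidate idiom used above: record the
 * map timestamp, drop the lock to sleep, and re-look-up the entry unless
 * the timestamp shows the map was left unmodified.  The wrapper is
 * hypothetical; the map must be locked on entry and is locked on return.
 */
#if 0
static void
example_wait_and_revalidate(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t entry;
	unsigned int last_timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		return;
	entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
	last_timestamp = map->timestamp;
	(void) vm_map_unlock_and_wait(map, 0);
	vm_map_lock(map);
	if (last_timestamp + 1 != map->timestamp) {
		/* The entry may have been clipped, merged, or deleted. */
		(void) vm_map_lookup_entry(map, addr, &entry);
	}
}
#endif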
3024
3025 /*
3026  *      vm_map_remove:
3027  *
3028  *      Remove the given address range from the target map.
3029  *      This is the exported form of vm_map_delete.
3030  */
3031 int
3032 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3033 {
3034         int result;
3035
3036         vm_map_lock(map);
3037         VM_MAP_RANGE_CHECK(map, start, end);
3038         result = vm_map_delete(map, start, end);
3039         vm_map_unlock(map);
3040         return (result);
3041 }
3042
3043 /*
3044  *      vm_map_check_protection:
3045  *
3046  *      Assert that the target map allows the specified privilege on the
3047  *      entire address region given.  The entire region must be allocated.
3048  *
3049  *      WARNING!  This code does not and should not check whether the
3050  *      contents of the region are accessible.  For example, a smaller file
3051  *      might be mapped into a larger address space.
3052  *
3053  *      NOTE!  This code is also called by munmap().
3054  *
3055  *      The map must be locked.  A read lock is sufficient.
3056  */
3057 boolean_t
3058 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3059                         vm_prot_t protection)
3060 {
3061         vm_map_entry_t entry;
3062         vm_map_entry_t tmp_entry;
3063
3064         if (!vm_map_lookup_entry(map, start, &tmp_entry))
3065                 return (FALSE);
3066         entry = tmp_entry;
3067
3068         while (start < end) {
3069                 if (entry == &map->header)
3070                         return (FALSE);
3071                 /*
3072                  * No holes allowed!
3073                  */
3074                 if (start < entry->start)
3075                         return (FALSE);
3076                 /*
3077                  * Check protection associated with entry.
3078                  */
3079                 if ((entry->protection & protection) != protection)
3080                         return (FALSE);
3081                 /* go to next entry */
3082                 start = entry->end;
3083                 entry = entry->next;
3084         }
3085         return (TRUE);
3086 }
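
/*
 * A minimal usage sketch for vm_map_check_protection() above: verify
 * that an entire user range permits reads before touching it.  The
 * wrapper is hypothetical; the check requires at least a read lock.
 */
#if 0
static boolean_t
example_range_is_readable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	boolean_t ok;

	vm_map_lock_read(map);
	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
	vm_map_unlock_read(map);
	return (ok);
}
#endif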
3087
3088 /*
3089  *      vm_map_copy_entry:
3090  *
3091  *      Copies the contents of the source entry to the destination
3092  *      entry.  The entries *must* be aligned properly.
3093  */
3094 static void
3095 vm_map_copy_entry(
3096         vm_map_t src_map,
3097         vm_map_t dst_map,
3098         vm_map_entry_t src_entry,
3099         vm_map_entry_t dst_entry,
3100         vm_ooffset_t *fork_charge)
3101 {
3102         vm_object_t src_object;
3103         vm_map_entry_t fake_entry;
3104         vm_offset_t size;
3105         struct ucred *cred;
3106         int charged;
3107
3108         VM_MAP_ASSERT_LOCKED(dst_map);
3109
3110         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3111                 return;
3112
3113         if (src_entry->wired_count == 0 ||
3114             (src_entry->protection & VM_PROT_WRITE) == 0) {
3115                 /*
3116                  * If the source entry is marked needs_copy, it is already
3117                  * write-protected.
3118                  */
3119                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3120                     (src_entry->protection & VM_PROT_WRITE) != 0) {
3121                         pmap_protect(src_map->pmap,
3122                             src_entry->start,
3123                             src_entry->end,
3124                             src_entry->protection & ~VM_PROT_WRITE);
3125                 }
3126
3127                 /*
3128                  * Make a copy of the object.
3129                  */
3130                 size = src_entry->end - src_entry->start;
3131                 if ((src_object = src_entry->object.vm_object) != NULL) {
3132                         VM_OBJECT_WLOCK(src_object);
3133                         charged = ENTRY_CHARGED(src_entry);
3134                         if ((src_object->handle == NULL) &&
3135                                 (src_object->type == OBJT_DEFAULT ||
3136                                  src_object->type == OBJT_SWAP)) {
3137                                 vm_object_collapse(src_object);
3138                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3139                                         vm_object_split(src_entry);
3140                                         src_object = src_entry->object.vm_object;
3141                                 }
3142                         }
3143                         vm_object_reference_locked(src_object);
3144                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3145                         if (src_entry->cred != NULL &&
3146                             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3147                                 KASSERT(src_object->cred == NULL,
3148                                     ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3149                                      src_object));
3150                                 src_object->cred = src_entry->cred;
3151                                 src_object->charge = size;
3152                         }
3153                         VM_OBJECT_WUNLOCK(src_object);
3154                         dst_entry->object.vm_object = src_object;
3155                         if (charged) {
3156                                 cred = curthread->td_ucred;
3157                                 crhold(cred);
3158                                 dst_entry->cred = cred;
3159                                 *fork_charge += size;
3160                                 if (!(src_entry->eflags &
3161                                       MAP_ENTRY_NEEDS_COPY)) {
3162                                         crhold(cred);
3163                                         src_entry->cred = cred;
3164                                         *fork_charge += size;
3165                                 }
3166                         }
3167                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3168                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3169                         dst_entry->offset = src_entry->offset;
3170                         if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3171                                 /*
3172                                  * MAP_ENTRY_VN_WRITECNT cannot
3173                                  * indicate write reference from
3174                                  * src_entry, since the entry is
3175                                  * marked as needs copy.  Allocate a
3176                                  * fake entry that is used to
3177                          * decrement object->un_pager.vnp.writemappings
3178                                  * at the appropriate time.  Attach
3179                                  * fake_entry to the deferred list.
3180                                  */
3181                                 fake_entry = vm_map_entry_create(dst_map);
3182                                 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3183                                 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3184                                 vm_object_reference(src_object);
3185                                 fake_entry->object.vm_object = src_object;
3186                                 fake_entry->start = src_entry->start;
3187                                 fake_entry->end = src_entry->end;
3188                                 fake_entry->next = curthread->td_map_def_user;
3189                                 curthread->td_map_def_user = fake_entry;
3190                         }
3191                 } else {
3192                         dst_entry->object.vm_object = NULL;
3193                         dst_entry->offset = 0;
3194                         if (src_entry->cred != NULL) {
3195                                 dst_entry->cred = curthread->td_ucred;
3196                                 crhold(dst_entry->cred);
3197                                 *fork_charge += size;
3198                         }
3199                 }
3200
3201                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3202                     dst_entry->end - dst_entry->start, src_entry->start);
3203         } else {
3204                 /*
3205                  * We don't want to make writeable wired pages copy-on-write.
3206                  * Immediately copy these pages into the new map by simulating
3207                  * page faults.  The new pages are pageable.
3208                  */
3209                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3210                     fork_charge);
3211         }
3212 }
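
/*
 * A minimal sketch of the decision made above: wired, writable source
 * entries are copied eagerly via vm_fault_copy_entry(); everything else
 * is shared copy-on-write by write-protecting the source and marking
 * both entries MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY.  The predicate
 * below is a hypothetical restatement, not code from this file.
 */
#if 0
static boolean_t
example_must_copy_eagerly(vm_map_entry_t src_entry)
{

	return (src_entry->wired_count != 0 &&
	    (src_entry->protection & VM_PROT_WRITE) != 0);
}
#endif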
3213
3214 /*
3215  * vmspace_map_entry_forked:
3216  * Update the newly-forked vmspace each time a map entry is inherited
3217  * or copied.  The values for vm_dsize and vm_tsize are approximate
3218  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3219  */
3220 static void
3221 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3222     vm_map_entry_t entry)
3223 {
3224         vm_size_t entrysize;
3225         vm_offset_t newend;
3226
3227         entrysize = entry->end - entry->start;
3228         vm2->vm_map.size += entrysize;
3229         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3230                 vm2->vm_ssize += btoc(entrysize);
3231         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3232             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3233                 newend = MIN(entry->end,
3234                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3235                 vm2->vm_dsize += btoc(newend - entry->start);
3236         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3237             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3238                 newend = MIN(entry->end,
3239                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3240                 vm2->vm_tsize += btoc(newend - entry->start);
3241         }
3242 }
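
/*
 * A worked example of the clamping above, assuming 4 KB pages and a
 * hypothetical data segment at vm_daddr = 0x10000000 with vm_dsize = 4
 * pages (so the segment ends at 0x10004000): an inherited entry covering
 * [0x10002000, 0x10006000) starts inside the segment, so newend =
 * MIN(0x10006000, 0x10004000) = 0x10004000 and the entry contributes
 * btoc(0x2000) = 2 pages to vm2->vm_dsize.
 */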
3243
3244 /*
3245  * vmspace_fork:
3246  * Create a new process vmspace structure and vm_map
3247  * based on those of an existing process.  The new map
3248  * is based on the old map, according to the inheritance
3249  * values on the regions in that map.
3250  *
3251  * XXX It might be worth coalescing the entries added to the new vmspace.
3252  *
3253  * The source map must not be locked.
3254  */
3255 struct vmspace *
3256 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3257 {
3258         struct vmspace *vm2;
3259         vm_map_t new_map, old_map;
3260         vm_map_entry_t new_entry, old_entry;
3261         vm_object_t object;
3262         int locked;
3263
3264         old_map = &vm1->vm_map;
3265         /* Copy immutable fields of vm1 to vm2. */
3266         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
3267         if (vm2 == NULL)
3268                 return (NULL);
3269         vm2->vm_taddr = vm1->vm_taddr;
3270         vm2->vm_daddr = vm1->vm_daddr;
3271         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3272         vm_map_lock(old_map);
3273         if (old_map->busy)
3274                 vm_map_wait_busy(old_map);
3275         new_map = &vm2->vm_map;
3276         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3277         KASSERT(locked, ("vmspace_fork: lock failed"));
3278
3279         old_entry = old_map->header.next;
3280
3281         while (old_entry != &old_map->header) {
3282                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3283                         panic("vm_map_fork: encountered a submap");
3284
3285                 switch (old_entry->inheritance) {
3286                 case VM_INHERIT_NONE:
3287                         break;
3288
3289                 case VM_INHERIT_SHARE:
3290                         /*
3291                          * Clone the entry, creating the shared object if necessary.
3292                          */
3293                         object = old_entry->object.vm_object;
3294                         if (object == NULL) {
3295                                 object = vm_object_allocate(OBJT_DEFAULT,
3296                                         atop(old_entry->end - old_entry->start));
3297                                 old_entry->object.vm_object = object;
3298                                 old_entry->offset = 0;
3299                                 if (old_entry->cred != NULL) {
3300                                         object->cred = old_entry->cred;
3301                                         object->charge = old_entry->end -
3302                                             old_entry->start;
3303                                         old_entry->cred = NULL;
3304                                 }
3305                         }
3306
3307                         /*
3308                          * Add the reference before calling vm_object_shadow
3309                          * to ensure that a shadow object is created.
3310                          */
3311                         vm_object_reference(object);
3312                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3313                                 vm_object_shadow(&old_entry->object.vm_object,
3314                                     &old_entry->offset,
3315                                     old_entry->end - old_entry->start);
3316                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3317                                 /* Transfer the second reference too. */
3318                                 vm_object_reference(
3319                                     old_entry->object.vm_object);
3320
3321                                 /*
3322                                  * As in vm_map_simplify_entry(), the
3323                                  * vnode lock will not be acquired in
3324                                  * this call to vm_object_deallocate().
3325                                  */
3326                                 vm_object_deallocate(object);
3327                                 object = old_entry->object.vm_object;
3328                         }
3329                         VM_OBJECT_WLOCK(object);
3330                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
3331                         if (old_entry->cred != NULL) {
3332                                 KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3333                                 object->cred = old_entry->cred;
3334                                 object->charge = old_entry->end - old_entry->start;
3335                                 old_entry->cred = NULL;
3336                         }
3337
3338                         /*
3339                          * Assert the correct state of the vnode
3340                          * v_writecount while the object is locked, so
3341                          * that it does not have to be relocked later
3342                          * just for the assertion.
3343                          */
3344                         if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3345                             object->type == OBJT_VNODE) {
3346                                 KASSERT(((struct vnode *)object->handle)->
3347                                     v_writecount > 0,
3348                                     ("vmspace_fork: v_writecount %p", object));
3349                                 KASSERT(object->un_pager.vnp.writemappings > 0,
3350                                     ("vmspace_fork: vnp.writecount %p",
3351                                     object));
3352                         }
3353                         VM_OBJECT_WUNLOCK(object);
3354
3355                         /*
3356                          * Clone the entry, referencing the shared object.
3357                          */
3358                         new_entry = vm_map_entry_create(new_map);
3359                         *new_entry = *old_entry;
3360                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3361                             MAP_ENTRY_IN_TRANSITION);
3362                         new_entry->wiring_thread = NULL;
3363                         new_entry->wired_count = 0;
3364                         if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3365                                 vnode_pager_update_writecount(object,
3366                                     new_entry->start, new_entry->end);
3367                         }
3368
3369                         /*
3370                          * Insert the entry into the new map -- we know we're
3371                          * inserting at the end of the new map.
3372                          */
3373                         vm_map_entry_link(new_map, new_map->header.prev,
3374                             new_entry);
3375                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3376
3377                         /*
3378                          * Update the physical map
3379                          */
3380                         pmap_copy(new_map->pmap, old_map->pmap,
3381                             new_entry->start,
3382                             (old_entry->end - old_entry->start),
3383                             old_entry->start);
3384                         break;
3385
3386                 case VM_INHERIT_COPY:
3387                         /*
3388                          * Clone the entry and link into the map.
3389                          */
3390                         new_entry = vm_map_entry_create(new_map);
3391                         *new_entry = *old_entry;
3392                         /*
3393                          * Copied entry is COW over the old object.
3394                          */
3395                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3396                             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3397                         new_entry->wiring_thread = NULL;
3398                         new_entry->wired_count = 0;
3399                         new_entry->object.vm_object = NULL;
3400                         new_entry->cred = NULL;
3401                         vm_map_entry_link(new_map, new_map->header.prev,
3402                             new_entry);
3403                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3404                         vm_map_copy_entry(old_map, new_map, old_entry,
3405                             new_entry, fork_charge);
3406                         break;
3407                 }
3408                 old_entry = old_entry->next;
3409         }
3410         /*
3411          * Use inlined vm_map_unlock() to postpone handling the deferred
3412          * map entries, which cannot be done until both old_map and
3413          * new_map locks are released.
3414          */
3415         sx_xunlock(&old_map->lock);
3416         sx_xunlock(&new_map->lock);
3417         vm_map_process_deferred();
3418
3419         return (vm2);
3420 }
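
/*
 * A minimal sketch of consuming vmspace_fork() and its fork_charge
 * out-parameter, mirroring vmspace_unshare() below.  The wrapper name
 * is hypothetical.
 */
#if 0
static struct vmspace *
example_fork_vmspace(struct proc *p1)
{
	struct vmspace *vm2;
	vm_ooffset_t fork_charge;

	fork_charge = 0;
	vm2 = vmspace_fork(p1->p_vmspace, &fork_charge);
	if (vm2 == NULL)
		return (NULL);
	/* Reserve the swap charged for copied COW entries. */
	if (!swap_reserve_by_cred(fork_charge, p1->p_ucred)) {
		vmspace_free(vm2);
		return (NULL);
	}
	return (vm2);
}
#endif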
3421
3422 int
3423 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3424     vm_prot_t prot, vm_prot_t max, int cow)
3425 {
3426         vm_size_t growsize, init_ssize;
3427         rlim_t lmemlim, vmemlim;
3428         int rv;
3429
3430         growsize = sgrowsiz;
3431         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3432         vm_map_lock(map);
3433         PROC_LOCK(curproc);
3434         lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
3435         vmemlim = lim_cur(curproc, RLIMIT_VMEM);
3436         PROC_UNLOCK(curproc);
3437         if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3438                 if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
3439                         rv = KERN_NO_SPACE;
3440                         goto out;
3441                 }
3442         }
3443         /* If we would blow our VMEM resource limit, no go */
3444         if (map->size + init_ssize > vmemlim) {
3445                 rv = KERN_NO_SPACE;
3446                 goto out;
3447         }
3448         rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
3449             max, cow);
3450 out:
3451         vm_map_unlock(map);
3452         return (rv);
3453 }
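
/*
 * A minimal usage sketch for vm_map_stack() above: reserve a grow-down
 * stack of max_ssize bytes at a hypothetical base address.  The
 * orientation is passed piggybacked in the cow argument, as handled by
 * vm_map_stack_locked() below.
 */
#if 0
static int
example_map_stack(vm_map_t map, vm_offset_t bos, vm_size_t max_ssize)
{

	return (vm_map_stack(map, bos, max_ssize, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_STACK_GROWS_DOWN));
}
#endif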
3454
3455 static int
3456 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3457     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
3458 {
3459         vm_map_entry_t new_entry, prev_entry;
3460         vm_offset_t bot, top;
3461         vm_size_t init_ssize;
3462         int orient, rv;
3463
3464         /*
3465          * The stack orientation is piggybacked with the cow argument.
3466          * Extract it into orient and mask the cow argument so that we
3467          * don't pass it around further.
3468          * NOTE: We explicitly allow bi-directional stacks.
3469          */
3470         orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3471         KASSERT(orient != 0, ("No stack grow direction"));
3472
3473         if (addrbos < vm_map_min(map) ||
3474             addrbos > vm_map_max(map) ||
3475             addrbos + max_ssize < addrbos)
3476                 return (KERN_NO_SPACE);
3477
3478         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3479
3480         /* If addr is already mapped, no go */
3481         if (vm_map_lookup_entry(map, addrbos, &prev_entry))
3482                 return (KERN_NO_SPACE);
3483
3484         /*
3485          * If we can't accommodate max_ssize in the current mapping, no go.
3486          * However, we need to be aware that subsequent user mappings might
3487          * map into the space we have reserved for stack, and currently this
3488          * space is not protected.
3489          *
3490          * Hopefully we will at least detect this condition when we try to
3491          * grow the stack.
3492          */
3493         if ((prev_entry->next != &map->header) &&
3494             (prev_entry->next->start < addrbos + max_ssize))
3495                 return (KERN_NO_SPACE);
3496
3497         /*
3498          * We initially map a stack of only init_ssize.  We will grow as
3499          * needed later.  Depending on the orientation of the stack (i.e.
3500          * the grow direction) we either map at the top of the range, the
3501          * bottom of the range or in the middle.
3502          *
3503          * Note: we would normally expect prot and max to be VM_PROT_ALL,
3504          * and cow to be 0.  Possibly we should eliminate these as input
3505          * parameters, and just pass these values here in the insert call.
3506          */
3507         if (orient == MAP_STACK_GROWS_DOWN)
3508                 bot = addrbos + max_ssize - init_ssize;
3509         else if (orient == MAP_STACK_GROWS_UP)
3510                 bot = addrbos;
3511         else
3512                 bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3513         top = bot + init_ssize;
3514         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3515
3516         /* Now set the avail_ssize amount. */
3517         if (rv == KERN_SUCCESS) {
3518                 if (prev_entry != &map->header)
3519                         vm_map_clip_end(map, prev_entry, bot);
3520                 new_entry = prev_entry->next;
3521                 if (new_entry->end != top || new_entry->start != bot)
3522                         panic("Bad entry start/end for new stack entry");
3523
3524                 new_entry->avail_ssize = max_ssize - init_ssize;
3525                 if (orient & MAP_STACK_GROWS_DOWN)
3526                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3527                 if (orient & MAP_STACK_GROWS_UP)
3528                         new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3529         }
3530
3531         return (rv);
3532 }
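
/*
 * A worked example of the placement above for a hypothetical grow-down
 * stack: with addrbos = 0x10000000, max_ssize = 8 MB, and growsize =
 * sgrowsiz (commonly 128 KB), init_ssize = 128 KB, so the initial
 * mapping is bot = addrbos + max_ssize - init_ssize = 0x107e0000 and
 * top = 0x10800000, leaving avail_ssize = 8 MB - 128 KB for
 * vm_map_growstack() to consume.
 */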
3533
3534 static int stack_guard_page = 0;
3535 TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
3536 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
3537     &stack_guard_page, 0,
3538     "Insert stack guard page ahead of the growable segments.");
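
/*
 * Usage note: as a loader tunable this is set from loader.conf
 * ("security.bsd.stack_guard_page=1"), and since the OID is CTLFLAG_RW
 * it may also be toggled at runtime with sysctl(8).
 */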
3539
3540 /*
3541  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3542  * desired address is already mapped, or if we successfully grow the
3543  * stack.  Also returns KERN_SUCCESS if addr is outside the stack range
3544  * (strange, but compatible with the grow function in vm_machdep.c).
3545  */
3546 int
3547 vm_map_growstack(struct proc *p, vm_offset_t addr)
3548 {
3549         vm_map_entry_t next_entry, prev_entry;
3550         vm_map_entry_t new_entry, stack_entry;
3551         struct vmspace *vm = p->p_vmspace;
3552         vm_map_t map = &vm->vm_map;
3553         vm_offset_t end;
3554         vm_size_t growsize;
3555         size_t grow_amount, max_grow;
3556         rlim_t lmemlim, stacklim, vmemlim;
3557         int is_procstack, rv;
3558         struct ucred *cred;
3559 #ifdef notyet
3560         uint64_t limit;
3561 #endif
3562 #ifdef RACCT
3563         int error;
3564 #endif
3565
3566 Retry:
3567         PROC_LOCK(p);
3568         lmemlim = lim_cur(p, RLIMIT_MEMLOCK);
3569         stacklim = lim_cur(p, RLIMIT_STACK);
3570         vmemlim = lim_cur(p, RLIMIT_VMEM);
3571         PROC_UNLOCK(p);
3572
3573         vm_map_lock_read(map);
3574
3575         /* If addr is already in the entry range, no need to grow. */
3576         if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3577                 vm_map_unlock_read(map);
3578                 return (KERN_SUCCESS);
3579         }
3580
3581         next_entry = prev_entry->next;
3582         if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3583                 /*
3584                  * This entry does not grow upwards. Since the address lies
3585                  * beyond this entry, the next entry (if one exists) has to
3586                  * be a downward growable entry. The entry list header is
3587                  * never a growable entry, so it suffices to check the flags.
3588                  */
3589                 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3590                         vm_map_unlock_read(map);
3591                         return (KERN_SUCCESS);
3592                 }
3593                 stack_entry = next_entry;
3594         } else {
3595                 /*
3596                  * This entry grows upward. If the next entry does not at
3597                  * least grow downwards, this is the entry we need to grow.
3598                  * Otherwise we have two possible choices and we have to
3599                  * select one.
3600                  */
3601                 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3602                         /*
3603                          * We have two choices: grow the entry closest to
3604                          * the address to minimize the amount of growth.
3605                          */
3606                         if (addr - prev_entry->end <= next_entry->start - addr)
3607                                 stack_entry = prev_entry;
3608                         else
3609                                 stack_entry = next_entry;
3610                 } else
3611                         stack_entry = prev_entry;
3612         }
3613
3614         if (stack_entry == next_entry) {
3615                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("not grow-down"));
3616                 KASSERT(addr < stack_entry->start, ("addr above grow-down entry"));
3617                 end = (prev_entry != &map->header) ? prev_entry->end :
3618                     stack_entry->start - stack_entry->avail_ssize;
3619                 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3620                 max_grow = stack_entry->start - end;
3621         } else {
3622                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("not grow-up"));
3623                 KASSERT(addr >= stack_entry->end, ("addr below grow-up entry"));
3624                 end = (next_entry != &map->header) ? next_entry->start :
3625                     stack_entry->end + stack_entry->avail_ssize;
3626                 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3627                 max_grow = end - stack_entry->end;
3628         }
3629
3630         if (grow_amount > stack_entry->avail_ssize) {
3631                 vm_map_unlock_read(map);
3632                 return (KERN_NO_SPACE);
3633         }
3634
3635         /*
3636          * If there is no longer enough space between the entries, no go, and
3637          * adjust the available space.  Note: this should only happen if the
3638          * user has mapped into the stack area after the stack was created,
3639          * and is probably an error.
3640          *
3641          * This also effectively destroys any guard page the user might have
3642          * intended by limiting the stack size.
3643          */
3644         if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3645                 if (vm_map_lock_upgrade(map))
3646                         goto Retry;
3647
3648                 stack_entry->avail_ssize = max_grow;
3649
3650                 vm_map_unlock(map);
3651                 return (KERN_NO_SPACE);
3652         }
3653
3654         is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
3655
3656         /*
3657          * If this is the main process stack, see if we're over the stack
3658          * limit.
3659          */
3660         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3661                 vm_map_unlock_read(map);
3662                 return (KERN_NO_SPACE);
3663         }
3664 #ifdef RACCT
3665         PROC_LOCK(p);
3666         if (is_procstack &&
3667             racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) {
3668                 PROC_UNLOCK(p);
3669                 vm_map_unlock_read(map);
3670                 return (KERN_NO_SPACE);
3671         }
3672         PROC_UNLOCK(p);
3673 #endif
3674
3675         /* Round up the grow amount to a multiple of sgrowsiz. */
3676         growsize = sgrowsiz;
3677         grow_amount = roundup(grow_amount, growsize);
3678         if (grow_amount > stack_entry->avail_ssize)
3679                 grow_amount = stack_entry->avail_ssize;
3680         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3681                 grow_amount = trunc_page((vm_size_t)stacklim) -
3682                     ctob(vm->vm_ssize);
3683         }
3684 #ifdef notyet
3685         PROC_LOCK(p);
3686         limit = racct_get_available(p, RACCT_STACK);
3687         PROC_UNLOCK(p);
3688         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3689                 grow_amount = limit - ctob(vm->vm_ssize);
3690 #endif
3691         if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3692                 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3693                         vm_map_unlock_read(map);
3694                         rv = KERN_NO_SPACE;
3695                         goto out;
3696                 }
3697 #ifdef RACCT
3698                 PROC_LOCK(p);
3699                 if (racct_set(p, RACCT_MEMLOCK,
3700                     ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3701                         PROC_UNLOCK(p);
3702                         vm_map_unlock_read(map);
3703                         rv = KERN_NO_SPACE;
3704                         goto out;
3705                 }
3706                 PROC_UNLOCK(p);
3707 #endif
3708         }
3709         /* If we would blow our VMEM resource limit, no go */
3710         if (map->size + grow_amount > vmemlim) {
3711                 vm_map_unlock_read(map);
3712                 rv = KERN_NO_SPACE;
3713                 goto out;
3714         }
3715 #ifdef RACCT
3716         PROC_LOCK(p);
3717         if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3718                 PROC_UNLOCK(p);
3719                 vm_map_unlock_read(map);
3720                 rv = KERN_NO_SPACE;
3721                 goto out;
3722         }
3723         PROC_UNLOCK(p);
3724 #endif
3725
3726         if (vm_map_lock_upgrade(map))
3727                 goto Retry;
3728
3729         if (stack_entry == next_entry) {
3730                 /*
3731                  * Growing downward.
3732                  */
3733                 /* Get the preliminary new entry start value */
3734                 addr = stack_entry->start - grow_amount;
3735
3736                 /*
3737                  * If this puts us into the previous entry, cut back our
3738                  * growth to the available space. Also, see the note above.
3739                  */
3740                 if (addr < end) {
3741                         stack_entry->avail_ssize = max_grow;
3742                         addr = end;
3743                         if (stack_guard_page)
3744                                 addr += PAGE_SIZE;
3745                 }
3746
3747                 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3748                     next_entry->protection, next_entry->max_protection, 0);
3749
3750                 /* Adjust the available stack space by the amount we grew. */
3751                 if (rv == KERN_SUCCESS) {
3752                         if (prev_entry != &map->header)
3753                                 vm_map_clip_end(map, prev_entry, addr);
3754                         new_entry = prev_entry->next;
3755                         KASSERT(new_entry == stack_entry->prev, ("unexpected grown entry"));
3756                         KASSERT(new_entry->end == stack_entry->start, ("grown entry end mismatch"));
3757                         KASSERT(new_entry->start == addr, ("grown entry start mismatch"));
3758                         grow_amount = new_entry->end - new_entry->start;
3759                         new_entry->avail_ssize = stack_entry->avail_ssize -
3760                             grow_amount;
3761                         stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3762                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3763                 }
3764         } else {
3765                 /*
3766                  * Growing upward.
3767                  */
3768                 addr = stack_entry->end + grow_amount;
3769
3770                 /*
3771                  * If this puts us into the next entry, cut back our growth
3772                  * to the available space. Also, see the note above.
3773                  */
3774                 if (addr > end) {
3775                         stack_entry->avail_ssize = end - stack_entry->end;
3776                         addr = end;
3777                         if (stack_guard_page)
3778                                 addr -= PAGE_SIZE;
3779                 }
3780
3781                 grow_amount = addr - stack_entry->end;
3782                 cred = stack_entry->cred;
3783                 if (cred == NULL && stack_entry->object.vm_object != NULL)
3784                         cred = stack_entry->object.vm_object->cred;
3785                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3786                         rv = KERN_NO_SPACE;
3787                 /* Grow the underlying object if applicable. */
3788                 else if (stack_entry->object.vm_object == NULL ||
3789                          vm_object_coalesce(stack_entry->object.vm_object,
3790                          stack_entry->offset,
3791                          (vm_size_t)(stack_entry->end - stack_entry->start),
3792                          (vm_size_t)grow_amount, cred != NULL)) {
3793                         map->size += (addr - stack_entry->end);
3794                         /* Update the current entry. */
3795                         stack_entry->end = addr;
3796                         stack_entry->avail_ssize -= grow_amount;
3797                         vm_map_entry_resize_free(map, stack_entry);
3798                         rv = KERN_SUCCESS;
3799
3800                         if (next_entry != &map->header)
3801                                 vm_map_clip_start(map, next_entry, addr);
3802                 } else
3803                         rv = KERN_FAILURE;
3804         }
3805
3806         if (rv == KERN_SUCCESS && is_procstack)
3807                 vm->vm_ssize += btoc(grow_amount);
3808
3809         vm_map_unlock(map);
3810
3811         /*
3812          * Heed the MAP_WIREFUTURE flag if it was set for this process.
3813          */
3814         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3815                 vm_map_wire(map,
3816                     (stack_entry == next_entry) ? addr : addr - grow_amount,
3817                     (stack_entry == next_entry) ? stack_entry->start : addr,
3818                     (p->p_flag & P_SYSTEM)
3819                     ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3820                     : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3821         }
3822
3823 out:
3824 #ifdef RACCT
3825         if (rv != KERN_SUCCESS) {
3826                 PROC_LOCK(p);
3827                 error = racct_set(p, RACCT_VMEM, map->size);
3828                 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3829                 if (!old_mlock) {
3830                         error = racct_set(p, RACCT_MEMLOCK,
3831                             ptoa(pmap_wired_count(map->pmap)));
3832                         KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3833                 }
3834                 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3835                 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3836                 PROC_UNLOCK(p);
3837         }
3838 #endif
3839
3840         return (rv);
3841 }
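
/*
 * A worked example of the rounding above, with hypothetical numbers: a
 * fault 5000 bytes below a grow-down entry yields grow_amount =
 * roundup(5000, PAGE_SIZE) = 8192 with 4 KB pages, which is then
 * rounded up to a multiple of sgrowsiz (commonly 128 KB) and clamped
 * to the entry's avail_ssize and to RLIMIT_STACK for the main process
 * stack.
 */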
3842
3843 /*
3844  * Unshare the specified VM space for exec.  If other processes share
3845  * it, then create a new one.  The new vmspace contains no user mappings.
3846  */
3847 int
3848 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3849 {
3850         struct vmspace *oldvmspace = p->p_vmspace;
3851         struct vmspace *newvmspace;
3852
3853         KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3854             ("vmspace_exec recursed"));
3855         newvmspace = vmspace_alloc(minuser, maxuser, NULL);
3856         if (newvmspace == NULL)
3857                 return (ENOMEM);
3858         newvmspace->vm_swrss = oldvmspace->vm_swrss;
3859         /*
3860          * This code is written like this for prototype purposes.  The
3861          * goal is to avoid running down the vmspace here, but to let the
3862          * other processes that are still using the vmspace finally run
3863          * it down.  Even though there is little or no chance of blocking
3864          * here, it is a good idea to keep this form for future mods.
3865          */
3866         PROC_VMSPACE_LOCK(p);
3867         p->p_vmspace = newvmspace;
3868         PROC_VMSPACE_UNLOCK(p);
3869         if (p == curthread->td_proc)
3870                 pmap_activate(curthread);
3871         curthread->td_pflags |= TDP_EXECVMSPC;
3872         return (0);
3873 }
3874
3875 /*
3876  * Unshare the specified VM space for forcing COW.  This
3877  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3878  */
3879 int
3880 vmspace_unshare(struct proc *p)
3881 {
3882         struct vmspace *oldvmspace = p->p_vmspace;
3883         struct vmspace *newvmspace;
3884         vm_ooffset_t fork_charge;
3885
3886         if (oldvmspace->vm_refcnt == 1)
3887                 return (0);
3888         fork_charge = 0;
3889         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3890         if (newvmspace == NULL)
3891                 return (ENOMEM);
3892         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3893                 vmspace_free(newvmspace);
3894                 return (ENOMEM);
3895         }
3896         PROC_VMSPACE_LOCK(p);
3897         p->p_vmspace = newvmspace;
3898         PROC_VMSPACE_UNLOCK(p);
3899         if (p == curthread->td_proc)
3900                 pmap_activate(curthread);
3901         vmspace_free(oldvmspace);
3902         return (0);
3903 }
3904
3905 /*
3906  *      vm_map_lookup:
3907  *
3908  *      Finds the VM object, offset, and
3909  *      protection for a given virtual address in the
3910  *      specified map, assuming a page fault of the
3911  *      type specified.
3912  *
3913  *      Leaves the map in question locked for read; return
3914  *      values are guaranteed until a vm_map_lookup_done
3915  *      call is performed.  Note that the map argument
3916  *      is in/out; the returned map must be used in
3917  *      the call to vm_map_lookup_done.
3918  *
3919  *      A handle (out_entry) is returned for use in
3920  *      vm_map_lookup_done, to make that fast.
3921  *
3922  *      If a lookup is requested with "write protection"
3923  *      specified, the map may be changed to perform virtual
3924  *      copying operations, although the data referenced will
3925  *      remain the same.
3926  */
3927 int
3928 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3929               vm_offset_t vaddr,
3930               vm_prot_t fault_typea,
3931               vm_map_entry_t *out_entry,        /* OUT */
3932               vm_object_t *object,              /* OUT */
3933               vm_pindex_t *pindex,              /* OUT */
3934               vm_prot_t *out_prot,              /* OUT */
3935               boolean_t *wired)                 /* OUT */
3936 {
3937         vm_map_entry_t entry;
3938         vm_map_t map = *var_map;
3939         vm_prot_t prot;
3940         vm_prot_t fault_type = fault_typea;
3941         vm_object_t eobject;
3942         vm_size_t size;
3943         struct ucred *cred;
3944
3945 RetryLookup:;
3946
3947         vm_map_lock_read(map);
3948
3949         /*
3950          * Lookup the faulting address.
3951          */
3952         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3953                 vm_map_unlock_read(map);
3954                 return (KERN_INVALID_ADDRESS);
3955         }
3956
3957         entry = *out_entry;
3958
3959         /*
3960          * Handle submaps.
3961          */
3962         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3963                 vm_map_t old_map = map;
3964
3965                 *var_map = map = entry->object.sub_map;
3966                 vm_map_unlock_read(old_map);
3967                 goto RetryLookup;
3968         }
3969
3970         /*
3971          * Check whether this task is allowed to have this page.
3972          */
3973         prot = entry->protection;
3974         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3975         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3976                 vm_map_unlock_read(map);
3977                 return (KERN_PROTECTION_FAILURE);
3978         }
3979         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3980             (entry->eflags & MAP_ENTRY_COW) &&
3981             (fault_type & VM_PROT_WRITE)) {
3982                 vm_map_unlock_read(map);
3983                 return (KERN_PROTECTION_FAILURE);
3984         }
3985         if ((fault_typea & VM_PROT_COPY) != 0 &&
3986             (entry->max_protection & VM_PROT_WRITE) == 0 &&
3987             (entry->eflags & MAP_ENTRY_COW) == 0) {
3988                 vm_map_unlock_read(map);
3989                 return (KERN_PROTECTION_FAILURE);
3990         }
3991
3992         /*
3993          * If this page is not pageable, we have to get it for all possible
3994          * accesses.
3995          */
3996         *wired = (entry->wired_count != 0);
3997         if (*wired)
3998                 fault_type = entry->protection;
3999         size = entry->end - entry->start;
4000         /*
4001          * If the entry was copy-on-write, we either shadow it or demote access.
4002          */
4003         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4004                 /*
4005                  * If we want to write the page, we may as well handle that
4006                  * now since we've got the map locked.
4007                  *
4008                  * If we don't need to write the page, we just demote the
4009                  * permissions allowed.
4010                  */
4011                 if ((fault_type & VM_PROT_WRITE) != 0 ||
4012                     (fault_typea & VM_PROT_COPY) != 0) {
4013                         /*
4014                          * Make a new object, and place it in the object
4015                          * chain.  Note that no new references have appeared
4016                          * -- one just moved from the map to the new
4017                          * object.
4018                          */
4019                         if (vm_map_lock_upgrade(map))
4020                                 goto RetryLookup;
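                        /*
                         * Editorial note: a nonzero return from
                         * vm_map_lock_upgrade() means the read lock was
                         * dropped during the upgrade attempt and the map
                         * may have changed, so the lookup must restart.
                         */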
4021
4022                         if (entry->cred == NULL) {
4023                                 /*
4024                                  * The faulting thread's credential (e.g.,
4025                                  * a debugger's) is charged for the memory.
4026                                  */
4027                                 cred = curthread->td_ucred;
4028                                 crhold(cred);
4029                                 if (!swap_reserve_by_cred(size, cred)) {
4030                                         crfree(cred);
4031                                         vm_map_unlock(map);
4032                                         return (KERN_RESOURCE_SHORTAGE);
4033                                 }
4034                                 entry->cred = cred;
4035                         }
4036                         vm_object_shadow(&entry->object.vm_object,
4037                             &entry->offset, size);
4038                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4039                         eobject = entry->object.vm_object;
4040                         if (eobject->cred != NULL) {
4041                                 /*
4042                                  * The object was not shadowed.
4043                                  */
4044                                 swap_release_by_cred(size, entry->cred);
4045                                 crfree(entry->cred);
4046                                 entry->cred = NULL;
4047                         } else if (entry->cred != NULL) {
4048                                 VM_OBJECT_WLOCK(eobject);
4049                                 eobject->cred = entry->cred;
4050                                 eobject->charge = size;
4051                                 VM_OBJECT_WUNLOCK(eobject);
4052                                 entry->cred = NULL;
4053                         }
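                        /*
                         * Editorial note: the swap reservation made above
                         * now lives either on the pre-existing object
                         * (released again) or on the new shadow object
                         * (transferred); entry->cred is NULL either way.
                         */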
4054
4055                         vm_map_lock_downgrade(map);
4056                 } else {
4057                         /*
4058                          * We're attempting to read a copy-on-write page --
4059                          * don't allow writes.
4060                          */
4061                         prot &= ~VM_PROT_WRITE;
4062                 }
4063         }
4064
4065         /*
4066          * Create an object if necessary.
4067          */
4068         if (entry->object.vm_object == NULL &&
4069             !map->system_map) {
4070                 if (vm_map_lock_upgrade(map))
4071                         goto RetryLookup;
4072                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
4073                     atop(size));
4074                 entry->offset = 0;
4075                 if (entry->cred != NULL) {
4076                         VM_OBJECT_WLOCK(entry->object.vm_object);
4077                         entry->object.vm_object->cred = entry->cred;
4078                         entry->object.vm_object->charge = size;
4079                         VM_OBJECT_WUNLOCK(entry->object.vm_object);
4080                         entry->cred = NULL;
4081                 }
4082                 vm_map_lock_downgrade(map);
4083         }
4084
4085         /*
4086          * Return the object/offset from this entry.  If the entry was
4087          * copy-on-write or empty, it has been fixed up.
4088          */
4089         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
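        /*
         * Editorial example: with 4 KB pages, a vaddr 0x3000 bytes past
         * entry->start and entry->offset == 0x1000 yield
         * OFF_TO_IDX(0x4000) == 4.
         */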
4090         *object = entry->object.vm_object;
4091
4092         *out_prot = prot;
4093         return (KERN_SUCCESS);
4094 }
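
/*
 * Editorial sketch (not part of the original file): the typical caller
 * pattern, modeled loosely on vm_fault().  The function name and the
 * guard macro are invented for illustration; the KPI calls are the ones
 * defined above.
 */
#ifdef EXAMPLE_VM_MAP_LOOKUP_USAGE
static int
example_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type)
{
        vm_map_entry_t entry;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        int rv;

        /* Resolve vaddr to (object, pindex); returns with map read-locked. */
        rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
            &pindex, &prot, &wired);
        if (rv != KERN_SUCCESS)
                return (rv);

        /* ... fault in the page at (object, pindex) under "prot" ... */

        /* Drop the read lock taken by vm_map_lookup(). */
        vm_map_lookup_done(map, entry);
        return (KERN_SUCCESS);
}
#endif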
4095
4096 /*
4097  *      vm_map_lookup_locked:
4098  *
4099  *      Lookup the faulting address.  A version of vm_map_lookup() that
4100  *      returns KERN_FAILURE instead of blocking on the map lock or memory allocation.
4101  */
4102 int
4103 vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
4104                      vm_offset_t vaddr,
4105                      vm_prot_t fault_typea,
4106                      vm_map_entry_t *out_entry, /* OUT */
4107                      vm_object_t *object,       /* OUT */
4108                      vm_pindex_t *pindex,       /* OUT */
4109                      vm_prot_t *out_prot,       /* OUT */
4110                      boolean_t *wired)          /* OUT */
4111 {
4112         vm_map_entry_t entry;
4113         vm_map_t map = *var_map;
4114         vm_prot_t prot;
4115         vm_prot_t fault_type = fault_typea;
4116
4117         /*
4118          * Lookup the faulting address.
4119          */
4120         if (!vm_map_lookup_entry(map, vaddr, out_entry))
4121                 return (KERN_INVALID_ADDRESS);
4122
4123         entry = *out_entry;
4124
4125         /*
4126          * Fail if the entry refers to a submap.
4127          */
4128         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4129                 return (KERN_FAILURE);
4130
4131         /*
4132          * Check whether this task is allowed to have this page.
4133          */
4134         prot = entry->protection;
4135         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4136         if ((fault_type & prot) != fault_type)
4137                 return (KERN_PROTECTION_FAILURE);
4138         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4139             (entry->eflags & MAP_ENTRY_COW) &&
4140             (fault_type & VM_PROT_WRITE))
4141                 return (KERN_PROTECTION_FAILURE);
4142
4143         /*
4144          * If this page is not pageable, we have to get it for all possible
4145          * accesses.
4146          */
4147         *wired = (entry->wired_count != 0);
4148         if (*wired)
4149                 fault_type = entry->protection;
4150
4151         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4152                 /*
4153                  * Fail if the entry was copy-on-write for a write fault.
4154                  */
4155                 if (fault_type & VM_PROT_WRITE)
4156                         return (KERN_FAILURE);
4157                 /*
4158                  * We're attempting to read a copy-on-write page --
4159                  * don't allow writes.
4160                  */
4161                 prot &= ~VM_PROT_WRITE;
4162         }
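        /*
         * Editorial note: shadowing the object, as vm_map_lookup() would,
         * requires a lock upgrade and memory allocation; this nonblocking
         * variant must avoid both, so it fails the write fault instead.
         */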
4163
4164         /*
4165          * Fail if an object should be created.
4166          */
4167         if (entry->object.vm_object == NULL && !map->system_map)
4168                 return (KERN_FAILURE);
4169
4170         /*
4171          * Return the object/offset from this entry.  If the entry was
4172          * copy-on-write or empty, it has been fixed up.
4173          */
4174         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4175         *object = entry->object.vm_object;
4176
4177         *out_prot = prot;
4178         return (KERN_SUCCESS);
4179 }
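
/*
 * Editorial note: unlike vm_map_lookup(), the "_locked" variant neither
 * acquires nor releases the map lock; the caller must already hold it
 * and remains responsible for dropping it.
 */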
4180
4181 /*
4182  *      vm_map_lookup_done:
4183  *
4184  *      Releases locks acquired by a vm_map_lookup
4185  *      (according to the handle returned by that lookup).
4186  */
4187 void
4188 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4189 {
4190         /*
4191          * Unlock the main-level map.
4192          */
4193         vm_map_unlock_read(map);
4194 }
4195
4196 #include "opt_ddb.h"
4197 #ifdef DDB
4198 #include <sys/kernel.h>
4199
4200 #include <ddb/ddb.h>
4201
4202 static void
4203 vm_map_print(vm_map_t map)
4204 {
4205         vm_map_entry_t entry;
4206
4207         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4208             (void *)map,
4209             (void *)map->pmap, map->nentries, map->timestamp);
4210
4211         db_indent += 2;
4212         for (entry = map->header.next; entry != &map->header;
4213             entry = entry->next) {
4214                 db_iprintf("map entry %p: start=%p, end=%p\n",
4215                     (void *)entry, (void *)entry->start, (void *)entry->end);
4216                 {
4217                         static const char *inheritance_name[4] =
4218                             {"share", "copy", "none", "donate_copy"};
4219
4220                         db_iprintf(" prot=%x/%x/%s",
4221                             entry->protection,
4222                             entry->max_protection,
4223                             inheritance_name[(int)(unsigned char)entry->inheritance]);
4224                         if (entry->wired_count != 0)
4225                                 db_printf(", wired");
4226                 }
4227                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4228                         db_printf(", share=%p, offset=0x%jx\n",
4229                             (void *)entry->object.sub_map,
4230                             (uintmax_t)entry->offset);
4231                         if ((entry->prev == &map->header) ||
4232                             (entry->prev->object.sub_map !=
4233                                 entry->object.sub_map)) {
4234                                 db_indent += 2;
4235                                 vm_map_print((vm_map_t)entry->object.sub_map);
4236                                 db_indent -= 2;
4237                         }
4238                 } else {
4239                         if (entry->cred != NULL)
4240                                 db_printf(", ruid %d", entry->cred->cr_ruid);
4241                         db_printf(", object=%p, offset=0x%jx",
4242                             (void *)entry->object.vm_object,
4243                             (uintmax_t)entry->offset);
4244                         if (entry->object.vm_object && entry->object.vm_object->cred)
4245                                 db_printf(", obj ruid %d charge %jx",
4246                                     entry->object.vm_object->cred->cr_ruid,
4247                                     (uintmax_t)entry->object.vm_object->charge);
4248                         if (entry->eflags & MAP_ENTRY_COW)
4249                                 db_printf(", copy (%s)",
4250                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4251                         db_printf("\n");
4252
4253                         if ((entry->prev == &map->header) ||
4254                             (entry->prev->object.vm_object !=
4255                                 entry->object.vm_object)) {
4256                                 db_indent += 2;
4257                                 vm_object_print((db_expr_t)(intptr_t)
4258                                                 entry->object.vm_object,
4259                                                 0, 0, (char *)0);
4260                                 db_indent -= 2;
4261                         }
4262                 }
4263         }
4264         db_indent -= 2;
4265 }
4266
4267 DB_SHOW_COMMAND(map, map)
4268 {
4269
4270         if (!have_addr) {
4271                 db_printf("usage: show map <addr>\n");
4272                 return;
4273         }
4274         vm_map_print((vm_map_t)addr);
4275 }
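
/*
 * Editorial example: from the ddb prompt, dump a map by address, e.g.:
 *
 *      db> show map 0xfffff80002a1c000
 *
 * (the address shown is invented for illustration).
 */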
4276
4277 DB_SHOW_COMMAND(procvm, procvm)
4278 {
4279         struct proc *p;
4280
4281         if (have_addr) {
4282                 p = (struct proc *) addr;
4283         } else {
4284                 p = curproc;
4285         }
4286
4287         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4288             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4289             (void *)vmspace_pmap(p->p_vmspace));
4290
4291         vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4292 }
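
/*
 * Editorial example: "show procvm" with no address dumps curproc's
 * vmspace; "show procvm <proc-addr>" dumps the given process instead.
 */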
4293
4294 #endif /* DDB */