/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */
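
/*
 * Clipping example (illustrative, not from the original source): an
 * operation on the range [0x3000, 0x5000) against a single entry
 * spanning [0x2000, 0x6000) first splits that entry at 0x3000 and
 * again at 0x5000, leaving three entries; the middle one exactly
 * covers the operated-upon range and can be modified without
 * affecting its neighbors.
 */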

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

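/*
 * An entry is "charged" to a credential when it holds a cred reference
 * directly, or when its backing object is charged and the entry maps
 * that object without a pending copy-on-write (in which case the
 * object's charge covers the entry).
 */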
#define ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define PROC_VMSPACE_LOCK(p) do { } while (0)
#define PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}
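
/*
 * Example (illustrative): for a map covering [0x1000, 0x9000),
 * VM_MAP_RANGE_CHECK(map, start, end) with start = 0x500 and
 * end = 0xa000 clamps the range to [0x1000, 0x9000); if the clamped
 * start would exceed end, the range collapses to empty.
 */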

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);

	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));

	if (pinit == NULL)
		pinit = &pmap_pinit;

	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

static void
vmspace_container_reset(struct proc *p)
{

#ifdef RACCT
	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
#endif
}

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called with non-sleepable lock held");

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can. vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
	vmspace_container_reset(p);
}

/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
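	/*
	 * The CAS loop above took a reference without holding a lock on
	 * the vmspace itself, so re-check that the process still uses
	 * the same vmspace; if p_vmspace changed in the meantime, drop
	 * the new reference and fail.
	 */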
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vnode_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}
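
/*
 * Typical caller pattern (illustrative sketch, not from this file):
 * on failure the lock has been dropped entirely, so the caller must
 * retake it and revalidate any cached lookups.
 *
 *	if (vm_map_lock_upgrade(map) != 0) {
 *		vm_map_lock(map);
 *		... redo the lookup, the map may have changed ...
 *	}
 */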

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else
		sx_downgrade_(&map->lock, file, line);
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define VM_MAP_ASSERT_LOCKED(map)
#endif

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->min_offset = min;
	map->max_offset = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

	entry->max_free = entry->adj_free;
	if (entry->left != NULL && entry->left->max_free > entry->max_free)
		entry->max_free = entry->left->max_free;
	if (entry->right != NULL && entry->right->max_free > entry->max_free)
		entry->max_free = entry->right->max_free;
}

/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower or higher) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t ltree, rtree;
	vm_map_entry_t y;

	/* Special case of empty tree. */
	if (root == NULL)
		return (root);

	/*
	 * Pass One: Splay down the tree until we find addr or a NULL
	 * pointer where addr would go.  llist and rlist are the two
	 * sides in reverse order (bottom-up), with llist linked by
	 * the right pointer and rlist linked by the left pointer in
	 * the vm_map_entry.  Wait until Pass Two to set max_free on
	 * the two spines.
	 */
	llist = NULL;
	rlist = NULL;
	for (;;) {
		/* root is never NULL in here. */
		if (addr < root->start) {
			y = root->left;
			if (y == NULL)
				break;
			if (addr < y->start && y->left != NULL) {
				/* Rotate right and put y on rlist. */
				root->left = y->right;
				y->right = root;
				vm_map_entry_set_max_free(root);
				root = y->left;
				y->left = rlist;
				rlist = y;
			} else {
				/* Put root on rlist. */
				root->left = rlist;
				rlist = root;
				root = y;
			}
		} else if (addr >= root->end) {
			y = root->right;
			if (y == NULL)
				break;
			if (addr >= y->end && y->right != NULL) {
				/* Rotate left and put y on llist. */
				root->right = y->left;
				y->left = root;
				vm_map_entry_set_max_free(root);
				root = y->right;
				y->right = llist;
				llist = y;
			} else {
				/* Put root on llist. */
				root->right = llist;
				llist = root;
				root = y;
			}
		} else
			break;
	}

	/*
	 * Pass Two: Walk back up the two spines, flip the pointers
	 * and set max_free.  The subtrees of the root go at the
	 * bottom of llist and rlist.
	 */
	ltree = root->left;
	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	rtree = root->right;
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}
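
/*
 * Illustrative note: because splaying moves the entry containing (or
 * adjacent to) "addr" to the root, callers that must update adj_free
 * or max_free can splay the target to the root and then fix up only
 * the root, as vm_map_entry_resize_free() does below.
 */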

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(after_where == &map->header ||
	    after_where->end <= entry->start,
	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
	KASSERT(after_where->next == &map->header ||
	    entry->end <= after_where->next->start,
	    ("vm_map_entry_link: new end %jx next start %jx overlap",
	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
		after_where->adj_free = entry->start - after_where->end;
		vm_map_entry_set_max_free(after_where);
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
	map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	VM_MAP_ASSERT_LOCKED(map);
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
		root->adj_free = (entry->next == &map->header ? map->max_offset :
		    entry->next->start) - root->end;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a vm_map_entry
 *	and propagate that value up the tree.  Call this function after
 *	resizing a map entry in-place, that is, without a call to
 *	vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Using splay trees without parent pointers, propagating
	 * max_free up the tree is done by moving the entry to the
	 * root and making the change there.
	 */
	if (entry != map->root)
		map->root = vm_map_entry_splay(entry->start, map->root);

	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL)
		*entry = &map->header;
	else if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	} else if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	} else
		/*
		 * Since the map is only locked for read access, perform a
		 * standard binary search tree lookup for "address".
		 */
		for (;;) {
			if (address < cur->start) {
				if (cur->left == NULL) {
					*entry = cur->prev;
					break;
				}
				cur = cur->left;
			} else if (cur->end > address) {
				*entry = cur;
				return (TRUE);
			} else {
				if (cur->right == NULL) {
					*entry = cur;
					break;
				}
				cur = cur->right;
			}
		}
	return (FALSE);
}
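
/*
 * Usage sketch (illustrative): look up the entry backing "addr".
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies within [entry->start, entry->end) ...
 *	} else {
 *		... "entry" precedes addr, or is &map->header ...
 *	}
 *	vm_map_unlock_read(map);
 */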

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
	      vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
	      int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;
	struct ucred *cred;
	vm_inherit_t inheritance;
	boolean_t charge_prev_obj;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;
	charge_prev_obj = FALSE;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_VN_WRITECOUNT)
		protoeflags |= MAP_ENTRY_VN_WRITECNT;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	KASSERT((object != kmem_object && object != kernel_object) ||
	    ((object == kmem_object || object == kernel_object) &&
		!(protoeflags & MAP_ENTRY_NEEDS_COPY)),
	    ("kmem or kernel object and cow"));
	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
		    object->cred == NULL,
		    ("OVERCOMMIT: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
		crhold(cred);
		if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
			charge_prev_obj = TRUE;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_WLOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(object);
	}
	else if ((prev_entry != &map->header) &&
		 (prev_entry->eflags == protoeflags) &&
		 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
		 (prev_entry->end == start) &&
		 (prev_entry->wired_count == 0) &&
		 (prev_entry->cred == cred ||
		  (prev_entry->object.vm_object != NULL &&
		   (prev_entry->object.vm_object->cred == cred))) &&
		   vm_object_coalesce(prev_entry->object.vm_object,
		       prev_entry->offset,
		       (vm_size_t)(prev_entry->end - prev_entry->start),
		       (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == inheritance) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_entry_resize_free(map, prev_entry);
			vm_map_simplify_entry(map, prev_entry);
			if (cred != NULL)
				crfree(cred);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
			(prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			crfree(cred);
			cred = NULL;
		}
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = OFF_TO_IDX(offset);

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * It may be possible to merge the new entry with the next and/or
	 * previous entries.  However, due to MAP_STACK_* being a hack, a
	 * panic can result from merging such entries.
	 */
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0)
		vm_map_simplify_entry(map, new_entry);

	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
		vm_map_pmap_enter(map, start, prot,
				    object, OFF_TO_IDX(offset), end - start,
				    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}

/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "adj_free" is the amount of free space
 *	adjacent (higher address) to this entry, and "max_free" is the
 *	maximum amount of contiguous free space in its subtree.  This
 *	allows finding a free region in one path down the tree, so
 *	O(log n) amortized with splay trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: 0 on success, and starting address in *addr,
 *		 1 if insufficient space.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)	/* OUT */
{
	vm_map_entry_t entry;
	vm_offset_t st;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	if (start < map->min_offset)
		start = map->min_offset;
	if (start + length > map->max_offset || start + length < start)
		return (1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL) {
		*addr = start;
		return (0);
	}

	/*
	 * After splay, if start comes before root node, then there
	 * must be a gap from start to the root.
	 */
	map->root = vm_map_entry_splay(start, map->root);
	if (start + length <= map->root->start) {
		*addr = start;
		return (0);
	}

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	st = (start > map->root->end) ? start : map->root->end;
	if (length <= map->root->end + map->root->adj_free - st) {
		*addr = st;
		return (0);
	}

	/* With max_free, can immediately tell if no solution. */
	entry = map->root->right;
	if (entry == NULL || length > entry->max_free)
		return (1);

	/*
	 * Search the right subtree in the order: left subtree, root,
	 * right subtree (first fit).  The previous splay implies that
	 * all regions in the right subtree have addresses > start.
	 */
	while (entry != NULL) {
		if (entry->left != NULL && entry->left->max_free >= length)
			entry = entry->left;
		else if (entry->adj_free >= length) {
			*addr = entry->end;
			return (0);
		} else
			entry = entry->right;
	}

	/* Can't get here, so panic if we do. */
	panic("vm_map_findspace: max_free corrupt");
}
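
/*
 * Worked example (illustrative): suppose the splayed root spans
 * [0x2000, 0x4000) with adj_free = 0x2000, so the gap above it is
 * [0x4000, 0x6000).  For start = 0x3000 and length = 0x1000, the
 * gap-before-root test fails (0x3000 + 0x1000 > 0x2000), st becomes
 * max(start, root->end) = 0x4000, and since 0x1000 <= root->end +
 * adj_free - st = 0x2000, the search returns *addr = 0x4000 without
 * descending into the right subtree.
 */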
1410
1411 int
1412 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1413     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1414     vm_prot_t max, int cow)
1415 {
1416         vm_offset_t end;
1417         int result;
1418
1419         end = start + length;
1420         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1421             object == NULL,
1422             ("vm_map_fixed: non-NULL backing object for stack"));
1423         vm_map_lock(map);
1424         VM_MAP_RANGE_CHECK(map, start, end);
1425         if ((cow & MAP_CHECK_EXCL) == 0)
1426                 vm_map_delete(map, start, end);
1427         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1428                 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1429                     prot, max, cow);
1430         } else {
1431                 result = vm_map_insert(map, object, offset, start, end,
1432                     prot, max, cow);
1433         }
1434         vm_map_unlock(map);
1435         return (result);
1436 }
1437
1438 /*
1439  *      vm_map_find finds an unallocated region in the target address
1440  *      map with the given length.  The search is defined to be
1441  *      first-fit from the specified address; the region found is
1442  *      returned in the same parameter.
1443  *
1444  *      If object is non-NULL, ref count must be bumped by caller
1445  *      prior to making call to account for the new entry.
1446  */
1447 int
1448 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1449             vm_offset_t *addr,  /* IN/OUT */
1450             vm_size_t length, vm_offset_t max_addr, int find_space,
1451             vm_prot_t prot, vm_prot_t max, int cow)
1452 {
1453         vm_offset_t alignment, initial_addr, start;
1454         int result;
1455
1456         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1457             object == NULL,
1458             ("vm_map_find: non-NULL backing object for stack"));
1459         if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1460             (object->flags & OBJ_COLORED) == 0))
1461                 find_space = VMFS_ANY_SPACE;
1462         if (find_space >> 8 != 0) {
1463                 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1464                 alignment = (vm_offset_t)1 << (find_space >> 8);
1465         } else
1466                 alignment = 0;
1467         initial_addr = *addr;
1468 again:
1469         start = initial_addr;
1470         vm_map_lock(map);
1471         do {
1472                 if (find_space != VMFS_NO_SPACE) {
1473                         if (vm_map_findspace(map, start, length, addr) ||
1474                             (max_addr != 0 && *addr + length > max_addr)) {
1475                                 vm_map_unlock(map);
1476                                 if (find_space == VMFS_OPTIMAL_SPACE) {
1477                                         find_space = VMFS_ANY_SPACE;
1478                                         goto again;
1479                                 }
1480                                 return (KERN_NO_SPACE);
1481                         }
1482                         switch (find_space) {
1483                         case VMFS_SUPER_SPACE:
1484                         case VMFS_OPTIMAL_SPACE:
1485                                 pmap_align_superpage(object, offset, addr,
1486                                     length);
1487                                 break;
1488                         case VMFS_ANY_SPACE:
1489                                 break;
1490                         default:
1491                                 if ((*addr & (alignment - 1)) != 0) {
1492                                         *addr &= ~(alignment - 1);
1493                                         *addr += alignment;
1494                                 }
1495                                 break;
1496                         }
1497
1498                         start = *addr;
1499                 }
1500                 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1501                         result = vm_map_stack_locked(map, start, length,
1502                             sgrowsiz, prot, max, cow);
1503                 } else {
1504                         result = vm_map_insert(map, object, offset, start,
1505                             start + length, prot, max, cow);
1506                 }
1507         } while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
1508             find_space != VMFS_ANY_SPACE);
1509         vm_map_unlock(map);
1510         return (result);
1511 }
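
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * kernel caller reserving an anonymous, pageable region with
 * vm_map_find().  The address passed in *addrp seeds the first-fit
 * search and, on success, returns the chosen address, per the IN/OUT
 * contract documented above.  The wrapper name and the ENOMEM mapping
 * are assumptions made for this example.
 */
static int
example_reserve_region(vm_map_t map, vm_size_t length, vm_offset_t *addrp)
{
        int rv;

        *addrp = vm_map_min(map);       /* search hint */
        rv = vm_map_find(map, NULL, 0, addrp, length, 0, VMFS_ANY_SPACE,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        return (rv == KERN_SUCCESS ? 0 : ENOMEM);
}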
1512
1513 /*
1514  *      vm_map_simplify_entry:
1515  *
1516  *      Simplify the given map entry by merging with either neighbor.  This
1517  *      routine also has the ability to merge with both neighbors.
1518  *
1519  *      The map must be locked.
1520  *
1521  *      This routine guarantees that the passed entry remains valid (though
1522  *      possibly extended).  When merging, this routine may delete one or
1523  *      both neighbors.
1524  */
1525 void
1526 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1527 {
1528         vm_map_entry_t next, prev;
1529         vm_size_t prevsize, esize;
1530
1531         if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1532                 return;
1533
1534         prev = entry->prev;
1535         if (prev != &map->header) {
1536                 prevsize = prev->end - prev->start;
1537                 if ((prev->end == entry->start) &&
1538                      (prev->object.vm_object == entry->object.vm_object) &&
1539                      (!prev->object.vm_object ||
1540                         (prev->offset + prevsize == entry->offset)) &&
1541                      (prev->eflags == entry->eflags) &&
1542                      (prev->protection == entry->protection) &&
1543                      (prev->max_protection == entry->max_protection) &&
1544                      (prev->inheritance == entry->inheritance) &&
1545                      (prev->wired_count == entry->wired_count) &&
1546                      (prev->cred == entry->cred)) {
1547                         vm_map_entry_unlink(map, prev);
1548                         entry->start = prev->start;
1549                         entry->offset = prev->offset;
1550                         if (entry->prev != &map->header)
1551                                 vm_map_entry_resize_free(map, entry->prev);
1552
1553                         /*
1554                          * If the backing object is a vnode object,
1555                          * vm_object_deallocate() calls vrele().
1556                          * However, vrele() does not lock the vnode
1557                          * because the vnode has additional
1558                          * references.  Thus, the map lock can be kept
1559                          * without causing a lock-order reversal with
1560                          * the vnode lock.
1561                          *
1562                          * Since we count the number of virtual page
1563                          * mappings in object->un_pager.vnp.writemappings,
1564                          * the writemappings value should not be adjusted
1565                          * when the entry is disposed of.
1566                          */
1567                         if (prev->object.vm_object)
1568                                 vm_object_deallocate(prev->object.vm_object);
1569                         if (prev->cred != NULL)
1570                                 crfree(prev->cred);
1571                         vm_map_entry_dispose(map, prev);
1572                 }
1573         }
1574
1575         next = entry->next;
1576         if (next != &map->header) {
1577                 esize = entry->end - entry->start;
1578                 if ((entry->end == next->start) &&
1579                     (next->object.vm_object == entry->object.vm_object) &&
1580                     (!entry->object.vm_object ||
1581                         (entry->offset + esize == next->offset)) &&
1582                     (next->eflags == entry->eflags) &&
1583                     (next->protection == entry->protection) &&
1584                     (next->max_protection == entry->max_protection) &&
1585                     (next->inheritance == entry->inheritance) &&
1586                     (next->wired_count == entry->wired_count) &&
1587                     (next->cred == entry->cred)) {
1588                         vm_map_entry_unlink(map, next);
1589                         entry->end = next->end;
1590                         vm_map_entry_resize_free(map, entry);
1591
1592                         /*
1593                          * See comment above.
1594                          */
1595                         if (next->object.vm_object)
1596                                 vm_object_deallocate(next->object.vm_object);
1597                         if (next->cred != NULL)
1598                                 crfree(next->cred);
1599                         vm_map_entry_dispose(map, next);
1600                 }
1601         }
1602 }
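
/*
 * Illustrative sketch, not part of the original file: the pairwise
 * conditions checked by vm_map_simplify_entry() above before two
 * adjacent entries are merged, factored into a hypothetical
 * predicate.  The real routine additionally refuses to touch entries
 * that are in transition or are submaps.
 */
static __inline boolean_t
example_entries_can_merge(vm_map_entry_t a, vm_map_entry_t b)
{

        return (a->end == b->start &&
            a->object.vm_object == b->object.vm_object &&
            (a->object.vm_object == NULL ||
            a->offset + (a->end - a->start) == b->offset) &&
            a->eflags == b->eflags &&
            a->protection == b->protection &&
            a->max_protection == b->max_protection &&
            a->inheritance == b->inheritance &&
            a->wired_count == b->wired_count &&
            a->cred == b->cred);
}
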
1603 /*
1604  *      vm_map_clip_start:      [ internal use only ]
1605  *
1606  *      Asserts that the given entry begins at or after
1607  *      the specified address; if necessary,
1608  *      it splits the entry into two.
1609  */
1610 #define vm_map_clip_start(map, entry, startaddr) \
1611 { \
1612         if (startaddr > entry->start) \
1613                 _vm_map_clip_start(map, entry, startaddr); \
1614 }
1615
1616 /*
1617  *      This routine is called only when it is known that
1618  *      the entry must be split.
1619  */
1620 static void
1621 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1622 {
1623         vm_map_entry_t new_entry;
1624
1625         VM_MAP_ASSERT_LOCKED(map);
1626
1627         /*
1628          * Split off the front portion -- note that we must insert the new
1629          * entry BEFORE this one, so that this entry has the specified
1630          * starting address.
1631          */
1632         vm_map_simplify_entry(map, entry);
1633
1634         /*
1635          * If there is no object backing this entry, we might as well create
1636          * one now.  If we defer it, an object can get created after the map
1637          * is clipped, and individual objects will be created for the split-up
1638          * map.  This is a bit of a hack, but is also about the best place to
1639          * put this improvement.
1640          */
1641         if (entry->object.vm_object == NULL && !map->system_map) {
1642                 vm_object_t object;
1643                 object = vm_object_allocate(OBJT_DEFAULT,
1644                                 atop(entry->end - entry->start));
1645                 entry->object.vm_object = object;
1646                 entry->offset = 0;
1647                 if (entry->cred != NULL) {
1648                         object->cred = entry->cred;
1649                         object->charge = entry->end - entry->start;
1650                         entry->cred = NULL;
1651                 }
1652         } else if (entry->object.vm_object != NULL &&
1653                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1654                    entry->cred != NULL) {
1655                 VM_OBJECT_WLOCK(entry->object.vm_object);
1656                 KASSERT(entry->object.vm_object->cred == NULL,
1657                     ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1658                 entry->object.vm_object->cred = entry->cred;
1659                 entry->object.vm_object->charge = entry->end - entry->start;
1660                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
1661                 entry->cred = NULL;
1662         }
1663
1664         new_entry = vm_map_entry_create(map);
1665         *new_entry = *entry;
1666
1667         new_entry->end = start;
1668         entry->offset += (start - entry->start);
1669         entry->start = start;
1670         if (new_entry->cred != NULL)
1671                 crhold(entry->cred);    /* new_entry->cred == entry->cred */
1672
1673         vm_map_entry_link(map, entry->prev, new_entry);
1674
1675         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1676                 vm_object_reference(new_entry->object.vm_object);
1677                 /*
1678                  * The object->un_pager.vnp.writemappings for the
1679                  * object of MAP_ENTRY_VN_WRITECNT type entry shall be
1680                  * kept as is here.  The virtual pages are
1681                  * re-distributed among the clipped entries, so the sum is
1682                  * left the same.
1683                  */
1684         }
1685 }
1686
1687 /*
1688  *      vm_map_clip_end:        [ internal use only ]
1689  *
1690  *      Asserts that the given entry ends at or before
1691  *      the specified address; if necessary,
1692  *      it splits the entry into two.
1693  */
1694 #define vm_map_clip_end(map, entry, endaddr) \
1695 { \
1696         if ((endaddr) < (entry->end)) \
1697                 _vm_map_clip_end((map), (entry), (endaddr)); \
1698 }
1699
1700 /*
1701  *      This routine is called only when it is known that
1702  *      the entry must be split.
1703  */
1704 static void
1705 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1706 {
1707         vm_map_entry_t new_entry;
1708
1709         VM_MAP_ASSERT_LOCKED(map);
1710
1711         /*
1712          * If there is no object backing this entry, we might as well create
1713          * one now.  If we defer it, an object can get created after the map
1714          * is clipped, and individual objects will be created for the split-up
1715          * map.  This is a bit of a hack, but is also about the best place to
1716          * put this improvement.
1717          */
1718         if (entry->object.vm_object == NULL && !map->system_map) {
1719                 vm_object_t object;
1720                 object = vm_object_allocate(OBJT_DEFAULT,
1721                                 atop(entry->end - entry->start));
1722                 entry->object.vm_object = object;
1723                 entry->offset = 0;
1724                 if (entry->cred != NULL) {
1725                         object->cred = entry->cred;
1726                         object->charge = entry->end - entry->start;
1727                         entry->cred = NULL;
1728                 }
1729         } else if (entry->object.vm_object != NULL &&
1730                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1731                    entry->cred != NULL) {
1732                 VM_OBJECT_WLOCK(entry->object.vm_object);
1733                 KASSERT(entry->object.vm_object->cred == NULL,
1734                     ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1735                 entry->object.vm_object->cred = entry->cred;
1736                 entry->object.vm_object->charge = entry->end - entry->start;
1737                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
1738                 entry->cred = NULL;
1739         }
1740
1741         /*
1742          * Create a new entry and insert it AFTER the specified entry
1743          */
1744         new_entry = vm_map_entry_create(map);
1745         *new_entry = *entry;
1746
1747         new_entry->start = entry->end = end;
1748         new_entry->offset += (end - entry->start);
1749         if (new_entry->cred != NULL)
1750                 crhold(entry->cred);    /* new_entry->cred == entry->cred */
1751
1752         vm_map_entry_link(map, entry, new_entry);
1753
1754         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1755                 vm_object_reference(new_entry->object.vm_object);
1756         }
1757 }
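
/*
 * Illustrative sketch, not part of the original file: the clipping
 * pattern used by the range operations below.  With the map locked,
 * the entries overlapping [start, end) are clipped so that entry
 * boundaries coincide with the requested range.  The helper name is
 * hypothetical.
 */
static void
example_isolate_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        vm_map_entry_t entry;

        VM_MAP_ASSERT_LOCKED(map);
        if (vm_map_lookup_entry(map, start, &entry)) {
                vm_map_clip_start(map, entry, start);
        } else
                entry = entry->next;
        for (; entry != &map->header && entry->start < end;
            entry = entry->next) {
                vm_map_clip_end(map, entry, end);
        }
}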
1758
1759 /*
1760  *      vm_map_submap:          [ kernel use only ]
1761  *
1762  *      Mark the given range as handled by a subordinate map.
1763  *
1764  *      This range must have been created with vm_map_find,
1765  *      and no other operations may have been performed on this
1766  *      range prior to calling vm_map_submap.
1767  *
1768  *      Only a limited number of operations can be performed
1769  *      within this range after calling vm_map_submap:
1770  *              vm_fault
1771  *      [Don't try vm_map_copy!]
1772  *
1773  *      To remove a submapping, one must first remove the
1774  *      range from the superior map, and then destroy the
1775  *      submap (if desired).  [Better yet, don't try it.]
1776  */
1777 int
1778 vm_map_submap(
1779         vm_map_t map,
1780         vm_offset_t start,
1781         vm_offset_t end,
1782         vm_map_t submap)
1783 {
1784         vm_map_entry_t entry;
1785         int result = KERN_INVALID_ARGUMENT;
1786
1787         vm_map_lock(map);
1788
1789         VM_MAP_RANGE_CHECK(map, start, end);
1790
1791         if (vm_map_lookup_entry(map, start, &entry)) {
1792                 vm_map_clip_start(map, entry, start);
1793         } else
1794                 entry = entry->next;
1795
1796         vm_map_clip_end(map, entry, end);
1797
1798         if ((entry->start == start) && (entry->end == end) &&
1799             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1800             (entry->object.vm_object == NULL)) {
1801                 entry->object.sub_map = submap;
1802                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1803                 result = KERN_SUCCESS;
1804         }
1805         vm_map_unlock(map);
1806
1807         return (result);
1808 }
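
/*
 * Illustrative sketch, not part of the original file: installing a
 * submap by the protocol documented above -- the range is created
 * with vm_map_find() and nothing else touches it before
 * vm_map_submap().  The helper name and error handling are
 * assumptions.
 */
static int
example_install_submap(vm_map_t parent, vm_map_t submap, vm_offset_t *addrp,
    vm_size_t size)
{
        int rv;

        rv = vm_map_find(parent, NULL, 0, addrp, size, 0, VMFS_ANY_SPACE,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        if (rv != KERN_SUCCESS)
                return (rv);
        return (vm_map_submap(parent, *addrp, *addrp + size, submap));
}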
1809
1810 /*
1811  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
1812  */
1813 #define MAX_INIT_PT     96
1814
1815 /*
1816  *      vm_map_pmap_enter:
1817  *
1818  *      Preload the specified map's pmap with mappings to the specified
1819  *      object's memory-resident pages.  No further physical pages are
1820  *      allocated, and no further virtual pages are retrieved from secondary
1821  *      storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
1822  *      limited number of page mappings are created at the low-end of the
1823  *      specified address range.  (For this purpose, a superpage mapping
1824  *      counts as one page mapping.)  Otherwise, all resident pages within
1825  *      the specified address range are mapped.  Because these mappings are
1826  *      being created speculatively, cached pages are not reactivated and
1827  *      mapped.
1828  */
1829 void
1830 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1831     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1832 {
1833         vm_offset_t start;
1834         vm_page_t p, p_start;
1835         vm_pindex_t mask, psize, threshold, tmpidx;
1836
1837         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1838                 return;
1839         VM_OBJECT_RLOCK(object);
1840         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1841                 VM_OBJECT_RUNLOCK(object);
1842                 VM_OBJECT_WLOCK(object);
1843                 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1844                         pmap_object_init_pt(map->pmap, addr, object, pindex,
1845                             size);
1846                         VM_OBJECT_WUNLOCK(object);
1847                         return;
1848                 }
1849                 VM_OBJECT_LOCK_DOWNGRADE(object);
1850         }
1851
1852         psize = atop(size);
1853         if (psize + pindex > object->size) {
1854                 if (object->size < pindex) {
1855                         VM_OBJECT_RUNLOCK(object);
1856                         return;
1857                 }
1858                 psize = object->size - pindex;
1859         }
1860
1861         start = 0;
1862         p_start = NULL;
1863         threshold = MAX_INIT_PT;
1864
1865         p = vm_page_find_least(object, pindex);
1866         /*
1867          * Assert: the variable p is either (1) the page with the
1868          * least pindex greater than or equal to the parameter pindex
1869          * or (2) NULL.
1870          */
1871         for (;
1872              p != NULL && (tmpidx = p->pindex - pindex) < psize;
1873              p = TAILQ_NEXT(p, listq)) {
1874                 /*
1875                  * Don't let a prefault on behalf of madvise(2) eat into
1876                  * the free-page reserve by allocating pv entries.
1877                  */
1878                 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
1879                     cnt.v_free_count < cnt.v_free_reserved) ||
1880                     ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
1881                     tmpidx >= threshold)) {
1882                         psize = tmpidx;
1883                         break;
1884                 }
1885                 if (p->valid == VM_PAGE_BITS_ALL) {
1886                         if (p_start == NULL) {
1887                                 start = addr + ptoa(tmpidx);
1888                                 p_start = p;
1889                         }
1890                         /* Jump ahead if a superpage mapping is possible. */
1891                         if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
1892                             (pagesizes[p->psind] - 1)) == 0) {
1893                                 mask = atop(pagesizes[p->psind]) - 1;
1894                                 if (tmpidx + mask < psize &&
1895                                     vm_page_ps_is_valid(p)) {
1896                                         p += mask;
1897                                         threshold += mask;
1898                                 }
1899                         }
1900                 } else if (p_start != NULL) {
1901                         pmap_enter_object(map->pmap, start, addr +
1902                             ptoa(tmpidx), p_start, prot);
1903                         p_start = NULL;
1904                 }
1905         }
1906         if (p_start != NULL)
1907                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1908                     p_start, prot);
1909         VM_OBJECT_RUNLOCK(object);
1910 }
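
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller prefaulting the resident pages of an entry it has just
 * inserted, with the map still locked.  MAP_PREFAULT_PARTIAL caps the
 * work at roughly MAX_INIT_PT page mappings, as described above.
 */
static void
example_prefault_entry(vm_map_t map, vm_map_entry_t entry)
{

        vm_map_pmap_enter(map, entry->start, entry->protection,
            entry->object.vm_object, OFF_TO_IDX(entry->offset),
            entry->end - entry->start, MAP_PREFAULT_PARTIAL);
}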
1911
1912 /*
1913  *      vm_map_protect:
1914  *
1915  *      Sets the protection of the specified address
1916  *      region in the target map.  If "set_max" is
1917  *      specified, the maximum protection is to be set;
1918  *      otherwise, only the current protection is affected.
1919  */
1920 int
1921 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1922                vm_prot_t new_prot, boolean_t set_max)
1923 {
1924         vm_map_entry_t current, entry;
1925         vm_object_t obj;
1926         struct ucred *cred;
1927         vm_prot_t old_prot;
1928
1929         if (start == end)
1930                 return (KERN_SUCCESS);
1931
1932         vm_map_lock(map);
1933
1934         VM_MAP_RANGE_CHECK(map, start, end);
1935
1936         if (vm_map_lookup_entry(map, start, &entry)) {
1937                 vm_map_clip_start(map, entry, start);
1938         } else {
1939                 entry = entry->next;
1940         }
1941
1942         /*
1943          * Make a first pass to check for protection violations.
1944          */
1945         current = entry;
1946         while ((current != &map->header) && (current->start < end)) {
1947                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1948                         vm_map_unlock(map);
1949                         return (KERN_INVALID_ARGUMENT);
1950                 }
1951                 if ((new_prot & current->max_protection) != new_prot) {
1952                         vm_map_unlock(map);
1953                         return (KERN_PROTECTION_FAILURE);
1954                 }
1955                 current = current->next;
1956         }
1957
1959         /*
1960          * Do an accounting pass for private read-only mappings that
1961          * now will do cow due to allowed write (e.g. debugger sets
1962          * breakpoint on text segment)
1963          */
1964         for (current = entry; (current != &map->header) &&
1965              (current->start < end); current = current->next) {
1966
1967                 vm_map_clip_end(map, current, end);
1968
1969                 if (set_max ||
1970                     ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
1971                     ENTRY_CHARGED(current)) {
1972                         continue;
1973                 }
1974
1975                 cred = curthread->td_ucred;
1976                 obj = current->object.vm_object;
1977
1978                 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
1979                         if (!swap_reserve(current->end - current->start)) {
1980                                 vm_map_unlock(map);
1981                                 return (KERN_RESOURCE_SHORTAGE);
1982                         }
1983                         crhold(cred);
1984                         current->cred = cred;
1985                         continue;
1986                 }
1987
1988                 VM_OBJECT_WLOCK(obj);
1989                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
1990                         VM_OBJECT_WUNLOCK(obj);
1991                         continue;
1992                 }
1993
1994                 /*
1995                  * Charge for the whole object allocation now, since
1996                  * we cannot distinguish between non-charged and
1997                  * charged clipped mapping of the same object later.
1998                  */
1999                 KASSERT(obj->charge == 0,
2000                     ("vm_map_protect: object %p overcharged (entry %p)",
2001                     obj, current));
2002                 if (!swap_reserve(ptoa(obj->size))) {
2003                         VM_OBJECT_WUNLOCK(obj);
2004                         vm_map_unlock(map);
2005                         return (KERN_RESOURCE_SHORTAGE);
2006                 }
2007
2008                 crhold(cred);
2009                 obj->cred = cred;
2010                 obj->charge = ptoa(obj->size);
2011                 VM_OBJECT_WUNLOCK(obj);
2012         }
2013
2014         /*
2015          * Go back and fix up protections. [Note that clipping is not
2016          * necessary the second time.]
2017          */
2018         current = entry;
2019         while ((current != &map->header) && (current->start < end)) {
2020                 old_prot = current->protection;
2021
2022                 if (set_max)
2023                         current->protection =
2024                             (current->max_protection = new_prot) &
2025                             old_prot;
2026                 else
2027                         current->protection = new_prot;
2028
2029                 /*
2030                  * For user wired map entries, the normal lazy evaluation of
2031                  * write access upgrades through soft page faults is
2032                  * undesirable.  Instead, immediately copy any pages that are
2033                  * copy-on-write and enable write access in the physical map.
2034                  */
2035                 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2036                     (current->protection & VM_PROT_WRITE) != 0 &&
2037                     (old_prot & VM_PROT_WRITE) == 0)
2038                         vm_fault_copy_entry(map, map, current, current, NULL);
2039
2040                 /*
2041                  * When restricting access, update the physical map.  Worry
2042                  * about copy-on-write here.
2043                  */
2044                 if ((old_prot & ~current->protection) != 0) {
2045 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2046                                                         VM_PROT_ALL)
2047                         pmap_protect(map->pmap, current->start,
2048                             current->end,
2049                             current->protection & MASK(current));
2050 #undef  MASK
2051                 }
2052                 vm_map_simplify_entry(map, current);
2053                 current = current->next;
2054         }
2055         vm_map_unlock(map);
2056         return (KERN_SUCCESS);
2057 }
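
/*
 * Illustrative sketch, not part of the original file: permanently
 * revoking write access on a range.  Because set_max is TRUE, the
 * maximum protection is lowered and the current protection is clamped
 * to it by the code above, so a later vm_map_protect() call cannot
 * restore VM_PROT_WRITE.  The wrapper name is hypothetical.
 */
static int
example_revoke_write(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_protect(map, start, end,
            VM_PROT_READ | VM_PROT_EXECUTE, TRUE));
}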
2058
2059 /*
2060  *      vm_map_madvise:
2061  *
2062  *      This routine traverses a process's map handling the madvise
2063  *      system call.  Advisories are classified as either those affecting
2064  *      the vm_map_entry structure, or those affecting the underlying
2065  *      objects.
2066  */
2067 int
2068 vm_map_madvise(
2069         vm_map_t map,
2070         vm_offset_t start,
2071         vm_offset_t end,
2072         int behav)
2073 {
2074         vm_map_entry_t current, entry;
2075         int modify_map = 0;
2076
2077         /*
2078          * Some madvise calls directly modify the vm_map_entry, in which case
2079          * we need to use an exclusive lock on the map and we need to perform
2080          * various clipping operations.  Otherwise we only need a read-lock
2081          * on the map.
2082          */
2083         switch (behav) {
2084         case MADV_NORMAL:
2085         case MADV_SEQUENTIAL:
2086         case MADV_RANDOM:
2087         case MADV_NOSYNC:
2088         case MADV_AUTOSYNC:
2089         case MADV_NOCORE:
2090         case MADV_CORE:
2091                 if (start == end)
2092                         return (KERN_SUCCESS);
2093                 modify_map = 1;
2094                 vm_map_lock(map);
2095                 break;
2096         case MADV_WILLNEED:
2097         case MADV_DONTNEED:
2098         case MADV_FREE:
2099                 if (start == end)
2100                         return (KERN_SUCCESS);
2101                 vm_map_lock_read(map);
2102                 break;
2103         default:
2104                 return (KERN_INVALID_ARGUMENT);
2105         }
2106
2107         /*
2108          * Locate starting entry and clip if necessary.
2109          */
2110         VM_MAP_RANGE_CHECK(map, start, end);
2111
2112         if (vm_map_lookup_entry(map, start, &entry)) {
2113                 if (modify_map)
2114                         vm_map_clip_start(map, entry, start);
2115         } else {
2116                 entry = entry->next;
2117         }
2118
2119         if (modify_map) {
2120                 /*
2121                  * madvise behaviors that are implemented in the vm_map_entry.
2122                  *
2123                  * We clip the vm_map_entry so that behavioral changes are
2124                  * limited to the specified address range.
2125                  */
2126                 for (current = entry;
2127                      (current != &map->header) && (current->start < end);
2128                      current = current->next
2129                 ) {
2130                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2131                                 continue;
2132
2133                         vm_map_clip_end(map, current, end);
2134
2135                         switch (behav) {
2136                         case MADV_NORMAL:
2137                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2138                                 break;
2139                         case MADV_SEQUENTIAL:
2140                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2141                                 break;
2142                         case MADV_RANDOM:
2143                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2144                                 break;
2145                         case MADV_NOSYNC:
2146                                 current->eflags |= MAP_ENTRY_NOSYNC;
2147                                 break;
2148                         case MADV_AUTOSYNC:
2149                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
2150                                 break;
2151                         case MADV_NOCORE:
2152                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
2153                                 break;
2154                         case MADV_CORE:
2155                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2156                                 break;
2157                         default:
2158                                 break;
2159                         }
2160                         vm_map_simplify_entry(map, current);
2161                 }
2162                 vm_map_unlock(map);
2163         } else {
2164                 vm_pindex_t pstart, pend;
2165
2166                 /*
2167                  * madvise behaviors that are implemented in the underlying
2168                  * vm_object.
2169                  *
2170                  * Since we don't clip the vm_map_entry, we have to clip
2171                  * the vm_object pindex and count.
2172                  */
2173                 for (current = entry;
2174                      (current != &map->header) && (current->start < end);
2175                      current = current->next
2176                 ) {
2177                         vm_offset_t useEnd, useStart;
2178
2179                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2180                                 continue;
2181
2182                         pstart = OFF_TO_IDX(current->offset);
2183                         pend = pstart + atop(current->end - current->start);
2184                         useStart = current->start;
2185                         useEnd = current->end;
2186
2187                         if (current->start < start) {
2188                                 pstart += atop(start - current->start);
2189                                 useStart = start;
2190                         }
2191                         if (current->end > end) {
2192                                 pend -= atop(current->end - end);
2193                                 useEnd = end;
2194                         }
2195
2196                         if (pstart >= pend)
2197                                 continue;
2198
2199                         /*
2200                          * Perform the pmap_advise() before clearing
2201                          * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2202                          * concurrent pmap operation, such as pmap_remove(),
2203                          * could clear a reference in the pmap and set
2204                          * PGA_REFERENCED on the page before the pmap_advise()
2205                          * had completed.  Consequently, the page would appear
2206                          * referenced based upon an old reference that
2207                          * occurred before this pmap_advise() ran.
2208                          */
2209                         if (behav == MADV_DONTNEED || behav == MADV_FREE)
2210                                 pmap_advise(map->pmap, useStart, useEnd,
2211                                     behav);
2212
2213                         vm_object_madvise(current->object.vm_object, pstart,
2214                             pend, behav);
2215
2216                         /*
2217                          * Pre-populate paging structures in the
2218                          * WILLNEED case.  For wired entries, the
2219                          * paging structures are already populated.
2220                          */
2221                         if (behav == MADV_WILLNEED &&
2222                             current->wired_count == 0) {
2223                                 vm_map_pmap_enter(map,
2224                                     useStart,
2225                                     current->protection,
2226                                     current->object.vm_object,
2227                                     pstart,
2228                                     ptoa(pend - pstart),
2229                                     MAP_PREFAULT_MADVISE
2230                                 );
2231                         }
2232                 }
2233                 vm_map_unlock_read(map);
2234         }
2235         return (0);
2236 }
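
/*
 * Illustrative sketch, not part of the original file: how a
 * system-call layer might drive vm_map_madvise() for the calling
 * process, with start and end already page-rounded.  The only failure
 * mode is an invalid behavior, so the non-zero return is mapped to
 * EINVAL; the wrapper name is an assumption.
 */
static int
example_madvise(struct proc *p, vm_offset_t start, vm_offset_t end, int behav)
{

        return (vm_map_madvise(&p->p_vmspace->vm_map, start, end, behav) ?
            EINVAL : 0);
}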
2237
2239 /*
2240  *      vm_map_inherit:
2241  *
2242  *      Sets the inheritance of the specified address
2243  *      range in the target map.  Inheritance
2244  *      affects how the map will be shared with
2245  *      child maps at the time of vmspace_fork.
2246  */
2247 int
2248 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2249                vm_inherit_t new_inheritance)
2250 {
2251         vm_map_entry_t entry;
2252         vm_map_entry_t temp_entry;
2253
2254         switch (new_inheritance) {
2255         case VM_INHERIT_NONE:
2256         case VM_INHERIT_COPY:
2257         case VM_INHERIT_SHARE:
2258                 break;
2259         default:
2260                 return (KERN_INVALID_ARGUMENT);
2261         }
2262         if (start == end)
2263                 return (KERN_SUCCESS);
2264         vm_map_lock(map);
2265         VM_MAP_RANGE_CHECK(map, start, end);
2266         if (vm_map_lookup_entry(map, start, &temp_entry)) {
2267                 entry = temp_entry;
2268                 vm_map_clip_start(map, entry, start);
2269         } else
2270                 entry = temp_entry->next;
2271         while ((entry != &map->header) && (entry->start < end)) {
2272                 vm_map_clip_end(map, entry, end);
2273                 entry->inheritance = new_inheritance;
2274                 vm_map_simplify_entry(map, entry);
2275                 entry = entry->next;
2276         }
2277         vm_map_unlock(map);
2278         return (KERN_SUCCESS);
2279 }
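
/*
 * Illustrative sketch, not part of the original file: marking a
 * region so that children created by fork(2) share it instead of
 * receiving a copy, in the manner of minherit(2) with INHERIT_SHARE.
 * The wrapper name is hypothetical.
 */
static int
example_share_on_fork(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_inherit(map, start, end, VM_INHERIT_SHARE));
}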
2280
2281 /*
2282  *      vm_map_unwire:
2283  *
2284  *      Implements both kernel and user unwiring.
2285  */
2286 int
2287 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2288     int flags)
2289 {
2290         vm_map_entry_t entry, first_entry, tmp_entry;
2291         vm_offset_t saved_start;
2292         unsigned int last_timestamp;
2293         int rv;
2294         boolean_t need_wakeup, result, user_unwire;
2295
2296         if (start == end)
2297                 return (KERN_SUCCESS);
2298         user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2299         vm_map_lock(map);
2300         VM_MAP_RANGE_CHECK(map, start, end);
2301         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2302                 if (flags & VM_MAP_WIRE_HOLESOK)
2303                         first_entry = first_entry->next;
2304                 else {
2305                         vm_map_unlock(map);
2306                         return (KERN_INVALID_ADDRESS);
2307                 }
2308         }
2309         last_timestamp = map->timestamp;
2310         entry = first_entry;
2311         while (entry != &map->header && entry->start < end) {
2312                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2313                         /*
2314                          * We have not yet clipped the entry.
2315                          */
2316                         saved_start = (start >= entry->start) ? start :
2317                             entry->start;
2318                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2319                         if (vm_map_unlock_and_wait(map, 0)) {
2320                                 /*
2321                                  * Allow interruption of user unwiring?
2322                                  */
2323                         }
2324                         vm_map_lock(map);
2325                         if (last_timestamp+1 != map->timestamp) {
2326                                 /*
2327                                  * Look again for the entry because the map was
2328                                  * modified while it was unlocked.
2329                                  * Specifically, the entry may have been
2330                                  * clipped, merged, or deleted.
2331                                  */
2332                                 if (!vm_map_lookup_entry(map, saved_start,
2333                                     &tmp_entry)) {
2334                                         if (flags & VM_MAP_WIRE_HOLESOK)
2335                                                 tmp_entry = tmp_entry->next;
2336                                         else {
2337                                                 if (saved_start == start) {
2338                                                         /*
2339                                                          * first_entry has been deleted.
2340                                                          */
2341                                                         vm_map_unlock(map);
2342                                                         return (KERN_INVALID_ADDRESS);
2343                                                 }
2344                                                 end = saved_start;
2345                                                 rv = KERN_INVALID_ADDRESS;
2346                                                 goto done;
2347                                         }
2348                                 }
2349                                 if (entry == first_entry)
2350                                         first_entry = tmp_entry;
2351                                 else
2352                                         first_entry = NULL;
2353                                 entry = tmp_entry;
2354                         }
2355                         last_timestamp = map->timestamp;
2356                         continue;
2357                 }
2358                 vm_map_clip_start(map, entry, start);
2359                 vm_map_clip_end(map, entry, end);
2360                 /*
2361                  * Mark the entry in case the map lock is released.  (See
2362                  * above.)
2363                  */
2364                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2365                     entry->wiring_thread == NULL,
2366                     ("owned map entry %p", entry));
2367                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2368                 entry->wiring_thread = curthread;
2369                 /*
2370                  * Check the map for holes in the specified region.
2371                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2372                  */
2373                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2374                     (entry->end < end && (entry->next == &map->header ||
2375                     entry->next->start > entry->end))) {
2376                         end = entry->end;
2377                         rv = KERN_INVALID_ADDRESS;
2378                         goto done;
2379                 }
2380                 /*
2381                  * If system unwiring, require that the entry is system wired.
2382                  */
2383                 if (!user_unwire &&
2384                     vm_map_entry_system_wired_count(entry) == 0) {
2385                         end = entry->end;
2386                         rv = KERN_INVALID_ARGUMENT;
2387                         goto done;
2388                 }
2389                 entry = entry->next;
2390         }
2391         rv = KERN_SUCCESS;
2392 done:
2393         need_wakeup = FALSE;
2394         if (first_entry == NULL) {
2395                 result = vm_map_lookup_entry(map, start, &first_entry);
2396                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2397                         first_entry = first_entry->next;
2398                 else
2399                         KASSERT(result, ("vm_map_unwire: lookup failed"));
2400         }
2401         for (entry = first_entry; entry != &map->header && entry->start < end;
2402             entry = entry->next) {
2403                 /*
2404                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
2405                  * space in the unwired region could have been mapped
2406                  * while the map lock was dropped for draining
2407                  * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2408                  * could be simultaneously wiring this new mapping
2409                  * entry.  Detect these cases and skip any entries
2410                  * marked as in transition by us.
2411                  */
2412                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2413                     entry->wiring_thread != curthread) {
2414                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2415                             ("vm_map_unwire: !HOLESOK and new/changed entry"));
2416                         continue;
2417                 }
2418
2419                 if (rv == KERN_SUCCESS && (!user_unwire ||
2420                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2421                         if (user_unwire)
2422                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2423                         if (entry->wired_count == 1)
2424                                 vm_map_entry_unwire(map, entry);
2425                         else
2426                                 entry->wired_count--;
2427                 }
2428                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2429                     ("vm_map_unwire: in-transition flag missing %p", entry));
2430                 KASSERT(entry->wiring_thread == curthread,
2431                     ("vm_map_unwire: alien wire %p", entry));
2432                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2433                 entry->wiring_thread = NULL;
2434                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2435                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2436                         need_wakeup = TRUE;
2437                 }
2438                 vm_map_simplify_entry(map, entry);
2439         }
2440         vm_map_unlock(map);
2441         if (need_wakeup)
2442                 vm_map_wakeup(map);
2443         return (rv);
2444 }
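
/*
 * Illustrative sketch, not part of the original file: a user unwire
 * in the style of munlock(2).  VM_MAP_WIRE_NOHOLES makes an unmapped
 * gap in [start, end) an error.  The wrapper name and the ENOMEM
 * mapping are assumptions.
 */
static int
example_munlock_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        int rv;

        rv = vm_map_unwire(map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        return (rv == KERN_SUCCESS ? 0 : ENOMEM);
}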
2445
2446 /*
2447  *      vm_map_wire_entry_failure:
2448  *
2449  *      Handle a wiring failure on the given entry.
2450  *
2451  *      The map should be locked.
2452  */
2453 static void
2454 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
2455     vm_offset_t failed_addr)
2456 {
2457
2458         VM_MAP_ASSERT_LOCKED(map);
2459         KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
2460             entry->wired_count == 1,
2461             ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
2462         KASSERT(failed_addr < entry->end,
2463             ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
2464
2465         /*
2466          * If any pages at the start of this entry were successfully wired,
2467          * then unwire them.
2468          */
2469         if (failed_addr > entry->start) {
2470                 pmap_unwire(map->pmap, entry->start, failed_addr);
2471                 vm_object_unwire(entry->object.vm_object, entry->offset,
2472                     failed_addr - entry->start, PQ_ACTIVE);
2473         }
2474
2475         /*
2476          * Assign an out-of-range value to represent the failure to wire this
2477          * entry.
2478          */
2479         entry->wired_count = -1;
2480 }
2481
2482 /*
2483  *      vm_map_wire:
2484  *
2485  *      Implements both kernel and user wiring.
2486  */
2487 int
2488 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2489     int flags)
2490 {
2491         vm_map_entry_t entry, first_entry, tmp_entry;
2492         vm_offset_t faddr, saved_end, saved_start;
2493         unsigned int last_timestamp;
2494         int rv;
2495         boolean_t need_wakeup, result, user_wire;
2496         vm_prot_t prot;
2497
2498         if (start == end)
2499                 return (KERN_SUCCESS);
2500         prot = 0;
2501         if (flags & VM_MAP_WIRE_WRITE)
2502                 prot |= VM_PROT_WRITE;
2503         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2504         vm_map_lock(map);
2505         VM_MAP_RANGE_CHECK(map, start, end);
2506         if (!vm_map_lookup_entry(map, start, &first_entry)) {
2507                 if (flags & VM_MAP_WIRE_HOLESOK)
2508                         first_entry = first_entry->next;
2509                 else {
2510                         vm_map_unlock(map);
2511                         return (KERN_INVALID_ADDRESS);
2512                 }
2513         }
2514         last_timestamp = map->timestamp;
2515         entry = first_entry;
2516         while (entry != &map->header && entry->start < end) {
2517                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2518                         /*
2519                          * We have not yet clipped the entry.
2520                          */
2521                         saved_start = (start >= entry->start) ? start :
2522                             entry->start;
2523                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2524                         if (vm_map_unlock_and_wait(map, 0)) {
2525                                 /*
2526                                  * Allow interruption of user wiring?
2527                                  */
2528                         }
2529                         vm_map_lock(map);
2530                         if (last_timestamp + 1 != map->timestamp) {
2531                                 /*
2532                                  * Look again for the entry because the map was
2533                                  * modified while it was unlocked.
2534                                  * Specifically, the entry may have been
2535                                  * clipped, merged, or deleted.
2536                                  */
2537                                 if (!vm_map_lookup_entry(map, saved_start,
2538                                     &tmp_entry)) {
2539                                         if (flags & VM_MAP_WIRE_HOLESOK)
2540                                                 tmp_entry = tmp_entry->next;
2541                                         else {
2542                                                 if (saved_start == start) {
2543                                                         /*
2544                                                          * first_entry has been deleted.
2545                                                          */
2546                                                         vm_map_unlock(map);
2547                                                         return (KERN_INVALID_ADDRESS);
2548                                                 }
2549                                                 end = saved_start;
2550                                                 rv = KERN_INVALID_ADDRESS;
2551                                                 goto done;
2552                                         }
2553                                 }
2554                                 if (entry == first_entry)
2555                                         first_entry = tmp_entry;
2556                                 else
2557                                         first_entry = NULL;
2558                                 entry = tmp_entry;
2559                         }
2560                         last_timestamp = map->timestamp;
2561                         continue;
2562                 }
2563                 vm_map_clip_start(map, entry, start);
2564                 vm_map_clip_end(map, entry, end);
2565                 /*
2566                  * Mark the entry in case the map lock is released.  (See
2567                  * above.)
2568                  */
2569                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2570                     entry->wiring_thread == NULL,
2571                     ("owned map entry %p", entry));
2572                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2573                 entry->wiring_thread = curthread;
2574                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2575                     || (entry->protection & prot) != prot) {
2576                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2577                         if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2578                                 end = entry->end;
2579                                 rv = KERN_INVALID_ADDRESS;
2580                                 goto done;
2581                         }
2582                         goto next_entry;
2583                 }
2584                 if (entry->wired_count == 0) {
2585                         entry->wired_count++;
2586                         saved_start = entry->start;
2587                         saved_end = entry->end;
2588
2589                         /*
2590                          * Release the map lock, relying on the in-transition
2591                          * mark.  Mark the map busy for fork.
2592                          */
2593                         vm_map_busy(map);
2594                         vm_map_unlock(map);
2595
2596                         faddr = saved_start;
2597                         do {
2598                                 /*
2599                                  * Simulate a fault to get the page and enter
2600                                  * it into the physical map.
2601                                  */
2602                                 if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
2603                                     VM_FAULT_CHANGE_WIRING)) != KERN_SUCCESS)
2604                                         break;
2605                         } while ((faddr += PAGE_SIZE) < saved_end);
2606                         vm_map_lock(map);
2607                         vm_map_unbusy(map);
2608                         if (last_timestamp + 1 != map->timestamp) {
2609                                 /*
2610                                  * Look again for the entry because the map was
2611                                  * modified while it was unlocked.  The entry
2612                                  * may have been clipped, but NOT merged or
2613                                  * deleted.
2614                                  */
2615                                 result = vm_map_lookup_entry(map, saved_start,
2616                                     &tmp_entry);
2617                                 KASSERT(result, ("vm_map_wire: lookup failed"));
2618                                 if (entry == first_entry)
2619                                         first_entry = tmp_entry;
2620                                 else
2621                                         first_entry = NULL;
2622                                 entry = tmp_entry;
2623                                 while (entry->end < saved_end) {
2624                                         /*
2625                                          * In case of failure, handle entries
2626                                          * that were not fully wired here;
2627                                          * fully wired entries are handled
2628                                          * later.
2629                                          */
2630                                         if (rv != KERN_SUCCESS &&
2631                                             faddr < entry->end)
2632                                                 vm_map_wire_entry_failure(map,
2633                                                     entry, faddr);
2634                                         entry = entry->next;
2635                                 }
2636                         }
2637                         last_timestamp = map->timestamp;
2638                         if (rv != KERN_SUCCESS) {
2639                                 vm_map_wire_entry_failure(map, entry, faddr);
2640                                 end = entry->end;
2641                                 goto done;
2642                         }
2643                 } else if (!user_wire ||
2644                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2645                         entry->wired_count++;
2646                 }
2647                 /*
2648                  * Check the map for holes in the specified region.
2649                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2650                  */
2651         next_entry:
2652                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2653                     (entry->end < end && (entry->next == &map->header ||
2654                     entry->next->start > entry->end))) {
2655                         end = entry->end;
2656                         rv = KERN_INVALID_ADDRESS;
2657                         goto done;
2658                 }
2659                 entry = entry->next;
2660         }
2661         rv = KERN_SUCCESS;
2662 done:
2663         need_wakeup = FALSE;
2664         if (first_entry == NULL) {
2665                 result = vm_map_lookup_entry(map, start, &first_entry);
2666                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2667                         first_entry = first_entry->next;
2668                 else
2669                         KASSERT(result, ("vm_map_wire: lookup failed"));
2670         }
2671         for (entry = first_entry; entry != &map->header && entry->start < end;
2672             entry = entry->next) {
2673                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2674                         goto next_entry_done;
2675
2676                 /*
2677                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
2678                  * space in the unwired region could have been mapped
2679                  * while the map lock was dropped for faulting in the
2680                  * pages or draining MAP_ENTRY_IN_TRANSITION.
2681                  * Moreover, another thread could be simultaneously
2682                  * wiring this new mapping entry.  Detect these cases
2683                  * and skip any entries marked as in transition by us.
2684                  */
2685                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2686                     entry->wiring_thread != curthread) {
2687                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2688                             ("vm_map_wire: !HOLESOK and new/changed entry"));
2689                         continue;
2690                 }
2691
2692                 if (rv == KERN_SUCCESS) {
2693                         if (user_wire)
2694                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
2695                 } else if (entry->wired_count == -1) {
2696                         /*
2697                          * Wiring failed on this entry.  Thus, unwiring is
2698                          * unnecessary.
2699                          */
2700                         entry->wired_count = 0;
2701                 } else if (!user_wire ||
2702                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2703                         /*
2704                          * Undo the wiring.  Wiring succeeded on this entry
2705                          * but failed on a later entry.  
2706                          */
2707                         if (entry->wired_count == 1)
2708                                 vm_map_entry_unwire(map, entry);
2709                         else
2710                                 entry->wired_count--;
2711                 }
2712         next_entry_done:
2713                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2714                     ("vm_map_wire: in-transition flag missing %p", entry));
2715                 KASSERT(entry->wiring_thread == curthread,
2716                     ("vm_map_wire: alien wire %p", entry));
2717                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2718                     MAP_ENTRY_WIRE_SKIPPED);
2719                 entry->wiring_thread = NULL;
2720                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2721                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2722                         need_wakeup = TRUE;
2723                 }
2724                 vm_map_simplify_entry(map, entry);
2725         }
2726         vm_map_unlock(map);
2727         if (need_wakeup)
2728                 vm_map_wakeup(map);
2729         return (rv);
2730 }
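
/*
 * Illustrative sketch, not part of the original file: a user wire in
 * the style of mlock(2).  On partial failure the code above rolls
 * back the wiring it performed, so the caller sees all-or-nothing
 * behavior.  The wrapper name and the ENOMEM mapping are assumptions.
 */
static int
example_mlock_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        int rv;

        rv = vm_map_wire(map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        return (rv == KERN_SUCCESS ? 0 : ENOMEM);
}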
2731
2732 /*
2733  * vm_map_sync
2734  *
2735  * Push any dirty cached pages in the address range to their pager.
2736  * If syncio is TRUE, dirty pages are written synchronously.
2737  * If invalidate is TRUE, any cached pages are freed as well.
2738  *
2739  * If the size of the region from start to end is zero, we are
2740  * supposed to flush all modified pages within the region containing
2741  * start.  Unfortunately, a region can be split or coalesced with
2742  * neighboring regions, making it difficult to determine what the
2743  * original region was.  Therefore, we approximate this requirement by
2744  * flushing the current region containing start.
2745  *
2746  * Returns an error if any part of the specified range is not mapped.
2747  */
2748 int
2749 vm_map_sync(
2750         vm_map_t map,
2751         vm_offset_t start,
2752         vm_offset_t end,
2753         boolean_t syncio,
2754         boolean_t invalidate)
2755 {
2756         vm_map_entry_t current;
2757         vm_map_entry_t entry;
2758         vm_size_t size;
2759         vm_object_t object;
2760         vm_ooffset_t offset;
2761         unsigned int last_timestamp;
2762         boolean_t failed;
2763
2764         vm_map_lock_read(map);
2765         VM_MAP_RANGE_CHECK(map, start, end);
2766         if (!vm_map_lookup_entry(map, start, &entry)) {
2767                 vm_map_unlock_read(map);
2768                 return (KERN_INVALID_ADDRESS);
2769         } else if (start == end) {
2770                 start = entry->start;
2771                 end = entry->end;
2772         }
2773         /*
2774          * Make a first pass to check for user-wired memory and holes.
2775          */
2776         for (current = entry; current != &map->header && current->start < end;
2777             current = current->next) {
2778                 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2779                         vm_map_unlock_read(map);
2780                         return (KERN_INVALID_ARGUMENT);
2781                 }
2782                 if (end > current->end &&
2783                     (current->next == &map->header ||
2784                         current->end != current->next->start)) {
2785                         vm_map_unlock_read(map);
2786                         return (KERN_INVALID_ADDRESS);
2787                 }
2788         }
2789
2790         if (invalidate)
2791                 pmap_remove(map->pmap, start, end);
2792         failed = FALSE;
2793
2794         /*
2795          * Make a second pass, cleaning/uncaching pages from the indicated
2796          * objects as we go.
2797          */
2798         for (current = entry; current != &map->header && current->start < end;) {
2799                 offset = current->offset + (start - current->start);
2800                 size = (end <= current->end ? end : current->end) - start;
2801                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2802                         vm_map_t smap;
2803                         vm_map_entry_t tentry;
2804                         vm_size_t tsize;
2805
2806                         smap = current->object.sub_map;
2807                         vm_map_lock_read(smap);
2808                         (void) vm_map_lookup_entry(smap, offset, &tentry);
2809                         tsize = tentry->end - offset;
2810                         if (tsize < size)
2811                                 size = tsize;
2812                         object = tentry->object.vm_object;
2813                         offset = tentry->offset + (offset - tentry->start);
2814                         vm_map_unlock_read(smap);
2815                 } else {
2816                         object = current->object.vm_object;
2817                 }
2818                 vm_object_reference(object);
2819                 last_timestamp = map->timestamp;
2820                 vm_map_unlock_read(map);
2821                 if (!vm_object_sync(object, offset, size, syncio, invalidate))
2822                         failed = TRUE;
2823                 start += size;
2824                 vm_object_deallocate(object);
2825                 vm_map_lock_read(map);
2826                 if (last_timestamp == map->timestamp ||
2827                     !vm_map_lookup_entry(map, start, &current))
2828                         current = current->next;
2829         }
2830
2831         vm_map_unlock_read(map);
2832         return (failed ? KERN_FAILURE : KERN_SUCCESS);
2833 }
2834
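/*
 * Usage sketch for vm_map_sync() in the shape of an msync(2)-style
 * caller.  The mapping of the MS_* flags onto the syncio/invalidate
 * arguments and the errno conversion are illustrative assumptions.
 */
#if 0
static int
example_msync(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
{
        int rv;

        /*
         * Write dirty pages back synchronously unless MS_ASYNC was
         * given; also free cached pages if MS_INVALIDATE was given.
         */
        rv = vm_map_sync(map, addr, addr + size,
            (flags & MS_ASYNC) == 0, (flags & MS_INVALIDATE) != 0);
        return (rv == KERN_SUCCESS ? 0 : EIO);
}
#endif
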
2835 /*
2836  *      vm_map_entry_unwire:    [ internal use only ]
2837  *
2838  *      Make the region specified by this entry pageable.
2839  *
2840  *      The map in question should be locked.
2841  *      [This is the reason for this routine's existence.]
2842  */
2843 static void
2844 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2845 {
2846
2847         VM_MAP_ASSERT_LOCKED(map);
2848         KASSERT(entry->wired_count > 0,
2849             ("vm_map_entry_unwire: entry %p isn't wired", entry));
2850         pmap_unwire(map->pmap, entry->start, entry->end);
2851         vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
2852             entry->start, PQ_ACTIVE);
2853         entry->wired_count = 0;
2854 }
2855
2856 static void
2857 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2858 {
2859
2860         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2861                 vm_object_deallocate(entry->object.vm_object);
2862         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2863 }
2864
2865 /*
2866  *      vm_map_entry_delete:    [ internal use only ]
2867  *
2868  *      Deallocate the given entry from the target map.
2869  */
2870 static void
2871 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2872 {
2873         vm_object_t object;
2874         vm_pindex_t offidxstart, offidxend, count, size1;
2875         vm_ooffset_t size;
2876
2877         vm_map_entry_unlink(map, entry);
2878         object = entry->object.vm_object;
2879         size = entry->end - entry->start;
2880         map->size -= size;
2881
2882         if (entry->cred != NULL) {
2883                 swap_release_by_cred(size, entry->cred);
2884                 crfree(entry->cred);
2885         }
2886
2887         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2888             (object != NULL)) {
2889                 KASSERT(entry->cred == NULL || object->cred == NULL ||
2890                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2891                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2892                 count = OFF_TO_IDX(size);
2893                 offidxstart = OFF_TO_IDX(entry->offset);
2894                 offidxend = offidxstart + count;
2895                 VM_OBJECT_WLOCK(object);
2896                 if (object->ref_count != 1 &&
2897                     ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2898                     object == kernel_object || object == kmem_object)) {
2899                         vm_object_collapse(object);
2900
2901                         /*
2902                          * The option OBJPR_NOTMAPPED can be passed here
2903                          * because vm_map_delete() already performed
2904                          * pmap_remove() on the only mapping to this range
2905                          * of pages.
2906                          */
2907                         vm_object_page_remove(object, offidxstart, offidxend,
2908                             OBJPR_NOTMAPPED);
2909                         if (object->type == OBJT_SWAP)
2910                                 swap_pager_freespace(object, offidxstart, count);
2911                         if (offidxend >= object->size &&
2912                             offidxstart < object->size) {
2913                                 size1 = object->size;
2914                                 object->size = offidxstart;
2915                                 if (object->cred != NULL) {
2916                                         size1 -= object->size;
2917                                         KASSERT(object->charge >= ptoa(size1),
2918                                             ("vm_map_entry_delete: object->charge < 0"));
2919                                         swap_release_by_cred(ptoa(size1), object->cred);
2920                                         object->charge -= ptoa(size1);
2921                                 }
2922                         }
2923                 }
2924                 VM_OBJECT_WUNLOCK(object);
2925         } else
2926                 entry->object.vm_object = NULL;
2927         if (map->system_map)
2928                 vm_map_entry_deallocate(entry, TRUE);
2929         else {
2930                 entry->next = curthread->td_map_def_user;
2931                 curthread->td_map_def_user = entry;
2932         }
2933 }
2934
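/*
 * Note that entries from non-system maps are not freed above; they are
 * queued on curthread->td_map_def_user and drained by
 * vm_map_process_deferred() once the map lock has been dropped (see the
 * inlined unlock in vmspace_fork() below).  A simplified sketch of that
 * drain loop, omitting the vnode writecount bookkeeping done by the
 * real function:
 */
#if 0
static void
example_process_deferred(void)
{
        vm_map_entry_t entry, next;

        entry = curthread->td_map_def_user;
        curthread->td_map_def_user = NULL;
        while (entry != NULL) {
                next = entry->next;
                vm_map_entry_deallocate(entry, FALSE);
                entry = next;
        }
}
#endif
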
2935 /*
2936  *      vm_map_delete:  [ internal use only ]
2937  *
2938  *      Deallocates the given address range from the target
2939  *      map.
2940  */
2941 int
2942 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2943 {
2944         vm_map_entry_t entry;
2945         vm_map_entry_t first_entry;
2946
2947         VM_MAP_ASSERT_LOCKED(map);
2948         if (start == end)
2949                 return (KERN_SUCCESS);
2950
2951         /*
2952          * Find the start of the region, and clip it
2953          */
2954         if (!vm_map_lookup_entry(map, start, &first_entry))
2955                 entry = first_entry->next;
2956         else {
2957                 entry = first_entry;
2958                 vm_map_clip_start(map, entry, start);
2959         }
2960
2961         /*
2962          * Step through all entries in this region
2963          */
2964         while ((entry != &map->header) && (entry->start < end)) {
2965                 vm_map_entry_t next;
2966
2967                 /*
2968                  * Wait for wiring or unwiring of an entry to complete.
2969                  * Also wait for any system wirings to disappear on
2970                  * user maps.
2971                  */
2972                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2973                     (vm_map_pmap(map) != kernel_pmap &&
2974                     vm_map_entry_system_wired_count(entry) != 0)) {
2975                         unsigned int last_timestamp;
2976                         vm_offset_t saved_start;
2977                         vm_map_entry_t tmp_entry;
2978
2979                         saved_start = entry->start;
2980                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2981                         last_timestamp = map->timestamp;
2982                         (void) vm_map_unlock_and_wait(map, 0);
2983                         vm_map_lock(map);
2984                         if (last_timestamp + 1 != map->timestamp) {
2985                                 /*
2986                                  * Look again for the entry because the map was
2987                                  * modified while it was unlocked.
2988                                  * Specifically, the entry may have been
2989                                  * clipped, merged, or deleted.
2990                                  */
2991                                 if (!vm_map_lookup_entry(map, saved_start,
2992                                                          &tmp_entry))
2993                                         entry = tmp_entry->next;
2994                                 else {
2995                                         entry = tmp_entry;
2996                                         vm_map_clip_start(map, entry,
2997                                                           saved_start);
2998                                 }
2999                         }
3000                         continue;
3001                 }
3002                 vm_map_clip_end(map, entry, end);
3003
3004                 next = entry->next;
3005
3006                 /*
3007                  * Unwire before removing addresses from the pmap; otherwise,
3008                  * unwiring will put the entries back in the pmap.
3009                  */
3010                 if (entry->wired_count != 0) {
3011                         vm_map_entry_unwire(map, entry);
3012                 }
3013
3014                 pmap_remove(map->pmap, entry->start, entry->end);
3015
3016                 /*
3017                  * Delete the entry only after removing all pmap
3018                  * entries pointing to its pages.  (Otherwise, its
3019                  * page frames may be reallocated, and any modify bits
3020                  * will be set in the wrong object!)
3021                  */
3022                 vm_map_entry_delete(map, entry);
3023                 entry = next;
3024         }
3025         return (KERN_SUCCESS);
3026 }
3027
3028 /*
3029  *      vm_map_remove:
3030  *
3031  *      Remove the given address range from the target map.
3032  *      This is the exported form of vm_map_delete.
3033  */
3034 int
3035 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3036 {
3037         int result;
3038
3039         vm_map_lock(map);
3040         VM_MAP_RANGE_CHECK(map, start, end);
3041         result = vm_map_delete(map, start, end);
3042         vm_map_unlock(map);
3043         return (result);
3044 }
3045
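/*
 * Usage sketch for vm_map_remove().  Unlike vm_map_delete(), it takes
 * and drops the map lock itself, so a caller only supplies the range;
 * the wrapper below is an illustrative assumption.
 */
#if 0
static void
example_unmap_range(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

        /* Locking and the range check happen inside vm_map_remove(). */
        (void)vm_map_remove(map, addr, addr + size);
}
#endif
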
3046 /*
3047  *      vm_map_check_protection:
3048  *
3049  *      Assert that the target map allows the specified privilege on the
3050  *      entire address region given.  The entire region must be allocated.
3051  *
3052  *      WARNING!  This code does not and should not check whether the
3053  *      contents of the region are accessible.  For example, a smaller file
3054  *      might be mapped into a larger address space.
3055  *
3056  *      NOTE!  This code is also called by munmap().
3057  *
3058  *      The map must be locked.  A read lock is sufficient.
3059  */
3060 boolean_t
3061 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3062                         vm_prot_t protection)
3063 {
3064         vm_map_entry_t entry;
3065         vm_map_entry_t tmp_entry;
3066
3067         if (!vm_map_lookup_entry(map, start, &tmp_entry))
3068                 return (FALSE);
3069         entry = tmp_entry;
3070
3071         while (start < end) {
3072                 if (entry == &map->header)
3073                         return (FALSE);
3074                 /*
3075                  * No holes allowed!
3076                  */
3077                 if (start < entry->start)
3078                         return (FALSE);
3079                 /*
3080                  * Check protection associated with entry.
3081                  */
3082                 if ((entry->protection & protection) != protection)
3083                         return (FALSE);
3084                 /* go to next entry */
3085                 start = entry->end;
3086                 entry = entry->next;
3087         }
3088         return (TRUE);
3089 }
3090
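/*
 * Usage sketch for vm_map_check_protection().  As the header above
 * notes, a read lock is sufficient, so a caller can bracket the check
 * with the read lock; the VM_PROT_READ query is an illustrative choice.
 */
#if 0
static boolean_t
example_range_is_readable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        boolean_t ok;

        vm_map_lock_read(map);
        ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
        vm_map_unlock_read(map);
        return (ok);
}
#endif
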
3091 /*
3092  *      vm_map_copy_entry:
3093  *
3094  *      Copies the contents of the source entry to the destination
3095  *      entry.  The entries *must* be aligned properly.
3096  */
3097 static void
3098 vm_map_copy_entry(
3099         vm_map_t src_map,
3100         vm_map_t dst_map,
3101         vm_map_entry_t src_entry,
3102         vm_map_entry_t dst_entry,
3103         vm_ooffset_t *fork_charge)
3104 {
3105         vm_object_t src_object;
3106         vm_map_entry_t fake_entry;
3107         vm_offset_t size;
3108         struct ucred *cred;
3109         int charged;
3110
3111         VM_MAP_ASSERT_LOCKED(dst_map);
3112
3113         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3114                 return;
3115
3116         if (src_entry->wired_count == 0 ||
3117             (src_entry->protection & VM_PROT_WRITE) == 0) {
3118                 /*
3119                  * If the source entry is marked needs_copy, it is already
3120                  * write-protected.
3121                  */
3122                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3123                     (src_entry->protection & VM_PROT_WRITE) != 0) {
3124                         pmap_protect(src_map->pmap,
3125                             src_entry->start,
3126                             src_entry->end,
3127                             src_entry->protection & ~VM_PROT_WRITE);
3128                 }
3129
3130                 /*
3131                  * Make a copy of the object.
3132                  */
3133                 size = src_entry->end - src_entry->start;
3134                 if ((src_object = src_entry->object.vm_object) != NULL) {
3135                         VM_OBJECT_WLOCK(src_object);
3136                         charged = ENTRY_CHARGED(src_entry);
3137                         if ((src_object->handle == NULL) &&
3138                                 (src_object->type == OBJT_DEFAULT ||
3139                                  src_object->type == OBJT_SWAP)) {
3140                                 vm_object_collapse(src_object);
3141                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3142                                         vm_object_split(src_entry);
3143                                         src_object = src_entry->object.vm_object;
3144                                 }
3145                         }
3146                         vm_object_reference_locked(src_object);
3147                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3148                         if (src_entry->cred != NULL &&
3149                             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3150                                 KASSERT(src_object->cred == NULL,
3151                                     ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3152                                      src_object));
3153                                 src_object->cred = src_entry->cred;
3154                                 src_object->charge = size;
3155                         }
3156                         VM_OBJECT_WUNLOCK(src_object);
3157                         dst_entry->object.vm_object = src_object;
3158                         if (charged) {
3159                                 cred = curthread->td_ucred;
3160                                 crhold(cred);
3161                                 dst_entry->cred = cred;
3162                                 *fork_charge += size;
3163                                 if (!(src_entry->eflags &
3164                                       MAP_ENTRY_NEEDS_COPY)) {
3165                                         crhold(cred);
3166                                         src_entry->cred = cred;
3167                                         *fork_charge += size;
3168                                 }
3169                         }
3170                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3171                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3172                         dst_entry->offset = src_entry->offset;
3173                         if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3174                                 /*
3175                                  * MAP_ENTRY_VN_WRITECNT cannot
3176                                  * indicate write reference from
3177                                  * src_entry, since the entry is
3178                                  * marked as needs copy.  Allocate a
3179                                  * fake entry that is used to
3180                                  * decrement object->un_pager.vnp.writecount
3181                                  * at the appropriate time.  Attach
3182                                  * fake_entry to the deferred list.
3183                                  */
3184                                 fake_entry = vm_map_entry_create(dst_map);
3185                                 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3186                                 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3187                                 vm_object_reference(src_object);
3188                                 fake_entry->object.vm_object = src_object;
3189                                 fake_entry->start = src_entry->start;
3190                                 fake_entry->end = src_entry->end;
3191                                 fake_entry->next = curthread->td_map_def_user;
3192                                 curthread->td_map_def_user = fake_entry;
3193                         }
3194                 } else {
3195                         dst_entry->object.vm_object = NULL;
3196                         dst_entry->offset = 0;
3197                         if (src_entry->cred != NULL) {
3198                                 dst_entry->cred = curthread->td_ucred;
3199                                 crhold(dst_entry->cred);
3200                                 *fork_charge += size;
3201                         }
3202                 }
3203
3204                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3205                     dst_entry->end - dst_entry->start, src_entry->start);
3206         } else {
3207                 /*
3208                  * We don't want to make writeable wired pages copy-on-write.
3209                  * Immediately copy these pages into the new map by simulating
3210                  * page faults.  The new pages are pageable.
3211                  */
3212                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3213                     fork_charge);
3214         }
3215 }
3216
3217 /*
3218  * vmspace_map_entry_forked:
3219  * Update the newly-forked vmspace each time a map entry is inherited
3220  * or copied.  The values for vm_dsize and vm_tsize are approximate
3221  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3222  */
3223 static void
3224 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3225     vm_map_entry_t entry)
3226 {
3227         vm_size_t entrysize;
3228         vm_offset_t newend;
3229
3230         entrysize = entry->end - entry->start;
3231         vm2->vm_map.size += entrysize;
3232         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3233                 vm2->vm_ssize += btoc(entrysize);
3234         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3235             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3236                 newend = MIN(entry->end,
3237                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3238                 vm2->vm_dsize += btoc(newend - entry->start);
3239         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3240             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3241                 newend = MIN(entry->end,
3242                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3243                 vm2->vm_tsize += btoc(newend - entry->start);
3244         }
3245 }
3246
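/*
 * A worked example of the accounting above, assuming 4 KB pages: a
 * 64 KB entry lying entirely inside the parent's data segment adds
 * btoc(65536) == 16 pages to vm2->vm_dsize, while an entry of which
 * only the first 16 KB overlap the data segment is clipped by newend
 * to btoc(16384) == 4 pages.
 */
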
3247 /*
3248  * vmspace_fork:
3249  * Create a new process vmspace structure and vm_map
3250  * based on those of an existing process.  The new map
3251  * is based on the old map, according to the inheritance
3252  * values on the regions in that map.
3253  *
3254  * XXX It might be worth coalescing the entries added to the new vmspace.
3255  *
3256  * The source map must not be locked.
3257  */
3258 struct vmspace *
3259 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3260 {
3261         struct vmspace *vm2;
3262         vm_map_t new_map, old_map;
3263         vm_map_entry_t new_entry, old_entry;
3264         vm_object_t object;
3265         int locked;
3266
3267         old_map = &vm1->vm_map;
3268         /* Copy immutable fields of vm1 to vm2. */
3269         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
3270         if (vm2 == NULL)
3271                 return (NULL);
3272         vm2->vm_taddr = vm1->vm_taddr;
3273         vm2->vm_daddr = vm1->vm_daddr;
3274         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3275         vm_map_lock(old_map);
3276         if (old_map->busy)
3277                 vm_map_wait_busy(old_map);
3278         new_map = &vm2->vm_map;
3279         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3280         KASSERT(locked, ("vmspace_fork: lock failed"));
3281
3282         old_entry = old_map->header.next;
3283
3284         while (old_entry != &old_map->header) {
3285                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3286                         panic("vm_map_fork: encountered a submap");
3287
3288                 switch (old_entry->inheritance) {
3289                 case VM_INHERIT_NONE:
3290                         break;
3291
3292                 case VM_INHERIT_SHARE:
3293                         /*
3294                          * Clone the entry, creating the shared object if necessary.
3295                          */
3296                         object = old_entry->object.vm_object;
3297                         if (object == NULL) {
3298                                 object = vm_object_allocate(OBJT_DEFAULT,
3299                                         atop(old_entry->end - old_entry->start));
3300                                 old_entry->object.vm_object = object;
3301                                 old_entry->offset = 0;
3302                                 if (old_entry->cred != NULL) {
3303                                         object->cred = old_entry->cred;
3304                                         object->charge = old_entry->end -
3305                                             old_entry->start;
3306                                         old_entry->cred = NULL;
3307                                 }
3308                         }
3309
3310                         /*
3311                          * Add the reference before calling vm_object_shadow
3312                          * to ensure that a shadow object is created.
3313                          */
3314                         vm_object_reference(object);
3315                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3316                                 vm_object_shadow(&old_entry->object.vm_object,
3317                                     &old_entry->offset,
3318                                     old_entry->end - old_entry->start);
3319                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3320                                 /* Transfer the second reference too. */
3321                                 vm_object_reference(
3322                                     old_entry->object.vm_object);
3323
3324                                 /*
3325                                  * As in vm_map_simplify_entry(), the
3326                                  * vnode lock will not be acquired in
3327                                  * this call to vm_object_deallocate().
3328                                  */
3329                                 vm_object_deallocate(object);
3330                                 object = old_entry->object.vm_object;
3331                         }
3332                         VM_OBJECT_WLOCK(object);
3333                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
3334                         if (old_entry->cred != NULL) {
3335                                 KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3336                                 object->cred = old_entry->cred;
3337                                 object->charge = old_entry->end - old_entry->start;
3338                                 old_entry->cred = NULL;
3339                         }
3340
3341                         /*
3342                          * Assert the correct state of the vnode
3343                          * v_writecount while the object is locked, so
3344                          * that we do not need to relock it later just
3345                          * for this assertion.
3346                          */
3347                         if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3348                             object->type == OBJT_VNODE) {
3349                                 KASSERT(((struct vnode *)object->handle)->
3350                                     v_writecount > 0,
3351                                     ("vmspace_fork: v_writecount %p", object));
3352                                 KASSERT(object->un_pager.vnp.writemappings > 0,
3353                                     ("vmspace_fork: vnp.writemappings %p",
3354                                     object));
3355                         }
3356                         VM_OBJECT_WUNLOCK(object);
3357
3358                         /*
3359                          * Clone the entry, referencing the shared object.
3360                          */
3361                         new_entry = vm_map_entry_create(new_map);
3362                         *new_entry = *old_entry;
3363                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3364                             MAP_ENTRY_IN_TRANSITION);
3365                         new_entry->wiring_thread = NULL;
3366                         new_entry->wired_count = 0;
3367                         if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3368                                 vnode_pager_update_writecount(object,
3369                                     new_entry->start, new_entry->end);
3370                         }
3371
3372                         /*
3373                          * Insert the entry into the new map -- we know we're
3374                          * inserting at the end of the new map.
3375                          */
3376                         vm_map_entry_link(new_map, new_map->header.prev,
3377                             new_entry);
3378                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3379
3380                         /*
3381                          * Update the physical map
3382                          */
3383                         pmap_copy(new_map->pmap, old_map->pmap,
3384                             new_entry->start,
3385                             (old_entry->end - old_entry->start),
3386                             old_entry->start);
3387                         break;
3388
3389                 case VM_INHERIT_COPY:
3390                         /*
3391                          * Clone the entry and link into the map.
3392                          */
3393                         new_entry = vm_map_entry_create(new_map);
3394                         *new_entry = *old_entry;
3395                         /*
3396                          * Copied entry is COW over the old object.
3397                          */
3398                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3399                             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3400                         new_entry->wiring_thread = NULL;
3401                         new_entry->wired_count = 0;
3402                         new_entry->object.vm_object = NULL;
3403                         new_entry->cred = NULL;
3404                         vm_map_entry_link(new_map, new_map->header.prev,
3405                             new_entry);
3406                         vmspace_map_entry_forked(vm1, vm2, new_entry);
3407                         vm_map_copy_entry(old_map, new_map, old_entry,
3408                             new_entry, fork_charge);
3409                         break;
3410                 }
3411                 old_entry = old_entry->next;
3412         }
3413         /*
3414          * Use inlined vm_map_unlock() to postpone handling the deferred
3415          * map entries, which cannot be done until both old_map and
3416          * new_map locks are released.
3417          */
3418         sx_xunlock(&old_map->lock);
3419         sx_xunlock(&new_map->lock);
3420         vm_map_process_deferred();
3421
3422         return (vm2);
3423 }
3424
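/*
 * Caller sketch for vmspace_fork(), modelled directly on
 * vmspace_unshare() below: the accumulated fork_charge must be reserved
 * against the caller's credential, and the new vmspace freed if that
 * reservation fails.
 */
#if 0
static struct vmspace *
example_fork_vmspace(struct vmspace *vm1, struct ucred *cred)
{
        struct vmspace *vm2;
        vm_ooffset_t fork_charge;

        fork_charge = 0;
        vm2 = vmspace_fork(vm1, &fork_charge);
        if (vm2 == NULL)
                return (NULL);
        if (!swap_reserve_by_cred(fork_charge, cred)) {
                vmspace_free(vm2);
                return (NULL);
        }
        return (vm2);
}
#endif
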
3425 int
3426 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3427     vm_prot_t prot, vm_prot_t max, int cow)
3428 {
3429         vm_size_t growsize, init_ssize;
3430         rlim_t lmemlim, vmemlim;
3431         int rv;
3432
3433         growsize = sgrowsiz;
3434         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3435         vm_map_lock(map);
3436         PROC_LOCK(curproc);
3437         lmemlim = lim_cur(curproc, RLIMIT_MEMLOCK);
3438         vmemlim = lim_cur(curproc, RLIMIT_VMEM);
3439         PROC_UNLOCK(curproc);
3440         if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3441                 if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
3442                         rv = KERN_NO_SPACE;
3443                         goto out;
3444                 }
3445         }
3446         /* If we would blow our VMEM resource limit, no go */
3447         if (map->size + init_ssize > vmemlim) {
3448                 rv = KERN_NO_SPACE;
3449                 goto out;
3450         }
3451         rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
3452             max, cow);
3453 out:
3454         vm_map_unlock(map);
3455         return (rv);
3456 }
3457
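/*
 * Usage sketch for vm_map_stack() in the style of exec-time stack
 * setup, with the grow direction piggybacked on the cow argument as
 * described in vm_map_stack_locked() below.  The wrapper and its
 * argument names are illustrative assumptions.
 */
#if 0
static int
example_map_stack(vm_map_t map, vm_offset_t stack_addr, vm_size_t ssiz)
{

        /* Reserve a downward-growing stack of at most ssiz bytes. */
        return (vm_map_stack(map, stack_addr, ssiz, VM_PROT_ALL,
            VM_PROT_ALL, MAP_STACK_GROWS_DOWN));
}
#endif
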
3458 static int
3459 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3460     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
3461 {
3462         vm_map_entry_t new_entry, prev_entry;
3463         vm_offset_t bot, top;
3464         vm_size_t init_ssize;
3465         int orient, rv;
3466
3467         /*
3468          * The stack orientation is piggybacked with the cow argument.
3469          * Extract it into orient and mask the cow argument so that we
3470          * don't pass it around further.
3471          * NOTE: We explicitly allow bi-directional stacks.
3472          */
3473         orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3474         KASSERT(orient != 0, ("No stack grow direction"));
3475
3476         if (addrbos < vm_map_min(map) ||
3477             addrbos > vm_map_max(map) ||
3478             addrbos + max_ssize < addrbos)
3479                 return (KERN_NO_SPACE);
3480
3481         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3482
3483         /* If addr is already mapped, no go */
3484         if (vm_map_lookup_entry(map, addrbos, &prev_entry))
3485                 return (KERN_NO_SPACE);
3486
3487         /*
3488          * If we can't accommodate max_ssize in the current mapping, no go.
3489          * However, we need to be aware that subsequent user mappings might
3490          * map into the space we have reserved for stack, and currently this
3491          * space is not protected.
3492          *
3493          * Hopefully we will at least detect this condition when we try to
3494          * grow the stack.
3495          */
3496         if ((prev_entry->next != &map->header) &&
3497             (prev_entry->next->start < addrbos + max_ssize))
3498                 return (KERN_NO_SPACE);
3499
3500         /*
3501          * We initially map a stack of only init_ssize.  We will grow as
3502          * needed later.  Depending on the orientation of the stack (i.e.
3503          * the grow direction) we either map at the top of the range, the
3504          * bottom of the range or in the middle.
3505          *
3506          * Note: we would normally expect prot and max to be VM_PROT_ALL,
3507          * and cow to be 0.  Possibly we should eliminate these as input
3508          * parameters, and just pass these values here in the insert call.
3509          */
3510         if (orient == MAP_STACK_GROWS_DOWN)
3511                 bot = addrbos + max_ssize - init_ssize;
3512         else if (orient == MAP_STACK_GROWS_UP)
3513                 bot = addrbos;
3514         else
3515                 bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3516         top = bot + init_ssize;
3517         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3518
3519         /* Now set the avail_ssize amount. */
3520         if (rv == KERN_SUCCESS) {
3521                 if (prev_entry != &map->header)
3522                         vm_map_clip_end(map, prev_entry, bot);
3523                 new_entry = prev_entry->next;
3524                 if (new_entry->end != top || new_entry->start != bot)
3525                         panic("Bad entry start/end for new stack entry");
3526
3527                 new_entry->avail_ssize = max_ssize - init_ssize;
3528                 if (orient & MAP_STACK_GROWS_DOWN)
3529                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3530                 if (orient & MAP_STACK_GROWS_UP)
3531                         new_entry->eflags |= MAP_ENTRY_GROWS_UP;
3532         }
3533
3534         return (rv);
3535 }
3536
3537 static int stack_guard_page = 0;
3538 TUNABLE_INT("security.bsd.stack_guard_page", &stack_guard_page);
3539 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RW,
3540     &stack_guard_page, 0,
3541     "Insert stack guard page ahead of the growable segments.");
3542
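/*
 * Since stack_guard_page is both a loader tunable and a CTLFLAG_RW
 * sysctl, it can be set at boot or at runtime, e.g.:
 *
 *      security.bsd.stack_guard_page=1         (in /boot/loader.conf)
 *      sysctl security.bsd.stack_guard_page=1  (at runtime)
 */
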
3543 /*
3544  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3545  * desired address is already mapped or if we successfully grow the
3546  * stack, and also if addr is outside the stack range (this is strange,
3547  * but preserves compatibility with the grow function in vm_machdep.c).
3548  */
3549 int
3550 vm_map_growstack(struct proc *p, vm_offset_t addr)
3551 {
3552         vm_map_entry_t next_entry, prev_entry;
3553         vm_map_entry_t new_entry, stack_entry;
3554         struct vmspace *vm = p->p_vmspace;
3555         vm_map_t map = &vm->vm_map;
3556         vm_offset_t end;
3557         vm_size_t growsize;
3558         size_t grow_amount, max_grow;
3559         rlim_t lmemlim, stacklim, vmemlim;
3560         int is_procstack, rv;
3561         struct ucred *cred;
3562 #ifdef notyet
3563         uint64_t limit;
3564 #endif
3565 #ifdef RACCT
3566         int error;
3567 #endif
3568
3569 Retry:
3570         PROC_LOCK(p);
3571         lmemlim = lim_cur(p, RLIMIT_MEMLOCK);
3572         stacklim = lim_cur(p, RLIMIT_STACK);
3573         vmemlim = lim_cur(p, RLIMIT_VMEM);
3574         PROC_UNLOCK(p);
3575
3576         vm_map_lock_read(map);
3577
3578         /* If addr is already in the entry range, no need to grow. */
3579         if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3580                 vm_map_unlock_read(map);
3581                 return (KERN_SUCCESS);
3582         }
3583
3584         next_entry = prev_entry->next;
3585         if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3586                 /*
3587                  * This entry does not grow upwards. Since the address lies
3588                  * beyond this entry, the next entry (if one exists) has to
3589                  * be a downward growable entry. The entry list header is
3590                  * never a growable entry, so it suffices to check the flags.
3591                  */
3592                 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3593                         vm_map_unlock_read(map);
3594                         return (KERN_SUCCESS);
3595                 }
3596                 stack_entry = next_entry;
3597         } else {
3598                 /*
3599                  * This entry grows upward. If the next entry does not at
3600                  * least grow downwards, this is the entry we need to grow;
3601                  * otherwise we have two possible choices and we have to
3602                  * select one.
3603                  */
3604                 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3605                         /*
3606                          * We have two choices: grow the entry closest to
3607                          * the address to minimize the amount of growth.
3608                          */
3609                         if (addr - prev_entry->end <= next_entry->start - addr)
3610                                 stack_entry = prev_entry;
3611                         else
3612                                 stack_entry = next_entry;
3613                 } else
3614                         stack_entry = prev_entry;
3615         }
3616
3617         if (stack_entry == next_entry) {
3618                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
3619                 KASSERT(addr < stack_entry->start, ("foo"));
3620                 end = (prev_entry != &map->header) ? prev_entry->end :
3621                     stack_entry->start - stack_entry->avail_ssize;
3622                 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3623                 max_grow = stack_entry->start - end;
3624         } else {
3625                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
3626                 KASSERT(addr >= stack_entry->end, ("foo"));
3627                 end = (next_entry != &map->header) ? next_entry->start :
3628                     stack_entry->end + stack_entry->avail_ssize;
3629                 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3630                 max_grow = end - stack_entry->end;
3631         }
3632
3633         if (grow_amount > stack_entry->avail_ssize) {
3634                 vm_map_unlock_read(map);
3635                 return (KERN_NO_SPACE);
3636         }
3637
3638         /*
3639          * If there is no longer enough space between the entries, fail and
3640          * adjust the available space.  Note: this should only happen if the
3641          * user has mapped into the stack area after the stack was created,
3642          * and is probably an error.
3643          *
3644          * This also effectively destroys any guard page the user might have
3645          * intended by limiting the stack size.
3646          */
3647         if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3648                 if (vm_map_lock_upgrade(map))
3649                         goto Retry;
3650
3651                 stack_entry->avail_ssize = max_grow;
3652
3653                 vm_map_unlock(map);
3654                 return (KERN_NO_SPACE);
3655         }
3656
3657         is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
3658
3659         /*
3660          * If this is the main process stack, see if we're over the stack
3661          * limit.
3662          */
3663         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3664                 vm_map_unlock_read(map);
3665                 return (KERN_NO_SPACE);
3666         }
3667 #ifdef RACCT
3668         PROC_LOCK(p);
3669         if (is_procstack &&
3670             racct_set(p, RACCT_STACK, ctob(vm->vm_ssize) + grow_amount)) {
3671                 PROC_UNLOCK(p);
3672                 vm_map_unlock_read(map);
3673                 return (KERN_NO_SPACE);
3674         }
3675         PROC_UNLOCK(p);
3676 #endif
3677
3678         /* Round up the grow amount to a multiple of sgrowsiz. */
3679         growsize = sgrowsiz;
3680         grow_amount = roundup(grow_amount, growsize);
3681         if (grow_amount > stack_entry->avail_ssize)
3682                 grow_amount = stack_entry->avail_ssize;
3683         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3684                 grow_amount = trunc_page((vm_size_t)stacklim) -
3685                     ctob(vm->vm_ssize);
3686         }
3687 #ifdef notyet
3688         PROC_LOCK(p);
3689         limit = racct_get_available(p, RACCT_STACK);
3690         PROC_UNLOCK(p);
3691         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3692                 grow_amount = limit - ctob(vm->vm_ssize);
3693 #endif
3694         if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3695                 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3696                         vm_map_unlock_read(map);
3697                         rv = KERN_NO_SPACE;
3698                         goto out;
3699                 }
3700 #ifdef RACCT
3701                 PROC_LOCK(p);
3702                 if (racct_set(p, RACCT_MEMLOCK,
3703                     ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3704                         PROC_UNLOCK(p);
3705                         vm_map_unlock_read(map);
3706                         rv = KERN_NO_SPACE;
3707                         goto out;
3708                 }
3709                 PROC_UNLOCK(p);
3710 #endif
3711         }
3712         /* If we would blow our VMEM resource limit, no go */
3713         if (map->size + grow_amount > vmemlim) {
3714                 vm_map_unlock_read(map);
3715                 rv = KERN_NO_SPACE;
3716                 goto out;
3717         }
3718 #ifdef RACCT
3719         PROC_LOCK(p);
3720         if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3721                 PROC_UNLOCK(p);
3722                 vm_map_unlock_read(map);
3723                 rv = KERN_NO_SPACE;
3724                 goto out;
3725         }
3726         PROC_UNLOCK(p);
3727 #endif
3728
3729         if (vm_map_lock_upgrade(map))
3730                 goto Retry;
3731
3732         if (stack_entry == next_entry) {
3733                 /*
3734                  * Growing downward.
3735                  */
3736                 /* Get the preliminary new entry start value */
3737                 addr = stack_entry->start - grow_amount;
3738
3739                 /*
3740                  * If this puts us into the previous entry, cut back our
3741                  * growth to the available space. Also, see the note above.
3742                  */
3743                 if (addr < end) {
3744                         stack_entry->avail_ssize = max_grow;
3745                         addr = end;
3746                         if (stack_guard_page)
3747                                 addr += PAGE_SIZE;
3748                 }
3749
3750                 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3751                     next_entry->protection, next_entry->max_protection, 0);
3752
3753                 /* Adjust the available stack space by the amount we grew. */
3754                 if (rv == KERN_SUCCESS) {
3755                         if (prev_entry != &map->header)
3756                                 vm_map_clip_end(map, prev_entry, addr);
3757                         new_entry = prev_entry->next;
3758                         KASSERT(new_entry == stack_entry->prev, ("foo"));
3759                         KASSERT(new_entry->end == stack_entry->start, ("foo"));
3760                         KASSERT(new_entry->start == addr, ("foo"));
3761                         grow_amount = new_entry->end - new_entry->start;
3762                         new_entry->avail_ssize = stack_entry->avail_ssize -
3763                             grow_amount;
3764                         stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3765                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
3766                 }
3767         } else {
3768                 /*
3769                  * Growing upward.
3770                  */
3771                 addr = stack_entry->end + grow_amount;
3772
3773                 /*
3774                  * If this puts us into the next entry, cut back our growth
3775                  * to the available space. Also, see the note above.
3776                  */
3777                 if (addr > end) {
3778                         stack_entry->avail_ssize = end - stack_entry->end;
3779                         addr = end;
3780                         if (stack_guard_page)
3781                                 addr -= PAGE_SIZE;
3782                 }
3783
3784                 grow_amount = addr - stack_entry->end;
3785                 cred = stack_entry->cred;
3786                 if (cred == NULL && stack_entry->object.vm_object != NULL)
3787                         cred = stack_entry->object.vm_object->cred;
3788                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3789                         rv = KERN_NO_SPACE;
3790                 /* Grow the underlying object if applicable. */
3791                 else if (stack_entry->object.vm_object == NULL ||
3792                          vm_object_coalesce(stack_entry->object.vm_object,
3793                          stack_entry->offset,
3794                          (vm_size_t)(stack_entry->end - stack_entry->start),
3795                          (vm_size_t)grow_amount, cred != NULL)) {
3796                         map->size += (addr - stack_entry->end);
3797                         /* Update the current entry. */
3798                         stack_entry->end = addr;
3799                         stack_entry->avail_ssize -= grow_amount;
3800                         vm_map_entry_resize_free(map, stack_entry);
3801                         rv = KERN_SUCCESS;
3802
3803                         if (next_entry != &map->header)
3804                                 vm_map_clip_start(map, next_entry, addr);
3805                 } else
3806                         rv = KERN_FAILURE;
3807         }
3808
3809         if (rv == KERN_SUCCESS && is_procstack)
3810                 vm->vm_ssize += btoc(grow_amount);
3811
3812         vm_map_unlock(map);
3813
3814         /*
3815          * Heed the MAP_WIREFUTURE flag if it was set for this process.
3816          */
3817         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3818                 vm_map_wire(map,
3819                     (stack_entry == next_entry) ? addr : addr - grow_amount,
3820                     (stack_entry == next_entry) ? stack_entry->start : addr,
3821                     (p->p_flag & P_SYSTEM)
3822                     ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3823                     : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3824         }
3825
3826 out:
3827 #ifdef RACCT
3828         if (rv != KERN_SUCCESS) {
3829                 PROC_LOCK(p);
3830                 error = racct_set(p, RACCT_VMEM, map->size);
3831                 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3832                 if (!old_mlock) {
3833                         error = racct_set(p, RACCT_MEMLOCK,
3834                             ptoa(pmap_wired_count(map->pmap)));
3835                         KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3836                 }
3837                 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3838                 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3839                 PROC_UNLOCK(p);
3840         }
3841 #endif
3842
3843         return (rv);
3844 }
3845
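/*
 * Caller sketch for vm_map_growstack(): the machine-dependent
 * page-fault path typically tries to grow the stack toward the faulting
 * address before handing the fault to vm_fault().  The surrounding
 * handler details below are illustrative assumptions.
 */
#if 0
static int
example_fault(struct proc *p, vm_map_t map, vm_offset_t va, vm_prot_t ftype)
{

        if (map != kernel_map && vm_map_growstack(p, va) != KERN_SUCCESS)
                return (KERN_FAILURE);
        return (vm_fault(map, trunc_page(va), ftype, VM_FAULT_NORMAL));
}
#endif
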
3846 /*
3847  * Unshare the specified VM space for exec.  If other processes share
3848  * it, then create a new one.  The new vmspace starts out empty.
3849  */
3850 int
3851 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3852 {
3853         struct vmspace *oldvmspace = p->p_vmspace;
3854         struct vmspace *newvmspace;
3855
3856         KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3857             ("vmspace_exec recursed"));
3858         newvmspace = vmspace_alloc(minuser, maxuser, NULL);
3859         if (newvmspace == NULL)
3860                 return (ENOMEM);
3861         newvmspace->vm_swrss = oldvmspace->vm_swrss;
3862         /*
3863          * This code is written like this for prototype purposes.  The
3864          * goal is to avoid running down the vmspace here, but let the
3865          * other processes that are still using the vmspace finally
3866          * run it down.  Even though there is little or no chance of blocking
3867          * here, it is a good idea to keep this form for future mods.
3868          */
3869         PROC_VMSPACE_LOCK(p);
3870         p->p_vmspace = newvmspace;
3871         PROC_VMSPACE_UNLOCK(p);
3872         if (p == curthread->td_proc)
3873                 pmap_activate(curthread);
3874         curthread->td_pflags |= TDP_EXECVMSPC;
3875         return (0);
3876 }
3877
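/*
 * vmspace_exec() deliberately leaves the old vmspace alive and sets
 * TDP_EXECVMSPC; a sketch of the cleanup its caller is then expected to
 * perform once it can safely block (the exact place in the exec path
 * is an assumption here):
 */
#if 0
static void
example_post_exec_cleanup(struct vmspace *oldvmspace)
{

        if ((curthread->td_pflags & TDP_EXECVMSPC) != 0) {
                vmspace_free(oldvmspace);
                curthread->td_pflags &= ~TDP_EXECVMSPC;
        }
}
#endif
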
3878 /*
3879  * Unshare the specified VM space for forcing COW.  This
3880  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3881  */
3882 int
3883 vmspace_unshare(struct proc *p)
3884 {
3885         struct vmspace *oldvmspace = p->p_vmspace;
3886         struct vmspace *newvmspace;
3887         vm_ooffset_t fork_charge;
3888
3889         if (oldvmspace->vm_refcnt == 1)
3890                 return (0);
3891         fork_charge = 0;
3892         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3893         if (newvmspace == NULL)
3894                 return (ENOMEM);
3895         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3896                 vmspace_free(newvmspace);
3897                 return (ENOMEM);
3898         }
3899         PROC_VMSPACE_LOCK(p);
3900         p->p_vmspace = newvmspace;
3901         PROC_VMSPACE_UNLOCK(p);
3902         if (p == curthread->td_proc)
3903                 pmap_activate(curthread);
3904         vmspace_free(oldvmspace);
3905         return (0);
3906 }
3907
3908 /*
3909  *      vm_map_lookup:
3910  *
3911  *      Finds the VM object, offset, and
3912  *      protection for a given virtual address in the
3913  *      specified map, assuming a page fault of the
3914  *      type specified.
3915  *
3916  *      Leaves the map in question locked for read; return
3917  *      values are guaranteed until a vm_map_lookup_done
3918  *      call is performed.  Note that the map argument
3919  *      is in/out; the returned map must be used in
3920  *      the call to vm_map_lookup_done.
3921  *
3922  *      A handle (out_entry) is returned for use in
3923  *      vm_map_lookup_done, to make that fast.
3924  *
3925  *      If a lookup is requested with "write protection"
3926  *      specified, the map may be changed to perform virtual
3927  *      copying operations, although the data referenced will
3928  *      remain the same.
3929  */
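/*
 * A sketch of the lookup/lookup_done protocol described above, in the
 * shape of a fault-handler fragment; the work done on the object and
 * pindex in the middle is elided, and the wrapper itself is an
 * illustrative assumption.
 */
#if 0
static int
example_lookup(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type)
{
        vm_map_entry_t entry;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        int result;

        result = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
            &pindex, &prot, &wired);
        if (result != KERN_SUCCESS)
                return (result);
        /* ... use object and pindex while the map stays read-locked ... */
        vm_map_lookup_done(map, entry);
        return (KERN_SUCCESS);
}
#endif
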
3930 int
3931 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3932               vm_offset_t vaddr,
3933               vm_prot_t fault_typea,
3934               vm_map_entry_t *out_entry,        /* OUT */
3935               vm_object_t *object,              /* OUT */
3936               vm_pindex_t *pindex,              /* OUT */
3937               vm_prot_t *out_prot,              /* OUT */
3938               boolean_t *wired)                 /* OUT */
3939 {
3940         vm_map_entry_t entry;
3941         vm_map_t map = *var_map;
3942         vm_prot_t prot;
3943         vm_prot_t fault_type = fault_typea;
3944         vm_object_t eobject;
3945         vm_size_t size;
3946         struct ucred *cred;
3947
3948 RetryLookup:;
3949
3950         vm_map_lock_read(map);
3951
3952         /*
3953          * Lookup the faulting address.
3954          */
3955         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3956                 vm_map_unlock_read(map);
3957                 return (KERN_INVALID_ADDRESS);
3958         }
3959
3960         entry = *out_entry;
3961
3962         /*
3963          * Handle submaps.
3964          */
3965         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3966                 vm_map_t old_map = map;
3967
3968                 *var_map = map = entry->object.sub_map;
3969                 vm_map_unlock_read(old_map);
3970                 goto RetryLookup;
3971         }
3972
3973         /*
3974          * Check whether this task is allowed to have this page.
3975          */
3976         prot = entry->protection;
3977         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3978         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
3979                 vm_map_unlock_read(map);
3980                 return (KERN_PROTECTION_FAILURE);
3981         }
3982         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3983             (entry->eflags & MAP_ENTRY_COW) &&
3984             (fault_type & VM_PROT_WRITE)) {
3985                 vm_map_unlock_read(map);
3986                 return (KERN_PROTECTION_FAILURE);
3987         }
3988         if ((fault_typea & VM_PROT_COPY) != 0 &&
3989             (entry->max_protection & VM_PROT_WRITE) == 0 &&
3990             (entry->eflags & MAP_ENTRY_COW) == 0) {
3991                 vm_map_unlock_read(map);
3992                 return (KERN_PROTECTION_FAILURE);
3993         }
3994
3995         /*
3996          * If this page is not pageable, we have to get it for all possible
3997          * accesses.
3998          */
3999         *wired = (entry->wired_count != 0);
4000         if (*wired)
4001                 fault_type = entry->protection;
4002         size = entry->end - entry->start;
4003         /*
4004          * If the entry was copy-on-write, we either ...
4005          */
        if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                /*
                 * If we want to write the page, we may as well handle that
                 * now since we've got the map locked.
                 *
                 * If we don't need to write the page, we just demote the
                 * permissions allowed.
                 */
                if ((fault_type & VM_PROT_WRITE) != 0 ||
                    (fault_typea & VM_PROT_COPY) != 0) {
                        /*
                         * Make a new object, and place it in the object
                         * chain.  Note that no new references have appeared
                         * -- one just moved from the map to the new
                         * object.
                         */
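                        /*
                         * A failed upgrade means the read lock was
                         * dropped and the map changed meanwhile, so
                         * the entry may be stale; restart the lookup.
                         */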
                        if (vm_map_lock_upgrade(map))
                                goto RetryLookup;

                        if (entry->cred == NULL) {
                                /*
                                 * The entry is not yet charged for the
                                 * memory; charge the current thread's
                                 * credential, e.g. the debugger forcing
                                 * the copy.
                                 */
                                cred = curthread->td_ucred;
                                crhold(cred);
                                if (!swap_reserve_by_cred(size, cred)) {
                                        crfree(cred);
                                        vm_map_unlock(map);
                                        return (KERN_RESOURCE_SHORTAGE);
                                }
                                entry->cred = cred;
                        }
                        vm_object_shadow(&entry->object.vm_object,
                            &entry->offset, size);
                        entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
                        eobject = entry->object.vm_object;
                        if (eobject->cred != NULL) {
                                /*
                                 * The object was not shadowed: it was
                                 * unshared, so vm_object_shadow() left
                                 * it in place, and it already carries
                                 * a charge.  Release the reservation
                                 * taken above.
                                 */
                                swap_release_by_cred(size, entry->cred);
                                crfree(entry->cred);
                                entry->cred = NULL;
                        } else if (entry->cred != NULL) {
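                                /*
                                 * A new shadow object was created;
                                 * pass the entry's charge down to it.
                                 */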
                                VM_OBJECT_WLOCK(eobject);
                                eobject->cred = entry->cred;
                                eobject->charge = size;
                                VM_OBJECT_WUNLOCK(eobject);
                                entry->cred = NULL;
                        }

                        vm_map_lock_downgrade(map);
                } else {
                        /*
                         * We're attempting to read a copy-on-write page --
                         * don't allow writes.
                         */
                        prot &= ~VM_PROT_WRITE;
                }
        }

        /*
         * Create an object if necessary.
         */
        if (entry->object.vm_object == NULL &&
            !map->system_map) {
                if (vm_map_lock_upgrade(map))
                        goto RetryLookup;
                entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
                    atop(size));
                entry->offset = 0;
                if (entry->cred != NULL) {
                        VM_OBJECT_WLOCK(entry->object.vm_object);
                        entry->object.vm_object->cred = entry->cred;
                        entry->object.vm_object->charge = size;
                        VM_OBJECT_WUNLOCK(entry->object.vm_object);
                        entry->cred = NULL;
                }
                vm_map_lock_downgrade(map);
        }

        /*
         * Return the object/offset from this entry.  If the entry was
         * copy-on-write or empty, it has been fixed up.
         */
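        /*
         * OFF_TO_IDX() is a right shift by PAGE_SHIFT; e.g. a fault one
         * page above entry->start in an entry with offset 0 yields a
         * pindex of 1.
         */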
        *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
        *object = entry->object.vm_object;

        *out_prot = prot;
        return (KERN_SUCCESS);
}

/*
 *      vm_map_lookup_locked:
 *
 *      Look up the faulting address.  A version of vm_map_lookup that
 *      returns KERN_FAILURE instead of blocking on the map lock or on
 *      memory allocation.  The caller must already hold the map lock;
 *      the lock is neither acquired nor released here.
 */
int
vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
                     vm_offset_t vaddr,
                     vm_prot_t fault_typea,
                     vm_map_entry_t *out_entry, /* OUT */
                     vm_object_t *object,       /* OUT */
                     vm_pindex_t *pindex,       /* OUT */
                     vm_prot_t *out_prot,       /* OUT */
                     boolean_t *wired)          /* OUT */
{
        vm_map_entry_t entry;
        vm_map_t map = *var_map;
        vm_prot_t prot;
        vm_prot_t fault_type = fault_typea;

        /*
         * Look up the faulting address.
         */
        if (!vm_map_lookup_entry(map, vaddr, out_entry))
                return (KERN_INVALID_ADDRESS);

        entry = *out_entry;

        /*
         * Fail if the entry refers to a submap.
         */
        if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
                return (KERN_FAILURE);

        /*
         * Check whether this task is allowed to have this page.
         */
        prot = entry->protection;
        fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
        if ((fault_type & prot) != fault_type)
                return (KERN_PROTECTION_FAILURE);
        if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
            (entry->eflags & MAP_ENTRY_COW) &&
            (fault_type & VM_PROT_WRITE))
                return (KERN_PROTECTION_FAILURE);

        /*
         * If this page is not pageable, we have to get it for all possible
         * accesses.
         */
        *wired = (entry->wired_count != 0);
        if (*wired)
                fault_type = entry->protection;

        if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                /*
                 * Fail if the entry was copy-on-write for a write fault.
                 */
                if (fault_type & VM_PROT_WRITE)
                        return (KERN_FAILURE);
                /*
                 * We're attempting to read a copy-on-write page --
                 * don't allow writes.
                 */
                prot &= ~VM_PROT_WRITE;
        }

        /*
         * Fail if an object would have to be created, since the
         * allocation could block.
         */
        if (entry->object.vm_object == NULL && !map->system_map)
                return (KERN_FAILURE);

        /*
         * Return the object/offset from this entry.  If the entry was
         * copy-on-write or empty, it has been fixed up.
         */
        *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
        *object = entry->object.vm_object;

        *out_prot = prot;
        return (KERN_SUCCESS);
}

/*
 *      vm_map_lookup_done:
 *
 *      Releases locks acquired by a vm_map_lookup
 *      (according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
{
        /*
         * Unlock the main-level map
         */
        vm_map_unlock_read(map);
}
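
/*
 * Illustrative caller sketch (not the actual vm_fault() code): resolve
 * a user address to an (object, pindex) pair, then release the lookup
 * lock.  Note that vm_map_lookup() may replace the map pointer if the
 * address falls within a submap.
 *
 *      vm_map_t map = &curproc->p_vmspace->vm_map;
 *      vm_map_entry_t entry;
 *      vm_object_t object;
 *      vm_pindex_t pindex;
 *      vm_prot_t prot;
 *      boolean_t wired;
 *      int rv;
 *
 *      rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
 *          &pindex, &prot, &wired);
 *      if (rv != KERN_SUCCESS)
 *              return (rv);
 *      ... fault in the page at (object, pindex) ...
 *      vm_map_lookup_done(map, entry);
 */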

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

static void
vm_map_print(vm_map_t map)
{
        vm_map_entry_t entry;

        db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
            (void *)map,
            (void *)map->pmap, map->nentries, map->timestamp);

        db_indent += 2;
        for (entry = map->header.next; entry != &map->header;
            entry = entry->next) {
                db_iprintf("map entry %p: start=%p, end=%p\n",
                    (void *)entry, (void *)entry->start, (void *)entry->end);
                {
                        static char *inheritance_name[4] =
                        {"share", "copy", "none", "donate_copy"};

                        db_iprintf(" prot=%x/%x/%s",
                            entry->protection,
                            entry->max_protection,
                            inheritance_name[(int)(unsigned char)entry->inheritance]);
                        if (entry->wired_count != 0)
                                db_printf(", wired");
                }
                if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
                        db_printf(", share=%p, offset=0x%jx\n",
                            (void *)entry->object.sub_map,
                            (uintmax_t)entry->offset);
                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.sub_map !=
                                entry->object.sub_map)) {
                                db_indent += 2;
                                vm_map_print((vm_map_t)entry->object.sub_map);
                                db_indent -= 2;
                        }
                } else {
                        if (entry->cred != NULL)
                                db_printf(", ruid %d", entry->cred->cr_ruid);
                        db_printf(", object=%p, offset=0x%jx",
                            (void *)entry->object.vm_object,
                            (uintmax_t)entry->offset);
                        if (entry->object.vm_object && entry->object.vm_object->cred)
                                db_printf(", obj ruid %d charge %jx",
                                    entry->object.vm_object->cred->cr_ruid,
                                    (uintmax_t)entry->object.vm_object->charge);
                        if (entry->eflags & MAP_ENTRY_COW)
                                db_printf(", copy (%s)",
                                    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
                        db_printf("\n");

                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.vm_object !=
                                entry->object.vm_object)) {
                                db_indent += 2;
                                vm_object_print((db_expr_t)(intptr_t)
                                                entry->object.vm_object,
                                                0, 0, (char *)0);
                                db_indent -= 2;
                        }
                }
        }
        db_indent -= 2;
}

DB_SHOW_COMMAND(map, map)
{

        if (!have_addr) {
                db_printf("usage: show map <addr>\n");
                return;
        }
        vm_map_print((vm_map_t)addr);
}
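
/*
 * Example ddb usage (the address is hypothetical):
 *
 *      db> show map 0xfffff80002c7b000
 */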

DB_SHOW_COMMAND(procvm, procvm)
{
        struct proc *p;

        if (have_addr) {
                p = (struct proc *) addr;
        } else {
                p = curproc;
        }

        db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
            (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
            (void *)vmspace_pmap(p->p_vmspace));

        vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
}
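
/*
 * Example ddb usage: with no address, "show procvm" dumps the map of
 * curproc; with an address it dumps the map of that struct proc
 * (address hypothetical):
 *
 *      db> show procvm 0xfffff800028a09b0
 */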

#endif /* DDB */