[FreeBSD/FreeBSD.git] / sys / vm / vm_map.c
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  */
62
63 /*
64  *      Virtual memory mapping module.
65  */
66
67 #include <sys/cdefs.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/elf.h>
71 #include <sys/kernel.h>
72 #include <sys/ktr.h>
73 #include <sys/lock.h>
74 #include <sys/mutex.h>
75 #include <sys/proc.h>
76 #include <sys/vmmeter.h>
77 #include <sys/mman.h>
78 #include <sys/vnode.h>
79 #include <sys/racct.h>
80 #include <sys/resourcevar.h>
81 #include <sys/rwlock.h>
82 #include <sys/file.h>
83 #include <sys/sysctl.h>
84 #include <sys/sysent.h>
85 #include <sys/shm.h>
86
87 #include <vm/vm.h>
88 #include <vm/vm_param.h>
89 #include <vm/pmap.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_pageout.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vm_kern.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vnode_pager.h>
98 #include <vm/swap_pager.h>
99 #include <vm/uma.h>
100
101 /*
102  *      Virtual memory maps provide for the mapping, protection,
103  *      and sharing of virtual memory objects.  In addition,
104  *      this module provides for an efficient virtual copy of
105  *      memory from one map to another.
106  *
107  *      Synchronization is required prior to most operations.
108  *
109  *      Maps consist of an ordered doubly-linked list of simple
110  *      entries; a self-adjusting binary search tree of these
111  *      entries is used to speed up lookups.
112  *
113  *      Since portions of maps are specified by start/end addresses,
114  *      which may not align with existing map entries, all
115  *      routines merely "clip" entries to these start/end values.
116  *      [That is, an entry is split into two, bordering at a
117  *      start or end value.]  Note that these clippings may not
118  *      always be necessary (as the two resulting entries are then
119  *      not changed); however, the clipping is done for convenience.
120  *
121  *      As mentioned above, virtual copy operations are performed
122  *      by copying VM object references from one map to
123  *      another, and then marking both regions as copy-on-write.
124  */
125
126 static struct mtx map_sleep_mtx;
127 static uma_zone_t mapentzone;
128 static uma_zone_t kmapentzone;
129 static uma_zone_t vmspace_zone;
130 static int vmspace_zinit(void *mem, int size, int flags);
131 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
132     vm_offset_t max);
133 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
134 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
135 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
136 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
137     vm_map_entry_t gap_entry);
138 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
139     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
140 #ifdef INVARIANTS
141 static void vmspace_zdtor(void *mem, int size, void *arg);
142 #endif
143 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
144     vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
145     int cow);
146 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
147     vm_offset_t failed_addr);
148
149 #define CONTAINS_BITS(set, bits)        ((~(set) & (bits)) == 0)
150
151 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
152     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
153      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
154
155 /* 
156  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
157  * stable.
158  */
159 #define PROC_VMSPACE_LOCK(p) do { } while (0)
160 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
161
162 /*
163  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
164  *
165  *      Asserts that the starting and ending region
166  *      addresses fall within the valid range of the map.
167  */
168 #define VM_MAP_RANGE_CHECK(map, start, end)             \
169                 {                                       \
170                 if (start < vm_map_min(map))            \
171                         start = vm_map_min(map);        \
172                 if (end > vm_map_max(map))              \
173                         end = vm_map_max(map);          \
174                 if (start > end)                        \
175                         start = end;                    \
176                 }
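/*
 * Illustrative example (hypothetical numbers): for a map whose valid range
 * is [0x1000, 0x8000), VM_MAP_RANGE_CHECK() clamps a request of
 * start = 0x0800, end = 0x9000 to start = 0x1000, end = 0x8000.  If the
 * clamping leaves start beyond end, start is pulled down to end, so callers
 * see an empty range rather than an inverted one.
 */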
177
178 #ifndef UMA_MD_SMALL_ALLOC
179
180 /*
181  * Allocate a new slab for kernel map entries.  The kernel map may be locked or
182  * unlocked, depending on whether the request is coming from the kernel map or a
183  * submap.  This function allocates a virtual address range directly from the
184  * kernel map instead of the kmem_* layer to avoid recursion on the kernel map
185  * lock and also to avoid triggering allocator recursion in the vmem boundary
186  * tag allocator.
187  */
188 static void *
189 kmapent_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
190     int wait)
191 {
192         vm_offset_t addr;
193         int error, locked;
194
195         *pflag = UMA_SLAB_PRIV;
196
197         if (!(locked = vm_map_locked(kernel_map)))
198                 vm_map_lock(kernel_map);
199         addr = vm_map_findspace(kernel_map, vm_map_min(kernel_map), bytes);
200         if (addr + bytes < addr || addr + bytes > vm_map_max(kernel_map))
201                 panic("%s: kernel map is exhausted", __func__);
202         error = vm_map_insert(kernel_map, NULL, 0, addr, addr + bytes,
203             VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
204         if (error != KERN_SUCCESS)
205                 panic("%s: vm_map_insert() failed: %d", __func__, error);
206         if (!locked)
207                 vm_map_unlock(kernel_map);
208         error = kmem_back_domain(domain, kernel_object, addr, bytes, M_NOWAIT |
209             M_USE_RESERVE | (wait & M_ZERO));
210         if (error == KERN_SUCCESS) {
211                 return ((void *)addr);
212         } else {
213                 if (!locked)
214                         vm_map_lock(kernel_map);
215                 vm_map_delete(kernel_map, addr, bytes);
216                 if (!locked)
217                         vm_map_unlock(kernel_map);
218                 return (NULL);
219         }
220 }
221
222 static void
223 kmapent_free(void *item, vm_size_t size, uint8_t pflag)
224 {
225         vm_offset_t addr;
226         int error __diagused;
227
228         if ((pflag & UMA_SLAB_PRIV) == 0)
229                 /* XXX leaked */
230                 return;
231
232         addr = (vm_offset_t)item;
233         kmem_unback(kernel_object, addr, size);
234         error = vm_map_remove(kernel_map, addr, addr + size);
235         KASSERT(error == KERN_SUCCESS,
236             ("%s: vm_map_remove failed: %d", __func__, error));
237 }
238
239 /*
240  * The worst-case upper bound on the number of kernel map entries that may be
241  * created before the zone must be replenished in _vm_map_unlock().
242  */
243 #define KMAPENT_RESERVE         1
244
245 #endif /* !UMA_MD_SMALL_ALLOC */
246
247 /*
248  *      vm_map_startup:
249  *
250  *      Initialize the vm_map module.  Must be called before any other vm_map
251  *      routines.
252  *
253  *      User map and entry structures are allocated from the general purpose
254  *      memory pool.  Kernel maps are statically defined.  Kernel map entries
255  *      require special handling to avoid recursion; see the comments above
256  *      kmapent_alloc() and in vm_map_entry_create().
257  */
258 void
259 vm_map_startup(void)
260 {
261         mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
262
263         /*
264          * Disable the use of per-CPU buckets: map entry allocation is
265          * serialized by the kernel map lock.
266          */
267         kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
268             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
269             UMA_ZONE_VM | UMA_ZONE_NOBUCKET);
270 #ifndef UMA_MD_SMALL_ALLOC
271         /* Reserve an extra map entry for use when replenishing the reserve. */
272         uma_zone_reserve(kmapentzone, KMAPENT_RESERVE + 1);
273         uma_prealloc(kmapentzone, KMAPENT_RESERVE + 1);
274         uma_zone_set_allocf(kmapentzone, kmapent_alloc);
275         uma_zone_set_freef(kmapentzone, kmapent_free);
276 #endif
277
278         mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
279             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
280         vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
281 #ifdef INVARIANTS
282             vmspace_zdtor,
283 #else
284             NULL,
285 #endif
286             vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
287 }
288
289 static int
290 vmspace_zinit(void *mem, int size, int flags)
291 {
292         struct vmspace *vm;
293         vm_map_t map;
294
295         vm = (struct vmspace *)mem;
296         map = &vm->vm_map;
297
298         memset(map, 0, sizeof(*map));
299         mtx_init(&map->system_mtx, "vm map (system)", NULL,
300             MTX_DEF | MTX_DUPOK);
301         sx_init(&map->lock, "vm map (user)");
302         PMAP_LOCK_INIT(vmspace_pmap(vm));
303         return (0);
304 }
305
306 #ifdef INVARIANTS
307 static void
308 vmspace_zdtor(void *mem, int size, void *arg)
309 {
310         struct vmspace *vm;
311
312         vm = (struct vmspace *)mem;
313         KASSERT(vm->vm_map.nentries == 0,
314             ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
315         KASSERT(vm->vm_map.size == 0,
316             ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
317 }
318 #endif  /* INVARIANTS */
319
320 /*
321  * Allocate a vmspace structure, including a vm_map and pmap,
322  * and initialize those structures.  The refcnt is set to 1.
323  */
324 struct vmspace *
325 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
326 {
327         struct vmspace *vm;
328
329         vm = uma_zalloc(vmspace_zone, M_WAITOK);
330         KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
331         if (!pinit(vmspace_pmap(vm))) {
332                 uma_zfree(vmspace_zone, vm);
333                 return (NULL);
334         }
335         CTR1(KTR_VM, "vmspace_alloc: %p", vm);
336         _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
337         refcount_init(&vm->vm_refcnt, 1);
338         vm->vm_shm = NULL;
339         vm->vm_swrss = 0;
340         vm->vm_tsize = 0;
341         vm->vm_dsize = 0;
342         vm->vm_ssize = 0;
343         vm->vm_taddr = 0;
344         vm->vm_daddr = 0;
345         vm->vm_maxsaddr = 0;
346         return (vm);
347 }
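/*
 * Usage sketch (hypothetical caller; the real call sites live elsewhere in
 * the tree): the caller supplies the user address bounds and a pmap
 * initialization routine such as pmap_pinit:
 *
 *	vm = vmspace_alloc(minuser, maxuser, pmap_pinit);
 *	if (vm == NULL)
 *		return (ENOMEM);	/* pmap_pinit failed */
 *
 * On success the returned vmspace carries a single reference, which is
 * dropped with vmspace_free().
 */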
348
349 #ifdef RACCT
350 static void
351 vmspace_container_reset(struct proc *p)
352 {
353
354         PROC_LOCK(p);
355         racct_set(p, RACCT_DATA, 0);
356         racct_set(p, RACCT_STACK, 0);
357         racct_set(p, RACCT_RSS, 0);
358         racct_set(p, RACCT_MEMLOCK, 0);
359         racct_set(p, RACCT_VMEM, 0);
360         PROC_UNLOCK(p);
361 }
362 #endif
363
364 static inline void
365 vmspace_dofree(struct vmspace *vm)
366 {
367
368         CTR1(KTR_VM, "vmspace_free: %p", vm);
369
370         /*
371          * Make sure any SysV shm is freed, it might not have been in
372          * exit1().
373          */
374         shmexit(vm);
375
376         /*
377          * Lock the map, to wait out all other references to it.
378          * Delete all of the mappings and pages they hold, then call
379          * the pmap module to reclaim anything left.
380          */
381         (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
382             vm_map_max(&vm->vm_map));
383
384         pmap_release(vmspace_pmap(vm));
385         vm->vm_map.pmap = NULL;
386         uma_zfree(vmspace_zone, vm);
387 }
388
389 void
390 vmspace_free(struct vmspace *vm)
391 {
392
393         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
394             "vmspace_free() called");
395
396         if (refcount_release(&vm->vm_refcnt))
397                 vmspace_dofree(vm);
398 }
399
400 void
401 vmspace_exitfree(struct proc *p)
402 {
403         struct vmspace *vm;
404
405         PROC_VMSPACE_LOCK(p);
406         vm = p->p_vmspace;
407         p->p_vmspace = NULL;
408         PROC_VMSPACE_UNLOCK(p);
409         KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
410         vmspace_free(vm);
411 }
412
413 void
414 vmspace_exit(struct thread *td)
415 {
416         struct vmspace *vm;
417         struct proc *p;
418         bool released;
419
420         p = td->td_proc;
421         vm = p->p_vmspace;
422
423         /*
424          * Prepare to release the vmspace reference.  The thread that releases
425          * the last reference is responsible for tearing down the vmspace.
426          * However, threads not releasing the final reference must switch to the
427          * kernel's vmspace0 before the decrement so that the subsequent pmap
428          * deactivation does not modify a freed vmspace.
429          */
430         refcount_acquire(&vmspace0.vm_refcnt);
431         if (!(released = refcount_release_if_last(&vm->vm_refcnt))) {
432                 if (p->p_vmspace != &vmspace0) {
433                         PROC_VMSPACE_LOCK(p);
434                         p->p_vmspace = &vmspace0;
435                         PROC_VMSPACE_UNLOCK(p);
436                         pmap_activate(td);
437                 }
438                 released = refcount_release(&vm->vm_refcnt);
439         }
440         if (released) {
441                 /*
442                  * pmap_remove_pages() expects the pmap to be active, so switch
443                  * back first if necessary.
444                  */
445                 if (p->p_vmspace != vm) {
446                         PROC_VMSPACE_LOCK(p);
447                         p->p_vmspace = vm;
448                         PROC_VMSPACE_UNLOCK(p);
449                         pmap_activate(td);
450                 }
451                 pmap_remove_pages(vmspace_pmap(vm));
452                 PROC_VMSPACE_LOCK(p);
453                 p->p_vmspace = &vmspace0;
454                 PROC_VMSPACE_UNLOCK(p);
455                 pmap_activate(td);
456                 vmspace_dofree(vm);
457         }
458 #ifdef RACCT
459         if (racct_enable)
460                 vmspace_container_reset(p);
461 #endif
462 }
463
464 /* Acquire reference to vmspace owned by another process. */
465
466 struct vmspace *
467 vmspace_acquire_ref(struct proc *p)
468 {
469         struct vmspace *vm;
470
471         PROC_VMSPACE_LOCK(p);
472         vm = p->p_vmspace;
473         if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) {
474                 PROC_VMSPACE_UNLOCK(p);
475                 return (NULL);
476         }
477         if (vm != p->p_vmspace) {
478                 PROC_VMSPACE_UNLOCK(p);
479                 vmspace_free(vm);
480                 return (NULL);
481         }
482         PROC_VMSPACE_UNLOCK(p);
483         return (vm);
484 }
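/*
 * Note that vmspace_acquire_ref() re-checks p->p_vmspace after taking the
 * reference: PROC_VMSPACE_LOCK() may be a no-op, so the process can switch
 * to a new vmspace between the initial load and the refcount acquisition.
 * When that race is detected, the stale reference is dropped with
 * vmspace_free() and NULL is returned.
 */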
485
486 /*
487  * Switch between vmspaces in an AIO kernel process.
488  *
489  * The new vmspace is either the vmspace of a user process obtained
490  * from an active AIO request or the initial vmspace of the AIO kernel
491  * process (when it is idling).  Because user processes will block to
492  * drain any active AIO requests before proceeding in exit() or
493  * execve(), the reference count for vmspaces from AIO requests can
494  * never be 0.  Similarly, AIO kernel processes hold an extra
495  * reference on their initial vmspace for the life of the process.  As
496  * a result, the 'newvm' vmspace always has a non-zero reference
497  * count.  This permits an additional reference on 'newvm' to be
498  * acquired via a simple atomic increment rather than the loop in
499  * vmspace_acquire_ref() above.
500  */
501 void
502 vmspace_switch_aio(struct vmspace *newvm)
503 {
504         struct vmspace *oldvm;
505
506         /* XXX: Need some way to assert that this is an aio daemon. */
507
508         KASSERT(refcount_load(&newvm->vm_refcnt) > 0,
509             ("vmspace_switch_aio: newvm unreferenced"));
510
511         oldvm = curproc->p_vmspace;
512         if (oldvm == newvm)
513                 return;
514
515         /*
516          * Point to the new address space and refer to it.
517          */
518         curproc->p_vmspace = newvm;
519         refcount_acquire(&newvm->vm_refcnt);
520
521         /* Activate the new mapping. */
522         pmap_activate(curthread);
523
524         vmspace_free(oldvm);
525 }
526
527 void
528 _vm_map_lock(vm_map_t map, const char *file, int line)
529 {
530
531         if (map->system_map)
532                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
533         else
534                 sx_xlock_(&map->lock, file, line);
535         map->timestamp++;
536 }
537
538 void
539 vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
540 {
541         vm_object_t object;
542         struct vnode *vp;
543         bool vp_held;
544
545         if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
546                 return;
547         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
548             ("Submap with execs"));
549         object = entry->object.vm_object;
550         KASSERT(object != NULL, ("No object for text, entry %p", entry));
551         if ((object->flags & OBJ_ANON) != 0)
552                 object = object->handle;
553         else
554                 KASSERT(object->backing_object == NULL,
555                     ("non-anon object %p shadows", object));
556         KASSERT(object != NULL, ("No content object for text, entry %p obj %p",
557             entry, entry->object.vm_object));
558
559         /*
560          * Mostly, we do not lock the backing object.  It is
561          * referenced by the entry we are processing, so it cannot go
562          * away.
563          */
564         vm_pager_getvp(object, &vp, &vp_held);
565         if (vp != NULL) {
566                 if (add) {
567                         VOP_SET_TEXT_CHECKED(vp);
568                 } else {
569                         vn_lock(vp, LK_SHARED | LK_RETRY);
570                         VOP_UNSET_TEXT_CHECKED(vp);
571                         VOP_UNLOCK(vp);
572                 }
573                 if (vp_held)
574                         vdrop(vp);
575         }
576 }
577
578 /*
579  * Use a different name for this vm_map_entry field when its use
580  * is not consistent with its use as part of an ordered search tree.
581  */
582 #define defer_next right
583
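/*
 * Process map entries whose deallocation was deferred while a map lock was
 * held.  Such entries are queued on td->td_map_def_user, linked through the
 * defer_next alias above, and are released here after the map lock has been
 * dropped, so that releasing backing objects and vnode references does not
 * have to happen under the map lock.
 */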
584 static void
585 vm_map_process_deferred(void)
586 {
587         struct thread *td;
588         vm_map_entry_t entry, next;
589         vm_object_t object;
590
591         td = curthread;
592         entry = td->td_map_def_user;
593         td->td_map_def_user = NULL;
594         while (entry != NULL) {
595                 next = entry->defer_next;
596                 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
597                     MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
598                     MAP_ENTRY_VN_EXEC));
599                 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
600                         /*
601                          * Decrement the object's writemappings and
602                          * possibly the vnode's v_writecount.
603                          */
604                         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
605                             ("Submap with writecount"));
606                         object = entry->object.vm_object;
607                         KASSERT(object != NULL, ("No object for writecount"));
608                         vm_pager_release_writecount(object, entry->start,
609                             entry->end);
610                 }
611                 vm_map_entry_set_vnode_text(entry, false);
612                 vm_map_entry_deallocate(entry, FALSE);
613                 entry = next;
614         }
615 }
616
617 #ifdef INVARIANTS
618 static void
619 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
620 {
621
622         if (map->system_map)
623                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
624         else
625                 sx_assert_(&map->lock, SA_XLOCKED, file, line);
626 }
627
628 #define VM_MAP_ASSERT_LOCKED(map) \
629     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
630
631 enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL };
632 #ifdef DIAGNOSTIC
633 static int enable_vmmap_check = VMMAP_CHECK_UNLOCK;
634 #else
635 static int enable_vmmap_check = VMMAP_CHECK_NONE;
636 #endif
637 SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
638     &enable_vmmap_check, 0, "Enable vm map consistency checking");
639
640 static void _vm_map_assert_consistent(vm_map_t map, int check);
641
642 #define VM_MAP_ASSERT_CONSISTENT(map) \
643     _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
644 #ifdef DIAGNOSTIC
645 #define VM_MAP_UNLOCK_CONSISTENT(map) do {                              \
646         if (map->nupdates > map->nentries) {                            \
647                 _vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK);     \
648                 map->nupdates = 0;                                      \
649         }                                                               \
650 } while (0)
651 #else
652 #define VM_MAP_UNLOCK_CONSISTENT(map)
653 #endif
654 #else
655 #define VM_MAP_ASSERT_LOCKED(map)
656 #define VM_MAP_ASSERT_CONSISTENT(map)
657 #define VM_MAP_UNLOCK_CONSISTENT(map)
658 #endif /* INVARIANTS */
659
660 void
661 _vm_map_unlock(vm_map_t map, const char *file, int line)
662 {
663
664         VM_MAP_UNLOCK_CONSISTENT(map);
665         if (map->system_map) {
666 #ifndef UMA_MD_SMALL_ALLOC
667                 if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
668                         uma_prealloc(kmapentzone, 1);
669                         map->flags &= ~MAP_REPLENISH;
670                 }
671 #endif
672                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
673         } else {
674                 sx_xunlock_(&map->lock, file, line);
675                 vm_map_process_deferred();
676         }
677 }
678
679 void
680 _vm_map_lock_read(vm_map_t map, const char *file, int line)
681 {
682
683         if (map->system_map)
684                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
685         else
686                 sx_slock_(&map->lock, file, line);
687 }
688
689 void
690 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
691 {
692
693         if (map->system_map) {
694                 KASSERT((map->flags & MAP_REPLENISH) == 0,
695                     ("%s: MAP_REPLENISH leaked", __func__));
696                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
697         } else {
698                 sx_sunlock_(&map->lock, file, line);
699                 vm_map_process_deferred();
700         }
701 }
702
703 int
704 _vm_map_trylock(vm_map_t map, const char *file, int line)
705 {
706         int error;
707
708         error = map->system_map ?
709             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
710             !sx_try_xlock_(&map->lock, file, line);
711         if (error == 0)
712                 map->timestamp++;
713         return (error == 0);
714 }
715
716 int
717 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
718 {
719         int error;
720
721         error = map->system_map ?
722             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
723             !sx_try_slock_(&map->lock, file, line);
724         return (error == 0);
725 }
726
727 /*
728  *      _vm_map_lock_upgrade:   [ internal use only ]
729  *
730  *      Tries to upgrade a read (shared) lock on the specified map to a write
731  *      (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
732  *      non-zero value if the upgrade fails.  If the upgrade fails, the map is
733  *      returned without a read or write lock held.
734  *
735  *      Requires that the map be read locked.
736  */
737 int
738 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
739 {
740         unsigned int last_timestamp;
741
742         if (map->system_map) {
743                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
744         } else {
745                 if (!sx_try_upgrade_(&map->lock, file, line)) {
746                         last_timestamp = map->timestamp;
747                         sx_sunlock_(&map->lock, file, line);
748                         vm_map_process_deferred();
749                         /*
750                          * If the map's timestamp does not change while the
751                          * map is unlocked, then the upgrade succeeds.
752                          */
753                         sx_xlock_(&map->lock, file, line);
754                         if (last_timestamp != map->timestamp) {
755                                 sx_xunlock_(&map->lock, file, line);
756                                 return (1);
757                         }
758                 }
759         }
760         map->timestamp++;
761         return (0);
762 }
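/*
 * Sketch of the usual caller pattern (illustrative only): because a failed
 * upgrade leaves the map unlocked, callers re-lock and restart their lookup
 * rather than trust earlier results:
 *
 *	vm_map_lock_read(map);
 *	...
 *	if (vm_map_lock_upgrade(map) != 0) {
 *		vm_map_lock(map);
 *		goto retry_lookup;	/* hypothetical label */
 *	}
 */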
763
764 void
765 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
766 {
767
768         if (map->system_map) {
769                 KASSERT((map->flags & MAP_REPLENISH) == 0,
770                     ("%s: MAP_REPLENISH leaked", __func__));
771                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
772         } else {
773                 VM_MAP_UNLOCK_CONSISTENT(map);
774                 sx_downgrade_(&map->lock, file, line);
775         }
776 }
777
778 /*
779  *      vm_map_locked:
780  *
781  *      Returns a non-zero value if the caller holds a write (exclusive) lock
782  *      on the specified map and the value "0" otherwise.
783  */
784 int
785 vm_map_locked(vm_map_t map)
786 {
787
788         if (map->system_map)
789                 return (mtx_owned(&map->system_mtx));
790         else
791                 return (sx_xlocked(&map->lock));
792 }
793
794 /*
795  *      _vm_map_unlock_and_wait:
796  *
797  *      Atomically releases the lock on the specified map and puts the calling
798  *      thread to sleep.  The calling thread will remain asleep until either
799  *      vm_map_wakeup() is performed on the map or the specified timeout is
800  *      exceeded.
801  *
802  *      WARNING!  This function does not perform deferred deallocations of
803  *      objects and map entries.  Therefore, the calling thread is expected to
804  *      reacquire the map lock after reawakening and later perform an ordinary
805  *      unlock operation, such as vm_map_unlock(), before completing its
806  *      operation on the map.
807  */
808 int
809 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
810 {
811
812         VM_MAP_UNLOCK_CONSISTENT(map);
813         mtx_lock(&map_sleep_mtx);
814         if (map->system_map) {
815                 KASSERT((map->flags & MAP_REPLENISH) == 0,
816                     ("%s: MAP_REPLENISH leaked", __func__));
817                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
818         } else {
819                 sx_xunlock_(&map->lock, file, line);
820         }
821         return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
822             timo));
823 }
824
825 /*
826  *      vm_map_wakeup:
827  *
828  *      Awaken any threads that have slept on the map using
829  *      vm_map_unlock_and_wait().
830  */
831 void
832 vm_map_wakeup(vm_map_t map)
833 {
834
835         /*
836          * Acquire and release map_sleep_mtx to prevent a wakeup()
837          * from being performed (and lost) between the map unlock
838          * and the msleep() in _vm_map_unlock_and_wait().
839          */
840         mtx_lock(&map_sleep_mtx);
841         mtx_unlock(&map_sleep_mtx);
842         wakeup(&map->root);
843 }
844
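/*
 * Map busy accounting: vm_map_busy() records, under the map lock, that the
 * map is in use by an operation that may drop that lock; vm_map_unbusy()
 * drops the record and wakes waiters once the count reaches zero; and
 * vm_map_wait_busy() lets a lock holder sleep until all busy references
 * have drained before modifying the map.
 */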
845 void
846 vm_map_busy(vm_map_t map)
847 {
848
849         VM_MAP_ASSERT_LOCKED(map);
850         map->busy++;
851 }
852
853 void
854 vm_map_unbusy(vm_map_t map)
855 {
856
857         VM_MAP_ASSERT_LOCKED(map);
858         KASSERT(map->busy, ("vm_map_unbusy: not busy"));
859         if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
860                 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
861                 wakeup(&map->busy);
862         }
863 }
864
865 void 
866 vm_map_wait_busy(vm_map_t map)
867 {
868
869         VM_MAP_ASSERT_LOCKED(map);
870         while (map->busy) {
871                 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
872                 if (map->system_map)
873                         msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
874                 else
875                         sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
876         }
877         map->timestamp++;
878 }
879
880 long
881 vmspace_resident_count(struct vmspace *vmspace)
882 {
883         return pmap_resident_count(vmspace_pmap(vmspace));
884 }
885
886 /*
887  * Initialize an existing vm_map structure
888  * such as that in the vmspace structure.
889  */
890 static void
891 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
892 {
893
894         map->header.eflags = MAP_ENTRY_HEADER;
895         map->needs_wakeup = FALSE;
896         map->system_map = 0;
897         map->pmap = pmap;
898         map->header.end = min;
899         map->header.start = max;
900         map->flags = 0;
901         map->header.left = map->header.right = &map->header;
902         map->root = NULL;
903         map->timestamp = 0;
904         map->busy = 0;
905         map->anon_loc = 0;
906 #ifdef DIAGNOSTIC
907         map->nupdates = 0;
908 #endif
909 }
910
911 void
912 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
913 {
914
915         _vm_map_init(map, pmap, min, max);
916         mtx_init(&map->system_mtx, "vm map (system)", NULL,
917             MTX_DEF | MTX_DUPOK);
918         sx_init(&map->lock, "vm map (user)");
919 }
920
921 /*
922  *      vm_map_entry_dispose:   [ internal use only ]
923  *
924  *      Inverse of vm_map_entry_create.
925  */
926 static void
927 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
928 {
929         uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
930 }
931
932 /*
933  *      vm_map_entry_create:    [ internal use only ]
934  *
935  *      Allocates a VM map entry for insertion.
936  *      No entry fields are filled in.
937  */
938 static vm_map_entry_t
939 vm_map_entry_create(vm_map_t map)
940 {
941         vm_map_entry_t new_entry;
942
943 #ifndef UMA_MD_SMALL_ALLOC
944         if (map == kernel_map) {
945                 VM_MAP_ASSERT_LOCKED(map);
946
947                 /*
948                  * A new slab of kernel map entries cannot be allocated at this
949                  * point because the kernel map has not yet been updated to
950                  * reflect the caller's request.  Therefore, we allocate a new
951                  * map entry, dipping into the reserve if necessary, and set a
952                  * flag indicating that the reserve must be replenished before
953                  * the map is unlocked.
954                  */
955                 new_entry = uma_zalloc(kmapentzone, M_NOWAIT | M_NOVM);
956                 if (new_entry == NULL) {
957                         new_entry = uma_zalloc(kmapentzone,
958                             M_NOWAIT | M_NOVM | M_USE_RESERVE);
959                         kernel_map->flags |= MAP_REPLENISH;
960                 }
961         } else
962 #endif
963         if (map->system_map) {
964                 new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
965         } else {
966                 new_entry = uma_zalloc(mapentzone, M_WAITOK);
967         }
968         KASSERT(new_entry != NULL,
969             ("vm_map_entry_create: kernel resources exhausted"));
970         return (new_entry);
971 }
972
973 /*
974  *      vm_map_entry_set_behavior:
975  *
976  *      Set the expected access behavior, either normal, random, or
977  *      sequential.
978  */
979 static inline void
980 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
981 {
982         entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
983             (behavior & MAP_ENTRY_BEHAV_MASK);
984 }
985
986 /*
987  *      vm_map_entry_max_free_{left,right}:
988  *
989  *      Compute the size of the largest free gap between two entries,
990  *      one the root of a tree and the other the ancestor of that root
991  *      that is the least or greatest ancestor found on the search path.
992  */
993 static inline vm_size_t
994 vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
995 {
996
997         return (root->left != left_ancestor ?
998             root->left->max_free : root->start - left_ancestor->end);
999 }
1000
1001 static inline vm_size_t
1002 vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
1003 {
1004
1005         return (root->right != right_ancestor ?
1006             root->right->max_free : right_ancestor->start - root->end);
1007 }
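/*
 * Worked example (illustrative): when root has no left child, root->left is
 * a thread to root's predecessor, so vm_map_entry_max_free_left(root, llist)
 * reduces to root->start - llist->end, the size of the gap immediately
 * preceding root.  Otherwise the cached subtree maximum,
 * root->left->max_free, already covers every gap on that side.
 */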
1008
1009 /*
1010  *      vm_map_entry_{pred,succ}:
1011  *
1012  *      Find the {predecessor, successor} of the entry by taking one step
1013  *      in the appropriate direction and backtracking as much as necessary.
1014  *      vm_map_entry_succ is defined in vm_map.h.
1015  */
1016 static inline vm_map_entry_t
1017 vm_map_entry_pred(vm_map_entry_t entry)
1018 {
1019         vm_map_entry_t prior;
1020
1021         prior = entry->left;
1022         if (prior->right->start < entry->start) {
1023                 do
1024                         prior = prior->right;
1025                 while (prior->right != entry);
1026         }
1027         return (prior);
1028 }
1029
1030 static inline vm_size_t
1031 vm_size_max(vm_size_t a, vm_size_t b)
1032 {
1033
1034         return (a > b ? a : b);
1035 }
1036
1037 #define SPLAY_LEFT_STEP(root, y, llist, rlist, test) do {               \
1038         vm_map_entry_t z;                                               \
1039         vm_size_t max_free;                                             \
1040                                                                         \
1041         /*                                                              \
1042          * Infer root->right->max_free == root->max_free when           \
1043          * y->max_free < root->max_free || root->max_free == 0.         \
1044          * Otherwise, look right to find it.                            \
1045          */                                                             \
1046         y = root->left;                                                 \
1047         max_free = root->max_free;                                      \
1048         KASSERT(max_free == vm_size_max(                                \
1049             vm_map_entry_max_free_left(root, llist),                    \
1050             vm_map_entry_max_free_right(root, rlist)),                  \
1051             ("%s: max_free invariant fails", __func__));                \
1052         if (max_free - 1 < vm_map_entry_max_free_left(root, llist))     \
1053                 max_free = vm_map_entry_max_free_right(root, rlist);    \
1054         if (y != llist && (test)) {                                     \
1055                 /* Rotate right and make y root. */                     \
1056                 z = y->right;                                           \
1057                 if (z != root) {                                        \
1058                         root->left = z;                                 \
1059                         y->right = root;                                \
1060                         if (max_free < y->max_free)                     \
1061                             root->max_free = max_free =                 \
1062                             vm_size_max(max_free, z->max_free);         \
1063                 } else if (max_free < y->max_free)                      \
1064                         root->max_free = max_free =                     \
1065                             vm_size_max(max_free, root->start - y->end);\
1066                 root = y;                                               \
1067                 y = root->left;                                         \
1068         }                                                               \
1069         /* Copy right->max_free.  Put root on rlist. */                 \
1070         root->max_free = max_free;                                      \
1071         KASSERT(max_free == vm_map_entry_max_free_right(root, rlist),   \
1072             ("%s: max_free not copied from right", __func__));          \
1073         root->left = rlist;                                             \
1074         rlist = root;                                                   \
1075         root = y != llist ? y : NULL;                                   \
1076 } while (0)
1077
1078 #define SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do {              \
1079         vm_map_entry_t z;                                               \
1080         vm_size_t max_free;                                             \
1081                                                                         \
1082         /*                                                              \
1083          * Infer root->left->max_free == root->max_free when            \
1084          * y->max_free < root->max_free || root->max_free == 0.         \
1085          * Otherwise, look left to find it.                             \
1086          */                                                             \
1087         y = root->right;                                                \
1088         max_free = root->max_free;                                      \
1089         KASSERT(max_free == vm_size_max(                                \
1090             vm_map_entry_max_free_left(root, llist),                    \
1091             vm_map_entry_max_free_right(root, rlist)),                  \
1092             ("%s: max_free invariant fails", __func__));                \
1093         if (max_free - 1 < vm_map_entry_max_free_right(root, rlist))    \
1094                 max_free = vm_map_entry_max_free_left(root, llist);     \
1095         if (y != rlist && (test)) {                                     \
1096                 /* Rotate left and make y root. */                      \
1097                 z = y->left;                                            \
1098                 if (z != root) {                                        \
1099                         root->right = z;                                \
1100                         y->left = root;                                 \
1101                         if (max_free < y->max_free)                     \
1102                             root->max_free = max_free =                 \
1103                             vm_size_max(max_free, z->max_free);         \
1104                 } else if (max_free < y->max_free)                      \
1105                         root->max_free = max_free =                     \
1106                             vm_size_max(max_free, y->start - root->end);\
1107                 root = y;                                               \
1108                 y = root->right;                                        \
1109         }                                                               \
1110         /* Copy left->max_free.  Put root on llist. */                  \
1111         root->max_free = max_free;                                      \
1112         KASSERT(max_free == vm_map_entry_max_free_left(root, llist),    \
1113             ("%s: max_free not copied from left", __func__));           \
1114         root->right = llist;                                            \
1115         llist = root;                                                   \
1116         root = y != rlist ? y : NULL;                                   \
1117 } while (0)
1118
1119 /*
1120  * Walk down the tree until we find addr or a gap where addr would go, breaking
1121  * off left and right subtrees of nodes less than, or greater than addr.  Treat
1122  * subtrees with root->max_free < length as empty trees.  llist and rlist are
1123  * the two sides in reverse order (bottom-up), with llist linked by the right
1124  * pointer and rlist linked by the left pointer in the vm_map_entry, and both
1125  * lists terminated by &map->header.  This function, and the subsequent call to
1126  * vm_map_splay_merge_{left,right,pred,succ}, rely on the start and end address
1127  * values in &map->header.
1128  */
1129 static __always_inline vm_map_entry_t
1130 vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
1131     vm_map_entry_t *llist, vm_map_entry_t *rlist)
1132 {
1133         vm_map_entry_t left, right, root, y;
1134
1135         left = right = &map->header;
1136         root = map->root;
1137         while (root != NULL && root->max_free >= length) {
1138                 KASSERT(left->end <= root->start &&
1139                     root->end <= right->start,
1140                     ("%s: root not within tree bounds", __func__));
1141                 if (addr < root->start) {
1142                         SPLAY_LEFT_STEP(root, y, left, right,
1143                             y->max_free >= length && addr < y->start);
1144                 } else if (addr >= root->end) {
1145                         SPLAY_RIGHT_STEP(root, y, left, right,
1146                             y->max_free >= length && addr >= y->end);
1147                 } else
1148                         break;
1149         }
1150         *llist = left;
1151         *rlist = right;
1152         return (root);
1153 }
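/*
 * Post-condition sketch (illustrative): after
 * vm_map_splay_split(map, addr, 0, &llist, &rlist), either the returned
 * root contains addr, or NULL is returned and llist/rlist hold the entries
 * encountered below and above addr, linked bottom-up as described above and
 * terminated by &map->header.  A nonzero length additionally prunes
 * subtrees whose max_free is too small to satisfy an allocation of that
 * size.
 */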
1154
1155 static __always_inline void
1156 vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *rlist)
1157 {
1158         vm_map_entry_t hi, right, y;
1159
1160         right = *rlist;
1161         hi = root->right == right ? NULL : root->right;
1162         if (hi == NULL)
1163                 return;
1164         do
1165                 SPLAY_LEFT_STEP(hi, y, root, right, true);
1166         while (hi != NULL);
1167         *rlist = right;
1168 }
1169
1170 static __always_inline void
1171 vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *llist)
1172 {
1173         vm_map_entry_t left, lo, y;
1174
1175         left = *llist;
1176         lo = root->left == left ? NULL : root->left;
1177         if (lo == NULL)
1178                 return;
1179         do
1180                 SPLAY_RIGHT_STEP(lo, y, left, root, true);
1181         while (lo != NULL);
1182         *llist = left;
1183 }
1184
1185 static inline void
1186 vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
1187 {
1188         vm_map_entry_t tmp;
1189
1190         tmp = *b;
1191         *b = *a;
1192         *a = tmp;
1193 }
1194
1195 /*
1196  * Walk back up the two spines, flip the pointers and set max_free.  The
1197  * subtrees of the root go at the bottom of llist and rlist.
1198  */
1199 static vm_size_t
1200 vm_map_splay_merge_left_walk(vm_map_entry_t header, vm_map_entry_t root,
1201     vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist)
1202 {
1203         do {
1204                 /*
1205                  * The max_free values of the children of llist are in
1206                  * llist->max_free and max_free.  Update with the
1207                  * max value.
1208                  */
1209                 llist->max_free = max_free =
1210                     vm_size_max(llist->max_free, max_free);
1211                 vm_map_entry_swap(&llist->right, &tail);
1212                 vm_map_entry_swap(&tail, &llist);
1213         } while (llist != header);
1214         root->left = tail;
1215         return (max_free);
1216 }
1217
1218 /*
1219  * When llist is known to be the predecessor of root.
1220  */
1221 static inline vm_size_t
1222 vm_map_splay_merge_pred(vm_map_entry_t header, vm_map_entry_t root,
1223     vm_map_entry_t llist)
1224 {
1225         vm_size_t max_free;
1226
1227         max_free = root->start - llist->end;
1228         if (llist != header) {
1229                 max_free = vm_map_splay_merge_left_walk(header, root,
1230                     root, max_free, llist);
1231         } else {
1232                 root->left = header;
1233                 header->right = root;
1234         }
1235         return (max_free);
1236 }
1237
1238 /*
1239  * When llist may or may not be the predecessor of root.
1240  */
1241 static inline vm_size_t
1242 vm_map_splay_merge_left(vm_map_entry_t header, vm_map_entry_t root,
1243     vm_map_entry_t llist)
1244 {
1245         vm_size_t max_free;
1246
1247         max_free = vm_map_entry_max_free_left(root, llist);
1248         if (llist != header) {
1249                 max_free = vm_map_splay_merge_left_walk(header, root,
1250                     root->left == llist ? root : root->left,
1251                     max_free, llist);
1252         }
1253         return (max_free);
1254 }
1255
1256 static vm_size_t
1257 vm_map_splay_merge_right_walk(vm_map_entry_t header, vm_map_entry_t root,
1258     vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist)
1259 {
1260         do {
1261                 /*
1262                  * The max_free values of the children of rlist are in
1263                  * rlist->max_free and max_free.  Update with the
1264                  * max value.
1265                  */
1266                 rlist->max_free = max_free =
1267                     vm_size_max(rlist->max_free, max_free);
1268                 vm_map_entry_swap(&rlist->left, &tail);
1269                 vm_map_entry_swap(&tail, &rlist);
1270         } while (rlist != header);
1271         root->right = tail;
1272         return (max_free);
1273 }
1274
1275 /*
1276  * When rlist is known to be the successor of root.
1277  */
1278 static inline vm_size_t
1279 vm_map_splay_merge_succ(vm_map_entry_t header, vm_map_entry_t root,
1280     vm_map_entry_t rlist)
1281 {
1282         vm_size_t max_free;
1283
1284         max_free = rlist->start - root->end;
1285         if (rlist != header) {
1286                 max_free = vm_map_splay_merge_right_walk(header, root,
1287                     root, max_free, rlist);
1288         } else {
1289                 root->right = header;
1290                 header->left = root;
1291         }
1292         return (max_free);
1293 }
1294
1295 /*
1296  * When rlist may or may not be the successor of root.
1297  */
1298 static inline vm_size_t
1299 vm_map_splay_merge_right(vm_map_entry_t header, vm_map_entry_t root,
1300     vm_map_entry_t rlist)
1301 {
1302         vm_size_t max_free;
1303
1304         max_free = vm_map_entry_max_free_right(root, rlist);
1305         if (rlist != header) {
1306                 max_free = vm_map_splay_merge_right_walk(header, root,
1307                     root->right == rlist ? root : root->right,
1308                     max_free, rlist);
1309         }
1310         return (max_free);
1311 }
1312
1313 /*
1314  *      vm_map_splay:
1315  *
1316  *      The Sleator and Tarjan top-down splay algorithm with the
1317  *      following variation.  Max_free must be computed bottom-up, so
1318  *      on the downward pass, maintain the left and right spines in
1319  *      reverse order.  Then, make a second pass up each side to fix
1320  *      the pointers and compute max_free.  The time bound is O(log n)
1321  *      amortized.
1322  *
1323  *      The tree is threaded, which means that there are no null pointers.
1324  *      When a node has no left child, its left pointer points to its
1325  *      predecessor, which is the last ancestor on the search path from the root
1326  *      where the search branched right.  Likewise, when a node has no right
1327  *      child, its right pointer points to its successor.  The map header node
1328  *      is the predecessor of the first map entry, and the successor of the
1329  *      last.
1330  *
1331  *      The new root is the vm_map_entry containing "addr", or else an
1332  *      adjacent entry (lower if possible) if addr is not in the tree.
1333  *
1334  *      The map must be locked, and leaves it so.
1335  *
1336  *      Returns: the new root.
1337  */
1338 static vm_map_entry_t
1339 vm_map_splay(vm_map_t map, vm_offset_t addr)
1340 {
1341         vm_map_entry_t header, llist, rlist, root;
1342         vm_size_t max_free_left, max_free_right;
1343
1344         header = &map->header;
1345         root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
1346         if (root != NULL) {
1347                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1348                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1349         } else if (llist != header) {
1350                 /*
1351                  * Recover the greatest node in the left
1352                  * subtree and make it the root.
1353                  */
1354                 root = llist;
1355                 llist = root->right;
1356                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1357                 max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1358         } else if (rlist != header) {
1359                 /*
1360                  * Recover the least node in the right
1361                  * subtree and make it the root.
1362                  */
1363                 root = rlist;
1364                 rlist = root->left;
1365                 max_free_left = vm_map_splay_merge_pred(header, root, llist);
1366                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1367         } else {
1368                 /* There is no root. */
1369                 return (NULL);
1370         }
1371         root->max_free = vm_size_max(max_free_left, max_free_right);
1372         map->root = root;
1373         VM_MAP_ASSERT_CONSISTENT(map);
1374         return (root);
1375 }
1376
1377 /*
1378  *      vm_map_entry_{un,}link:
1379  *
1380  *      Insert/remove entries from maps.  On linking, if new entry clips
1381  *      existing entry, trim existing entry to avoid overlap, and manage
1382  *      offsets.  On unlinking, merge disappearing entry with neighbor, if
1383  *      called for, and manage offsets.  Callers should not modify fields in
1384  *      entries already mapped.
1385  */
1386 static void
1387 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
1388 {
1389         vm_map_entry_t header, llist, rlist, root;
1390         vm_size_t max_free_left, max_free_right;
1391
1392         CTR3(KTR_VM,
1393             "vm_map_entry_link: map %p, nentries %d, entry %p", map,
1394             map->nentries, entry);
1395         VM_MAP_ASSERT_LOCKED(map);
1396         map->nentries++;
1397         header = &map->header;
1398         root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1399         if (root == NULL) {
1400                 /*
1401                  * The new entry does not overlap any existing entry in the
1402                  * map, so it becomes the new root of the map tree.
1403                  */
1404                 max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1405                 max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1406         } else if (entry->start == root->start) {
1407                 /*
1408                  * The new entry is a clone of root, with only the end field
1409                  * changed.  The root entry will be shrunk to abut the new
1410                  * entry, and will be the right child of the new root entry in
1411                  * the modified map.
1412                  */
1413                 KASSERT(entry->end < root->end,
1414                     ("%s: clip_start not within entry", __func__));
1415                 vm_map_splay_findprev(root, &llist);
1416                 if ((root->eflags & (MAP_ENTRY_STACK_GAP_DN |
1417                     MAP_ENTRY_STACK_GAP_UP)) == 0)
1418                         root->offset += entry->end - root->start;
1419                 root->start = entry->end;
1420                 max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1421                 max_free_right = root->max_free = vm_size_max(
1422                     vm_map_splay_merge_pred(entry, root, entry),
1423                     vm_map_splay_merge_right(header, root, rlist));
1424         } else {
1425                 /*
1426                  * The new entry is a clone of root, with only the start field
1427                  * changed.  The root entry will be shrunk to abut the new
1428                  * entry, and will be the left child of the new root entry in
1429                  * the modified map.
1430                  */
1431                 KASSERT(entry->end == root->end,
1432                     ("%s: clip_start not within entry", __func__));
1433                 vm_map_splay_findnext(root, &rlist);
1434                 if ((entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
1435                     MAP_ENTRY_STACK_GAP_UP)) == 0)
1436                         entry->offset += entry->start - root->start;
1437                 root->end = entry->start;
1438                 max_free_left = root->max_free = vm_size_max(
1439                     vm_map_splay_merge_left(header, root, llist),
1440                     vm_map_splay_merge_succ(entry, root, entry));
1441                 max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1442         }
1443         entry->max_free = vm_size_max(max_free_left, max_free_right);
1444         map->root = entry;
1445         VM_MAP_ASSERT_CONSISTENT(map);
1446 }
1447
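/*
 * Illustrative sketch (not compiled): how the clipping behaviour of
 * vm_map_entry_link() is exercised.  A, B and C are placeholder
 * addresses.  If an existing entry covers [A, C) and a clone is linked
 * for the front portion [A, B), the existing entry is trimmed to
 * [B, C) and, unless it is a stack gap entry, its offset is advanced
 * by (B - A):
 *
 *	new_entry = vm_map_entry_clone(map, entry);
 *	new_entry->end = B;
 *	vm_map_entry_link(map, new_entry);
 *
 * This is exactly the pattern relied upon by vm_map_clip_start() below.
 */
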
1448 enum unlink_merge_type {
1449         UNLINK_MERGE_NONE,
1450         UNLINK_MERGE_NEXT
1451 };
1452
1453 static void
1454 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
1455     enum unlink_merge_type op)
1456 {
1457         vm_map_entry_t header, llist, rlist, root;
1458         vm_size_t max_free_left, max_free_right;
1459
1460         VM_MAP_ASSERT_LOCKED(map);
1461         header = &map->header;
1462         root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1463         KASSERT(root != NULL,
1464             ("vm_map_entry_unlink: unlink object not mapped"));
1465
1466         vm_map_splay_findprev(root, &llist);
1467         vm_map_splay_findnext(root, &rlist);
1468         if (op == UNLINK_MERGE_NEXT) {
1469                 rlist->start = root->start;
1470                 MPASS((rlist->eflags & (MAP_ENTRY_STACK_GAP_DN |
1471                     MAP_ENTRY_STACK_GAP_UP)) == 0);
1472                 rlist->offset = root->offset;
1473         }
1474         if (llist != header) {
1475                 root = llist;
1476                 llist = root->right;
1477                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1478                 max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1479         } else if (rlist != header) {
1480                 root = rlist;
1481                 rlist = root->left;
1482                 max_free_left = vm_map_splay_merge_pred(header, root, llist);
1483                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1484         } else {
1485                 header->left = header->right = header;
1486                 root = NULL;
1487         }
1488         if (root != NULL)
1489                 root->max_free = vm_size_max(max_free_left, max_free_right);
1490         map->root = root;
1491         VM_MAP_ASSERT_CONSISTENT(map);
1492         map->nentries--;
1493         CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1494             map->nentries, entry);
1495 }
1496
1497 /*
1498  *      vm_map_entry_resize:
1499  *
1500  *      Resize a vm_map_entry, recompute the amount of free space that
1501  *      follows it and propagate that value up the tree.
1502  *
1503  *      The map must be locked, and is left locked on return.
1504  */
1505 static void
1506 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
1507 {
1508         vm_map_entry_t header, llist, rlist, root;
1509
1510         VM_MAP_ASSERT_LOCKED(map);
1511         header = &map->header;
1512         root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1513         KASSERT(root != NULL, ("%s: resize object not mapped", __func__));
1514         vm_map_splay_findnext(root, &rlist);
1515         entry->end += grow_amount;
1516         root->max_free = vm_size_max(
1517             vm_map_splay_merge_left(header, root, llist),
1518             vm_map_splay_merge_succ(header, root, rlist));
1519         map->root = root;
1520         VM_MAP_ASSERT_CONSISTENT(map);
1521         CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
1522             __func__, map, map->nentries, entry);
1523 }
1524
1525 /*
1526  *      vm_map_lookup_entry:    [ internal use only ]
1527  *
1528  *      Finds the map entry containing (or
1529  *      immediately preceding) the specified address
1530  *      in the given map; the entry is returned
1531  *      in the "entry" parameter.  The boolean
1532  *      result indicates whether the address is
1533  *      actually contained in the map.
1534  */
1535 boolean_t
1536 vm_map_lookup_entry(
1537         vm_map_t map,
1538         vm_offset_t address,
1539         vm_map_entry_t *entry)  /* OUT */
1540 {
1541         vm_map_entry_t cur, header, lbound, ubound;
1542         boolean_t locked;
1543
1544         /*
1545          * If the map is empty, then the map entry immediately preceding
1546          * "address" is the map's header.
1547          */
1548         header = &map->header;
1549         cur = map->root;
1550         if (cur == NULL) {
1551                 *entry = header;
1552                 return (FALSE);
1553         }
1554         if (address >= cur->start && cur->end > address) {
1555                 *entry = cur;
1556                 return (TRUE);
1557         }
1558         if ((locked = vm_map_locked(map)) ||
1559             sx_try_upgrade(&map->lock)) {
1560                 /*
1561                  * Splay requires a write lock on the map.  However, it only
1562                  * restructures the binary search tree; it does not otherwise
1563                  * change the map.  Thus, the map's timestamp need not change
1564                  * on a temporary upgrade.
1565                  */
1566                 cur = vm_map_splay(map, address);
1567                 if (!locked) {
1568                         VM_MAP_UNLOCK_CONSISTENT(map);
1569                         sx_downgrade(&map->lock);
1570                 }
1571
1572                 /*
1573                  * If "address" is contained within a map entry, the new root
1574                  * is that map entry.  Otherwise, the new root is a map entry
1575                  * immediately before or after "address".
1576                  */
1577                 if (address < cur->start) {
1578                         *entry = header;
1579                         return (FALSE);
1580                 }
1581                 *entry = cur;
1582                 return (address < cur->end);
1583         }
1584         /*
1585          * Since the map is only locked for read access, perform a
1586          * standard binary search tree lookup for "address".
1587          */
1588         lbound = ubound = header;
1589         for (;;) {
1590                 if (address < cur->start) {
1591                         ubound = cur;
1592                         cur = cur->left;
1593                         if (cur == lbound)
1594                                 break;
1595                 } else if (cur->end <= address) {
1596                         lbound = cur;
1597                         cur = cur->right;
1598                         if (cur == ubound)
1599                                 break;
1600                 } else {
1601                         *entry = cur;
1602                         return (TRUE);
1603                 }
1604         }
1605         *entry = lbound;
1606         return (FALSE);
1607 }
1608
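/*
 * Illustrative sketch (not compiled): a typical caller of
 * vm_map_lookup_entry().  "map" and "addr" are placeholders; the map
 * must be at least read-locked around the call:
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		"addr" lies within [entry->start, entry->end)
 *	} else {
 *		"entry" is the entry preceding "addr", or &map->header
 *		if no such entry exists
 *	}
 */
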
1609 /*
1610  * vm_map_insert1() is identical to vm_map_insert() except that it
1611  * returns the newly inserted map entry in '*res'.  In case the new
1612  * entry is coalesced with a neighbor or an existing entry is
1613  * resized, that entry is returned.  In any case, the returned entry
1614  * covers the specified address range.
1615  */
1616 static int
1617 vm_map_insert1(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1618     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow,
1619     vm_map_entry_t *res)
1620 {
1621         vm_map_entry_t new_entry, next_entry, prev_entry;
1622         struct ucred *cred;
1623         vm_eflags_t protoeflags;
1624         vm_inherit_t inheritance;
1625         u_long bdry;
1626         u_int bidx;
1627
1628         VM_MAP_ASSERT_LOCKED(map);
1629         KASSERT(object != kernel_object ||
1630             (cow & MAP_COPY_ON_WRITE) == 0,
1631             ("vm_map_insert: kernel object and COW"));
1632         KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0 ||
1633             (cow & MAP_SPLIT_BOUNDARY_MASK) != 0,
1634             ("vm_map_insert: paradoxical MAP_NOFAULT request, obj %p cow %#x",
1635             object, cow));
1636         KASSERT((prot & ~max) == 0,
1637             ("prot %#x is not subset of max_prot %#x", prot, max));
1638
1639         /*
1640          * Check that the start and end points are not bogus.
1641          */
1642         if (start == end || !vm_map_range_valid(map, start, end))
1643                 return (KERN_INVALID_ADDRESS);
1644
1645         if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
1646             VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE))
1647                 return (KERN_PROTECTION_FAILURE);
1648
1649         /*
1650          * Find the entry prior to the proposed starting address; if it's part
1651          * of an existing entry, this range is bogus.
1652          */
1653         if (vm_map_lookup_entry(map, start, &prev_entry))
1654                 return (KERN_NO_SPACE);
1655
1656         /*
1657          * Assert that the next entry doesn't overlap the end point.
1658          */
1659         next_entry = vm_map_entry_succ(prev_entry);
1660         if (next_entry->start < end)
1661                 return (KERN_NO_SPACE);
1662
1663         if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
1664             max != VM_PROT_NONE))
1665                 return (KERN_INVALID_ARGUMENT);
1666
1667         protoeflags = 0;
1668         if (cow & MAP_COPY_ON_WRITE)
1669                 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
1670         if (cow & MAP_NOFAULT)
1671                 protoeflags |= MAP_ENTRY_NOFAULT;
1672         if (cow & MAP_DISABLE_SYNCER)
1673                 protoeflags |= MAP_ENTRY_NOSYNC;
1674         if (cow & MAP_DISABLE_COREDUMP)
1675                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
1676         if (cow & MAP_STACK_GROWS_DOWN)
1677                 protoeflags |= MAP_ENTRY_GROWS_DOWN;
1678         if (cow & MAP_STACK_GROWS_UP)
1679                 protoeflags |= MAP_ENTRY_GROWS_UP;
1680         if (cow & MAP_WRITECOUNT)
1681                 protoeflags |= MAP_ENTRY_WRITECNT;
1682         if (cow & MAP_VN_EXEC)
1683                 protoeflags |= MAP_ENTRY_VN_EXEC;
1684         if ((cow & MAP_CREATE_GUARD) != 0)
1685                 protoeflags |= MAP_ENTRY_GUARD;
1686         if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
1687                 protoeflags |= MAP_ENTRY_STACK_GAP_DN;
1688         if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
1689                 protoeflags |= MAP_ENTRY_STACK_GAP_UP;
1690         if (cow & MAP_INHERIT_SHARE)
1691                 inheritance = VM_INHERIT_SHARE;
1692         else
1693                 inheritance = VM_INHERIT_DEFAULT;
1694         if ((cow & MAP_SPLIT_BOUNDARY_MASK) != 0) {
1695                 /* This magically ignores index 0, for usual page size. */
1696                 bidx = (cow & MAP_SPLIT_BOUNDARY_MASK) >>
1697                     MAP_SPLIT_BOUNDARY_SHIFT;
1698                 if (bidx >= MAXPAGESIZES)
1699                         return (KERN_INVALID_ARGUMENT);
1700                 bdry = pagesizes[bidx] - 1;
1701                 if ((start & bdry) != 0 || (end & bdry) != 0)
1702                         return (KERN_INVALID_ARGUMENT);
1703                 protoeflags |= bidx << MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
1704         }
1705
1706         cred = NULL;
1707         if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
1708                 goto charged;
1709         if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1710             ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1711                 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1712                         return (KERN_RESOURCE_SHORTAGE);
1713                 KASSERT(object == NULL ||
1714                     (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
1715                     object->cred == NULL,
1716                     ("overcommit: vm_map_insert o %p", object));
1717                 cred = curthread->td_ucred;
1718         }
1719
1720 charged:
1721         /* Expand the kernel pmap, if necessary. */
1722         if (map == kernel_map && end > kernel_vm_end)
1723                 pmap_growkernel(end);
1724         if (object != NULL) {
1725                 /*
1726                  * OBJ_ONEMAPPING must be cleared unless this mapping
1727                  * is trivially proven to be the only mapping for any
1728                  * of the object's pages.  (Object granularity
1729                  * reference counting is insufficient to recognize
1730                  * aliases with precision.)
1731                  */
1732                 if ((object->flags & OBJ_ANON) != 0) {
1733                         VM_OBJECT_WLOCK(object);
1734                         if (object->ref_count > 1 || object->shadow_count != 0)
1735                                 vm_object_clear_flag(object, OBJ_ONEMAPPING);
1736                         VM_OBJECT_WUNLOCK(object);
1737                 }
1738         } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
1739             protoeflags &&
1740             (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
1741             MAP_VN_EXEC)) == 0 &&
1742             prev_entry->end == start && (prev_entry->cred == cred ||
1743             (prev_entry->object.vm_object != NULL &&
1744             prev_entry->object.vm_object->cred == cred)) &&
1745             vm_object_coalesce(prev_entry->object.vm_object,
1746             prev_entry->offset,
1747             (vm_size_t)(prev_entry->end - prev_entry->start),
1748             (vm_size_t)(end - prev_entry->end), cred != NULL &&
1749             (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
1750                 /*
1751                  * We were able to extend the object.  Determine if we
1752                  * can extend the previous map entry to include the
1753                  * new range as well.
1754                  */
1755                 if (prev_entry->inheritance == inheritance &&
1756                     prev_entry->protection == prot &&
1757                     prev_entry->max_protection == max &&
1758                     prev_entry->wired_count == 0) {
1759                         KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
1760                             0, ("prev_entry %p has incoherent wiring",
1761                             prev_entry));
1762                         if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
1763                                 map->size += end - prev_entry->end;
1764                         vm_map_entry_resize(map, prev_entry,
1765                             end - prev_entry->end);
1766                         *res = vm_map_try_merge_entries(map, prev_entry,
1767                             next_entry);
1768                         return (KERN_SUCCESS);
1769                 }
1770
1771                 /*
1772                  * If we can extend the object but cannot extend the
1773                  * map entry, we have to create a new map entry.  We
1774                  * must bump the ref count on the extended object to
1775                  * account for it.  object may be NULL.
1776                  */
1777                 object = prev_entry->object.vm_object;
1778                 offset = prev_entry->offset +
1779                     (prev_entry->end - prev_entry->start);
1780                 vm_object_reference(object);
1781                 if (cred != NULL && object != NULL && object->cred != NULL &&
1782                     !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1783                         /* Object already accounts for this uid. */
1784                         cred = NULL;
1785                 }
1786         }
1787         if (cred != NULL)
1788                 crhold(cred);
1789
1790         /*
1791          * Create a new entry
1792          */
1793         new_entry = vm_map_entry_create(map);
1794         new_entry->start = start;
1795         new_entry->end = end;
1796         new_entry->cred = NULL;
1797
1798         new_entry->eflags = protoeflags;
1799         new_entry->object.vm_object = object;
1800         new_entry->offset = offset;
1801
1802         new_entry->inheritance = inheritance;
1803         new_entry->protection = prot;
1804         new_entry->max_protection = max;
1805         new_entry->wired_count = 0;
1806         new_entry->wiring_thread = NULL;
1807         new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1808         new_entry->next_read = start;
1809
1810         KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1811             ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
1812         new_entry->cred = cred;
1813
1814         /*
1815          * Insert the new entry into the list
1816          */
1817         vm_map_entry_link(map, new_entry);
1818         if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
1819                 map->size += new_entry->end - new_entry->start;
1820
1821         /*
1822          * Try to coalesce the new entry with both the previous and next
1823          * entries in the list.  Previously, we only attempted to coalesce
1824          * with the previous entry when object is NULL.  Here, we handle the
1825          * other cases, which are less common.
1826          */
1827         vm_map_try_merge_entries(map, prev_entry, new_entry);
1828         *res = vm_map_try_merge_entries(map, new_entry, next_entry);
1829
1830         if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
1831                 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
1832                     end - start, cow & MAP_PREFAULT_PARTIAL);
1833         }
1834
1835         return (KERN_SUCCESS);
1836 }
1837
1838 /*
1839  *      vm_map_insert:
1840  *
1841  *      Inserts the given VM object into the target map at the
1842  *      specified address range.
1843  *
1844  *      Requires that the map be locked, and leaves it so.
1845  *
1846  *      If object is non-NULL, ref count must be bumped by caller
1847  *      prior to making call to account for the new entry.
1848  */
1849 int
1850 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1851     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
1852 {
1853         vm_map_entry_t res;
1854
1855         return (vm_map_insert1(map, object, offset, start, end, prot, max,
1856             cow, &res));
1857 }
1858
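/*
 * Illustrative sketch (not compiled): a minimal vm_map_insert() call.
 * All names are placeholders; the caller holds the map lock and, per
 * the comment above, has already referenced the non-NULL object:
 *
 *	vm_object_reference(object);
 *	vm_map_lock(map);
 *	rv = vm_map_insert(map, object, 0, start, start + size,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0);
 *	vm_map_unlock(map);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 */
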
1859 /*
1860  *      vm_map_findspace:
1861  *
1862  *      Find the first fit (lowest VM address) for "length" free bytes
1863  *      beginning at address >= start in the given map.
1864  *
1865  *      In a vm_map_entry, "max_free" is the maximum amount of
1866  *      contiguous free space between an entry in its subtree and a
1867  *      neighbor of that entry.  This allows finding a free region in
1868  *      one path down the tree, so the search takes O(log n)
1869  *      amortized time with splay trees.
1870  *
1871  *      The map must be locked, and is left locked on return.
1872  *
1873  *      Returns: starting address if sufficient space,
1874  *               vm_map_max(map)-length+1 if insufficient space.
1875  */
1876 vm_offset_t
1877 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
1878 {
1879         vm_map_entry_t header, llist, rlist, root, y;
1880         vm_size_t left_length, max_free_left, max_free_right;
1881         vm_offset_t gap_end;
1882
1883         VM_MAP_ASSERT_LOCKED(map);
1884
1885         /*
1886          * Request must fit within min/max VM address and must avoid
1887          * address wrap.
1888          */
1889         start = MAX(start, vm_map_min(map));
1890         if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
1891                 return (vm_map_max(map) - length + 1);
1892
1893         /* Empty tree means wide open address space. */
1894         if (map->root == NULL)
1895                 return (start);
1896
1897         /*
1898          * After splay_split, if start is within an entry, push it to the start
1899          * of the following gap.  If rlist is at the end of the gap containing
1900          * start, save the end of that gap in gap_end to see if the gap is big
1901          * enough; otherwise set gap_end to start skip gap-checking and move
1902          * directly to a search of the right subtree.
1903          */
1904         header = &map->header;
1905         root = vm_map_splay_split(map, start, length, &llist, &rlist);
1906         gap_end = rlist->start;
1907         if (root != NULL) {
1908                 start = root->end;
1909                 if (root->right != rlist)
1910                         gap_end = start;
1911                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1912                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1913         } else if (rlist != header) {
1914                 root = rlist;
1915                 rlist = root->left;
1916                 max_free_left = vm_map_splay_merge_pred(header, root, llist);
1917                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1918         } else {
1919                 root = llist;
1920                 llist = root->right;
1921                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1922                 max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1923         }
1924         root->max_free = vm_size_max(max_free_left, max_free_right);
1925         map->root = root;
1926         VM_MAP_ASSERT_CONSISTENT(map);
1927         if (length <= gap_end - start)
1928                 return (start);
1929
1930         /* With max_free, can immediately tell if no solution. */
1931         if (root->right == header || length > root->right->max_free)
1932                 return (vm_map_max(map) - length + 1);
1933
1934         /*
1935          * Splay for the least large-enough gap in the right subtree.
1936          */
1937         llist = rlist = header;
1938         for (left_length = 0;;
1939             left_length = vm_map_entry_max_free_left(root, llist)) {
1940                 if (length <= left_length)
1941                         SPLAY_LEFT_STEP(root, y, llist, rlist,
1942                             length <= vm_map_entry_max_free_left(y, llist));
1943                 else
1944                         SPLAY_RIGHT_STEP(root, y, llist, rlist,
1945                             length > vm_map_entry_max_free_left(y, root));
1946                 if (root == NULL)
1947                         break;
1948         }
1949         root = llist;
1950         llist = root->right;
1951         max_free_left = vm_map_splay_merge_left(header, root, llist);
1952         if (rlist == header) {
1953                 root->max_free = vm_size_max(max_free_left,
1954                     vm_map_splay_merge_succ(header, root, rlist));
1955         } else {
1956                 y = rlist;
1957                 rlist = y->left;
1958                 y->max_free = vm_size_max(
1959                     vm_map_splay_merge_pred(root, y, root),
1960                     vm_map_splay_merge_right(header, y, rlist));
1961                 root->max_free = vm_size_max(max_free_left, y->max_free);
1962         }
1963         map->root = root;
1964         VM_MAP_ASSERT_CONSISTENT(map);
1965         return (root->end);
1966 }
1967
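/*
 * Illustrative sketch (not compiled): how callers of vm_map_findspace()
 * distinguish success from the failure sentinel, mirroring the checks
 * in vm_map_find() below.  Names are placeholders; the map is locked:
 *
 *	addr = vm_map_findspace(map, minaddr, length);
 *	if (addr + length > vm_map_max(map))
 *		no gap of "length" bytes exists at or above "minaddr"
 */
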
1968 int
1969 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1970     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1971     vm_prot_t max, int cow)
1972 {
1973         vm_offset_t end;
1974         int result;
1975
1976         end = start + length;
1977         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1978             object == NULL,
1979             ("vm_map_fixed: non-NULL backing object for stack"));
1980         vm_map_lock(map);
1981         VM_MAP_RANGE_CHECK(map, start, end);
1982         if ((cow & MAP_CHECK_EXCL) == 0) {
1983                 result = vm_map_delete(map, start, end);
1984                 if (result != KERN_SUCCESS)
1985                         goto out;
1986         }
1987         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1988                 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1989                     prot, max, cow);
1990         } else {
1991                 result = vm_map_insert(map, object, offset, start, end,
1992                     prot, max, cow);
1993         }
1994 out:
1995         vm_map_unlock(map);
1996         return (result);
1997 }
1998
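/*
 * Illustrative sketch (not compiled): a fixed-address mapping request.
 * Because MAP_CHECK_EXCL is not passed, any existing mappings in
 * [start, start + size) are deleted first.  Names are placeholders; no
 * map lock is held by the caller:
 *
 *	rv = vm_map_fixed(map, object, 0, start, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 */
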
1999 static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
2000 static const int aslr_pages_rnd_32[2] = {0x100, 0x4};
2001
2002 static int cluster_anon = 1;
2003 SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
2004     &cluster_anon, 0,
2005     "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");
2006
2007 static bool
2008 clustering_anon_allowed(vm_offset_t addr, int cow)
2009 {
2010
2011         switch (cluster_anon) {
2012         case 0:
2013                 return (false);
2014         case 1:
2015                 return (addr == 0 || (cow & MAP_NO_HINT) != 0);
2016         case 2:
2017         default:
2018                 return (true);
2019         }
2020 }
2021
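/*
 * For example, anonymous clustering can be disabled at run time with
 * "sysctl vm.cluster_anon=0", or forced even when a hint address is
 * supplied with "sysctl vm.cluster_anon=2", per the knob above.
 */
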
2022 static long aslr_restarts;
2023 SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
2024     &aslr_restarts, 0,
2025     "Number of aslr failures");
2026
2027 /*
2028  * Searches for the specified amount of free space in the given map with the
2029  * specified alignment.  Performs an address-ordered, first-fit search from
2030  * the given address "*addr", with an optional upper bound "max_addr".  If the
2031  * parameter "alignment" is zero, then the alignment is computed from the
2032  * given (object, offset) pair so as to enable the greatest possible use of
2033  * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
2034  * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
2035  *
2036  * The map must be locked.  Initially, there must be at least "length" bytes
2037  * of free space at the given address.
2038  */
2039 static int
2040 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2041     vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
2042     vm_offset_t alignment)
2043 {
2044         vm_offset_t aligned_addr, free_addr;
2045
2046         VM_MAP_ASSERT_LOCKED(map);
2047         free_addr = *addr;
2048         KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
2049             ("caller failed to provide space %#jx at address %p",
2050              (uintmax_t)length, (void *)free_addr));
2051         for (;;) {
2052                 /*
2053                  * At the start of every iteration, the free space at address
2054                  * "*addr" is at least "length" bytes.
2055                  */
2056                 if (alignment == 0)
2057                         pmap_align_superpage(object, offset, addr, length);
2058                 else
2059                         *addr = roundup2(*addr, alignment);
2060                 aligned_addr = *addr;
2061                 if (aligned_addr == free_addr) {
2062                         /*
2063                          * Alignment did not change "*addr", so "*addr" must
2064                          * still provide sufficient free space.
2065                          */
2066                         return (KERN_SUCCESS);
2067                 }
2068
2069                 /*
2070                  * Test for address wrap on "*addr".  A wrapped "*addr" could
2071                  * be a valid address, in which case vm_map_findspace() cannot
2072                  * be relied upon to fail.
2073                  */
2074                 if (aligned_addr < free_addr)
2075                         return (KERN_NO_SPACE);
2076                 *addr = vm_map_findspace(map, aligned_addr, length);
2077                 if (*addr + length > vm_map_max(map) ||
2078                     (max_addr != 0 && *addr + length > max_addr))
2079                         return (KERN_NO_SPACE);
2080                 free_addr = *addr;
2081                 if (free_addr == aligned_addr) {
2082                         /*
2083                          * If a successful call to vm_map_findspace() did not
2084                          * change "*addr", then "*addr" must still be aligned
2085                          * and provide sufficient free space.
2086                          */
2087                         return (KERN_SUCCESS);
2088                 }
2089         }
2090 }
2091
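/*
 * Illustrative example of the explicit-alignment path above: with
 * alignment = 1 << 21 (2 MB) and a candidate address of 0x40001000,
 * roundup2() advances "*addr" to 0x40200000, after which
 * vm_map_findspace() is consulted again to verify that "length" bytes
 * are still free at the aligned address.
 */
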
2092 int
2093 vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length,
2094     vm_offset_t max_addr, vm_offset_t alignment)
2095 {
2096         /* XXXKIB ASLR eh ? */
2097         *addr = vm_map_findspace(map, *addr, length);
2098         if (*addr + length > vm_map_max(map) ||
2099             (max_addr != 0 && *addr + length > max_addr))
2100                 return (KERN_NO_SPACE);
2101         return (vm_map_alignspace(map, NULL, 0, addr, length, max_addr,
2102             alignment));
2103 }
2104
2105 /*
2106  *      vm_map_find finds an unallocated region in the target address
2107  *      map with the given length.  The search is defined to be
2108  *      first-fit from the specified address; the region found is
2109  *      returned in the same parameter.
2110  *
2111  *      If object is non-NULL, ref count must be bumped by caller
2112  *      prior to making call to account for the new entry.
2113  */
2114 int
2115 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2116             vm_offset_t *addr,  /* IN/OUT */
2117             vm_size_t length, vm_offset_t max_addr, int find_space,
2118             vm_prot_t prot, vm_prot_t max, int cow)
2119 {
2120         vm_offset_t alignment, curr_min_addr, min_addr;
2121         int gap, pidx, rv, try;
2122         bool cluster, en_aslr, update_anon;
2123
2124         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
2125             object == NULL,
2126             ("vm_map_find: non-NULL backing object for stack"));
2127         MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
2128             (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
2129         if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
2130             (object->flags & OBJ_COLORED) == 0))
2131                 find_space = VMFS_ANY_SPACE;
2132         if (find_space >> 8 != 0) {
2133                 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
2134                 alignment = (vm_offset_t)1 << (find_space >> 8);
2135         } else
2136                 alignment = 0;
2137         en_aslr = (map->flags & MAP_ASLR) != 0;
2138         update_anon = cluster = clustering_anon_allowed(*addr, cow) &&
2139             (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
2140             find_space != VMFS_NO_SPACE && object == NULL &&
2141             (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
2142             MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
2143         curr_min_addr = min_addr = *addr;
2144         if (en_aslr && min_addr == 0 && !cluster &&
2145             find_space != VMFS_NO_SPACE &&
2146             (map->flags & MAP_ASLR_IGNSTART) != 0)
2147                 curr_min_addr = min_addr = vm_map_min(map);
2148         try = 0;
2149         vm_map_lock(map);
2150         if (cluster) {
2151                 curr_min_addr = map->anon_loc;
2152                 if (curr_min_addr == 0)
2153                         cluster = false;
2154         }
2155         if (find_space != VMFS_NO_SPACE) {
2156                 KASSERT(find_space == VMFS_ANY_SPACE ||
2157                     find_space == VMFS_OPTIMAL_SPACE ||
2158                     find_space == VMFS_SUPER_SPACE ||
2159                     alignment != 0, ("unexpected VMFS flag"));
2160 again:
2161                 /*
2162                  * When creating an anonymous mapping, try clustering
2163                  * with an existing anonymous mapping first.
2164                  *
2165                  * We make up to two attempts to find address space
2166                  * for a given find_space value. The first attempt may
2167                  * apply randomization or may cluster with an existing
2168                  * anonymous mapping. If this first attempt fails,
2169                  * perform a first-fit search of the available address
2170                  * space.
2171                  *
2172                  * If both tries fail and find_space is
2173                  * VMFS_OPTIMAL_SPACE, fall back to VMFS_ANY_SPACE and
2174                  * again enable clustering and randomization.
2175                  */
2176                 try++;
2177                 MPASS(try <= 2);
2178
2179                 if (try == 2) {
2180                         /*
2181                          * Second try: we failed either to find a
2182                          * suitable region for randomizing the
2183                          * allocation, or to cluster with an existing
2184                          * mapping.  Retry with free run.
2185                          */
2186                         curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
2187                             vm_map_min(map) : min_addr;
2188                         atomic_add_long(&aslr_restarts, 1);
2189                 }
2190
2191                 if (try == 1 && en_aslr && !cluster) {
2192                         /*
2193                          * Find space for allocation, including
2194                          * gap needed for later randomization.
2195                          */
2196                         pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
2197                             (find_space == VMFS_SUPER_SPACE || find_space ==
2198                             VMFS_OPTIMAL_SPACE) ? 1 : 0;
2199                         gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
2200                             (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
2201                             aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
2202                         *addr = vm_map_findspace(map, curr_min_addr,
2203                             length + gap * pagesizes[pidx]);
2204                         if (*addr + length + gap * pagesizes[pidx] >
2205                             vm_map_max(map))
2206                                 goto again;
2207                         /* And randomize the start address. */
2208                         *addr += (arc4random() % gap) * pagesizes[pidx];
2209                         if (max_addr != 0 && *addr + length > max_addr)
2210                                 goto again;
2211                 } else {
2212                         *addr = vm_map_findspace(map, curr_min_addr, length);
2213                         if (*addr + length > vm_map_max(map) ||
2214                             (max_addr != 0 && *addr + length > max_addr)) {
2215                                 if (cluster) {
2216                                         cluster = false;
2217                                         MPASS(try == 1);
2218                                         goto again;
2219                                 }
2220                                 rv = KERN_NO_SPACE;
2221                                 goto done;
2222                         }
2223                 }
2224
2225                 if (find_space != VMFS_ANY_SPACE &&
2226                     (rv = vm_map_alignspace(map, object, offset, addr, length,
2227                     max_addr, alignment)) != KERN_SUCCESS) {
2228                         if (find_space == VMFS_OPTIMAL_SPACE) {
2229                                 find_space = VMFS_ANY_SPACE;
2230                                 curr_min_addr = min_addr;
2231                                 cluster = update_anon;
2232                                 try = 0;
2233                                 goto again;
2234                         }
2235                         goto done;
2236                 }
2237         } else if ((cow & MAP_REMAP) != 0) {
2238                 if (!vm_map_range_valid(map, *addr, *addr + length)) {
2239                         rv = KERN_INVALID_ADDRESS;
2240                         goto done;
2241                 }
2242                 rv = vm_map_delete(map, *addr, *addr + length);
2243                 if (rv != KERN_SUCCESS)
2244                         goto done;
2245         }
2246         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
2247                 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
2248                     max, cow);
2249         } else {
2250                 rv = vm_map_insert(map, object, offset, *addr, *addr + length,
2251                     prot, max, cow);
2252         }
2253         if (rv == KERN_SUCCESS && update_anon)
2254                 map->anon_loc = *addr + length;
2255 done:
2256         vm_map_unlock(map);
2257         return (rv);
2258 }
2259
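/*
 * Illustrative sketch (not compiled): a common "let the kernel choose"
 * call to vm_map_find().  Names are placeholders; the caller holds no
 * map lock, since vm_map_find() acquires it internally:
 *
 *	addr = vm_map_min(map);
 *	rv = vm_map_find(map, NULL, 0, &addr, size, 0, VMFS_ANY_SPACE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv == KERN_SUCCESS)
 *		the range [addr, addr + size) is now mapped
 */
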
2260 /*
2261  *      vm_map_find_min() is a variant of vm_map_find() that takes an
2262  *      additional parameter ("default_addr") and treats the given address
2263  *      ("*addr") differently.  Specifically, it treats "*addr" as a hint
2264  *      and not as the minimum address where the mapping is created.
2265  *
2266  *      This function works in two phases.  First, it tries to
2267  *      allocate above the hint.  If that fails and the hint is
2268  *      greater than "default_addr", it performs a second pass, replacing
2269  *      the hint with "default_addr" as the minimum address for the
2270  *      allocation.
2271  */
2272 int
2273 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2274     vm_offset_t *addr, vm_size_t length, vm_offset_t default_addr,
2275     vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
2276     int cow)
2277 {
2278         vm_offset_t hint;
2279         int rv;
2280
2281         hint = *addr;
2282         if (hint == 0) {
2283                 cow |= MAP_NO_HINT;
2284                 *addr = hint = default_addr;
2285         }
2286         for (;;) {
2287                 rv = vm_map_find(map, object, offset, addr, length, max_addr,
2288                     find_space, prot, max, cow);
2289                 if (rv == KERN_SUCCESS || default_addr >= hint)
2290                         return (rv);
2291                 *addr = hint = default_addr;
2292         }
2293 }
2294
2295 /*
2296  * A map entry with any of the following flags set must not be merged with
2297  * another entry.
2298  */
2299 #define MAP_ENTRY_NOMERGE_MASK  (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
2300     MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC | \
2301     MAP_ENTRY_STACK_GAP_UP | MAP_ENTRY_STACK_GAP_DN)
2302
2303 static bool
2304 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
2305 {
2306
2307         KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
2308             (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
2309             ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
2310             prev, entry));
2311         return (prev->end == entry->start &&
2312             prev->object.vm_object == entry->object.vm_object &&
2313             (prev->object.vm_object == NULL ||
2314             prev->offset + (prev->end - prev->start) == entry->offset) &&
2315             prev->eflags == entry->eflags &&
2316             prev->protection == entry->protection &&
2317             prev->max_protection == entry->max_protection &&
2318             prev->inheritance == entry->inheritance &&
2319             prev->wired_count == entry->wired_count &&
2320             prev->cred == entry->cred);
2321 }
2322
2323 static void
2324 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
2325 {
2326
2327         /*
2328          * If the backing object is a vnode object, vm_object_deallocate()
2329          * calls vrele().  However, vrele() does not lock the vnode because
2330          * the vnode has additional references.  Thus, the map lock can be
2331          * kept without causing a lock-order reversal with the vnode lock.
2332          *
2333          * Since we count the number of virtual page mappings in
2334          * object->un_pager.vnp.writemappings, the writemappings value
2335          * should not be adjusted when the entry is disposed of.
2336          */
2337         if (entry->object.vm_object != NULL)
2338                 vm_object_deallocate(entry->object.vm_object);
2339         if (entry->cred != NULL)
2340                 crfree(entry->cred);
2341         vm_map_entry_dispose(map, entry);
2342 }
2343
2344 /*
2345  *      vm_map_try_merge_entries:
2346  *
2347  *      Compare two map entries that represent consecutive ranges. If
2348  *      the entries can be merged, expand the range of the second to
2349  *      cover the range of the first and delete the first. Then return
2350  *      the map entry that includes the first range.
2351  *
2352  *      The map must be locked.
2353  */
2354 vm_map_entry_t
2355 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
2356     vm_map_entry_t entry)
2357 {
2358
2359         VM_MAP_ASSERT_LOCKED(map);
2360         if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
2361             vm_map_mergeable_neighbors(prev_entry, entry)) {
2362                 vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
2363                 vm_map_merged_neighbor_dispose(map, prev_entry);
2364                 return (entry);
2365         }
2366         return (prev_entry);
2367 }
2368
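/*
 * Illustrative sketch (not compiled): after modifying "entry" in place,
 * a caller typically attempts a merge with both neighbors, much as the
 * range-modifying code later in this file does:
 *
 *	vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
 *	vm_map_try_merge_entries(map, entry, vm_map_entry_succ(entry));
 */
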
2369 /*
2370  *      vm_map_entry_back:
2371  *
2372  *      Allocate an object to back a map entry.
2373  */
2374 static inline void
2375 vm_map_entry_back(vm_map_entry_t entry)
2376 {
2377         vm_object_t object;
2378
2379         KASSERT(entry->object.vm_object == NULL,
2380             ("map entry %p has backing object", entry));
2381         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2382             ("map entry %p is a submap", entry));
2383         object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL,
2384             entry->cred, entry->end - entry->start);
2385         entry->object.vm_object = object;
2386         entry->offset = 0;
2387         entry->cred = NULL;
2388 }
2389
2390 /*
2391  *      vm_map_entry_charge_object
2392  *
2393  *      If there is no object backing this entry, create one.  Otherwise, if
2394  *      the entry has cred, give it to the backing object.
2395  */
2396 static inline void
2397 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
2398 {
2399
2400         VM_MAP_ASSERT_LOCKED(map);
2401         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2402             ("map entry %p is a submap", entry));
2403         if (entry->object.vm_object == NULL && !map->system_map &&
2404             (entry->eflags & MAP_ENTRY_GUARD) == 0)
2405                 vm_map_entry_back(entry);
2406         else if (entry->object.vm_object != NULL &&
2407             ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2408             entry->cred != NULL) {
2409                 VM_OBJECT_WLOCK(entry->object.vm_object);
2410                 KASSERT(entry->object.vm_object->cred == NULL,
2411                     ("OVERCOMMIT: %s: both cred e %p", __func__, entry));
2412                 entry->object.vm_object->cred = entry->cred;
2413                 entry->object.vm_object->charge = entry->end - entry->start;
2414                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
2415                 entry->cred = NULL;
2416         }
2417 }
2418
2419 /*
2420  *      vm_map_entry_clone
2421  *
2422  *      Create a duplicate map entry for clipping.
2423  */
2424 static vm_map_entry_t
2425 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry)
2426 {
2427         vm_map_entry_t new_entry;
2428
2429         VM_MAP_ASSERT_LOCKED(map);
2430
2431         /*
2432          * Create a backing object now, if none exists, so that more individual
2433          * objects won't be created after the map entry is split.
2434          */
2435         vm_map_entry_charge_object(map, entry);
2436
2437         /* Clone the entry. */
2438         new_entry = vm_map_entry_create(map);
2439         *new_entry = *entry;
2440         if (new_entry->cred != NULL)
2441                 crhold(entry->cred);
2442         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2443                 vm_object_reference(new_entry->object.vm_object);
2444                 vm_map_entry_set_vnode_text(new_entry, true);
2445                 /*
2446                  * The object->un_pager.vnp.writemappings for the object of
2447                  * MAP_ENTRY_WRITECNT type entry shall be kept as is here.  The
2448                  * virtual pages are re-distributed among the clipped entries,
2449                  * so the sum is left the same.
2450                  */
2451         }
2452         return (new_entry);
2453 }
2454
2455 /*
2456  *      vm_map_clip_start:      [ internal use only ]
2457  *
2458  *      Asserts that the given entry begins at or after
2459  *      the specified address; if necessary,
2460  *      it splits the entry into two.
2461  */
2462 static int
2463 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr)
2464 {
2465         vm_map_entry_t new_entry;
2466         int bdry_idx;
2467
2468         if (!map->system_map)
2469                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2470                     "%s: map %p entry %p start 0x%jx", __func__, map, entry,
2471                     (uintmax_t)startaddr);
2472
2473         if (startaddr <= entry->start)
2474                 return (KERN_SUCCESS);
2475
2476         VM_MAP_ASSERT_LOCKED(map);
2477         KASSERT(entry->end > startaddr && entry->start < startaddr,
2478             ("%s: invalid clip of entry %p", __func__, entry));
2479
2480         bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry);
2481         if (bdry_idx != 0) {
2482                 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0)
2483                         return (KERN_INVALID_ARGUMENT);
2484         }
2485
2486         new_entry = vm_map_entry_clone(map, entry);
2487
2488         /*
2489          * Split off the front portion.  Insert the new entry BEFORE this one,
2490          * so that this entry has the specified starting address.
2491          */
2492         new_entry->end = startaddr;
2493         vm_map_entry_link(map, new_entry);
2494         return (KERN_SUCCESS);
2495 }
2496
2497 /*
2498  *      vm_map_lookup_clip_start:
2499  *
2500  *      Find the entry at or just after 'start', and clip it if 'start' is in
2501  *      the interior of the entry.  Return that entry in '*res_entry', and
2502  *      in '*prev_entry' return the entry before 'start'.
2503  */
2504 static int
2505 vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start,
2506     vm_map_entry_t *res_entry, vm_map_entry_t *prev_entry)
2507 {
2508         vm_map_entry_t entry;
2509         int rv;
2510
2511         if (!map->system_map)
2512                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2513                     "%s: map %p start 0x%jx prev %p", __func__, map,
2514                     (uintmax_t)start, prev_entry);
2515
2516         if (vm_map_lookup_entry(map, start, prev_entry)) {
2517                 entry = *prev_entry;
2518                 rv = vm_map_clip_start(map, entry, start);
2519                 if (rv != KERN_SUCCESS)
2520                         return (rv);
2521                 *prev_entry = vm_map_entry_pred(entry);
2522         } else
2523                 entry = vm_map_entry_succ(*prev_entry);
2524         *res_entry = entry;
2525         return (KERN_SUCCESS);
2526 }
2527
2528 /*
2529  *      vm_map_clip_end:        [ internal use only ]
2530  *
2531  *      Asserts that the given entry ends at or before
2532  *      the specified address; if necessary,
2533  *      it splits the entry into two.
2534  */
2535 static int
2536 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr)
2537 {
2538         vm_map_entry_t new_entry;
2539         int bdry_idx;
2540
2541         if (!map->system_map)
2542                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2543                     "%s: map %p entry %p end 0x%jx", __func__, map, entry,
2544                     (uintmax_t)endaddr);
2545
2546         if (endaddr >= entry->end)
2547                 return (KERN_SUCCESS);
2548
2549         VM_MAP_ASSERT_LOCKED(map);
2550         KASSERT(entry->start < endaddr && entry->end > endaddr,
2551             ("%s: invalid clip of entry %p", __func__, entry));
2552
2553         bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry);
2554         if (bdry_idx != 0) {
2555                 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0)
2556                         return (KERN_INVALID_ARGUMENT);
2557         }
2558
2559         new_entry = vm_map_entry_clone(map, entry);
2560
2561         /*
2562          * Split off the back portion.  Insert the new entry AFTER this one,
2563          * so that this entry has the specified ending address.
2564          */
2565         new_entry->start = endaddr;
2566         vm_map_entry_link(map, new_entry);
2567
2568         return (KERN_SUCCESS);
2569 }
2570
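/*
 * Illustrative sketch (not compiled): how a range operation clips
 * entries so that [start, end) is covered by whole entries before
 * modifying them.  Names are placeholders; the map is write-locked:
 *
 *	rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	for (; entry->start < end; entry = vm_map_entry_succ(entry)) {
 *		rv = vm_map_clip_end(map, entry, end);
 *		if (rv != KERN_SUCCESS)
 *			return (rv);
 *		... operate on the now fully covered entry ...
 *	}
 */
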
2571 /*
2572  *      vm_map_submap:          [ kernel use only ]
2573  *
2574  *      Mark the given range as handled by a subordinate map.
2575  *
2576  *      This range must have been created with vm_map_find,
2577  *      and no other operations may have been performed on this
2578  *      range prior to calling vm_map_submap.
2579  *
2580  *      Only a limited number of operations can be performed
2581  *      within this range after calling vm_map_submap:
2582  *              vm_fault
2583  *      [Don't try vm_map_copy!]
2584  *
2585  *      To remove a submapping, one must first remove the
2586  *      range from the superior map, and then destroy the
2587  *      submap (if desired).  [Better yet, don't try it.]
2588  */
2589 int
2590 vm_map_submap(
2591         vm_map_t map,
2592         vm_offset_t start,
2593         vm_offset_t end,
2594         vm_map_t submap)
2595 {
2596         vm_map_entry_t entry;
2597         int result;
2598
2599         result = KERN_INVALID_ARGUMENT;
2600
2601         vm_map_lock(submap);
2602         submap->flags |= MAP_IS_SUB_MAP;
2603         vm_map_unlock(submap);
2604
2605         vm_map_lock(map);
2606         VM_MAP_RANGE_CHECK(map, start, end);
2607         if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end &&
2608             (entry->eflags & MAP_ENTRY_COW) == 0 &&
2609             entry->object.vm_object == NULL) {
2610                 result = vm_map_clip_start(map, entry, start);
2611                 if (result != KERN_SUCCESS)
2612                         goto unlock;
2613                 result = vm_map_clip_end(map, entry, end);
2614                 if (result != KERN_SUCCESS)
2615                         goto unlock;
2616                 entry->object.sub_map = submap;
2617                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2618                 result = KERN_SUCCESS;
2619         }
2620 unlock:
2621         vm_map_unlock(map);
2622
2623         if (result != KERN_SUCCESS) {
2624                 vm_map_lock(submap);
2625                 submap->flags &= ~MAP_IS_SUB_MAP;
2626                 vm_map_unlock(submap);
2627         }
2628         return (result);
2629 }
2630
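/*
 * Illustrative sketch (not compiled): roughly how kmem_suballoc()-style
 * code reserves a kernel range and hands it to a subordinate map.
 * Names are placeholders:
 *
 *	rv = vm_map_find(parent, NULL, 0, &min, size, 0, VMFS_ANY_SPACE,
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_NO_CHARGE);
 *	if (rv == KERN_SUCCESS)
 *		rv = vm_map_submap(parent, min, min + size, submap);
 */
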
2631 /*
2632  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2633  */
2634 #define MAX_INIT_PT     96
2635
2636 /*
2637  *      vm_map_pmap_enter:
2638  *
2639  *      Preload the specified map's pmap with mappings to the specified
2640  *      object's memory-resident pages.  No further physical pages are
2641  *      allocated, and no further virtual pages are retrieved from secondary
2642  *      storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
2643  *      limited number of page mappings are created at the low-end of the
2644  *      specified address range.  (For this purpose, a superpage mapping
2645  *      counts as one page mapping.)  Otherwise, all resident pages within
2646  *      the specified address range are mapped.
2647  */
2648 static void
2649 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
2650     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
2651 {
2652         vm_offset_t start;
2653         vm_page_t p, p_start;
2654         vm_pindex_t mask, psize, threshold, tmpidx;
2655
2656         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
2657                 return;
2658         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2659                 VM_OBJECT_WLOCK(object);
2660                 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2661                         pmap_object_init_pt(map->pmap, addr, object, pindex,
2662                             size);
2663                         VM_OBJECT_WUNLOCK(object);
2664                         return;
2665                 }
2666                 VM_OBJECT_LOCK_DOWNGRADE(object);
2667         } else
2668                 VM_OBJECT_RLOCK(object);
2669
2670         psize = atop(size);
2671         if (psize + pindex > object->size) {
2672                 if (pindex >= object->size) {
2673                         VM_OBJECT_RUNLOCK(object);
2674                         return;
2675                 }
2676                 psize = object->size - pindex;
2677         }
2678
2679         start = 0;
2680         p_start = NULL;
2681         threshold = MAX_INIT_PT;
2682
2683         p = vm_page_find_least(object, pindex);
2684         /*
2685          * Assert: the variable p is either (1) the page with the
2686          * least pindex greater than or equal to the parameter pindex
2687          * or (2) NULL.
2688          */
2689         for (;
2690              p != NULL && (tmpidx = p->pindex - pindex) < psize;
2691              p = TAILQ_NEXT(p, listq)) {
2692                 /*
2693                  * Don't allow a madvise call to blow away our really
2694                  * free pages by allocating pv entries.
2695                  */
2696                 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
2697                     vm_page_count_severe()) ||
2698                     ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
2699                     tmpidx >= threshold)) {
2700                         psize = tmpidx;
2701                         break;
2702                 }
2703                 if (vm_page_all_valid(p)) {
2704                         if (p_start == NULL) {
2705                                 start = addr + ptoa(tmpidx);
2706                                 p_start = p;
2707                         }
2708                         /* Jump ahead if a superpage mapping is possible. */
2709                         if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
2710                             (pagesizes[p->psind] - 1)) == 0) {
2711                                 mask = atop(pagesizes[p->psind]) - 1;
2712                                 if (tmpidx + mask < psize &&
2713                                     vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
2714                                         p += mask;
2715                                         threshold += mask;
2716                                 }
2717                         }
2718                 } else if (p_start != NULL) {
2719                         pmap_enter_object(map->pmap, start, addr +
2720                             ptoa(tmpidx), p_start, prot);
2721                         p_start = NULL;
2722                 }
2723         }
2724         if (p_start != NULL)
2725                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2726                     p_start, prot);
2727         VM_OBJECT_RUNLOCK(object);
2728 }
2729
2730 static void
2731 vm_map_protect_guard(vm_map_entry_t entry, vm_prot_t new_prot,
2732     vm_prot_t new_maxprot, int flags)
2733 {
2734         vm_prot_t old_prot;
2735
2736         MPASS((entry->eflags & MAP_ENTRY_GUARD) != 0);
2737         if ((entry->eflags & (MAP_ENTRY_STACK_GAP_UP |
2738             MAP_ENTRY_STACK_GAP_DN)) == 0)
2739                 return;
2740
2741         old_prot = PROT_EXTRACT(entry->offset);
2742         if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) {
2743                 entry->offset = PROT_MAX(new_maxprot) |
2744                     (new_maxprot & old_prot);
2745         }
2746         if ((flags & VM_MAP_PROTECT_SET_PROT) != 0) {
2747                 entry->offset = new_prot | PROT_MAX(
2748                     PROT_MAX_EXTRACT(entry->offset));
2749         }
2750 }
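
/*
 * Illustrative sketch (not part of this file): for stack gap entries the
 * protection pair is packed into entry->offset, so the update above is a
 * re-encoding of the two fields:
 *
 *	old_prot = PROT_EXTRACT(entry->offset);		// current prot bits
 *	old_max  = PROT_MAX_EXTRACT(entry->offset);	// current max prot
 *	entry->offset = new_prot | PROT_MAX(old_max);	// keep max, set prot
 *
 * PROT_MAX() shifts a protection value into the "maximum protection" field,
 * while PROT_EXTRACT() and PROT_MAX_EXTRACT() recover the two halves.
 */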
2751
2752 /*
2753  *      vm_map_protect:
2754  *
2755  *      Sets the protection and/or the maximum protection of the
2756  *      specified address region in the target map.
2757  */
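
/*
 * Usage sketch (illustrative, assuming an mprotect(2)-style caller; not part
 * of this file): changing only the effective protection of a range while
 * leaving the maximum protection alone would look roughly like
 *
 *	rv = vm_map_protect(map, trunc_page(addr), round_page(addr + size),
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
 *	    VM_MAP_PROTECT_SET_PROT);
 *
 * where the new_maxprot argument is ignored because
 * VM_MAP_PROTECT_SET_MAXPROT is not passed in flags.
 */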
2758 int
2759 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2760     vm_prot_t new_prot, vm_prot_t new_maxprot, int flags)
2761 {
2762         vm_map_entry_t entry, first_entry, in_tran, prev_entry;
2763         vm_object_t obj;
2764         struct ucred *cred;
2765         vm_offset_t orig_start;
2766         vm_prot_t check_prot, max_prot, old_prot;
2767         int rv;
2768
2769         if (start == end)
2770                 return (KERN_SUCCESS);
2771
2772         if (CONTAINS_BITS(flags, VM_MAP_PROTECT_SET_PROT |
2773             VM_MAP_PROTECT_SET_MAXPROT) &&
2774             !CONTAINS_BITS(new_maxprot, new_prot))
2775                 return (KERN_OUT_OF_BOUNDS);
2776
2777         orig_start = start;
2778 again:
2779         in_tran = NULL;
2780         start = orig_start;
2781         vm_map_lock(map);
2782
2783         if ((map->flags & MAP_WXORX) != 0 &&
2784             (flags & VM_MAP_PROTECT_SET_PROT) != 0 &&
2785             CONTAINS_BITS(new_prot, VM_PROT_WRITE | VM_PROT_EXECUTE)) {
2786                 vm_map_unlock(map);
2787                 return (KERN_PROTECTION_FAILURE);
2788         }
2789
2790         /*
2791          * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
2792          * need to fault pages into the map and will drop the map lock while
2793          * doing so, and the VM object may end up in an inconsistent state if we
2794          * update the protection on the map entry in between faults.
2795          */
2796         vm_map_wait_busy(map);
2797
2798         VM_MAP_RANGE_CHECK(map, start, end);
2799
2800         if (!vm_map_lookup_entry(map, start, &first_entry))
2801                 first_entry = vm_map_entry_succ(first_entry);
2802
2803         if ((flags & VM_MAP_PROTECT_GROWSDOWN) != 0 &&
2804             (first_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0) {
2805                 /*
2806                  * Handle Linux's PROT_GROWSDOWN flag.
2807                  * It means that the protection is applied down the
2808                  * whole stack: to the specified range of the mapped
2809                  * region and to the grows-down region (a.k.a. the
2810                  * guard) below it.
2811                  */
2812                 while (!CONTAINS_BITS(first_entry->eflags,
2813                     MAP_ENTRY_GUARD | MAP_ENTRY_STACK_GAP_DN) &&
2814                     first_entry != vm_map_entry_first(map))
2815                         first_entry = vm_map_entry_pred(first_entry);
2816                 start = first_entry->start;
2817         }
2818
2819         /*
2820          * Make a first pass to check for protection violations.
2821          */
2822         check_prot = 0;
2823         if ((flags & VM_MAP_PROTECT_SET_PROT) != 0)
2824                 check_prot |= new_prot;
2825         if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0)
2826                 check_prot |= new_maxprot;
2827         for (entry = first_entry; entry->start < end;
2828             entry = vm_map_entry_succ(entry)) {
2829                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
2830                         vm_map_unlock(map);
2831                         return (KERN_INVALID_ARGUMENT);
2832                 }
2833                 if ((entry->eflags & (MAP_ENTRY_GUARD |
2834                     MAP_ENTRY_STACK_GAP_DN | MAP_ENTRY_STACK_GAP_UP)) ==
2835                     MAP_ENTRY_GUARD)
2836                         continue;
2837                 max_prot = (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
2838                     MAP_ENTRY_STACK_GAP_UP)) != 0 ?
2839                     PROT_MAX_EXTRACT(entry->offset) : entry->max_protection;
2840                 if (!CONTAINS_BITS(max_prot, check_prot)) {
2841                         vm_map_unlock(map);
2842                         return (KERN_PROTECTION_FAILURE);
2843                 }
2844                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
2845                         in_tran = entry;
2846         }
2847
2848         /*
2849          * Postpone the operation until all in-transition map entries have
2850          * stabilized.  An in-transition entry might already have its pages
2851          * wired and wired_count incremented, but not yet have its
2852  *      MAP_ENTRY_USER_WIRED flag set.  In that case, we would fail to call
2853          * vm_fault_copy_entry() in the final loop below.
2854          */
2855         if (in_tran != NULL) {
2856                 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2857                 vm_map_unlock_and_wait(map, 0);
2858                 goto again;
2859         }
2860
2861         /*
2862          * Before changing the protections, try to reserve swap space for any
2863          * private (i.e., copy-on-write) mappings that are transitioning from
2864          * read-only to read/write access.  If a reservation fails, break out
2865          * of this loop early and let the next loop simplify the entries, since
2866          * some may now be mergeable.
2867          */
2868         rv = vm_map_clip_start(map, first_entry, start);
2869         if (rv != KERN_SUCCESS) {
2870                 vm_map_unlock(map);
2871                 return (rv);
2872         }
2873         for (entry = first_entry; entry->start < end;
2874             entry = vm_map_entry_succ(entry)) {
2875                 rv = vm_map_clip_end(map, entry, end);
2876                 if (rv != KERN_SUCCESS) {
2877                         vm_map_unlock(map);
2878                         return (rv);
2879                 }
2880
2881                 if ((flags & VM_MAP_PROTECT_SET_PROT) == 0 ||
2882                     ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
2883                     ENTRY_CHARGED(entry) ||
2884                     (entry->eflags & MAP_ENTRY_GUARD) != 0)
2885                         continue;
2886
2887                 cred = curthread->td_ucred;
2888                 obj = entry->object.vm_object;
2889
2890                 if (obj == NULL ||
2891                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
2892                         if (!swap_reserve(entry->end - entry->start)) {
2893                                 rv = KERN_RESOURCE_SHORTAGE;
2894                                 end = entry->end;
2895                                 break;
2896                         }
2897                         crhold(cred);
2898                         entry->cred = cred;
2899                         continue;
2900                 }
2901
2902                 VM_OBJECT_WLOCK(obj);
2903                 if ((obj->flags & OBJ_SWAP) == 0) {
2904                         VM_OBJECT_WUNLOCK(obj);
2905                         continue;
2906                 }
2907
2908                 /*
2909                  * Charge for the whole object allocation now, since
2910                  * we cannot distinguish between non-charged and
2911                  * charged clipped mapping of the same object later.
2912                  */
2913                 KASSERT(obj->charge == 0,
2914                     ("vm_map_protect: object %p overcharged (entry %p)",
2915                     obj, entry));
2916                 if (!swap_reserve(ptoa(obj->size))) {
2917                         VM_OBJECT_WUNLOCK(obj);
2918                         rv = KERN_RESOURCE_SHORTAGE;
2919                         end = entry->end;
2920                         break;
2921                 }
2922
2923                 crhold(cred);
2924                 obj->cred = cred;
2925                 obj->charge = ptoa(obj->size);
2926                 VM_OBJECT_WUNLOCK(obj);
2927         }
2928
2929         /*
2930          * If enough swap space was available, go back and fix up protections.
2931          * Otherwise, just simplify entries, since some may have been modified.
2932          * [Note that clipping is not necessary the second time.]
2933          */
2934         for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry;
2935             entry->start < end;
2936             vm_map_try_merge_entries(map, prev_entry, entry),
2937             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
2938                 if (rv != KERN_SUCCESS)
2939                         continue;
2940
2941                 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
2942                         vm_map_protect_guard(entry, new_prot, new_maxprot,
2943                             flags);
2944                         continue;
2945                 }
2946
2947                 old_prot = entry->protection;
2948
2949                 if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) {
2950                         entry->max_protection = new_maxprot;
2951                         entry->protection = new_maxprot & old_prot;
2952                 }
2953                 if ((flags & VM_MAP_PROTECT_SET_PROT) != 0)
2954                         entry->protection = new_prot;
2955
2956                 /*
2957                  * For user wired map entries, the normal lazy evaluation of
2958                  * write access upgrades through soft page faults is
2959                  * undesirable.  Instead, immediately copy any pages that are
2960                  * copy-on-write and enable write access in the physical map.
2961                  */
2962                 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2963                     (entry->protection & VM_PROT_WRITE) != 0 &&
2964                     (old_prot & VM_PROT_WRITE) == 0)
2965                         vm_fault_copy_entry(map, map, entry, entry, NULL);
2966
2967                 /*
2968                  * When restricting access, update the physical map.  Worry
2969                  * about copy-on-write here.
2970                  */
2971                 if ((old_prot & ~entry->protection) != 0) {
2972 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2973                                                         VM_PROT_ALL)
2974                         pmap_protect(map->pmap, entry->start,
2975                             entry->end,
2976                             entry->protection & MASK(entry));
2977 #undef  MASK
2978                 }
2979         }
2980         vm_map_try_merge_entries(map, prev_entry, entry);
2981         vm_map_unlock(map);
2982         return (rv);
2983 }
2984
2985 /*
2986  *      vm_map_madvise:
2987  *
2988  *      This routine traverses a process's map handling the madvise
2989  *      system call.  Advisories are classified as either those affecting
2990  *      the vm_map_entry structure or those affecting the underlying
2991  *      objects.
2992  */
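
/*
 * Usage sketch (illustrative assumption, not part of this file): an
 * madvise(2)-style caller passes page-aligned bounds and one MADV_* value,
 * e.g.
 *
 *	error = vm_map_madvise(map, trunc_page(addr), round_page(addr + len),
 *	    MADV_WILLNEED);
 *
 * Entry-level advice (MADV_NORMAL, MADV_NOSYNC, MADV_NOCORE, ...) takes the
 * map's exclusive lock and clips entries; object-level advice (MADV_WILLNEED,
 * MADV_DONTNEED, MADV_FREE) only read-locks the map.
 */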
2993 int
2994 vm_map_madvise(
2995         vm_map_t map,
2996         vm_offset_t start,
2997         vm_offset_t end,
2998         int behav)
2999 {
3000         vm_map_entry_t entry, prev_entry;
3001         int rv;
3002         bool modify_map;
3003
3004         /*
3005          * Some madvise calls directly modify the vm_map_entry, in which case
3006          * we need to use an exclusive lock on the map and we need to perform
3007          * various clipping operations.  Otherwise we only need a read-lock
3008          * on the map.
3009          */
3010         switch (behav) {
3011         case MADV_NORMAL:
3012         case MADV_SEQUENTIAL:
3013         case MADV_RANDOM:
3014         case MADV_NOSYNC:
3015         case MADV_AUTOSYNC:
3016         case MADV_NOCORE:
3017         case MADV_CORE:
3018                 if (start == end)
3019                         return (0);
3020                 modify_map = true;
3021                 vm_map_lock(map);
3022                 break;
3023         case MADV_WILLNEED:
3024         case MADV_DONTNEED:
3025         case MADV_FREE:
3026                 if (start == end)
3027                         return (0);
3028                 modify_map = false;
3029                 vm_map_lock_read(map);
3030                 break;
3031         default:
3032                 return (EINVAL);
3033         }
3034
3035         /*
3036          * Locate starting entry and clip if necessary.
3037          */
3038         VM_MAP_RANGE_CHECK(map, start, end);
3039
3040         if (modify_map) {
3041                 /*
3042                  * madvise behaviors that are implemented in the vm_map_entry.
3043                  *
3044                  * We clip the vm_map_entry so that behavioral changes are
3045                  * limited to the specified address range.
3046                  */
3047                 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry);
3048                 if (rv != KERN_SUCCESS) {
3049                         vm_map_unlock(map);
3050                         return (vm_mmap_to_errno(rv));
3051                 }
3052
3053                 for (; entry->start < end; prev_entry = entry,
3054                     entry = vm_map_entry_succ(entry)) {
3055                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
3056                                 continue;
3057
3058                         rv = vm_map_clip_end(map, entry, end);
3059                         if (rv != KERN_SUCCESS) {
3060                                 vm_map_unlock(map);
3061                                 return (vm_mmap_to_errno(rv));
3062                         }
3063
3064                         switch (behav) {
3065                         case MADV_NORMAL:
3066                                 vm_map_entry_set_behavior(entry,
3067                                     MAP_ENTRY_BEHAV_NORMAL);
3068                                 break;
3069                         case MADV_SEQUENTIAL:
3070                                 vm_map_entry_set_behavior(entry,
3071                                     MAP_ENTRY_BEHAV_SEQUENTIAL);
3072                                 break;
3073                         case MADV_RANDOM:
3074                                 vm_map_entry_set_behavior(entry,
3075                                     MAP_ENTRY_BEHAV_RANDOM);
3076                                 break;
3077                         case MADV_NOSYNC:
3078                                 entry->eflags |= MAP_ENTRY_NOSYNC;
3079                                 break;
3080                         case MADV_AUTOSYNC:
3081                                 entry->eflags &= ~MAP_ENTRY_NOSYNC;
3082                                 break;
3083                         case MADV_NOCORE:
3084                                 entry->eflags |= MAP_ENTRY_NOCOREDUMP;
3085                                 break;
3086                         case MADV_CORE:
3087                                 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
3088                                 break;
3089                         default:
3090                                 break;
3091                         }
3092                         vm_map_try_merge_entries(map, prev_entry, entry);
3093                 }
3094                 vm_map_try_merge_entries(map, prev_entry, entry);
3095                 vm_map_unlock(map);
3096         } else {
3097                 vm_pindex_t pstart, pend;
3098
3099                 /*
3100                  * madvise behaviors that are implemented in the underlying
3101                  * vm_object.
3102                  *
3103                  * Since we don't clip the vm_map_entry, we have to clip
3104                  * the vm_object pindex and count.
3105                  */
3106                 if (!vm_map_lookup_entry(map, start, &entry))
3107                         entry = vm_map_entry_succ(entry);
3108                 for (; entry->start < end;
3109                     entry = vm_map_entry_succ(entry)) {
3110                         vm_offset_t useEnd, useStart;
3111
3112                         if ((entry->eflags & (MAP_ENTRY_IS_SUB_MAP |
3113                             MAP_ENTRY_GUARD)) != 0)
3114                                 continue;
3115
3116                         /*
3117                          * MADV_FREE would otherwise rewind time to
3118                          * the creation of the shadow object.  Because
3119                          * we hold the VM map read-locked, neither the
3120                          * entry's object nor the presence of a
3121                          * backing object can change.
3122                          */
3123                         if (behav == MADV_FREE &&
3124                             entry->object.vm_object != NULL &&
3125                             entry->object.vm_object->backing_object != NULL)
3126                                 continue;
3127
3128                         pstart = OFF_TO_IDX(entry->offset);
3129                         pend = pstart + atop(entry->end - entry->start);
3130                         useStart = entry->start;
3131                         useEnd = entry->end;
3132
3133                         if (entry->start < start) {
3134                                 pstart += atop(start - entry->start);
3135                                 useStart = start;
3136                         }
3137                         if (entry->end > end) {
3138                                 pend -= atop(entry->end - end);
3139                                 useEnd = end;
3140                         }
3141
3142                         if (pstart >= pend)
3143                                 continue;
3144
3145                         /*
3146                          * Perform the pmap_advise() before clearing
3147                          * PGA_REFERENCED in vm_page_advise().  Otherwise, a
3148                          * concurrent pmap operation, such as pmap_remove(),
3149                          * could clear a reference in the pmap and set
3150                          * PGA_REFERENCED on the page before the pmap_advise()
3151                          * had completed.  Consequently, the page would appear
3152                          * referenced based upon an old reference that
3153                          * occurred before this pmap_advise() ran.
3154                          */
3155                         if (behav == MADV_DONTNEED || behav == MADV_FREE)
3156                                 pmap_advise(map->pmap, useStart, useEnd,
3157                                     behav);
3158
3159                         vm_object_madvise(entry->object.vm_object, pstart,
3160                             pend, behav);
3161
3162                         /*
3163                          * Pre-populate paging structures in the
3164                          * WILLNEED case.  For wired entries, the
3165                          * paging structures are already populated.
3166                          */
3167                         if (behav == MADV_WILLNEED &&
3168                             entry->wired_count == 0) {
3169                                 vm_map_pmap_enter(map,
3170                                     useStart,
3171                                     entry->protection,
3172                                     entry->object.vm_object,
3173                                     pstart,
3174                                     ptoa(pend - pstart),
3175                                     MAP_PREFAULT_MADVISE
3176                                 );
3177                         }
3178                 }
3179                 vm_map_unlock_read(map);
3180         }
3181         return (0);
3182 }
3183
3184 /*
3185  *      vm_map_inherit:
3186  *
3187  *      Sets the inheritance of the specified address
3188  *      range in the target map.  Inheritance
3189  *      affects how the map will be shared with
3190  *      child maps at the time of vmspace_fork.
3191  */
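
/*
 * Usage sketch (illustrative, not part of this file): a minherit(2)-style
 * caller selects how the range is propagated across fork, e.g.
 *
 *	rv = vm_map_inherit(map, trunc_page(addr), round_page(addr + len),
 *	    VM_INHERIT_SHARE);
 *
 * VM_INHERIT_NONE, VM_INHERIT_COPY, VM_INHERIT_SHARE and VM_INHERIT_ZERO are
 * the only accepted values; anything else returns KERN_INVALID_ARGUMENT.
 */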
3192 int
3193 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
3194                vm_inherit_t new_inheritance)
3195 {
3196         vm_map_entry_t entry, lentry, prev_entry, start_entry;
3197         int rv;
3198
3199         switch (new_inheritance) {
3200         case VM_INHERIT_NONE:
3201         case VM_INHERIT_COPY:
3202         case VM_INHERIT_SHARE:
3203         case VM_INHERIT_ZERO:
3204                 break;
3205         default:
3206                 return (KERN_INVALID_ARGUMENT);
3207         }
3208         if (start == end)
3209                 return (KERN_SUCCESS);
3210         vm_map_lock(map);
3211         VM_MAP_RANGE_CHECK(map, start, end);
3212         rv = vm_map_lookup_clip_start(map, start, &start_entry, &prev_entry);
3213         if (rv != KERN_SUCCESS)
3214                 goto unlock;
3215         if (vm_map_lookup_entry(map, end - 1, &lentry)) {
3216                 rv = vm_map_clip_end(map, lentry, end);
3217                 if (rv != KERN_SUCCESS)
3218                         goto unlock;
3219         }
3220         if (new_inheritance == VM_INHERIT_COPY) {
3221                 for (entry = start_entry; entry->start < end;
3222                     prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3223                         if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK)
3224                             != 0) {
3225                                 rv = KERN_INVALID_ARGUMENT;
3226                                 goto unlock;
3227                         }
3228                 }
3229         }
3230         for (entry = start_entry; entry->start < end; prev_entry = entry,
3231             entry = vm_map_entry_succ(entry)) {
3232                 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx",
3233                     entry, (uintmax_t)entry->end, (uintmax_t)end));
3234                 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
3235                     new_inheritance != VM_INHERIT_ZERO)
3236                         entry->inheritance = new_inheritance;
3237                 vm_map_try_merge_entries(map, prev_entry, entry);
3238         }
3239         vm_map_try_merge_entries(map, prev_entry, entry);
3240 unlock:
3241         vm_map_unlock(map);
3242         return (rv);
3243 }
3244
3245 /*
3246  *      vm_map_entry_in_transition:
3247  *
3248  *      Release the map lock, and sleep until the entry is no longer in
3249  *      transition.  Awake and acquire the map lock.  If the map changed while
3250  *      another held the lock, lookup a possibly-changed entry at or after the
3251  *      'start' position of the old entry.
3252  */
3253 static vm_map_entry_t
3254 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start,
3255     vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry)
3256 {
3257         vm_map_entry_t entry;
3258         vm_offset_t start;
3259         u_int last_timestamp;
3260
3261         VM_MAP_ASSERT_LOCKED(map);
3262         KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3263             ("not in-transition map entry %p", in_entry));
3264         /*
3265          * We have not yet clipped the entry.
3266          */
3267         start = MAX(in_start, in_entry->start);
3268         in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3269         last_timestamp = map->timestamp;
3270         if (vm_map_unlock_and_wait(map, 0)) {
3271                 /*
3272                  * Allow interruption of user wiring/unwiring?
3273                  */
3274         }
3275         vm_map_lock(map);
3276         if (last_timestamp + 1 == map->timestamp)
3277                 return (in_entry);
3278
3279         /*
3280          * Look again for the entry because the map was modified while it was
3281          * unlocked.  Specifically, the entry may have been clipped, merged, or
3282          * deleted.
3283          */
3284         if (!vm_map_lookup_entry(map, start, &entry)) {
3285                 if (!holes_ok) {
3286                         *io_end = start;
3287                         return (NULL);
3288                 }
3289                 entry = vm_map_entry_succ(entry);
3290         }
3291         return (entry);
3292 }
3293
3294 /*
3295  *      vm_map_unwire:
3296  *
3297  *      Implements both kernel and user unwiring.
3298  */
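
/*
 * Usage sketch (illustrative assumption, not part of this file): a caller
 * undoing a user wiring over a range that must be fully mapped might look
 * roughly like
 *
 *	rv = vm_map_unwire(map, trunc_page(addr), round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * System unwiring instead passes VM_MAP_WIRE_SYSTEM and requires every entry
 * in the range to carry a system wiring.
 */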
3299 int
3300 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
3301     int flags)
3302 {
3303         vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3304         int rv;
3305         bool holes_ok, need_wakeup, user_unwire;
3306
3307         if (start == end)
3308                 return (KERN_SUCCESS);
3309         holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
3310         user_unwire = (flags & VM_MAP_WIRE_USER) != 0;
3311         vm_map_lock(map);
3312         VM_MAP_RANGE_CHECK(map, start, end);
3313         if (!vm_map_lookup_entry(map, start, &first_entry)) {
3314                 if (holes_ok)
3315                         first_entry = vm_map_entry_succ(first_entry);
3316                 else {
3317                         vm_map_unlock(map);
3318                         return (KERN_INVALID_ADDRESS);
3319                 }
3320         }
3321         rv = KERN_SUCCESS;
3322         for (entry = first_entry; entry->start < end; entry = next_entry) {
3323                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3324                         /*
3325                          * We have not yet clipped the entry.
3326                          */
3327                         next_entry = vm_map_entry_in_transition(map, start,
3328                             &end, holes_ok, entry);
3329                         if (next_entry == NULL) {
3330                                 if (entry == first_entry) {
3331                                         vm_map_unlock(map);
3332                                         return (KERN_INVALID_ADDRESS);
3333                                 }
3334                                 rv = KERN_INVALID_ADDRESS;
3335                                 break;
3336                         }
3337                         first_entry = (entry == first_entry) ?
3338                             next_entry : NULL;
3339                         continue;
3340                 }
3341                 rv = vm_map_clip_start(map, entry, start);
3342                 if (rv != KERN_SUCCESS)
3343                         break;
3344                 rv = vm_map_clip_end(map, entry, end);
3345                 if (rv != KERN_SUCCESS)
3346                         break;
3347
3348                 /*
3349                  * Mark the entry in case the map lock is released.  (See
3350                  * above.)
3351                  */
3352                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3353                     entry->wiring_thread == NULL,
3354                     ("owned map entry %p", entry));
3355                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3356                 entry->wiring_thread = curthread;
3357                 next_entry = vm_map_entry_succ(entry);
3358                 /*
3359                  * Check the map for holes in the specified region.
3360                  * If holes_ok, skip this check.
3361                  */
3362                 if (!holes_ok &&
3363                     entry->end < end && next_entry->start > entry->end) {
3364                         end = entry->end;
3365                         rv = KERN_INVALID_ADDRESS;
3366                         break;
3367                 }
3368                 /*
3369                  * If system unwiring, require that the entry is system wired.
3370                  */
3371                 if (!user_unwire &&
3372                     vm_map_entry_system_wired_count(entry) == 0) {
3373                         end = entry->end;
3374                         rv = KERN_INVALID_ARGUMENT;
3375                         break;
3376                 }
3377         }
3378         need_wakeup = false;
3379         if (first_entry == NULL &&
3380             !vm_map_lookup_entry(map, start, &first_entry)) {
3381                 KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
3382                 prev_entry = first_entry;
3383                 entry = vm_map_entry_succ(first_entry);
3384         } else {
3385                 prev_entry = vm_map_entry_pred(first_entry);
3386                 entry = first_entry;
3387         }
3388         for (; entry->start < end;
3389             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3390                 /*
3391                  * If holes_ok was specified, an empty
3392                  * space in the unwired region could have been mapped
3393                  * while the map lock was dropped for draining
3394                  * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
3395                  * could be simultaneously wiring this new mapping
3396                  * entry.  Detect these cases and skip any entries
3397                  * not marked as in transition by us.
3398                  */
3399                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3400                     entry->wiring_thread != curthread) {
3401                         KASSERT(holes_ok,
3402                             ("vm_map_unwire: !HOLESOK and new/changed entry"));
3403                         continue;
3404                 }
3405
3406                 if (rv == KERN_SUCCESS && (!user_unwire ||
3407                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
3408                         if (entry->wired_count == 1)
3409                                 vm_map_entry_unwire(map, entry);
3410                         else
3411                                 entry->wired_count--;
3412                         if (user_unwire)
3413                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3414                 }
3415                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3416                     ("vm_map_unwire: in-transition flag missing %p", entry));
3417                 KASSERT(entry->wiring_thread == curthread,
3418                     ("vm_map_unwire: alien wire %p", entry));
3419                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
3420                 entry->wiring_thread = NULL;
3421                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3422                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3423                         need_wakeup = true;
3424                 }
3425                 vm_map_try_merge_entries(map, prev_entry, entry);
3426         }
3427         vm_map_try_merge_entries(map, prev_entry, entry);
3428         vm_map_unlock(map);
3429         if (need_wakeup)
3430                 vm_map_wakeup(map);
3431         return (rv);
3432 }
3433
3434 static void
3435 vm_map_wire_user_count_sub(u_long npages)
3436 {
3437
3438         atomic_subtract_long(&vm_user_wire_count, npages);
3439 }
3440
3441 static bool
3442 vm_map_wire_user_count_add(u_long npages)
3443 {
3444         u_long wired;
3445
3446         wired = vm_user_wire_count;
3447         do {
3448                 if (npages + wired > vm_page_max_user_wired)
3449                         return (false);
3450         } while (!atomic_fcmpset_long(&vm_user_wire_count, &wired,
3451             npages + wired));
3452
3453         return (true);
3454 }
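
/*
 * Illustrative note (not part of this file): the add path above is the usual
 * lock-free "read, test, fcmpset, retry" pattern.  A minimal standalone
 * sketch of the same idea, assuming a hypothetical counter and limit:
 *
 *	u_long old = counter;
 *	do {
 *		if (old + n > limit)
 *			return (false);		// would exceed the cap
 *	} while (!atomic_fcmpset_long(&counter, &old, old + n));
 *	return (true);
 *
 * atomic_fcmpset_long() reloads 'old' on failure, so the limit check is
 * re-evaluated against the latest value on every retry.
 */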
3455
3456 /*
3457  *      vm_map_wire_entry_failure:
3458  *
3459  *      Handle a wiring failure on the given entry.
3460  *
3461  *      The map should be locked.
3462  */
3463 static void
3464 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
3465     vm_offset_t failed_addr)
3466 {
3467
3468         VM_MAP_ASSERT_LOCKED(map);
3469         KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
3470             entry->wired_count == 1,
3471             ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
3472         KASSERT(failed_addr < entry->end,
3473             ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
3474
3475         /*
3476          * If any pages at the start of this entry were successfully wired,
3477          * then unwire them.
3478          */
3479         if (failed_addr > entry->start) {
3480                 pmap_unwire(map->pmap, entry->start, failed_addr);
3481                 vm_object_unwire(entry->object.vm_object, entry->offset,
3482                     failed_addr - entry->start, PQ_ACTIVE);
3483         }
3484
3485         /*
3486          * Assign an out-of-range value to represent the failure to wire this
3487          * entry.
3488          */
3489         entry->wired_count = -1;
3490 }
3491
3492 int
3493 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3494 {
3495         int rv;
3496
3497         vm_map_lock(map);
3498         rv = vm_map_wire_locked(map, start, end, flags);
3499         vm_map_unlock(map);
3500         return (rv);
3501 }
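
/*
 * Usage sketch (illustrative assumption, not part of this file): an
 * mlock(2)-style caller wires a user range and asks for failure on any
 * unmapped hole, roughly
 *
 *	rv = vm_map_wire(map, trunc_page(addr), round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * VM_MAP_WIRE_WRITE may be added when the caller requires the pages to be
 * wired writable.
 */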
3502
3503 /*
3504  *      vm_map_wire_locked:
3505  *
3506  *      Implements both kernel and user wiring.  Returns with the map locked;
3507  *      however, the map lock may be dropped and reacquired during the call.
3508  */
3509 int
3510 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3511 {
3512         vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3513         vm_offset_t faddr, saved_end, saved_start;
3514         u_long incr, npages;
3515         u_int bidx, last_timestamp;
3516         int rv;
3517         bool holes_ok, need_wakeup, user_wire;
3518         vm_prot_t prot;
3519
3520         VM_MAP_ASSERT_LOCKED(map);
3521
3522         if (start == end)
3523                 return (KERN_SUCCESS);
3524         prot = 0;
3525         if (flags & VM_MAP_WIRE_WRITE)
3526                 prot |= VM_PROT_WRITE;
3527         holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
3528         user_wire = (flags & VM_MAP_WIRE_USER) != 0;
3529         VM_MAP_RANGE_CHECK(map, start, end);
3530         if (!vm_map_lookup_entry(map, start, &first_entry)) {
3531                 if (holes_ok)
3532                         first_entry = vm_map_entry_succ(first_entry);
3533                 else
3534                         return (KERN_INVALID_ADDRESS);
3535         }
3536         for (entry = first_entry; entry->start < end; entry = next_entry) {
3537                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3538                         /*
3539                          * We have not yet clipped the entry.
3540                          */
3541                         next_entry = vm_map_entry_in_transition(map, start,
3542                             &end, holes_ok, entry);
3543                         if (next_entry == NULL) {
3544                                 if (entry == first_entry)
3545                                         return (KERN_INVALID_ADDRESS);
3546                                 rv = KERN_INVALID_ADDRESS;
3547                                 goto done;
3548                         }
3549                         first_entry = (entry == first_entry) ?
3550                             next_entry : NULL;
3551                         continue;
3552                 }
3553                 rv = vm_map_clip_start(map, entry, start);
3554                 if (rv != KERN_SUCCESS)
3555                         goto done;
3556                 rv = vm_map_clip_end(map, entry, end);
3557                 if (rv != KERN_SUCCESS)
3558                         goto done;
3559
3560                 /*
3561                  * Mark the entry in case the map lock is released.  (See
3562                  * above.)
3563                  */
3564                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3565                     entry->wiring_thread == NULL,
3566                     ("owned map entry %p", entry));
3567                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3568                 entry->wiring_thread = curthread;
3569                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
3570                     || (entry->protection & prot) != prot) {
3571                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
3572                         if (!holes_ok) {
3573                                 end = entry->end;
3574                                 rv = KERN_INVALID_ADDRESS;
3575                                 goto done;
3576                         }
3577                 } else if (entry->wired_count == 0) {
3578                         entry->wired_count++;
3579
3580                         npages = atop(entry->end - entry->start);
3581                         if (user_wire && !vm_map_wire_user_count_add(npages)) {
3582                                 vm_map_wire_entry_failure(map, entry,
3583                                     entry->start);
3584                                 end = entry->end;
3585                                 rv = KERN_RESOURCE_SHORTAGE;
3586                                 goto done;
3587                         }
3588
3589                         /*
3590                          * Release the map lock, relying on the in-transition
3591                          * mark.  Mark the map busy for fork.
3592                          */
3593                         saved_start = entry->start;
3594                         saved_end = entry->end;
3595                         last_timestamp = map->timestamp;
3596                         bidx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry);
3597                         incr = pagesizes[bidx];
3598                         vm_map_busy(map);
3599                         vm_map_unlock(map);
3600
3601                         for (faddr = saved_start; faddr < saved_end;
3602                             faddr += incr) {
3603                                 /*
3604                                  * Simulate a fault to get the page and enter
3605                                  * it into the physical map.
3606                                  */
3607                                 rv = vm_fault(map, faddr, VM_PROT_NONE,
3608                                     VM_FAULT_WIRE, NULL);
3609                                 if (rv != KERN_SUCCESS)
3610                                         break;
3611                         }
3612                         vm_map_lock(map);
3613                         vm_map_unbusy(map);
3614                         if (last_timestamp + 1 != map->timestamp) {
3615                                 /*
3616                                  * Look again for the entry because the map was
3617                                  * modified while it was unlocked.  The entry
3618                                  * may have been clipped, but NOT merged or
3619                                  * deleted.
3620                                  */
3621                                 if (!vm_map_lookup_entry(map, saved_start,
3622                                     &next_entry))
3623                                         KASSERT(false,
3624                                             ("vm_map_wire: lookup failed"));
3625                                 first_entry = (entry == first_entry) ?
3626                                     next_entry : NULL;
3627                                 for (entry = next_entry; entry->end < saved_end;
3628                                     entry = vm_map_entry_succ(entry)) {
3629                                         /*
3630                                          * In case of failure, handle entries
3631                                          * that were not fully wired here;
3632                                          * fully wired entries are handled
3633                                          * later.
3634                                          */
3635                                         if (rv != KERN_SUCCESS &&
3636                                             faddr < entry->end)
3637                                                 vm_map_wire_entry_failure(map,
3638                                                     entry, faddr);
3639                                 }
3640                         }
3641                         if (rv != KERN_SUCCESS) {
3642                                 vm_map_wire_entry_failure(map, entry, faddr);
3643                                 if (user_wire)
3644                                         vm_map_wire_user_count_sub(npages);
3645                                 end = entry->end;
3646                                 goto done;
3647                         }
3648                 } else if (!user_wire ||
3649                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3650                         entry->wired_count++;
3651                 }
3652                 /*
3653                  * Check the map for holes in the specified region.
3654                  * If holes_ok was specified, skip this check.
3655                  */
3656                 next_entry = vm_map_entry_succ(entry);
3657                 if (!holes_ok &&
3658                     entry->end < end && next_entry->start > entry->end) {
3659                         end = entry->end;
3660                         rv = KERN_INVALID_ADDRESS;
3661                         goto done;
3662                 }
3663         }
3664         rv = KERN_SUCCESS;
3665 done:
3666         need_wakeup = false;
3667         if (first_entry == NULL &&
3668             !vm_map_lookup_entry(map, start, &first_entry)) {
3669                 KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
3670                 prev_entry = first_entry;
3671                 entry = vm_map_entry_succ(first_entry);
3672         } else {
3673                 prev_entry = vm_map_entry_pred(first_entry);
3674                 entry = first_entry;
3675         }
3676         for (; entry->start < end;
3677             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3678                 /*
3679                  * If holes_ok was specified, an empty
3680                  * space in the unwired region could have been mapped
3681                  * while the map lock was dropped for faulting in the
3682                  * pages or draining MAP_ENTRY_IN_TRANSITION.
3683                  * Moreover, another thread could be simultaneously
3684                  * wiring this new mapping entry.  Detect these cases
3685                  * and skip any entries not marked as in transition by us.
3686                  *
3687                  * Another way to get an entry not marked with
3688                  * MAP_ENTRY_IN_TRANSITION is after failed clipping,
3689                  * which set rv to KERN_INVALID_ARGUMENT.
3690                  */
3691                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3692                     entry->wiring_thread != curthread) {
3693                         KASSERT(holes_ok || rv == KERN_INVALID_ARGUMENT,
3694                             ("vm_map_wire: !HOLESOK and new/changed entry"));
3695                         continue;
3696                 }
3697
3698                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) {
3699                         /* do nothing */
3700                 } else if (rv == KERN_SUCCESS) {
3701                         if (user_wire)
3702                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
3703                 } else if (entry->wired_count == -1) {
3704                         /*
3705                          * Wiring failed on this entry.  Thus, unwiring is
3706                          * unnecessary.
3707                          */
3708                         entry->wired_count = 0;
3709                 } else if (!user_wire ||
3710                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3711                         /*
3712                          * Undo the wiring.  Wiring succeeded on this entry
3713                          * but failed on a later entry.  
3714                          */
3715                         if (entry->wired_count == 1) {
3716                                 vm_map_entry_unwire(map, entry);
3717                                 if (user_wire)
3718                                         vm_map_wire_user_count_sub(
3719                                             atop(entry->end - entry->start));
3720                         } else
3721                                 entry->wired_count--;
3722                 }
3723                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3724                     ("vm_map_wire: in-transition flag missing %p", entry));
3725                 KASSERT(entry->wiring_thread == curthread,
3726                     ("vm_map_wire: alien wire %p", entry));
3727                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
3728                     MAP_ENTRY_WIRE_SKIPPED);
3729                 entry->wiring_thread = NULL;
3730                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3731                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3732                         need_wakeup = true;
3733                 }
3734                 vm_map_try_merge_entries(map, prev_entry, entry);
3735         }
3736         vm_map_try_merge_entries(map, prev_entry, entry);
3737         if (need_wakeup)
3738                 vm_map_wakeup(map);
3739         return (rv);
3740 }
3741
3742 /*
3743  * vm_map_sync
3744  *
3745  * Push any dirty cached pages in the address range to their pager.
3746  * If syncio is TRUE, dirty pages are written synchronously.
3747  * If invalidate is TRUE, any cached pages are freed as well.
3748  *
3749  * If the size of the region from start to end is zero, we are
3750  * supposed to flush all modified pages within the region containing
3751  * start.  Unfortunately, a region can be split or coalesced with
3752  * neighboring regions, making it difficult to determine what the
3753  * original region was.  Therefore, we approximate this requirement by
3754  * flushing the current region containing start.
3755  *
3756  * Returns an error if any part of the specified range is not mapped.
3757  */
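
/*
 * Usage sketch (illustrative, not part of this file): an msync(2)-style
 * caller flushes a range synchronously without discarding cached pages:
 *
 *	rv = vm_map_sync(map, trunc_page(addr), round_page(addr + len),
 *	    TRUE, FALSE);	// syncio = TRUE, invalidate = FALSE
 *
 * Passing start == end flushes the entire entry containing start, per the
 * approximation described above.
 */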
3758 int
3759 vm_map_sync(
3760         vm_map_t map,
3761         vm_offset_t start,
3762         vm_offset_t end,
3763         boolean_t syncio,
3764         boolean_t invalidate)
3765 {
3766         vm_map_entry_t entry, first_entry, next_entry;
3767         vm_size_t size;
3768         vm_object_t object;
3769         vm_ooffset_t offset;
3770         unsigned int last_timestamp;
3771         int bdry_idx;
3772         boolean_t failed;
3773
3774         vm_map_lock_read(map);
3775         VM_MAP_RANGE_CHECK(map, start, end);
3776         if (!vm_map_lookup_entry(map, start, &first_entry)) {
3777                 vm_map_unlock_read(map);
3778                 return (KERN_INVALID_ADDRESS);
3779         } else if (start == end) {
3780                 start = first_entry->start;
3781                 end = first_entry->end;
3782         }
3783
3784         /*
3785          * Make a first pass to check for user-wired memory, holes,
3786          * and partial invalidation of largepage mappings.
3787          */
3788         for (entry = first_entry; entry->start < end; entry = next_entry) {
3789                 if (invalidate) {
3790                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) {
3791                                 vm_map_unlock_read(map);
3792                                 return (KERN_INVALID_ARGUMENT);
3793                         }
3794                         bdry_idx = MAP_ENTRY_SPLIT_BOUNDARY_INDEX(entry);
3795                         if (bdry_idx != 0 &&
3796                             ((start & (pagesizes[bdry_idx] - 1)) != 0 ||
3797                             (end & (pagesizes[bdry_idx] - 1)) != 0)) {
3798                                 vm_map_unlock_read(map);
3799                                 return (KERN_INVALID_ARGUMENT);
3800                         }
3801                 }
3802                 next_entry = vm_map_entry_succ(entry);
3803                 if (end > entry->end &&
3804                     entry->end != next_entry->start) {
3805                         vm_map_unlock_read(map);
3806                         return (KERN_INVALID_ADDRESS);
3807                 }
3808         }
3809
3810         if (invalidate)
3811                 pmap_remove(map->pmap, start, end);
3812         failed = FALSE;
3813
3814         /*
3815          * Make a second pass, cleaning/uncaching pages from the indicated
3816          * objects as we go.
3817          */
3818         for (entry = first_entry; entry->start < end;) {
3819                 offset = entry->offset + (start - entry->start);
3820                 size = (end <= entry->end ? end : entry->end) - start;
3821                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
3822                         vm_map_t smap;
3823                         vm_map_entry_t tentry;
3824                         vm_size_t tsize;
3825
3826                         smap = entry->object.sub_map;
3827                         vm_map_lock_read(smap);
3828                         (void) vm_map_lookup_entry(smap, offset, &tentry);
3829                         tsize = tentry->end - offset;
3830                         if (tsize < size)
3831                                 size = tsize;
3832                         object = tentry->object.vm_object;
3833                         offset = tentry->offset + (offset - tentry->start);
3834                         vm_map_unlock_read(smap);
3835                 } else {
3836                         object = entry->object.vm_object;
3837                 }
3838                 vm_object_reference(object);
3839                 last_timestamp = map->timestamp;
3840                 vm_map_unlock_read(map);
3841                 if (!vm_object_sync(object, offset, size, syncio, invalidate))
3842                         failed = TRUE;
3843                 start += size;
3844                 vm_object_deallocate(object);
3845                 vm_map_lock_read(map);
3846                 if (last_timestamp == map->timestamp ||
3847                     !vm_map_lookup_entry(map, start, &entry))
3848                         entry = vm_map_entry_succ(entry);
3849         }
3850
3851         vm_map_unlock_read(map);
3852         return (failed ? KERN_FAILURE : KERN_SUCCESS);
3853 }
3854
3855 /*
3856  *      vm_map_entry_unwire:    [ internal use only ]
3857  *
3858  *      Make the region specified by this entry pageable.
3859  *
3860  *      The map in question should be locked.
3861  *      [This is the reason for this routine's existence.]
3862  */
3863 static void
3864 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3865 {
3866         vm_size_t size;
3867
3868         VM_MAP_ASSERT_LOCKED(map);
3869         KASSERT(entry->wired_count > 0,
3870             ("vm_map_entry_unwire: entry %p isn't wired", entry));
3871
3872         size = entry->end - entry->start;
3873         if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
3874                 vm_map_wire_user_count_sub(atop(size));
3875         pmap_unwire(map->pmap, entry->start, entry->end);
3876         vm_object_unwire(entry->object.vm_object, entry->offset, size,
3877             PQ_ACTIVE);
3878         entry->wired_count = 0;
3879 }
3880
3881 static void
3882 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
3883 {
3884
3885         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3886                 vm_object_deallocate(entry->object.vm_object);
3887         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
3888 }
3889
3890 /*
3891  *      vm_map_entry_delete:    [ internal use only ]
3892  *
3893  *      Deallocate the given entry from the target map.
3894  */
3895 static void
3896 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3897 {
3898         vm_object_t object;
3899         vm_pindex_t offidxstart, offidxend, size1;
3900         vm_size_t size;
3901
3902         vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
3903         object = entry->object.vm_object;
3904
3905         if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3906                 MPASS(entry->cred == NULL);
3907                 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3908                 MPASS(object == NULL);
3909                 vm_map_entry_deallocate(entry, map->system_map);
3910                 return;
3911         }
3912
3913         size = entry->end - entry->start;
3914         map->size -= size;
3915
3916         if (entry->cred != NULL) {
3917                 swap_release_by_cred(size, entry->cred);
3918                 crfree(entry->cred);
3919         }
3920
3921         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) {
3922                 entry->object.vm_object = NULL;
3923         } else if ((object->flags & OBJ_ANON) != 0 ||
3924             object == kernel_object) {
3925                 KASSERT(entry->cred == NULL || object->cred == NULL ||
3926                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3927                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
3928                 offidxstart = OFF_TO_IDX(entry->offset);
3929                 offidxend = offidxstart + atop(size);
3930                 VM_OBJECT_WLOCK(object);
3931                 if (object->ref_count != 1 &&
3932                     ((object->flags & OBJ_ONEMAPPING) != 0 ||
3933                     object == kernel_object)) {
3934                         vm_object_collapse(object);
3935
3936                         /*
3937                          * The option OBJPR_NOTMAPPED can be passed here
3938                          * because vm_map_delete() already performed
3939                          * pmap_remove() on the only mapping to this range
3940                          * of pages. 
3941                          */
3942                         vm_object_page_remove(object, offidxstart, offidxend,
3943                             OBJPR_NOTMAPPED);
3944                         if (offidxend >= object->size &&
3945                             offidxstart < object->size) {
3946                                 size1 = object->size;
3947                                 object->size = offidxstart;
3948                                 if (object->cred != NULL) {
3949                                         size1 -= object->size;
3950                                         KASSERT(object->charge >= ptoa(size1),
3951                                             ("object %p charge < 0", object));
3952                                         swap_release_by_cred(ptoa(size1),
3953                                             object->cred);
3954                                         object->charge -= ptoa(size1);
3955                                 }
3956                         }
3957                 }
3958                 VM_OBJECT_WUNLOCK(object);
3959         }
3960         if (map->system_map)
3961                 vm_map_entry_deallocate(entry, TRUE);
3962         else {
3963                 entry->defer_next = curthread->td_map_def_user;
3964                 curthread->td_map_def_user = entry;
3965         }
3966 }
3967
3968 /*
3969  *      vm_map_delete:  [ internal use only ]
3970  *
3971  *      Deallocates the given address range from the target
3972  *      map.
3973  */
3974 int
3975 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
3976 {
3977         vm_map_entry_t entry, next_entry, scratch_entry;
3978         int rv;
3979
3980         VM_MAP_ASSERT_LOCKED(map);
3981
3982         if (start == end)
3983                 return (KERN_SUCCESS);
3984
3985         /*
3986          * Find the start of the region, and clip it.
3987          * Step through all entries in this region.
3988          */
3989         rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry);
3990         if (rv != KERN_SUCCESS)
3991                 return (rv);
3992         for (; entry->start < end; entry = next_entry) {
3993                 /*
3994                  * Wait for wiring or unwiring of an entry to complete.
3995                  * Also wait for any system wirings to disappear on
3996                  * user maps.
3997                  */
3998                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
3999                     (vm_map_pmap(map) != kernel_pmap &&
4000                     vm_map_entry_system_wired_count(entry) != 0)) {
4001                         unsigned int last_timestamp;
4002                         vm_offset_t saved_start;
4003
4004                         saved_start = entry->start;
4005                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
4006                         last_timestamp = map->timestamp;
4007                         (void) vm_map_unlock_and_wait(map, 0);
4008                         vm_map_lock(map);
4009                         if (last_timestamp + 1 != map->timestamp) {
4010                                 /*
4011                                  * Look again for the entry because the map was
4012                                  * modified while it was unlocked.
4013                                  * Specifically, the entry may have been
4014                                  * clipped, merged, or deleted.
4015                                  */
4016                                 rv = vm_map_lookup_clip_start(map, saved_start,
4017                                     &next_entry, &scratch_entry);
4018                                 if (rv != KERN_SUCCESS)
4019                                         break;
4020                         } else
4021                                 next_entry = entry;
4022                         continue;
4023                 }
4024
4025                 /* XXXKIB or delete to the upper superpage boundary ? */
4026                 rv = vm_map_clip_end(map, entry, end);
4027                 if (rv != KERN_SUCCESS)
4028                         break;
4029                 next_entry = vm_map_entry_succ(entry);
4030
4031                 /*
4032                  * Unwire before removing addresses from the pmap; otherwise,
4033                  * unwiring will put the entries back in the pmap.
4034                  */
4035                 if (entry->wired_count != 0)
4036                         vm_map_entry_unwire(map, entry);
4037
4038                 /*
4039                  * Remove mappings for the pages, but only if the
4040                  * mappings could exist.  For instance, it does not
4041                  * make sense to call pmap_remove() for guard entries.
4042                  */
4043                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
4044                     entry->object.vm_object != NULL)
4045                         pmap_map_delete(map->pmap, entry->start, entry->end);
4046
4047                 if (entry->end == map->anon_loc)
4048                         map->anon_loc = entry->start;
4049
4050                 /*
4051                  * Delete the entry only after removing all pmap
4052                  * entries pointing to its pages.  (Otherwise, its
4053                  * page frames may be reallocated, and any modify bits
4054                  * will be set in the wrong object!)
4055                  */
4056                 vm_map_entry_delete(map, entry);
4057         }
4058         return (rv);
4059 }
4060
4061 /*
4062  *      vm_map_remove:
4063  *
4064  *      Remove the given address range from the target map.
4065  *      This is the exported form of vm_map_delete.
4066  */
4067 int
4068 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
4069 {
4070         int result;
4071
4072         vm_map_lock(map);
4073         VM_MAP_RANGE_CHECK(map, start, end);
4074         result = vm_map_delete(map, start, end);
4075         vm_map_unlock(map);
4076         return (result);
4077 }
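/*
 * Illustrative usage (added note, not taken from this file): a caller that
 * does not already hold the map lock can drop an address range with a single
 * call,
 *
 *      (void)vm_map_remove(map, addr, addr + size);
 *
 * which is equivalent to locking the map, calling vm_map_delete(), and
 * unlocking, as done above.
 */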
4078
4079 /*
4080  *      vm_map_check_protection:
4081  *
4082  *      Assert that the target map allows the specified privilege on the
4083  *      entire address region given.  The entire region must be allocated.
4084  *
4085  *      WARNING!  This code does not and should not check whether the
4086  *      contents of the region are accessible.  For example, a smaller file
4087  *      might be mapped into a larger address space.
4088  *
4089  *      NOTE!  This code is also called by munmap().
4090  *
4091  *      The map must be locked.  A read lock is sufficient.
4092  */
4093 boolean_t
4094 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
4095                         vm_prot_t protection)
4096 {
4097         vm_map_entry_t entry;
4098         vm_map_entry_t tmp_entry;
4099
4100         if (!vm_map_lookup_entry(map, start, &tmp_entry))
4101                 return (FALSE);
4102         entry = tmp_entry;
4103
4104         while (start < end) {
4105                 /*
4106                  * No holes allowed!
4107                  */
4108                 if (start < entry->start)
4109                         return (FALSE);
4110                 /*
4111                  * Check protection associated with entry.
4112                  */
4113                 if ((entry->protection & protection) != protection)
4114                         return (FALSE);
4115                 /* go to next entry */
4116                 start = entry->end;
4117                 entry = vm_map_entry_succ(entry);
4118         }
4119         return (TRUE);
4120 }
4121
4122 /*
4123  *
4124  *      vm_map_copy_swap_object:
4125  *
4126  *      Copies a swap-backed object from an existing map entry to a
4127  *      new one.  Carries forward the swap charge.  May change the
4128  *      src object on return.
4129  */
4130 static void
4131 vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry,
4132     vm_offset_t size, vm_ooffset_t *fork_charge)
4133 {
4134         vm_object_t src_object;
4135         struct ucred *cred;
4136         int charged;
4137
4138         src_object = src_entry->object.vm_object;
4139         charged = ENTRY_CHARGED(src_entry);
4140         if ((src_object->flags & OBJ_ANON) != 0) {
4141                 VM_OBJECT_WLOCK(src_object);
4142                 vm_object_collapse(src_object);
4143                 if ((src_object->flags & OBJ_ONEMAPPING) != 0) {
4144                         vm_object_split(src_entry);
4145                         src_object = src_entry->object.vm_object;
4146                 }
4147                 vm_object_reference_locked(src_object);
4148                 vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
4149                 VM_OBJECT_WUNLOCK(src_object);
4150         } else
4151                 vm_object_reference(src_object);
4152         if (src_entry->cred != NULL &&
4153             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
4154                 KASSERT(src_object->cred == NULL,
4155                     ("OVERCOMMIT: vm_map_copy_anon_entry: cred %p",
4156                      src_object));
4157                 src_object->cred = src_entry->cred;
4158                 src_object->charge = size;
4159         }
4160         dst_entry->object.vm_object = src_object;
4161         if (charged) {
4162                 cred = curthread->td_ucred;
4163                 crhold(cred);
4164                 dst_entry->cred = cred;
4165                 *fork_charge += size;
4166                 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
4167                         crhold(cred);
4168                         src_entry->cred = cred;
4169                         *fork_charge += size;
4170                 }
4171         }
4172 }
4173
4174 /*
4175  *      vm_map_copy_entry:
4176  *
4177  *      Copies the contents of the source entry to the destination
4178  *      entry.  The entries *must* be aligned properly.
4179  */
4180 static void
4181 vm_map_copy_entry(
4182         vm_map_t src_map,
4183         vm_map_t dst_map,
4184         vm_map_entry_t src_entry,
4185         vm_map_entry_t dst_entry,
4186         vm_ooffset_t *fork_charge)
4187 {
4188         vm_object_t src_object;
4189         vm_map_entry_t fake_entry;
4190         vm_offset_t size;
4191
4192         VM_MAP_ASSERT_LOCKED(dst_map);
4193
4194         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
4195                 return;
4196
4197         if (src_entry->wired_count == 0 ||
4198             (src_entry->protection & VM_PROT_WRITE) == 0) {
4199                 /*
4200                  * If the source entry is marked needs_copy, it is already
4201                  * write-protected.
4202                  */
4203                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
4204                     (src_entry->protection & VM_PROT_WRITE) != 0) {
4205                         pmap_protect(src_map->pmap,
4206                             src_entry->start,
4207                             src_entry->end,
4208                             src_entry->protection & ~VM_PROT_WRITE);
4209                 }
4210
4211                 /*
4212                  * Make a copy of the object.
4213                  */
4214                 size = src_entry->end - src_entry->start;
4215                 if ((src_object = src_entry->object.vm_object) != NULL) {
4216                         if ((src_object->flags & OBJ_SWAP) != 0) {
4217                                 vm_map_copy_swap_object(src_entry, dst_entry,
4218                                     size, fork_charge);
4219                                 /* May have split/collapsed, reload obj. */
4220                                 src_object = src_entry->object.vm_object;
4221                         } else {
4222                                 vm_object_reference(src_object);
4223                                 dst_entry->object.vm_object = src_object;
4224                         }
4225                         src_entry->eflags |= MAP_ENTRY_COW |
4226                             MAP_ENTRY_NEEDS_COPY;
4227                         dst_entry->eflags |= MAP_ENTRY_COW |
4228                             MAP_ENTRY_NEEDS_COPY;
4229                         dst_entry->offset = src_entry->offset;
4230                         if (src_entry->eflags & MAP_ENTRY_WRITECNT) {
4231                                 /*
4232                                  * MAP_ENTRY_WRITECNT cannot
4233                                  * indicate write reference from
4234                                  * src_entry, since the entry is
4235                                  * marked as needs copy.  Allocate a
4236                                  * fake entry that is used to
4237                                  * decrement object->un_pager writecount
4238                                  * at the appropriate time.  Attach
4239                                  * fake_entry to the deferred list.
4240                                  */
4241                                 fake_entry = vm_map_entry_create(dst_map);
4242                                 fake_entry->eflags = MAP_ENTRY_WRITECNT;
4243                                 src_entry->eflags &= ~MAP_ENTRY_WRITECNT;
4244                                 vm_object_reference(src_object);
4245                                 fake_entry->object.vm_object = src_object;
4246                                 fake_entry->start = src_entry->start;
4247                                 fake_entry->end = src_entry->end;
4248                                 fake_entry->defer_next =
4249                                     curthread->td_map_def_user;
4250                                 curthread->td_map_def_user = fake_entry;
4251                         }
4252
4253                         pmap_copy(dst_map->pmap, src_map->pmap,
4254                             dst_entry->start, dst_entry->end - dst_entry->start,
4255                             src_entry->start);
4256                 } else {
4257                         dst_entry->object.vm_object = NULL;
4258                         if ((dst_entry->eflags & MAP_ENTRY_GUARD) == 0)
4259                                 dst_entry->offset = 0;
4260                         if (src_entry->cred != NULL) {
4261                                 dst_entry->cred = curthread->td_ucred;
4262                                 crhold(dst_entry->cred);
4263                                 *fork_charge += size;
4264                         }
4265                 }
4266         } else {
4267                 /*
4268                  * We don't want to make writeable wired pages copy-on-write.
4269                  * Immediately copy these pages into the new map by simulating
4270                  * page faults.  The new pages are pageable.
4271                  */
4272                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
4273                     fork_charge);
4274         }
4275 }
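/*
 * Note (added for clarity): when the source entry is backed by an object and
 * is not wired for write, both entries above end up referencing the same
 * object with MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY set, so the first write
 * fault through either map creates a private shadow object for that map.
 */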
4276
4277 /*
4278  * vmspace_map_entry_forked:
4279  * Update the newly-forked vmspace each time a map entry is inherited
4280  * or copied.  The values for vm_dsize and vm_tsize are approximate
4281  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
4282  */
4283 static void
4284 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
4285     vm_map_entry_t entry)
4286 {
4287         vm_size_t entrysize;
4288         vm_offset_t newend;
4289
4290         if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
4291                 return;
4292         entrysize = entry->end - entry->start;
4293         vm2->vm_map.size += entrysize;
4294         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
4295                 vm2->vm_ssize += btoc(entrysize);
4296         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
4297             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
4298                 newend = MIN(entry->end,
4299                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
4300                 vm2->vm_dsize += btoc(newend - entry->start);
4301         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
4302             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
4303                 newend = MIN(entry->end,
4304                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
4305                 vm2->vm_tsize += btoc(newend - entry->start);
4306         }
4307 }
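/*
 * Note (added for clarity): vm_ssize, vm_dsize, and vm_tsize are maintained
 * in pages while map entries are byte ranges, hence the btoc() (bytes to
 * clicks/pages) and ctob() (clicks/pages to bytes) conversions above.
 */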
4308
4309 /*
4310  * vmspace_fork:
4311  * Create a new process vmspace structure and vm_map
4312  * based on those of an existing process.  The new map
4313  * is based on the old map, according to the inheritance
4314  * values on the regions in that map.
4315  *
4316  * XXX It might be worth coalescing the entries added to the new vmspace.
4317  *
4318  * The source map must not be locked.
4319  */
4320 struct vmspace *
4321 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
4322 {
4323         struct vmspace *vm2;
4324         vm_map_t new_map, old_map;
4325         vm_map_entry_t new_entry, old_entry;
4326         vm_object_t object;
4327         int error, locked __diagused;
4328         vm_inherit_t inh;
4329
4330         old_map = &vm1->vm_map;
4331         /* Copy immutable fields of vm1 to vm2. */
4332         vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
4333             pmap_pinit);
4334         if (vm2 == NULL)
4335                 return (NULL);
4336
4337         vm2->vm_taddr = vm1->vm_taddr;
4338         vm2->vm_daddr = vm1->vm_daddr;
4339         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
4340         vm2->vm_stacktop = vm1->vm_stacktop;
4341         vm2->vm_shp_base = vm1->vm_shp_base;
4342         vm_map_lock(old_map);
4343         if (old_map->busy)
4344                 vm_map_wait_busy(old_map);
4345         new_map = &vm2->vm_map;
4346         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
4347         KASSERT(locked, ("vmspace_fork: lock failed"));
4348
4349         error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
4350         if (error != 0) {
4351                 sx_xunlock(&old_map->lock);
4352                 sx_xunlock(&new_map->lock);
4353                 vm_map_process_deferred();
4354                 vmspace_free(vm2);
4355                 return (NULL);
4356         }
4357
4358         new_map->anon_loc = old_map->anon_loc;
4359         new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART |
4360             MAP_ASLR_STACK | MAP_WXORX);
4361
4362         VM_MAP_ENTRY_FOREACH(old_entry, old_map) {
4363                 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
4364                         panic("vm_map_fork: encountered a submap");
4365
4366                 inh = old_entry->inheritance;
4367                 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4368                     inh != VM_INHERIT_NONE)
4369                         inh = VM_INHERIT_COPY;
4370
4371                 switch (inh) {
4372                 case VM_INHERIT_NONE:
4373                         break;
4374
4375                 case VM_INHERIT_SHARE:
4376                         /*
4377                          * Clone the entry, creating the shared object if
4378                          * necessary.
4379                          */
4380                         object = old_entry->object.vm_object;
4381                         if (object == NULL) {
4382                                 vm_map_entry_back(old_entry);
4383                                 object = old_entry->object.vm_object;
4384                         }
4385
4386                         /*
4387                          * Add the reference before calling vm_object_shadow
4388                          * to ensure that a shadow object is created.
4389                          */
4390                         vm_object_reference(object);
4391                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4392                                 vm_object_shadow(&old_entry->object.vm_object,
4393                                     &old_entry->offset,
4394                                     old_entry->end - old_entry->start,
4395                                     old_entry->cred,
4396                                     /* Transfer the second reference too. */
4397                                     true);
4398                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4399                                 old_entry->cred = NULL;
4400
4401                                 /*
4402                                  * As in vm_map_merged_neighbor_dispose(),
4403                                  * the vnode lock will not be acquired in
4404                                  * this call to vm_object_deallocate().
4405                                  */
4406                                 vm_object_deallocate(object);
4407                                 object = old_entry->object.vm_object;
4408                         } else {
4409                                 VM_OBJECT_WLOCK(object);
4410                                 vm_object_clear_flag(object, OBJ_ONEMAPPING);
4411                                 if (old_entry->cred != NULL) {
4412                                         KASSERT(object->cred == NULL,
4413                                             ("vmspace_fork both cred"));
4414                                         object->cred = old_entry->cred;
4415                                         object->charge = old_entry->end -
4416                                             old_entry->start;
4417                                         old_entry->cred = NULL;
4418                                 }
4419
4420                                 /*
4421                                  * Assert the correct state of the vnode
4422                                  * v_writecount while the object is locked, so
4423                                  * that it does not need to be relocked later
4424                                  * just to check the assertion.
4425                                  */
4426                                 if (old_entry->eflags & MAP_ENTRY_WRITECNT &&
4427                                     object->type == OBJT_VNODE) {
4428                                         KASSERT(((struct vnode *)object->
4429                                             handle)->v_writecount > 0,
4430                                             ("vmspace_fork: v_writecount %p",
4431                                             object));
4432                                         KASSERT(object->un_pager.vnp.
4433                                             writemappings > 0,
4434                                             ("vmspace_fork: vnp.writecount %p",
4435                                             object));
4436                                 }
4437                                 VM_OBJECT_WUNLOCK(object);
4438                         }
4439
4440                         /*
4441                          * Clone the entry, referencing the shared object.
4442                          */
4443                         new_entry = vm_map_entry_create(new_map);
4444                         *new_entry = *old_entry;
4445                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4446                             MAP_ENTRY_IN_TRANSITION);
4447                         new_entry->wiring_thread = NULL;
4448                         new_entry->wired_count = 0;
4449                         if (new_entry->eflags & MAP_ENTRY_WRITECNT) {
4450                                 vm_pager_update_writecount(object,
4451                                     new_entry->start, new_entry->end);
4452                         }
4453                         vm_map_entry_set_vnode_text(new_entry, true);
4454
4455                         /*
4456                          * Insert the entry into the new map -- we know we're
4457                          * inserting at the end of the new map.
4458                          */
4459                         vm_map_entry_link(new_map, new_entry);
4460                         vmspace_map_entry_forked(vm1, vm2, new_entry);
4461
4462                         /*
4463                          * Update the physical map
4464                          */
4465                         pmap_copy(new_map->pmap, old_map->pmap,
4466                             new_entry->start,
4467                             (old_entry->end - old_entry->start),
4468                             old_entry->start);
4469                         break;
4470
4471                 case VM_INHERIT_COPY:
4472                         /*
4473                          * Clone the entry and link into the map.
4474                          */
4475                         new_entry = vm_map_entry_create(new_map);
4476                         *new_entry = *old_entry;
4477                         /*
4478                          * Copied entry is COW over the old object.
4479                          */
4480                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4481                             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT);
4482                         new_entry->wiring_thread = NULL;
4483                         new_entry->wired_count = 0;
4484                         new_entry->object.vm_object = NULL;
4485                         new_entry->cred = NULL;
4486                         vm_map_entry_link(new_map, new_entry);
4487                         vmspace_map_entry_forked(vm1, vm2, new_entry);
4488                         vm_map_copy_entry(old_map, new_map, old_entry,
4489                             new_entry, fork_charge);
4490                         vm_map_entry_set_vnode_text(new_entry, true);
4491                         break;
4492
4493                 case VM_INHERIT_ZERO:
4494                         /*
4495                          * Create a new anonymous mapping entry modelled from
4496                          * the old one.
4497                          */
4498                         new_entry = vm_map_entry_create(new_map);
4499                         memset(new_entry, 0, sizeof(*new_entry));
4500
4501                         new_entry->start = old_entry->start;
4502                         new_entry->end = old_entry->end;
4503                         new_entry->eflags = old_entry->eflags &
4504                             ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
4505                             MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC |
4506                             MAP_ENTRY_SPLIT_BOUNDARY_MASK);
4507                         new_entry->protection = old_entry->protection;
4508                         new_entry->max_protection = old_entry->max_protection;
4509                         new_entry->inheritance = VM_INHERIT_ZERO;
4510
4511                         vm_map_entry_link(new_map, new_entry);
4512                         vmspace_map_entry_forked(vm1, vm2, new_entry);
4513
4514                         new_entry->cred = curthread->td_ucred;
4515                         crhold(new_entry->cred);
4516                         *fork_charge += (new_entry->end - new_entry->start);
4517
4518                         break;
4519                 }
4520         }
4521         /*
4522          * Use inlined vm_map_unlock() to postpone handling the deferred
4523          * map entries, which cannot be done until both old_map and
4524          * new_map locks are released.
4525          */
4526         sx_xunlock(&old_map->lock);
4527         sx_xunlock(&new_map->lock);
4528         vm_map_process_deferred();
4529
4530         return (vm2);
4531 }
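/*
 * Note (added for illustration): callers are expected to account for the
 * accumulated *fork_charge against the new process, e.g. by reserving it
 * with swap_reserve_by_cred() as vmspace_unshare() does below.
 */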
4532
4533 /*
4534  * Create a process's stack for exec_new_vmspace().  This function is never
4535  * asked to wire the newly created stack.
4536  */
4537 int
4538 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4539     vm_prot_t prot, vm_prot_t max, int cow)
4540 {
4541         vm_size_t growsize, init_ssize;
4542         rlim_t vmemlim;
4543         int rv;
4544
4545         MPASS((map->flags & MAP_WIREFUTURE) == 0);
4546         growsize = sgrowsiz;
4547         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
4548         vm_map_lock(map);
4549         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4550         /* If we would blow our VMEM resource limit, no go */
4551         if (map->size + init_ssize > vmemlim) {
4552                 rv = KERN_NO_SPACE;
4553                 goto out;
4554         }
4555         rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
4556             max, cow);
4557 out:
4558         vm_map_unlock(map);
4559         return (rv);
4560 }
4561
4562 static int stack_guard_page = 1;
4563 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
4564     &stack_guard_page, 0,
4565     "Specifies the number of guard pages for a stack that grows");
4566
4567 static int
4568 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4569     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
4570 {
4571         vm_map_entry_t gap_entry, new_entry, prev_entry;
4572         vm_offset_t bot, gap_bot, gap_top, top;
4573         vm_size_t init_ssize, sgp;
4574         int orient, rv;
4575
4576         /*
4577          * The stack orientation is piggybacked with the cow argument.
4578          * Extract it into orient and mask the cow argument so that we
4579          * don't pass it around further.
4580          */
4581         orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
4582         KASSERT(orient != 0, ("No stack grow direction"));
4583         KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
4584             ("bi-dir stack"));
4585
4586         if (max_ssize == 0 ||
4587             !vm_map_range_valid(map, addrbos, addrbos + max_ssize))
4588                 return (KERN_INVALID_ADDRESS);
4589         sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4590             (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4591             (vm_size_t)stack_guard_page * PAGE_SIZE;
4592         if (sgp >= max_ssize)
4593                 return (KERN_INVALID_ARGUMENT);
4594
4595         init_ssize = growsize;
4596         if (max_ssize < init_ssize + sgp)
4597                 init_ssize = max_ssize - sgp;
4598
4599         /* If addr is already mapped, no go */
4600         if (vm_map_lookup_entry(map, addrbos, &prev_entry))
4601                 return (KERN_NO_SPACE);
4602
4603         /*
4604          * If we can't accommodate max_ssize in the current mapping, no go.
4605          */
4606         if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize)
4607                 return (KERN_NO_SPACE);
4608
4609         /*
4610          * We initially map a stack of only init_ssize.  We will grow as
4611          * needed later.  Depending on the orientation of the stack (i.e.
4612          * the grow direction) we map either at the top of the range or at
4613          * the bottom of the range.
4614          *
4615          * Note: we would normally expect prot and max to be VM_PROT_ALL,
4616          * and cow to be 0.  Possibly we should eliminate these as input
4617          * parameters, and just pass these values here in the insert call.
4618          */
4619         if (orient == MAP_STACK_GROWS_DOWN) {
4620                 bot = addrbos + max_ssize - init_ssize;
4621                 top = bot + init_ssize;
4622                 gap_bot = addrbos;
4623                 gap_top = bot;
4624         } else /* if (orient == MAP_STACK_GROWS_UP) */ {
4625                 bot = addrbos;
4626                 top = bot + init_ssize;
4627                 gap_bot = top;
4628                 gap_top = addrbos + max_ssize;
4629         }
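        /*
         * Illustrative layout (added note) for the MAP_STACK_GROWS_DOWN case,
         * with addresses increasing upward:
         *
         *      addrbos + max_ssize -> +---------------------+  <- top
         *                             | initial stack pages |  (init_ssize)
         *      bot ------------------>+---------------------+
         *                             | gap: guard pages    |
         *                             | and room to grow    |
         *      addrbos -------------->+---------------------+  <- gap_bot
         */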
4630         rv = vm_map_insert1(map, NULL, 0, bot, top, prot, max, cow,
4631             &new_entry);
4632         if (rv != KERN_SUCCESS)
4633                 return (rv);
4634         KASSERT(new_entry->end == top || new_entry->start == bot,
4635             ("Bad entry start/end for new stack entry"));
4636         KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
4637             (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
4638             ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
4639         KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
4640             (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
4641             ("new entry lacks MAP_ENTRY_GROWS_UP"));
4642         if (gap_bot == gap_top)
4643                 return (KERN_SUCCESS);
4644         rv = vm_map_insert1(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
4645             VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
4646             MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP), &gap_entry);
4647         if (rv == KERN_SUCCESS) {
4648                 KASSERT((gap_entry->eflags & MAP_ENTRY_GUARD) != 0,
4649                     ("entry %p not gap %#x", gap_entry, gap_entry->eflags));
4650                 KASSERT((gap_entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
4651                     MAP_ENTRY_STACK_GAP_UP)) != 0,
4652                     ("entry %p not stack gap %#x", gap_entry,
4653                     gap_entry->eflags));
4654
4655                 /*
4656                  * A gap entry can never successfully handle a fault,
4657                  * so the read-ahead logic is never used for it.  Reuse
4658                  * the next_read field of the gap entry to store
4659                  * stack_guard_page for vm_map_growstack().
4660                  * Similarly, since a gap cannot have a backing object,
4661                  * store the original stack protections in the
4662                  * offset field.
4663                  */
4664                 gap_entry->next_read = sgp;
4665                 gap_entry->offset = prot | PROT_MAX(max);
4666         } else {
4667                 (void)vm_map_delete(map, bot, top);
4668         }
4669         return (rv);
4670 }
4671
4672 /*
4673  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
4674  * successfully grow the stack.
4675  */
4676 static int
4677 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
4678 {
4679         vm_map_entry_t stack_entry;
4680         struct proc *p;
4681         struct vmspace *vm;
4682         struct ucred *cred;
4683         vm_offset_t gap_end, gap_start, grow_start;
4684         vm_size_t grow_amount, guard, max_grow, sgp;
4685         vm_prot_t prot, max;
4686         rlim_t lmemlim, stacklim, vmemlim;
4687         int rv, rv1 __diagused;
4688         bool gap_deleted, grow_down, is_procstack;
4689 #ifdef notyet
4690         uint64_t limit;
4691 #endif
4692 #ifdef RACCT
4693         int error __diagused;
4694 #endif
4695
4696         p = curproc;
4697         vm = p->p_vmspace;
4698
4699         /*
4700          * Disallow stack growth when the access is performed by a
4701          * debugger or AIO daemon.  The reason is that the wrong
4702          * resource limits are applied.
4703          */
4704         if (p != initproc && (map != &p->p_vmspace->vm_map ||
4705             p->p_textvp == NULL))
4706                 return (KERN_FAILURE);
4707
4708         MPASS(!map->system_map);
4709
4710         lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
4711         stacklim = lim_cur(curthread, RLIMIT_STACK);
4712         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4713 retry:
4714         /* If addr is not in a hole for a stack grow area, no need to grow. */
4715         if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
4716                 return (KERN_FAILURE);
4717         if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
4718                 return (KERN_SUCCESS);
4719         if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
4720                 stack_entry = vm_map_entry_succ(gap_entry);
4721                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
4722                     stack_entry->start != gap_entry->end)
4723                         return (KERN_FAILURE);
4724                 grow_amount = round_page(stack_entry->start - addr);
4725                 grow_down = true;
4726         } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
4727                 stack_entry = vm_map_entry_pred(gap_entry);
4728                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
4729                     stack_entry->end != gap_entry->start)
4730                         return (KERN_FAILURE);
4731                 grow_amount = round_page(addr + 1 - stack_entry->end);
4732                 grow_down = false;
4733         } else {
4734                 return (KERN_FAILURE);
4735         }
4736         guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4737             (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4738             gap_entry->next_read;
4739         max_grow = gap_entry->end - gap_entry->start;
4740         if (guard > max_grow)
4741                 return (KERN_NO_SPACE);
4742         max_grow -= guard;
4743         if (grow_amount > max_grow)
4744                 return (KERN_NO_SPACE);
4745
4746         /*
4747          * If this is the main process stack, see if we're over the stack
4748          * limit.
4749          */
4750         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
4751             addr < (vm_offset_t)vm->vm_stacktop;
4752         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
4753                 return (KERN_NO_SPACE);
4754
4755 #ifdef RACCT
4756         if (racct_enable) {
4757                 PROC_LOCK(p);
4758                 if (is_procstack && racct_set(p, RACCT_STACK,
4759                     ctob(vm->vm_ssize) + grow_amount)) {
4760                         PROC_UNLOCK(p);
4761                         return (KERN_NO_SPACE);
4762                 }
4763                 PROC_UNLOCK(p);
4764         }
4765 #endif
4766
4767         grow_amount = roundup(grow_amount, sgrowsiz);
4768         if (grow_amount > max_grow)
4769                 grow_amount = max_grow;
4770         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
4771                 grow_amount = trunc_page((vm_size_t)stacklim) -
4772                     ctob(vm->vm_ssize);
4773         }
4774
4775 #ifdef notyet
4776         PROC_LOCK(p);
4777         limit = racct_get_available(p, RACCT_STACK);
4778         PROC_UNLOCK(p);
4779         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
4780                 grow_amount = limit - ctob(vm->vm_ssize);
4781 #endif
4782
4783         if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
4784                 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
4785                         rv = KERN_NO_SPACE;
4786                         goto out;
4787                 }
4788 #ifdef RACCT
4789                 if (racct_enable) {
4790                         PROC_LOCK(p);
4791                         if (racct_set(p, RACCT_MEMLOCK,
4792                             ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
4793                                 PROC_UNLOCK(p);
4794                                 rv = KERN_NO_SPACE;
4795                                 goto out;
4796                         }
4797                         PROC_UNLOCK(p);
4798                 }
4799 #endif
4800         }
4801
4802         /* If we would blow our VMEM resource limit, no go */
4803         if (map->size + grow_amount > vmemlim) {
4804                 rv = KERN_NO_SPACE;
4805                 goto out;
4806         }
4807 #ifdef RACCT
4808         if (racct_enable) {
4809                 PROC_LOCK(p);
4810                 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
4811                         PROC_UNLOCK(p);
4812                         rv = KERN_NO_SPACE;
4813                         goto out;
4814                 }
4815                 PROC_UNLOCK(p);
4816         }
4817 #endif
4818
4819         if (vm_map_lock_upgrade(map)) {
4820                 gap_entry = NULL;
4821                 vm_map_lock_read(map);
4822                 goto retry;
4823         }
4824
4825         if (grow_down) {
4826                 /*
4827                  * The gap_entry "offset" field is overloaded.  See
4828                  * vm_map_stack_locked().
4829                  */
4830                 prot = PROT_EXTRACT(gap_entry->offset);
4831                 max = PROT_MAX_EXTRACT(gap_entry->offset);
4832                 sgp = gap_entry->next_read;
4833
4834                 grow_start = gap_entry->end - grow_amount;
4835                 if (gap_entry->start + grow_amount == gap_entry->end) {
4836                         gap_start = gap_entry->start;
4837                         gap_end = gap_entry->end;
4838                         vm_map_entry_delete(map, gap_entry);
4839                         gap_deleted = true;
4840                 } else {
4841                         MPASS(gap_entry->start < gap_entry->end - grow_amount);
4842                         vm_map_entry_resize(map, gap_entry, -grow_amount);
4843                         gap_deleted = false;
4844                 }
4845                 rv = vm_map_insert(map, NULL, 0, grow_start,
4846                     grow_start + grow_amount, prot, max, MAP_STACK_GROWS_DOWN);
4847                 if (rv != KERN_SUCCESS) {
4848                         if (gap_deleted) {
4849                                 rv1 = vm_map_insert1(map, NULL, 0, gap_start,
4850                                     gap_end, VM_PROT_NONE, VM_PROT_NONE,
4851                                     MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN,
4852                                     &gap_entry);
4853                                 MPASS(rv1 == KERN_SUCCESS);
4854                                 gap_entry->next_read = sgp;
4855                                 gap_entry->offset = prot | PROT_MAX(max);
4856                         } else
4857                                 vm_map_entry_resize(map, gap_entry,
4858                                     grow_amount);
4859                 }
4860         } else {
4861                 grow_start = stack_entry->end;
4862                 cred = stack_entry->cred;
4863                 if (cred == NULL && stack_entry->object.vm_object != NULL)
4864                         cred = stack_entry->object.vm_object->cred;
4865                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
4866                         rv = KERN_NO_SPACE;
4867                 /* Grow the underlying object if applicable. */
4868                 else if (stack_entry->object.vm_object == NULL ||
4869                     vm_object_coalesce(stack_entry->object.vm_object,
4870                     stack_entry->offset,
4871                     (vm_size_t)(stack_entry->end - stack_entry->start),
4872                     grow_amount, cred != NULL)) {
4873                         if (gap_entry->start + grow_amount == gap_entry->end) {
4874                                 vm_map_entry_delete(map, gap_entry);
4875                                 vm_map_entry_resize(map, stack_entry,
4876                                     grow_amount);
4877                         } else {
4878                                 gap_entry->start += grow_amount;
4879                                 stack_entry->end += grow_amount;
4880                         }
4881                         map->size += grow_amount;
4882                         rv = KERN_SUCCESS;
4883                 } else
4884                         rv = KERN_FAILURE;
4885         }
4886         if (rv == KERN_SUCCESS && is_procstack)
4887                 vm->vm_ssize += btoc(grow_amount);
4888
4889         /*
4890          * Heed the MAP_WIREFUTURE flag if it was set for this process.
4891          */
4892         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
4893                 rv = vm_map_wire_locked(map, grow_start,
4894                     grow_start + grow_amount,
4895                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
4896         }
4897         vm_map_lock_downgrade(map);
4898
4899 out:
4900 #ifdef RACCT
4901         if (racct_enable && rv != KERN_SUCCESS) {
4902                 PROC_LOCK(p);
4903                 error = racct_set(p, RACCT_VMEM, map->size);
4904                 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
4905                 if (!old_mlock) {
4906                         error = racct_set(p, RACCT_MEMLOCK,
4907                             ptoa(pmap_wired_count(map->pmap)));
4908                         KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
4909                 }
4910                 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
4911                 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
4912                 PROC_UNLOCK(p);
4913         }
4914 #endif
4915
4916         return (rv);
4917 }
4918
4919 /*
4920  * Unshare the specified VM space for exec.  If other processes are
4921  * mapped to it, then create a new one.  The new vmspace is empty.
4922  */
4923 int
4924 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
4925 {
4926         struct vmspace *oldvmspace = p->p_vmspace;
4927         struct vmspace *newvmspace;
4928
4929         KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
4930             ("vmspace_exec recursed"));
4931         newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
4932         if (newvmspace == NULL)
4933                 return (ENOMEM);
4934         newvmspace->vm_swrss = oldvmspace->vm_swrss;
4935         /*
4936          * This code is written like this for prototype purposes.  The
4937          * goal is to avoid running down the vmspace here, but to let the
4938          * other processes that are still using the vmspace finally run
4939          * it down.  Even though there is little or no chance of blocking
4940          * here, it is a good idea to keep this form for future mods.
4941          */
4942         PROC_VMSPACE_LOCK(p);
4943         p->p_vmspace = newvmspace;
4944         PROC_VMSPACE_UNLOCK(p);
4945         if (p == curthread->td_proc)
4946                 pmap_activate(curthread);
4947         curthread->td_pflags |= TDP_EXECVMSPC;
4948         return (0);
4949 }
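/*
 * Note (added for illustration): the old vmspace is deliberately not freed
 * here.  The TDP_EXECVMSPC flag set above is consumed by the exec path, which
 * is expected to drop the final reference to the old vmspace once the switch
 * to the new one is complete.
 */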
4950
4951 /*
4952  * Unshare the specified VM space for forcing COW.  This
4953  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
4954  */
4955 int
4956 vmspace_unshare(struct proc *p)
4957 {
4958         struct vmspace *oldvmspace = p->p_vmspace;
4959         struct vmspace *newvmspace;
4960         vm_ooffset_t fork_charge;
4961
4962         /*
4963          * The caller is responsible for ensuring that the reference count
4964          * cannot concurrently transition 1 -> 2.
4965          */
4966         if (refcount_load(&oldvmspace->vm_refcnt) == 1)
4967                 return (0);
4968         fork_charge = 0;
4969         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
4970         if (newvmspace == NULL)
4971                 return (ENOMEM);
4972         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
4973                 vmspace_free(newvmspace);
4974                 return (ENOMEM);
4975         }
4976         PROC_VMSPACE_LOCK(p);
4977         p->p_vmspace = newvmspace;
4978         PROC_VMSPACE_UNLOCK(p);
4979         if (p == curthread->td_proc)
4980                 pmap_activate(curthread);
4981         vmspace_free(oldvmspace);
4982         return (0);
4983 }
4984
4985 /*
4986  *      vm_map_lookup:
4987  *
4988  *      Finds the VM object, offset, and
4989  *      protection for a given virtual address in the
4990  *      specified map, assuming a page fault of the
4991  *      type specified.
4992  *
4993  *      Leaves the map in question locked for read; return
4994  *      values are guaranteed until a vm_map_lookup_done
4995  *      call is performed.  Note that the map argument
4996  *      is in/out; the returned map must be used in
4997  *      the call to vm_map_lookup_done.
4998  *
4999  *      A handle (out_entry) is returned for use in
5000  *      vm_map_lookup_done, to make that fast.
5001  *
5002  *      If a lookup is requested with "write protection"
5003  *      specified, the map may be changed to perform virtual
5004  *      copying operations, although the data referenced will
5005  *      remain the same.
5006  */
5007 int
5008 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
5009               vm_offset_t vaddr,
5010               vm_prot_t fault_typea,
5011               vm_map_entry_t *out_entry,        /* OUT */
5012               vm_object_t *object,              /* OUT */
5013               vm_pindex_t *pindex,              /* OUT */
5014               vm_prot_t *out_prot,              /* OUT */
5015               boolean_t *wired)                 /* OUT */
5016 {
5017         vm_map_entry_t entry;
5018         vm_map_t map = *var_map;
5019         vm_prot_t prot;
5020         vm_prot_t fault_type;
5021         vm_object_t eobject;
5022         vm_size_t size;
5023         struct ucred *cred;
5024
5025 RetryLookup:
5026
5027         vm_map_lock_read(map);
5028
5029 RetryLookupLocked:
5030         /*
5031          * Lookup the faulting address.
5032          */
5033         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
5034                 vm_map_unlock_read(map);
5035                 return (KERN_INVALID_ADDRESS);
5036         }
5037
5038         entry = *out_entry;
5039
5040         /*
5041          * Handle submaps.
5042          */
5043         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5044                 vm_map_t old_map = map;
5045
5046                 *var_map = map = entry->object.sub_map;
5047                 vm_map_unlock_read(old_map);
5048                 goto RetryLookup;
5049         }
5050
5051         /*
5052          * Check whether this task is allowed to have this page.
5053          */
5054         prot = entry->protection;
5055         if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
5056                 fault_typea &= ~VM_PROT_FAULT_LOOKUP;
5057                 if (prot == VM_PROT_NONE && map != kernel_map &&
5058                     (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
5059                     (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
5060                     MAP_ENTRY_STACK_GAP_UP)) != 0 &&
5061                     vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
5062                         goto RetryLookupLocked;
5063         }
5064         fault_type = fault_typea & VM_PROT_ALL;
5065         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
5066                 vm_map_unlock_read(map);
5067                 return (KERN_PROTECTION_FAILURE);
5068         }
5069         KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
5070             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
5071             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
5072             ("entry %p flags %x", entry, entry->eflags));
5073         if ((fault_typea & VM_PROT_COPY) != 0 &&
5074             (entry->max_protection & VM_PROT_WRITE) == 0 &&
5075             (entry->eflags & MAP_ENTRY_COW) == 0) {
5076                 vm_map_unlock_read(map);
5077                 return (KERN_PROTECTION_FAILURE);
5078         }
5079
5080         /*
5081          * If this page is not pageable, we have to get it for all possible
5082          * accesses.
5083          */
5084         *wired = (entry->wired_count != 0);
5085         if (*wired)
5086                 fault_type = entry->protection;
5087         size = entry->end - entry->start;
5088
5089         /*
5090          * If the entry was copy-on-write, we either copy now or demote permissions:
5091          */
5092         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5093                 /*
5094                  * If we want to write the page, we may as well handle that
5095                  * now since we've got the map locked.
5096                  *
5097                  * If we don't need to write the page, we just demote the
5098                  * permissions allowed.
5099                  */
5100                 if ((fault_type & VM_PROT_WRITE) != 0 ||
5101                     (fault_typea & VM_PROT_COPY) != 0) {
5102                         /*
5103                          * Make a new object, and place it in the object
5104                          * chain.  Note that no new references have appeared
5105                          * -- one just moved from the map to the new
5106                          * object.
5107                          */
5108                         if (vm_map_lock_upgrade(map))
5109                                 goto RetryLookup;
5110
5111                         if (entry->cred == NULL) {
5112                                 /*
5113                                  * The debugger owner is charged for
5114                                  * the memory.
5115                                  */
5116                                 cred = curthread->td_ucred;
5117                                 crhold(cred);
5118                                 if (!swap_reserve_by_cred(size, cred)) {
5119                                         crfree(cred);
5120                                         vm_map_unlock(map);
5121                                         return (KERN_RESOURCE_SHORTAGE);
5122                                 }
5123                                 entry->cred = cred;
5124                         }
5125                         eobject = entry->object.vm_object;
5126                         vm_object_shadow(&entry->object.vm_object,
5127                             &entry->offset, size, entry->cred, false);
5128                         if (eobject == entry->object.vm_object) {
5129                                 /*
5130                                  * The object was not shadowed.
5131                                  */
5132                                 swap_release_by_cred(size, entry->cred);
5133                                 crfree(entry->cred);
5134                         }
5135                         entry->cred = NULL;
5136                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
5137
5138                         vm_map_lock_downgrade(map);
5139                 } else {
5140                         /*
5141                          * We're attempting to read a copy-on-write page --
5142                          * don't allow writes.
5143                          */
5144                         prot &= ~VM_PROT_WRITE;
5145                 }
5146         }
5147
5148         /*
5149          * Create an object if necessary.
5150          */
5151         if (entry->object.vm_object == NULL && !map->system_map) {
5152                 if (vm_map_lock_upgrade(map))
5153                         goto RetryLookup;
5154                 entry->object.vm_object = vm_object_allocate_anon(atop(size),
5155                     NULL, entry->cred, size);
5156                 entry->offset = 0;
5157                 entry->cred = NULL;
5158                 vm_map_lock_downgrade(map);
5159         }
5160
5161         /*
5162          * Return the object/offset from this entry.  If the entry was
5163          * copy-on-write or empty, it has been fixed up.
5164          */
5165         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5166         *object = entry->object.vm_object;
5167
5168         *out_prot = prot;
5169         return (KERN_SUCCESS);
5170 }
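/*
 * Illustrative calling sequence (added note; error handling omitted):
 *
 *      rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *          &pindex, &prot, &wired);
 *      ...fault in the page at (object, pindex)...
 *      vm_map_lookup_done(map, entry);
 *
 * The map written back through the first argument is the one that must be
 * passed to vm_map_lookup_done(), since the lookup may have descended into
 * a submap.
 */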
5171
5172 /*
5173  *      vm_map_lookup_locked:
5174  *
5175  *      Lookup the faulting address.  A version of vm_map_lookup that returns 
5176  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
5177  */
5178 int
5179 vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
5180                      vm_offset_t vaddr,
5181                      vm_prot_t fault_typea,
5182                      vm_map_entry_t *out_entry, /* OUT */
5183                      vm_object_t *object,       /* OUT */
5184                      vm_pindex_t *pindex,       /* OUT */
5185                      vm_prot_t *out_prot,       /* OUT */
5186                      boolean_t *wired)          /* OUT */
5187 {
5188         vm_map_entry_t entry;
5189         vm_map_t map = *var_map;
5190         vm_prot_t prot;
5191         vm_prot_t fault_type = fault_typea;
5192
5193         /*
5194          * Lookup the faulting address.
5195          */
5196         if (!vm_map_lookup_entry(map, vaddr, out_entry))
5197                 return (KERN_INVALID_ADDRESS);
5198
5199         entry = *out_entry;
5200
5201         /*
5202          * Fail if the entry refers to a submap.
5203          */
5204         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
5205                 return (KERN_FAILURE);
5206
5207         /*
5208          * Check whether this task is allowed to have this page.
5209          */
5210         prot = entry->protection;
5211         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
5212         if ((fault_type & prot) != fault_type)
5213                 return (KERN_PROTECTION_FAILURE);
5214
5215         /*
5216          * If this page is not pageable, we have to get it for all possible
5217          * accesses.
5218          */
5219         *wired = (entry->wired_count != 0);
5220         if (*wired)
5221                 fault_type = entry->protection;
5222
5223         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5224                 /*
5225                  * Fail if the entry was copy-on-write for a write fault.
5226                  */
5227                 if (fault_type & VM_PROT_WRITE)
5228                         return (KERN_FAILURE);
5229                 /*
5230                  * We're attempting to read a copy-on-write page --
5231                  * don't allow writes.
5232                  */
5233                 prot &= ~VM_PROT_WRITE;
5234         }
5235
5236         /*
5237          * Fail if an object should be created.
5238          */
5239         if (entry->object.vm_object == NULL && !map->system_map)
5240                 return (KERN_FAILURE);
5241
5242         /*
5243          * Return the object/offset from this entry.  If the entry was
5244          * copy-on-write or empty, it has been fixed up.
5245          */
5246         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5247         *object = entry->object.vm_object;
5248
5249         *out_prot = prot;
5250         return (KERN_SUCCESS);
5251 }
5252
5253 /*
5254  *      vm_map_lookup_done:
5255  *
5256  *      Releases locks acquired by a vm_map_lookup
5257  *      (according to the handle returned by that lookup).
5258  */
5259 void
5260 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
5261 {
5262         /*
5263          * Unlock the main-level map
5264          */
5265         vm_map_unlock_read(map);
5266 }
5267
5268 vm_offset_t
5269 vm_map_max_KBI(const struct vm_map *map)
5270 {
5271
5272         return (vm_map_max(map));
5273 }
5274
5275 vm_offset_t
5276 vm_map_min_KBI(const struct vm_map *map)
5277 {
5278
5279         return (vm_map_min(map));
5280 }
5281
5282 pmap_t
5283 vm_map_pmap_KBI(vm_map_t map)
5284 {
5285
5286         return (map->pmap);
5287 }
5288
5289 bool
5290 vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end)
5291 {
5292
5293         return (vm_map_range_valid(map, start, end));
5294 }
5295
5296 #ifdef INVARIANTS
5297 static void
5298 _vm_map_assert_consistent(vm_map_t map, int check)
5299 {
5300         vm_map_entry_t entry, prev;
5301         vm_map_entry_t cur, header, lbound, ubound;
5302         vm_size_t max_left, max_right;
5303
5304 #ifdef DIAGNOSTIC
5305         ++map->nupdates;
5306 #endif
5307         if (enable_vmmap_check != check)
5308                 return;
5309
5310         header = prev = &map->header;
5311         VM_MAP_ENTRY_FOREACH(entry, map) {
5312                 KASSERT(prev->end <= entry->start,
5313                     ("map %p prev->end = %jx, start = %jx", map,
5314                     (uintmax_t)prev->end, (uintmax_t)entry->start));
5315                 KASSERT(entry->start < entry->end,
5316                     ("map %p start = %jx, end = %jx", map,
5317                     (uintmax_t)entry->start, (uintmax_t)entry->end));
5318                 KASSERT(entry->left == header ||
5319                     entry->left->start < entry->start,
5320                     ("map %p left->start = %jx, start = %jx", map,
5321                     (uintmax_t)entry->left->start, (uintmax_t)entry->start));
5322                 KASSERT(entry->right == header ||
5323                     entry->start < entry->right->start,
5324                     ("map %p start = %jx, right->start = %jx", map,
5325                     (uintmax_t)entry->start, (uintmax_t)entry->right->start));
5326                 cur = map->root;
5327                 lbound = ubound = header;
5328                 for (;;) {
5329                         if (entry->start < cur->start) {
5330                                 ubound = cur;
5331                                 cur = cur->left;
5332                                 KASSERT(cur != lbound,
5333                                     ("map %p cannot find %jx",
5334                                     map, (uintmax_t)entry->start));
5335                         } else if (cur->end <= entry->start) {
5336                                 lbound = cur;
5337                                 cur = cur->right;
5338                                 KASSERT(cur != ubound,
5339                                     ("map %p cannot find %jx",
5340                                     map, (uintmax_t)entry->start));
5341                         } else {
5342                                 KASSERT(cur == entry,
5343                                     ("map %p cannot find %jx",
5344                                     map, (uintmax_t)entry->start));
5345                                 break;
5346                         }
5347                 }
5348                 max_left = vm_map_entry_max_free_left(entry, lbound);
5349                 max_right = vm_map_entry_max_free_right(entry, ubound);
5350                 KASSERT(entry->max_free == vm_size_max(max_left, max_right),
5351                     ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
5352                     (uintmax_t)entry->max_free,
5353                     (uintmax_t)max_left, (uintmax_t)max_right));
5354                 prev = entry;
5355         }
5356         KASSERT(prev->end <= entry->start,
5357             ("map %p prev->end = %jx, start = %jx", map,
5358             (uintmax_t)prev->end, (uintmax_t)entry->start));
5359 }
5360 #endif
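
/*
 * Editor's note (sketch, not part of the original source): the checker above
 * walks every map entry, re-finds it from the entry tree's root by a bounded
 * search, and validates the cached max_free values.  It is normally reached
 * through assertion macros defined earlier in this file (assumed names:
 * VM_MAP_ASSERT_CONSISTENT and VM_MAP_UNLOCK_CONSISTENT) and only runs when
 * enable_vmmap_check matches the requested check type, e.g.:
 */
#if 0
        /* hypothetical call site, assuming the macro exists as named */
        vm_map_lock(map);
        /* ... mutate the entry tree ... */
        VM_MAP_ASSERT_CONSISTENT(map);
        vm_map_unlock(map);
#endif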
5361
5362 #include "opt_ddb.h"
5363 #ifdef DDB
5364 #include <sys/kernel.h>
5365
5366 #include <ddb/ddb.h>
5367
5368 static void
5369 vm_map_print(vm_map_t map)
5370 {
5371         vm_map_entry_t entry, prev;
5372
5373         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
5374             (void *)map,
5375             (void *)map->pmap, map->nentries, map->timestamp);
5376
5377         db_indent += 2;
5378         prev = &map->header;
5379         VM_MAP_ENTRY_FOREACH(entry, map) {
5380                 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x\n",
5381                     (void *)entry, (void *)entry->start, (void *)entry->end,
5382                     entry->eflags);
5383                 {
5384                         static const char * const inheritance_name[4] =
5385                         {"share", "copy", "none", "donate_copy"};
5386
5387                         db_iprintf(" prot=%x/%x/%s",
5388                             entry->protection,
5389                             entry->max_protection,
5390                             inheritance_name[(int)(unsigned char)
5391                             entry->inheritance]);
5392                         if (entry->wired_count != 0)
5393                                 db_printf(", wired");
5394                 }
5395                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5396                         db_printf(", share=%p, offset=0x%jx\n",
5397                             (void *)entry->object.sub_map,
5398                             (uintmax_t)entry->offset);
5399                         if (prev == &map->header ||
5400                             prev->object.sub_map !=
5401                                 entry->object.sub_map) {
5402                                 db_indent += 2;
5403                                 vm_map_print((vm_map_t)entry->object.sub_map);
5404                                 db_indent -= 2;
5405                         }
5406                 } else {
5407                         if (entry->cred != NULL)
5408                                 db_printf(", ruid %d", entry->cred->cr_ruid);
5409                         db_printf(", object=%p, offset=0x%jx",
5410                             (void *)entry->object.vm_object,
5411                             (uintmax_t)entry->offset);
5412                         if (entry->object.vm_object && entry->object.vm_object->cred)
5413                                 db_printf(", obj ruid %d charge %jx",
5414                                     entry->object.vm_object->cred->cr_ruid,
5415                                     (uintmax_t)entry->object.vm_object->charge);
5416                         if (entry->eflags & MAP_ENTRY_COW)
5417                                 db_printf(", copy (%s)",
5418                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
5419                         db_printf("\n");
5420
5421                         if (prev == &map->header ||
5422                             prev->object.vm_object !=
5423                                 entry->object.vm_object) {
5424                                 db_indent += 2;
5425                                 vm_object_print((db_expr_t)(intptr_t)
5426                                                 entry->object.vm_object,
5427                                                 0, 0, (char *)0);
5428                                 db_indent -= 2;
5429                         }
5430                 }
5431                 prev = entry;
5432         }
5433         db_indent -= 2;
5434 }
5435
5436 DB_SHOW_COMMAND(map, map)
5437 {
5438
5439         if (!have_addr) {
5440                 db_printf("usage: show map <addr>\n");
5441                 return;
5442         }
5443         vm_map_print((vm_map_t)addr);
5444 }
5445
5446 DB_SHOW_COMMAND(procvm, procvm)
5447 {
5448         struct proc *p;
5449
5450         if (have_addr) {
5451                 p = db_lookup_proc(addr);
5452         } else {
5453                 p = curproc;
5454         }
5455
5456         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
5457             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
5458             (void *)vmspace_pmap(p->p_vmspace));
5459
5460         vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
5461 }
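
/*
 * Editor's note (sketch, not part of the original source): typical
 * interactive use of the two DDB commands above, on a kernel built with
 * "options DDB"; the address shown is illustrative.
 *
 *      db> show map 0xfffff80012345000         dump one vm_map
 *      db> show procvm                         dump curproc's vmspace and map
 *      db> show procvm <addr>                  proc chosen via db_lookup_proc()
 */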
5462
5463 #endif /* DDB */