1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  */
62
63 /*
64  *      Virtual memory mapping module.
65  */
66
67 #include <sys/cdefs.h>
68 __FBSDID("$FreeBSD$");
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/elf.h>
73 #include <sys/kernel.h>
74 #include <sys/ktr.h>
75 #include <sys/lock.h>
76 #include <sys/mutex.h>
77 #include <sys/proc.h>
78 #include <sys/vmmeter.h>
79 #include <sys/mman.h>
80 #include <sys/vnode.h>
81 #include <sys/racct.h>
82 #include <sys/resourcevar.h>
83 #include <sys/rwlock.h>
84 #include <sys/file.h>
85 #include <sys/sysctl.h>
86 #include <sys/sysent.h>
87 #include <sys/shm.h>
88
89 #include <vm/vm.h>
90 #include <vm/vm_param.h>
91 #include <vm/pmap.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/vm_object.h>
96 #include <vm/vm_pager.h>
97 #include <vm/vm_kern.h>
98 #include <vm/vm_extern.h>
99 #include <vm/vnode_pager.h>
100 #include <vm/swap_pager.h>
101 #include <vm/uma.h>
102
103 /*
104  *      Virtual memory maps provide for the mapping, protection,
105  *      and sharing of virtual memory objects.  In addition,
106  *      this module provides for an efficient virtual copy of
107  *      memory from one map to another.
108  *
109  *      Synchronization is required prior to most operations.
110  *
111  *      Maps consist of an ordered doubly-linked list of simple
112  *      entries; a self-adjusting binary search tree of these
113  *      entries is used to speed up lookups.
114  *
115  *      Since portions of maps are specified by start/end addresses,
116  *      which may not align with existing map entries, all
117  *      routines merely "clip" entries to these start/end values.
118  *      [That is, an entry is split into two, bordering at a
119  *      start or end value.]  Note that these clippings may not
120  *      always be necessary (as the two resulting entries are then
121  *      not changed); however, the clipping is done for convenience.
122  *
123  *      As mentioned above, virtual copy operations are performed
124  *      by copying VM object references from one map to
125  *      another, and then marking both regions as copy-on-write.
126  */
127
128 static struct mtx map_sleep_mtx;
129 static uma_zone_t mapentzone;
130 static uma_zone_t kmapentzone;
131 static uma_zone_t vmspace_zone;
132 static int vmspace_zinit(void *mem, int size, int flags);
133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
134     vm_offset_t max);
135 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
136 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
137 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
138 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
139     vm_map_entry_t gap_entry);
140 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
141     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
142 #ifdef INVARIANTS
143 static void vmspace_zdtor(void *mem, int size, void *arg);
144 #endif
145 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
146     vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
147     int cow);
148 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
149     vm_offset_t failed_addr);
150
151 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
152     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
153      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
154
155 /* 
156  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
157  * stable.
158  */
159 #define PROC_VMSPACE_LOCK(p) do { } while (0)
160 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
161
162 /*
163  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
164  *
165  *      Asserts that the starting and ending region
166  *      addresses fall within the valid range of the map.
167  */
168 #define VM_MAP_RANGE_CHECK(map, start, end)             \
169                 {                                       \
170                 if (start < vm_map_min(map))            \
171                         start = vm_map_min(map);        \
172                 if (end > vm_map_max(map))              \
173                         end = vm_map_max(map);          \
174                 if (start > end)                        \
175                         start = end;                    \
176                 }
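/*
 *      Worked example (hypothetical values): for a map whose valid range
 *      is [0x1000, 0x8000), the macro clamps a caller-supplied range:
 *
 *              vm_offset_t start = 0x0800, end = 0x9000;
 *
 *              VM_MAP_RANGE_CHECK(map, start, end);
 *              KASSERT(start == 0x1000 && end == 0x8000, ("clamped"));
 *
 *      If the clamped start would exceed end, start is pulled down to
 *      end, leaving an empty range.
 */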
177
178 /*
179  *      vm_map_startup:
180  *
181  *      Initialize the vm_map module.  Must be called before
182  *      any other vm_map routines.
183  *
184  *      Map and entry structures are allocated from the general
185  *      purpose memory pool with some exceptions:
186  *
187  *      - The kernel map and kmem submap are allocated statically.
188  *      - Kernel map entries are allocated out of a static pool.
189  *
190  *      These restrictions are necessary since malloc() uses the
191  *      maps and requires map entries.
192  */
193
194 void
195 vm_map_startup(void)
196 {
197         mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
198         kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
199             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
200             UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
201         mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
202             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
203         vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
204 #ifdef INVARIANTS
205             vmspace_zdtor,
206 #else
207             NULL,
208 #endif
209             vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
210 }
211
212 static int
213 vmspace_zinit(void *mem, int size, int flags)
214 {
215         struct vmspace *vm;
216         vm_map_t map;
217
218         vm = (struct vmspace *)mem;
219         map = &vm->vm_map;
220
221         memset(map, 0, sizeof(*map));
222         mtx_init(&map->system_mtx, "vm map (system)", NULL,
223             MTX_DEF | MTX_DUPOK);
224         sx_init(&map->lock, "vm map (user)");
225         PMAP_LOCK_INIT(vmspace_pmap(vm));
226         return (0);
227 }
228
229 #ifdef INVARIANTS
230 static void
231 vmspace_zdtor(void *mem, int size, void *arg)
232 {
233         struct vmspace *vm;
234
235         vm = (struct vmspace *)mem;
236         KASSERT(vm->vm_map.nentries == 0,
237             ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
238         KASSERT(vm->vm_map.size == 0,
239             ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
240 }
241 #endif  /* INVARIANTS */
242
243 /*
244  * Allocate a vmspace structure, including a vm_map and pmap,
245  * and initialize those structures.  The refcnt is set to 1.
246  */
247 struct vmspace *
248 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
249 {
250         struct vmspace *vm;
251
252         vm = uma_zalloc(vmspace_zone, M_WAITOK);
253         KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
254         if (!pinit(vmspace_pmap(vm))) {
255                 uma_zfree(vmspace_zone, vm);
256                 return (NULL);
257         }
258         CTR1(KTR_VM, "vmspace_alloc: %p", vm);
259         _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
260         vm->vm_refcnt = 1;
261         vm->vm_shm = NULL;
262         vm->vm_swrss = 0;
263         vm->vm_tsize = 0;
264         vm->vm_dsize = 0;
265         vm->vm_ssize = 0;
266         vm->vm_taddr = 0;
267         vm->vm_daddr = 0;
268         vm->vm_maxsaddr = 0;
269         return (vm);
270 }
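/*
 * Illustrative only: a caller creating a fresh user address space might
 * use something like the sketch below, with pmap_pinit as the pmap
 * initialization callback; "minuser" and "maxuser" stand in for the
 * ABI's address-space bounds and are not defined here.
 *
 *      struct vmspace *vm;
 *
 *      vm = vmspace_alloc(minuser, maxuser, pmap_pinit);
 *      if (vm == NULL)
 *              return (ENOMEM);
 *
 * A NULL return means the pinit callback failed.
 */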
271
272 #ifdef RACCT
273 static void
274 vmspace_container_reset(struct proc *p)
275 {
276
277         PROC_LOCK(p);
278         racct_set(p, RACCT_DATA, 0);
279         racct_set(p, RACCT_STACK, 0);
280         racct_set(p, RACCT_RSS, 0);
281         racct_set(p, RACCT_MEMLOCK, 0);
282         racct_set(p, RACCT_VMEM, 0);
283         PROC_UNLOCK(p);
284 }
285 #endif
286
287 static inline void
288 vmspace_dofree(struct vmspace *vm)
289 {
290
291         CTR1(KTR_VM, "vmspace_free: %p", vm);
292
293         /*
294          * Make sure any SysV shm is freed; it might not have been
295          * freed in exit1().
296          */
297         shmexit(vm);
298
299         /*
300          * Lock the map, to wait out all other references to it.
301          * Delete all of the mappings and pages they hold, then call
302          * the pmap module to reclaim anything left.
303          */
304         (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
305             vm_map_max(&vm->vm_map));
306
307         pmap_release(vmspace_pmap(vm));
308         vm->vm_map.pmap = NULL;
309         uma_zfree(vmspace_zone, vm);
310 }
311
312 void
313 vmspace_free(struct vmspace *vm)
314 {
315
316         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
317             "vmspace_free() called");
318
319         if (vm->vm_refcnt == 0)
320                 panic("vmspace_free: attempt to free already freed vmspace");
321
322         if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
323                 vmspace_dofree(vm);
324 }
325
326 void
327 vmspace_exitfree(struct proc *p)
328 {
329         struct vmspace *vm;
330
331         PROC_VMSPACE_LOCK(p);
332         vm = p->p_vmspace;
333         p->p_vmspace = NULL;
334         PROC_VMSPACE_UNLOCK(p);
335         KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
336         vmspace_free(vm);
337 }
338
339 void
340 vmspace_exit(struct thread *td)
341 {
342         int refcnt;
343         struct vmspace *vm;
344         struct proc *p;
345
346         /*
347          * Release user portion of address space.
348          * This releases references to vnodes,
349          * which could cause I/O if the file has been unlinked.
350          * Need to do this early enough that we can still sleep.
351          *
352          * The last exiting process to reach this point releases as
353          * much of the environment as it can. vmspace_dofree() is the
354          * slower fallback in case another process had a temporary
355          * reference to the vmspace.
356          */
357
358         p = td->td_proc;
359         vm = p->p_vmspace;
360         atomic_add_int(&vmspace0.vm_refcnt, 1);
361         refcnt = vm->vm_refcnt;
362         do {
363                 if (refcnt > 1 && p->p_vmspace != &vmspace0) {
364                         /* Switch now since other proc might free vmspace */
365                         PROC_VMSPACE_LOCK(p);
366                         p->p_vmspace = &vmspace0;
367                         PROC_VMSPACE_UNLOCK(p);
368                         pmap_activate(td);
369                 }
370         } while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1));
371         if (refcnt == 1) {
372                 if (p->p_vmspace != vm) {
373                         /* vmspace not yet freed, switch back */
374                         PROC_VMSPACE_LOCK(p);
375                         p->p_vmspace = vm;
376                         PROC_VMSPACE_UNLOCK(p);
377                         pmap_activate(td);
378                 }
379                 pmap_remove_pages(vmspace_pmap(vm));
380                 /* Switch now since this proc will free vmspace */
381                 PROC_VMSPACE_LOCK(p);
382                 p->p_vmspace = &vmspace0;
383                 PROC_VMSPACE_UNLOCK(p);
384                 pmap_activate(td);
385                 vmspace_dofree(vm);
386         }
387 #ifdef RACCT
388         if (racct_enable)
389                 vmspace_container_reset(p);
390 #endif
391 }
392
393 /* Acquire reference to vmspace owned by another process. */
394
395 struct vmspace *
396 vmspace_acquire_ref(struct proc *p)
397 {
398         struct vmspace *vm;
399         int refcnt;
400
401         PROC_VMSPACE_LOCK(p);
402         vm = p->p_vmspace;
403         if (vm == NULL) {
404                 PROC_VMSPACE_UNLOCK(p);
405                 return (NULL);
406         }
407         refcnt = vm->vm_refcnt;
408         do {
409                 if (refcnt <= 0) {      /* Avoid 0->1 transition */
410                         PROC_VMSPACE_UNLOCK(p);
411                         return (NULL);
412                 }
413         } while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1));
414         if (vm != p->p_vmspace) {
415                 PROC_VMSPACE_UNLOCK(p);
416                 vmspace_free(vm);
417                 return (NULL);
418         }
419         PROC_VMSPACE_UNLOCK(p);
420         return (vm);
421 }
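/*
 * Sketch of the usual pattern (illustrative; the error value is the
 * caller's choice): take a reference, work on the map, then drop the
 * reference with vmspace_free().
 *
 *      struct vmspace *vm;
 *
 *      vm = vmspace_acquire_ref(p);
 *      if (vm == NULL)
 *              return (ESRCH);
 *      ... operate on &vm->vm_map under the appropriate map lock ...
 *      vmspace_free(vm);
 */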
422
423 /*
424  * Switch between vmspaces in an AIO kernel process.
425  *
426  * The new vmspace is either the vmspace of a user process obtained
427  * from an active AIO request or the initial vmspace of the AIO kernel
428  * process (when it is idling).  Because user processes will block to
429  * drain any active AIO requests before proceeding in exit() or
430  * execve(), the reference count for vmspaces from AIO requests can
431  * never be 0.  Similarly, AIO kernel processes hold an extra
432  * reference on their initial vmspace for the life of the process.  As
433  * a result, the 'newvm' vmspace always has a non-zero reference
434  * count.  This permits an additional reference on 'newvm' to be
435  * acquired via a simple atomic increment rather than the loop in
436  * vmspace_acquire_ref() above.
437  */
438 void
439 vmspace_switch_aio(struct vmspace *newvm)
440 {
441         struct vmspace *oldvm;
442
443         /* XXX: Need some way to assert that this is an aio daemon. */
444
445         KASSERT(newvm->vm_refcnt > 0,
446             ("vmspace_switch_aio: newvm unreferenced"));
447
448         oldvm = curproc->p_vmspace;
449         if (oldvm == newvm)
450                 return;
451
452         /*
453          * Point to the new address space and refer to it.
454          */
455         curproc->p_vmspace = newvm;
456         atomic_add_int(&newvm->vm_refcnt, 1);
457
458         /* Activate the new mapping. */
459         pmap_activate(curthread);
460
461         vmspace_free(oldvm);
462 }
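/*
 * Illustrative sequence for an AIO daemon (names are placeholders):
 * adopt the vmspace attached to a job, perform the I/O, then switch
 * back to the daemon's own initial vmspace.
 *
 *      vmspace_switch_aio(job_vmspace);
 *      ... copy data to/from the user address space ...
 *      vmspace_switch_aio(aiod_original_vmspace);
 */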
463
464 void
465 _vm_map_lock(vm_map_t map, const char *file, int line)
466 {
467
468         if (map->system_map)
469                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
470         else
471                 sx_xlock_(&map->lock, file, line);
472         map->timestamp++;
473 }
474
475 void
476 vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
477 {
478         vm_object_t object;
479         struct vnode *vp;
480         bool vp_held;
481
482         if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
483                 return;
484         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
485             ("Submap with execs"));
486         object = entry->object.vm_object;
487         KASSERT(object != NULL, ("No object for text, entry %p", entry));
488         if ((object->flags & OBJ_ANON) != 0)
489                 object = object->handle;
490         else
491                 KASSERT(object->backing_object == NULL,
492                     ("non-anon object %p shadows", object));
493         KASSERT(object != NULL, ("No content object for text, entry %p obj %p",
494             entry, entry->object.vm_object));
495
496         /*
497          * Mostly, we do not lock the backing object.  It is
498          * referenced by the entry we are processing, so it cannot go
499          * away.
500          */
501         vp = NULL;
502         vp_held = false;
503         if (object->type == OBJT_DEAD) {
504                 /*
505                  * For OBJT_DEAD objects, v_writecount was handled in
506                  * vnode_pager_dealloc().
507                  */
508         } else if (object->type == OBJT_VNODE) {
509                 vp = object->handle;
510         } else if (object->type == OBJT_SWAP) {
511                 KASSERT((object->flags & OBJ_TMPFS_NODE) != 0,
512                     ("vm_map_entry_set_vnode_text: swap and !TMPFS "
513                     "entry %p, object %p, add %d", entry, object, add));
514                 /*
515                  * Tmpfs VREG node, which was reclaimed, has
516                  * OBJ_TMPFS_NODE flag set, but not OBJ_TMPFS.  In
517                  * this case there is no v_writecount to adjust.
518                  */
519                 VM_OBJECT_RLOCK(object);
520                 if ((object->flags & OBJ_TMPFS) != 0) {
521                         vp = object->un_pager.swp.swp_tmpfs;
522                         if (vp != NULL) {
523                                 vhold(vp);
524                                 vp_held = true;
525                         }
526                 }
527                 VM_OBJECT_RUNLOCK(object);
528         } else {
529                 KASSERT(0,
530                     ("vm_map_entry_set_vnode_text: wrong object type, "
531                     "entry %p, object %p, add %d", entry, object, add));
532         }
533         if (vp != NULL) {
534                 if (add) {
535                         VOP_SET_TEXT_CHECKED(vp);
536                 } else {
537                         vn_lock(vp, LK_SHARED | LK_RETRY);
538                         VOP_UNSET_TEXT_CHECKED(vp);
539                         VOP_UNLOCK(vp);
540                 }
541                 if (vp_held)
542                         vdrop(vp);
543         }
544 }
545
546 /*
547  * Use a different name for this vm_map_entry field when its use
548  * is not consistent with its use as part of an ordered search tree.
549  */
550 #define defer_next right
551
552 static void
553 vm_map_process_deferred(void)
554 {
555         struct thread *td;
556         vm_map_entry_t entry, next;
557         vm_object_t object;
558
559         td = curthread;
560         entry = td->td_map_def_user;
561         td->td_map_def_user = NULL;
562         while (entry != NULL) {
563                 next = entry->defer_next;
564                 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
565                     MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
566                     MAP_ENTRY_VN_EXEC));
567                 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
568                         /*
569                          * Decrement the object's writemappings and
570                          * possibly the vnode's v_writecount.
571                          */
572                         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
573                             ("Submap with writecount"));
574                         object = entry->object.vm_object;
575                         KASSERT(object != NULL, ("No object for writecount"));
576                         vm_pager_release_writecount(object, entry->start,
577                             entry->end);
578                 }
579                 vm_map_entry_set_vnode_text(entry, false);
580                 vm_map_entry_deallocate(entry, FALSE);
581                 entry = next;
582         }
583 }
584
585 #ifdef INVARIANTS
586 static void
587 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
588 {
589
590         if (map->system_map)
591                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
592         else
593                 sx_assert_(&map->lock, SA_XLOCKED, file, line);
594 }
595
596 #define VM_MAP_ASSERT_LOCKED(map) \
597     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
598
599 enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL };
600 #ifdef DIAGNOSTIC
601 static int enable_vmmap_check = VMMAP_CHECK_UNLOCK;
602 #else
603 static int enable_vmmap_check = VMMAP_CHECK_NONE;
604 #endif
605 SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
606     &enable_vmmap_check, 0, "Enable vm map consistency checking");
607
608 static void _vm_map_assert_consistent(vm_map_t map, int check);
609
610 #define VM_MAP_ASSERT_CONSISTENT(map) \
611     _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
612 #ifdef DIAGNOSTIC
613 #define VM_MAP_UNLOCK_CONSISTENT(map) do {                              \
614         if (map->nupdates > map->nentries) {                            \
615                 _vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK);     \
616                 map->nupdates = 0;                                      \
617         }                                                               \
618 } while (0)
619 #else
620 #define VM_MAP_UNLOCK_CONSISTENT(map)
621 #endif
622 #else
623 #define VM_MAP_ASSERT_LOCKED(map)
624 #define VM_MAP_ASSERT_CONSISTENT(map)
625 #define VM_MAP_UNLOCK_CONSISTENT(map)
626 #endif /* INVARIANTS */
627
628 void
629 _vm_map_unlock(vm_map_t map, const char *file, int line)
630 {
631
632         VM_MAP_UNLOCK_CONSISTENT(map);
633         if (map->system_map)
634                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
635         else {
636                 sx_xunlock_(&map->lock, file, line);
637                 vm_map_process_deferred();
638         }
639 }
640
641 void
642 _vm_map_lock_read(vm_map_t map, const char *file, int line)
643 {
644
645         if (map->system_map)
646                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
647         else
648                 sx_slock_(&map->lock, file, line);
649 }
650
651 void
652 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
653 {
654
655         if (map->system_map)
656                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
657         else {
658                 sx_sunlock_(&map->lock, file, line);
659                 vm_map_process_deferred();
660         }
661 }
662
663 int
664 _vm_map_trylock(vm_map_t map, const char *file, int line)
665 {
666         int error;
667
668         error = map->system_map ?
669             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
670             !sx_try_xlock_(&map->lock, file, line);
671         if (error == 0)
672                 map->timestamp++;
673         return (error == 0);
674 }
675
676 int
677 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
678 {
679         int error;
680
681         error = map->system_map ?
682             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
683             !sx_try_slock_(&map->lock, file, line);
684         return (error == 0);
685 }
686
687 /*
688  *      _vm_map_lock_upgrade:   [ internal use only ]
689  *
690  *      Tries to upgrade a read (shared) lock on the specified map to a write
691  *      (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
692  *      non-zero value if the upgrade fails.  If the upgrade fails, the map is
693  *      returned without a read or write lock held.
694  *
695  *      Requires that the map be read locked.
696  */
697 int
698 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
699 {
700         unsigned int last_timestamp;
701
702         if (map->system_map) {
703                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
704         } else {
705                 if (!sx_try_upgrade_(&map->lock, file, line)) {
706                         last_timestamp = map->timestamp;
707                         sx_sunlock_(&map->lock, file, line);
708                         vm_map_process_deferred();
709                         /*
710                          * If the map's timestamp does not change while the
711                          * map is unlocked, then the upgrade succeeds.
712                          */
713                         sx_xlock_(&map->lock, file, line);
714                         if (last_timestamp != map->timestamp) {
715                                 sx_xunlock_(&map->lock, file, line);
716                                 return (1);
717                         }
718                 }
719         }
720         map->timestamp++;
721         return (0);
722 }
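/*
 * Illustrative caller pattern (sketch): a failed upgrade drops the
 * lock entirely, so anything derived from the map must be revalidated
 * after relocking.
 *
 *      if (vm_map_lock_upgrade(map) != 0) {
 *              vm_map_lock(map);
 *              ... re-lookup entries; the map may have changed ...
 *      }
 */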
723
724 void
725 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
726 {
727
728         if (map->system_map) {
729                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
730         } else {
731                 VM_MAP_UNLOCK_CONSISTENT(map);
732                 sx_downgrade_(&map->lock, file, line);
733         }
734 }
735
736 /*
737  *      vm_map_locked:
738  *
739  *      Returns a non-zero value if the caller holds a write (exclusive) lock
740  *      on the specified map and the value "0" otherwise.
741  */
742 int
743 vm_map_locked(vm_map_t map)
744 {
745
746         if (map->system_map)
747                 return (mtx_owned(&map->system_mtx));
748         else
749                 return (sx_xlocked(&map->lock));
750 }
751
752 /*
753  *      _vm_map_unlock_and_wait:
754  *
755  *      Atomically releases the lock on the specified map and puts the calling
756  *      thread to sleep.  The calling thread will remain asleep until either
757  *      vm_map_wakeup() is performed on the map or the specified timeout is
758  *      exceeded.
759  *
760  *      WARNING!  This function does not perform deferred deallocations of
761  *      objects and map entries.  Therefore, the calling thread is expected to
762  *      reacquire the map lock after reawakening and later perform an ordinary
763  *      unlock operation, such as vm_map_unlock(), before completing its
764  *      operation on the map.
765  */
766 int
767 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
768 {
769
770         VM_MAP_UNLOCK_CONSISTENT(map);
771         mtx_lock(&map_sleep_mtx);
772         if (map->system_map)
773                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
774         else
775                 sx_xunlock_(&map->lock, file, line);
776         return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
777             timo));
778 }
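/*
 * Illustrative wait loop (sketch): the caller must retake the lock
 * itself, since this function leaves the map unlocked and defers no
 * cleanup.
 *
 *      while (!condition(map)) {
 *              (void)vm_map_unlock_and_wait(map, 0);
 *              vm_map_lock(map);
 *      }
 *
 * "condition" is a placeholder for the caller's predicate, typically
 * paired with a vm_map_wakeup() on the waking side.
 */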
779
780 /*
781  *      vm_map_wakeup:
782  *
783  *      Awaken any threads that have slept on the map using
784  *      vm_map_unlock_and_wait().
785  */
786 void
787 vm_map_wakeup(vm_map_t map)
788 {
789
790         /*
791          * Acquire and release map_sleep_mtx to prevent a wakeup()
792          * from being performed (and lost) between the map unlock
793          * and the msleep() in _vm_map_unlock_and_wait().
794          */
795         mtx_lock(&map_sleep_mtx);
796         mtx_unlock(&map_sleep_mtx);
797         wakeup(&map->root);
798 }
799
800 void
801 vm_map_busy(vm_map_t map)
802 {
803
804         VM_MAP_ASSERT_LOCKED(map);
805         map->busy++;
806 }
807
808 void
809 vm_map_unbusy(vm_map_t map)
810 {
811
812         VM_MAP_ASSERT_LOCKED(map);
813         KASSERT(map->busy, ("vm_map_unbusy: not busy"));
814         if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
815                 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
816                 wakeup(&map->busy);
817         }
818 }
819
820 void 
821 vm_map_wait_busy(vm_map_t map)
822 {
823
824         VM_MAP_ASSERT_LOCKED(map);
825         while (map->busy) {
826                 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
827                 if (map->system_map)
828                         msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
829                 else
830                         sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
831         }
832         map->timestamp++;
833 }
834
835 long
836 vmspace_resident_count(struct vmspace *vmspace)
837 {
838         return pmap_resident_count(vmspace_pmap(vmspace));
839 }
840
841 /*
842  * Initialize an existing vm_map structure
843  * such as that in the vmspace structure.
844  */
845 static void
846 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
847 {
848
849         map->header.eflags = MAP_ENTRY_HEADER;
850         map->needs_wakeup = FALSE;
851         map->system_map = 0;
852         map->pmap = pmap;
853         map->header.end = min;
854         map->header.start = max;
855         map->flags = 0;
856         map->header.left = map->header.right = &map->header;
857         map->root = NULL;
858         map->timestamp = 0;
859         map->busy = 0;
860         map->anon_loc = 0;
861 #ifdef DIAGNOSTIC
862         map->nupdates = 0;
863 #endif
864 }
865
866 void
867 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
868 {
869
870         _vm_map_init(map, pmap, min, max);
871         mtx_init(&map->system_mtx, "vm map (system)", NULL,
872             MTX_DEF | MTX_DUPOK);
873         sx_init(&map->lock, "vm map (user)");
874 }
875
876 /*
877  *      vm_map_entry_dispose:   [ internal use only ]
878  *
879  *      Inverse of vm_map_entry_create.
880  */
881 static void
882 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
883 {
884         uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
885 }
886
887 /*
888  *      vm_map_entry_create:    [ internal use only ]
889  *
890  *      Allocates a VM map entry for insertion.
891  *      No entry fields are filled in.
892  */
893 static vm_map_entry_t
894 vm_map_entry_create(vm_map_t map)
895 {
896         vm_map_entry_t new_entry;
897
898         if (map->system_map)
899                 new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
900         else
901                 new_entry = uma_zalloc(mapentzone, M_WAITOK);
902         if (new_entry == NULL)
903                 panic("vm_map_entry_create: kernel resources exhausted");
904         return (new_entry);
905 }
906
907 /*
908  *      vm_map_entry_set_behavior:
909  *
910  *      Set the expected access behavior, either normal, random, or
911  *      sequential.
912  */
913 static inline void
914 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
915 {
916         entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
917             (behavior & MAP_ENTRY_BEHAV_MASK);
918 }
919
920 /*
921  *      vm_map_entry_max_free_{left,right}:
922  *
923  *      Compute the size of the largest free gap between two entries,
924  *      one the root of a tree and the other the ancestor of that root
925  *      that is the least or greatest ancestor found on the search path.
926  */
927 static inline vm_size_t
928 vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
929 {
930
931         return (root->left != left_ancestor ?
932             root->left->max_free : root->start - left_ancestor->end);
933 }
934
935 static inline vm_size_t
936 vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
937 {
938
939         return (root->right != right_ancestor ?
940             root->right->max_free : right_ancestor->start - root->end);
941 }
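/*
 *      Worked example (hypothetical addresses): if the nearest left
 *      ancestor ends at 0x2000, root starts at 0x5000, and root has no
 *      left child within the subtree, the left gap is
 *      0x5000 - 0x2000 = 0x3000; when a left child does exist, its
 *      cached max_free value is used instead.
 */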
942
943 /*
944  *      vm_map_entry_{pred,succ}:
945  *
946  *      Find the {predecessor, successor} of the entry by taking one step
947  *      in the appropriate direction and backtracking as much as necessary.
948  *      vm_map_entry_succ is defined in vm_map.h.
949  */
950 static inline vm_map_entry_t
951 vm_map_entry_pred(vm_map_entry_t entry)
952 {
953         vm_map_entry_t prior;
954
955         prior = entry->left;
956         if (prior->right->start < entry->start) {
957                 do
958                         prior = prior->right;
959                 while (prior->right != entry);
960         }
961         return (prior);
962 }
963
964 static inline vm_size_t
965 vm_size_max(vm_size_t a, vm_size_t b)
966 {
967
968         return (a > b ? a : b);
969 }
970
971 #define SPLAY_LEFT_STEP(root, y, llist, rlist, test) do {               \
972         vm_map_entry_t z;                                               \
973         vm_size_t max_free;                                             \
974                                                                         \
975         /*                                                              \
976          * Infer root->right->max_free == root->max_free when           \
977          * y->max_free < root->max_free || root->max_free == 0.         \
978          * Otherwise, look right to find it.                            \
979          */                                                             \
980         y = root->left;                                                 \
981         max_free = root->max_free;                                      \
982         KASSERT(max_free == vm_size_max(                                \
983             vm_map_entry_max_free_left(root, llist),                    \
984             vm_map_entry_max_free_right(root, rlist)),                  \
985             ("%s: max_free invariant fails", __func__));                \
986         if (max_free - 1 < vm_map_entry_max_free_left(root, llist))     \
987                 max_free = vm_map_entry_max_free_right(root, rlist);    \
988         if (y != llist && (test)) {                                     \
989                 /* Rotate right and make y root. */                     \
990                 z = y->right;                                           \
991                 if (z != root) {                                        \
992                         root->left = z;                                 \
993                         y->right = root;                                \
994                         if (max_free < y->max_free)                     \
995                             root->max_free = max_free =                 \
996                             vm_size_max(max_free, z->max_free);         \
997                 } else if (max_free < y->max_free)                      \
998                         root->max_free = max_free =                     \
999                             vm_size_max(max_free, root->start - y->end);\
1000                 root = y;                                               \
1001                 y = root->left;                                         \
1002         }                                                               \
1003         /* Copy right->max_free.  Put root on rlist. */                 \
1004         root->max_free = max_free;                                      \
1005         KASSERT(max_free == vm_map_entry_max_free_right(root, rlist),   \
1006             ("%s: max_free not copied from right", __func__));          \
1007         root->left = rlist;                                             \
1008         rlist = root;                                                   \
1009         root = y != llist ? y : NULL;                                   \
1010 } while (0)
1011
1012 #define SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do {              \
1013         vm_map_entry_t z;                                               \
1014         vm_size_t max_free;                                             \
1015                                                                         \
1016         /*                                                              \
1017          * Infer root->left->max_free == root->max_free when            \
1018          * y->max_free < root->max_free || root->max_free == 0.         \
1019          * Otherwise, look left to find it.                             \
1020          */                                                             \
1021         y = root->right;                                                \
1022         max_free = root->max_free;                                      \
1023         KASSERT(max_free == vm_size_max(                                \
1024             vm_map_entry_max_free_left(root, llist),                    \
1025             vm_map_entry_max_free_right(root, rlist)),                  \
1026             ("%s: max_free invariant fails", __func__));                \
1027         if (max_free - 1 < vm_map_entry_max_free_right(root, rlist))    \
1028                 max_free = vm_map_entry_max_free_left(root, llist);     \
1029         if (y != rlist && (test)) {                                     \
1030                 /* Rotate left and make y root. */                      \
1031                 z = y->left;                                            \
1032                 if (z != root) {                                        \
1033                         root->right = z;                                \
1034                         y->left = root;                                 \
1035                         if (max_free < y->max_free)                     \
1036                             root->max_free = max_free =                 \
1037                             vm_size_max(max_free, z->max_free);         \
1038                 } else if (max_free < y->max_free)                      \
1039                         root->max_free = max_free =                     \
1040                             vm_size_max(max_free, y->start - root->end);\
1041                 root = y;                                               \
1042                 y = root->right;                                        \
1043         }                                                               \
1044         /* Copy left->max_free.  Put root on llist. */                  \
1045         root->max_free = max_free;                                      \
1046         KASSERT(max_free == vm_map_entry_max_free_left(root, llist),    \
1047             ("%s: max_free not copied from left", __func__));           \
1048         root->right = llist;                                            \
1049         llist = root;                                                   \
1050         root = y != rlist ? y : NULL;                                   \
1051 } while (0)
1052
1053 /*
1054  * Walk down the tree until we find addr or a gap where addr would go, breaking
1055  * off left and right subtrees of nodes less than, or greater than addr.  Treat
1056  * subtrees with root->max_free < length as empty trees.  llist and rlist are
1057  * the two sides in reverse order (bottom-up), with llist linked by the right
1058  * pointer and rlist linked by the left pointer in the vm_map_entry, and both
1059  * lists terminated by &map->header.  This function, and the subsequent call to
1060  * vm_map_splay_merge_{left,right,pred,succ}, rely on the start and end address
1061  * values in &map->header.
1062  */
1063 static __always_inline vm_map_entry_t
1064 vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
1065     vm_map_entry_t *llist, vm_map_entry_t *rlist)
1066 {
1067         vm_map_entry_t left, right, root, y;
1068
1069         left = right = &map->header;
1070         root = map->root;
1071         while (root != NULL && root->max_free >= length) {
1072                 KASSERT(left->end <= root->start &&
1073                     root->end <= right->start,
1074                     ("%s: root not within tree bounds", __func__));
1075                 if (addr < root->start) {
1076                         SPLAY_LEFT_STEP(root, y, left, right,
1077                             y->max_free >= length && addr < y->start);
1078                 } else if (addr >= root->end) {
1079                         SPLAY_RIGHT_STEP(root, y, left, right,
1080                             y->max_free >= length && addr >= y->end);
1081                 } else
1082                         break;
1083         }
1084         *llist = left;
1085         *rlist = right;
1086         return (root);
1087 }
1088
1089 static __always_inline void
1090 vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *rlist)
1091 {
1092         vm_map_entry_t hi, right, y;
1093
1094         right = *rlist;
1095         hi = root->right == right ? NULL : root->right;
1096         if (hi == NULL)
1097                 return;
1098         do
1099                 SPLAY_LEFT_STEP(hi, y, root, right, true);
1100         while (hi != NULL);
1101         *rlist = right;
1102 }
1103
1104 static __always_inline void
1105 vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *llist)
1106 {
1107         vm_map_entry_t left, lo, y;
1108
1109         left = *llist;
1110         lo = root->left == left ? NULL : root->left;
1111         if (lo == NULL)
1112                 return;
1113         do
1114                 SPLAY_RIGHT_STEP(lo, y, left, root, true);
1115         while (lo != NULL);
1116         *llist = left;
1117 }
1118
1119 static inline void
1120 vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
1121 {
1122         vm_map_entry_t tmp;
1123
1124         tmp = *b;
1125         *b = *a;
1126         *a = tmp;
1127 }
1128
1129 /*
1130  * Walk back up the two spines, flip the pointers and set max_free.  The
1131  * subtrees of the root go at the bottom of llist and rlist.
1132  */
1133 static vm_size_t
1134 vm_map_splay_merge_left_walk(vm_map_entry_t header, vm_map_entry_t root,
1135     vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist)
1136 {
1137         do {
1138                 /*
1139                  * The max_free values of the children of llist are in
1140                  * llist->max_free and max_free.  Update with the
1141                  * max value.
1142                  */
1143                 llist->max_free = max_free =
1144                     vm_size_max(llist->max_free, max_free);
1145                 vm_map_entry_swap(&llist->right, &tail);
1146                 vm_map_entry_swap(&tail, &llist);
1147         } while (llist != header);
1148         root->left = tail;
1149         return (max_free);
1150 }
1151
1152 /*
1153  * When llist is known to be the predecessor of root.
1154  */
1155 static inline vm_size_t
1156 vm_map_splay_merge_pred(vm_map_entry_t header, vm_map_entry_t root,
1157     vm_map_entry_t llist)
1158 {
1159         vm_size_t max_free;
1160
1161         max_free = root->start - llist->end;
1162         if (llist != header) {
1163                 max_free = vm_map_splay_merge_left_walk(header, root,
1164                     root, max_free, llist);
1165         } else {
1166                 root->left = header;
1167                 header->right = root;
1168         }
1169         return (max_free);
1170 }
1171
1172 /*
1173  * When llist may or may not be the predecessor of root.
1174  */
1175 static inline vm_size_t
1176 vm_map_splay_merge_left(vm_map_entry_t header, vm_map_entry_t root,
1177     vm_map_entry_t llist)
1178 {
1179         vm_size_t max_free;
1180
1181         max_free = vm_map_entry_max_free_left(root, llist);
1182         if (llist != header) {
1183                 max_free = vm_map_splay_merge_left_walk(header, root,
1184                     root->left == llist ? root : root->left,
1185                     max_free, llist);
1186         }
1187         return (max_free);
1188 }
1189
1190 static vm_size_t
1191 vm_map_splay_merge_right_walk(vm_map_entry_t header, vm_map_entry_t root,
1192     vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist)
1193 {
1194         do {
1195                 /*
1196                  * The max_free values of the children of rlist are in
1197                  * rlist->max_free and max_free.  Update with the
1198                  * max value.
1199                  */
1200                 rlist->max_free = max_free =
1201                     vm_size_max(rlist->max_free, max_free);
1202                 vm_map_entry_swap(&rlist->left, &tail);
1203                 vm_map_entry_swap(&tail, &rlist);
1204         } while (rlist != header);
1205         root->right = tail;
1206         return (max_free);
1207 }
1208
1209 /*
1210  * When rlist is known to be the successor of root.
1211  */
1212 static inline vm_size_t
1213 vm_map_splay_merge_succ(vm_map_entry_t header, vm_map_entry_t root,
1214     vm_map_entry_t rlist)
1215 {
1216         vm_size_t max_free;
1217
1218         max_free = rlist->start - root->end;
1219         if (rlist != header) {
1220                 max_free = vm_map_splay_merge_right_walk(header, root,
1221                     root, max_free, rlist);
1222         } else {
1223                 root->right = header;
1224                 header->left = root;
1225         }
1226         return (max_free);
1227 }
1228
1229 /*
1230  * When rlist may or may not be the successor of root.
1231  */
1232 static inline vm_size_t
1233 vm_map_splay_merge_right(vm_map_entry_t header, vm_map_entry_t root,
1234     vm_map_entry_t rlist)
1235 {
1236         vm_size_t max_free;
1237
1238         max_free = vm_map_entry_max_free_right(root, rlist);
1239         if (rlist != header) {
1240                 max_free = vm_map_splay_merge_right_walk(header, root,
1241                     root->right == rlist ? root : root->right,
1242                     max_free, rlist);
1243         }
1244         return (max_free);
1245 }
1246
1247 /*
1248  *      vm_map_splay:
1249  *
1250  *      The Sleator and Tarjan top-down splay algorithm with the
1251  *      following variation.  Max_free must be computed bottom-up, so
1252  *      on the downward pass, maintain the left and right spines in
1253  *      reverse order.  Then, make a second pass up each side to fix
1254  *      the pointers and compute max_free.  The time bound is O(log n)
1255  *      amortized.
1256  *
1257  *      The tree is threaded, which means that there are no null pointers.
1258  *      When a node has no left child, its left pointer points to its
1259  *      predecessor, which is the last ancestor on the search path from the root
1260  *      where the search branched right.  Likewise, when a node has no right
1261  *      child, its right pointer points to its successor.  The map header node
1262  *      is the predecessor of the first map entry, and the successor of the
1263  *      last.
1264  *
1265  *      The new root is the vm_map_entry containing "addr", or else an
1266  *      adjacent entry (lower if possible) if addr is not in the tree.
1267  *
1268  *      The map must be locked, and remains locked on return.
1269  *
1270  *      Returns: the new root.
1271  */
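/*
 *      Small illustration (hypothetical three-entry map): with entries
 *      A < B < C and B at the root, A's missing left child is threaded
 *      to &map->header (A's predecessor) and its missing right child to
 *      B (A's successor); C's missing left child is threaded to B and
 *      its missing right child to &map->header.  No pointer is NULL.
 */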
1272 static vm_map_entry_t
1273 vm_map_splay(vm_map_t map, vm_offset_t addr)
1274 {
1275         vm_map_entry_t header, llist, rlist, root;
1276         vm_size_t max_free_left, max_free_right;
1277
1278         header = &map->header;
1279         root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
1280         if (root != NULL) {
1281                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1282                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1283         } else if (llist != header) {
1284                 /*
1285                  * Recover the greatest node in the left
1286                  * subtree and make it the root.
1287                  */
1288                 root = llist;
1289                 llist = root->right;
1290                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1291                 max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1292         } else if (rlist != header) {
1293                 /*
1294                  * Recover the least node in the right
1295                  * subtree and make it the root.
1296                  */
1297                 root = rlist;
1298                 rlist = root->left;
1299                 max_free_left = vm_map_splay_merge_pred(header, root, llist);
1300                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1301         } else {
1302                 /* There is no root. */
1303                 return (NULL);
1304         }
1305         root->max_free = vm_size_max(max_free_left, max_free_right);
1306         map->root = root;
1307         VM_MAP_ASSERT_CONSISTENT(map);
1308         return (root);
1309 }
1310
1311 /*
1312  *      vm_map_entry_{un,}link:
1313  *
1314  *      Insert/remove entries from maps.  On linking, if new entry clips
1315  *      existing entry, trim existing entry to avoid overlap, and manage
1316  *      offsets.  On unlinking, merge disappearing entry with neighbor, if
1317  *      called for, and manage offsets.  Callers should not modify fields in
1318  *      entries already mapped.
1319  */
1320 static void
1321 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
1322 {
1323         vm_map_entry_t header, llist, rlist, root;
1324         vm_size_t max_free_left, max_free_right;
1325
1326         CTR3(KTR_VM,
1327             "vm_map_entry_link: map %p, nentries %d, entry %p", map,
1328             map->nentries, entry);
1329         VM_MAP_ASSERT_LOCKED(map);
1330         map->nentries++;
1331         header = &map->header;
1332         root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1333         if (root == NULL) {
1334                 /*
1335                  * The new entry does not overlap any existing entry in the
1336                  * map, so it becomes the new root of the map tree.
1337                  */
1338                 max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1339                 max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1340         } else if (entry->start == root->start) {
1341                 /*
1342                  * The new entry is a clone of root, with only the end field
1343                  * changed.  The root entry will be shrunk to abut the new
1344                  * entry, and will be the right child of the new root entry in
1345                  * the modified map.
1346                  */
1347                 KASSERT(entry->end < root->end,
1348                     ("%s: clip_start not within entry", __func__));
1349                 vm_map_splay_findprev(root, &llist);
1350                 root->offset += entry->end - root->start;
1351                 root->start = entry->end;
1352                 max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1353                 max_free_right = root->max_free = vm_size_max(
1354                     vm_map_splay_merge_pred(entry, root, entry),
1355                     vm_map_splay_merge_right(header, root, rlist));
1356         } else {
1357                 /*
1358                  * The new entry is a clone of root, with only the start field
1359                  * changed.  The root entry will be shrunk to abut the new
1360                  * entry, and will be the left child of the new root entry in
1361                  * the modified map.
1362                  */
1363                 KASSERT(entry->end == root->end,
1364                     ("%s: clip_start not within entry", __func__));
1365                 vm_map_splay_findnext(root, &rlist);
1366                 entry->offset += entry->start - root->start;
1367                 root->end = entry->start;
1368                 max_free_left = root->max_free = vm_size_max(
1369                     vm_map_splay_merge_left(header, root, llist),
1370                     vm_map_splay_merge_succ(entry, root, entry));
1371                 max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1372         }
1373         entry->max_free = vm_size_max(max_free_left, max_free_right);
1374         map->root = entry;
1375         VM_MAP_ASSERT_CONSISTENT(map);
1376 }
1377
1378 enum unlink_merge_type {
1379         UNLINK_MERGE_NONE,
1380         UNLINK_MERGE_NEXT
1381 };
1382
1383 static void
1384 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
1385     enum unlink_merge_type op)
1386 {
1387         vm_map_entry_t header, llist, rlist, root;
1388         vm_size_t max_free_left, max_free_right;
1389
1390         VM_MAP_ASSERT_LOCKED(map);
1391         header = &map->header;
1392         root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1393         KASSERT(root != NULL,
1394             ("vm_map_entry_unlink: unlink object not mapped"));
1395
1396         vm_map_splay_findprev(root, &llist);
1397         vm_map_splay_findnext(root, &rlist);
1398         if (op == UNLINK_MERGE_NEXT) {
1399                 rlist->start = root->start;
1400                 rlist->offset = root->offset;
1401         }
1402         if (llist != header) {
1403                 root = llist;
1404                 llist = root->right;
1405                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1406                 max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1407         } else if (rlist != header) {
1408                 root = rlist;
1409                 rlist = root->left;
1410                 max_free_left = vm_map_splay_merge_pred(header, root, llist);
1411                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1412         } else {
1413                 header->left = header->right = header;
1414                 root = NULL;
1415         }
1416         if (root != NULL)
1417                 root->max_free = vm_size_max(max_free_left, max_free_right);
1418         map->root = root;
1419         VM_MAP_ASSERT_CONSISTENT(map);
1420         map->nentries--;
1421         CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1422             map->nentries, entry);
1423 }
1424
1425 /*
1426  *      vm_map_entry_resize:
1427  *
1428  *      Resize a vm_map_entry, recompute the amount of free space that
1429  *      follows it and propagate that value up the tree.
1430  *
1431  *      The map must be locked, and remains locked on return.
1432  */
1433 static void
1434 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
1435 {
1436         vm_map_entry_t header, llist, rlist, root;
1437
1438         VM_MAP_ASSERT_LOCKED(map);
1439         header = &map->header;
1440         root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1441         KASSERT(root != NULL, ("%s: resize object not mapped", __func__));
1442         vm_map_splay_findnext(root, &rlist);
1443         entry->end += grow_amount;
1444         root->max_free = vm_size_max(
1445             vm_map_splay_merge_left(header, root, llist),
1446             vm_map_splay_merge_succ(header, root, rlist));
1447         map->root = root;
1448         VM_MAP_ASSERT_CONSISTENT(map);
1449         CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
1450             __func__, map, map->nentries, entry);
1451 }
1452
1453 /*
1454  *      vm_map_lookup_entry:    [ internal use only ]
1455  *
1456  *      Finds the map entry containing (or
1457  *      immediately preceding) the specified address
1458  *      in the given map; the entry is returned
1459  *      in the "entry" parameter.  The boolean
1460  *      result indicates whether the address is
1461  *      actually contained in the map.
1462  */
1463 boolean_t
1464 vm_map_lookup_entry(
1465         vm_map_t map,
1466         vm_offset_t address,
1467         vm_map_entry_t *entry)  /* OUT */
1468 {
1469         vm_map_entry_t cur, header, lbound, ubound;
1470         boolean_t locked;
1471
1472         /*
1473          * If the map is empty, then the map entry immediately preceding
1474          * "address" is the map's header.
1475          */
1476         header = &map->header;
1477         cur = map->root;
1478         if (cur == NULL) {
1479                 *entry = header;
1480                 return (FALSE);
1481         }
1482         if (address >= cur->start && cur->end > address) {
1483                 *entry = cur;
1484                 return (TRUE);
1485         }
1486         if ((locked = vm_map_locked(map)) ||
1487             sx_try_upgrade(&map->lock)) {
1488                 /*
1489                  * Splay requires a write lock on the map.  However, it only
1490                  * restructures the binary search tree; it does not otherwise
1491                  * change the map.  Thus, the map's timestamp need not change
1492                  * on a temporary upgrade.
1493                  */
1494                 cur = vm_map_splay(map, address);
1495                 if (!locked) {
1496                         VM_MAP_UNLOCK_CONSISTENT(map);
1497                         sx_downgrade(&map->lock);
1498                 }
1499
1500                 /*
1501                  * If "address" is contained within a map entry, the new root
1502                  * is that map entry.  Otherwise, the new root is a map entry
1503                  * immediately before or after "address".
1504                  */
1505                 if (address < cur->start) {
1506                         *entry = header;
1507                         return (FALSE);
1508                 }
1509                 *entry = cur;
1510                 return (address < cur->end);
1511         }
1512         /*
1513          * Since the map is only locked for read access, perform a
1514          * standard binary search tree lookup for "address".
1515          */
1516         lbound = ubound = header;
1517         for (;;) {
1518                 if (address < cur->start) {
1519                         ubound = cur;
1520                         cur = cur->left;
1521                         if (cur == lbound)
1522                                 break;
1523                 } else if (cur->end <= address) {
1524                         lbound = cur;
1525                         cur = cur->right;
1526                         if (cur == ubound)
1527                                 break;
1528                 } else {
1529                         *entry = cur;
1530                         return (TRUE);
1531                 }
1532         }
1533         *entry = lbound;
1534         return (FALSE);
1535 }
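
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * holds at least a read lock on the map while calling vm_map_lookup_entry().
 * A TRUE result means "addr" lies within [entry->start, entry->end); a FALSE
 * result means "entry" is the entry (possibly &map->header) immediately
 * preceding "addr":
 *
 *      vm_map_entry_t entry;
 *
 *      vm_map_lock_read(map);
 *      if (vm_map_lookup_entry(map, addr, &entry))
 *              use_containing_entry(entry);
 *      vm_map_unlock_read(map);
 *
 * Here "addr" and "use_containing_entry" are hypothetical placeholders.
 */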
1536
1537 /*
1538  *      vm_map_insert:
1539  *
1540  *      Inserts the given whole VM object into the target
1541  *      map at the specified address range.  The object's
1542  *      size should match that of the address range.
1543  *
1544  *      Requires that the map be locked, and leaves it so.
1545  *
1546  *      If object is non-NULL, its reference count must be bumped by the
1547  *      caller prior to making this call, to account for the new entry.
1548  */
1549 int
1550 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1551     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
1552 {
1553         vm_map_entry_t new_entry, next_entry, prev_entry;
1554         struct ucred *cred;
1555         vm_eflags_t protoeflags;
1556         vm_inherit_t inheritance;
1557
1558         VM_MAP_ASSERT_LOCKED(map);
1559         KASSERT(object != kernel_object ||
1560             (cow & MAP_COPY_ON_WRITE) == 0,
1561             ("vm_map_insert: kernel object and COW"));
1562         KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
1563             ("vm_map_insert: paradoxical MAP_NOFAULT request"));
1564         KASSERT((prot & ~max) == 0,
1565             ("prot %#x is not subset of max_prot %#x", prot, max));
1566
1567         /*
1568          * Check that the start and end points are not bogus.
1569          */
1570         if (start == end || !vm_map_range_valid(map, start, end))
1571                 return (KERN_INVALID_ADDRESS);
1572
1573         /*
1574          * Find the entry prior to the proposed starting address; if it's part
1575          * of an existing entry, this range is bogus.
1576          */
1577         if (vm_map_lookup_entry(map, start, &prev_entry))
1578                 return (KERN_NO_SPACE);
1579
1580         /*
1581          * Assert that the next entry doesn't overlap the end point.
1582          */
1583         next_entry = vm_map_entry_succ(prev_entry);
1584         if (next_entry->start < end)
1585                 return (KERN_NO_SPACE);
1586
1587         if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
1588             max != VM_PROT_NONE))
1589                 return (KERN_INVALID_ARGUMENT);
1590
1591         protoeflags = 0;
1592         if (cow & MAP_COPY_ON_WRITE)
1593                 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
1594         if (cow & MAP_NOFAULT)
1595                 protoeflags |= MAP_ENTRY_NOFAULT;
1596         if (cow & MAP_DISABLE_SYNCER)
1597                 protoeflags |= MAP_ENTRY_NOSYNC;
1598         if (cow & MAP_DISABLE_COREDUMP)
1599                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
1600         if (cow & MAP_STACK_GROWS_DOWN)
1601                 protoeflags |= MAP_ENTRY_GROWS_DOWN;
1602         if (cow & MAP_STACK_GROWS_UP)
1603                 protoeflags |= MAP_ENTRY_GROWS_UP;
1604         if (cow & MAP_WRITECOUNT)
1605                 protoeflags |= MAP_ENTRY_WRITECNT;
1606         if (cow & MAP_VN_EXEC)
1607                 protoeflags |= MAP_ENTRY_VN_EXEC;
1608         if ((cow & MAP_CREATE_GUARD) != 0)
1609                 protoeflags |= MAP_ENTRY_GUARD;
1610         if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
1611                 protoeflags |= MAP_ENTRY_STACK_GAP_DN;
1612         if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
1613                 protoeflags |= MAP_ENTRY_STACK_GAP_UP;
1614         if (cow & MAP_INHERIT_SHARE)
1615                 inheritance = VM_INHERIT_SHARE;
1616         else
1617                 inheritance = VM_INHERIT_DEFAULT;
1618
1619         cred = NULL;
1620         if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
1621                 goto charged;
1622         if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1623             ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1624                 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1625                         return (KERN_RESOURCE_SHORTAGE);
1626                 KASSERT(object == NULL ||
1627                     (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
1628                     object->cred == NULL,
1629                     ("overcommit: vm_map_insert o %p", object));
1630                 cred = curthread->td_ucred;
1631         }
1632
1633 charged:
1634         /* Expand the kernel pmap, if necessary. */
1635         if (map == kernel_map && end > kernel_vm_end)
1636                 pmap_growkernel(end);
1637         if (object != NULL) {
1638                 /*
1639                  * OBJ_ONEMAPPING must be cleared unless this mapping
1640                  * is trivially proven to be the only mapping for any
1641                  * of the object's pages.  (Object granularity
1642                  * reference counting is insufficient to recognize
1643                  * aliases with precision.)
1644                  */
1645                 if ((object->flags & OBJ_ANON) != 0) {
1646                         VM_OBJECT_WLOCK(object);
1647                         if (object->ref_count > 1 || object->shadow_count != 0)
1648                                 vm_object_clear_flag(object, OBJ_ONEMAPPING);
1649                         VM_OBJECT_WUNLOCK(object);
1650                 }
1651         } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
1652             protoeflags &&
1653             (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
1654             MAP_VN_EXEC)) == 0 &&
1655             prev_entry->end == start && (prev_entry->cred == cred ||
1656             (prev_entry->object.vm_object != NULL &&
1657             prev_entry->object.vm_object->cred == cred)) &&
1658             vm_object_coalesce(prev_entry->object.vm_object,
1659             prev_entry->offset,
1660             (vm_size_t)(prev_entry->end - prev_entry->start),
1661             (vm_size_t)(end - prev_entry->end), cred != NULL &&
1662             (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
1663                 /*
1664                  * We were able to extend the object.  Determine if we
1665                  * can extend the previous map entry to include the
1666                  * new range as well.
1667                  */
1668                 if (prev_entry->inheritance == inheritance &&
1669                     prev_entry->protection == prot &&
1670                     prev_entry->max_protection == max &&
1671                     prev_entry->wired_count == 0) {
1672                         KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
1673                             0, ("prev_entry %p has incoherent wiring",
1674                             prev_entry));
1675                         if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
1676                                 map->size += end - prev_entry->end;
1677                         vm_map_entry_resize(map, prev_entry,
1678                             end - prev_entry->end);
1679                         vm_map_try_merge_entries(map, prev_entry, next_entry);
1680                         return (KERN_SUCCESS);
1681                 }
1682
1683                 /*
1684                  * If we can extend the object but cannot extend the
1685                  * map entry, we have to create a new map entry.  We
1686                  * must bump the ref count on the extended object to
1687                  * account for it.  object may be NULL.
1688                  */
1689                 object = prev_entry->object.vm_object;
1690                 offset = prev_entry->offset +
1691                     (prev_entry->end - prev_entry->start);
1692                 vm_object_reference(object);
1693                 if (cred != NULL && object != NULL && object->cred != NULL &&
1694                     !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1695                         /* Object already accounts for this uid. */
1696                         cred = NULL;
1697                 }
1698         }
1699         if (cred != NULL)
1700                 crhold(cred);
1701
1702         /*
1703          * Create a new entry
1704          */
1705         new_entry = vm_map_entry_create(map);
1706         new_entry->start = start;
1707         new_entry->end = end;
1708         new_entry->cred = NULL;
1709
1710         new_entry->eflags = protoeflags;
1711         new_entry->object.vm_object = object;
1712         new_entry->offset = offset;
1713
1714         new_entry->inheritance = inheritance;
1715         new_entry->protection = prot;
1716         new_entry->max_protection = max;
1717         new_entry->wired_count = 0;
1718         new_entry->wiring_thread = NULL;
1719         new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1720         new_entry->next_read = start;
1721
1722         KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1723             ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
1724         new_entry->cred = cred;
1725
1726         /*
1727          * Insert the new entry into the list
1728          */
1729         vm_map_entry_link(map, new_entry);
1730         if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
1731                 map->size += new_entry->end - new_entry->start;
1732
1733         /*
1734          * Try to coalesce the new entry with both the previous and next
1735          * entries in the list.  Previously, we only attempted to coalesce
1736          * with the previous entry when object is NULL.  Here, we handle the
1737          * other cases, which are less common.
1738          */
1739         vm_map_try_merge_entries(map, prev_entry, new_entry);
1740         vm_map_try_merge_entries(map, new_entry, next_entry);
1741
1742         if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
1743                 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
1744                     end - start, cow & MAP_PREFAULT_PARTIAL);
1745         }
1746
1747         return (KERN_SUCCESS);
1748 }
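
/*
 * Illustrative sketch (not part of the original source): a caller mapping a
 * referenced object into an already-reserved range of a locked map, where
 * "obj", "start" and "end" are hypothetical:
 *
 *      vm_object_reference(obj);
 *      rv = vm_map_insert(map, obj, 0, start, end,
 *          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *      if (rv != KERN_SUCCESS)
 *              vm_object_deallocate(obj);
 *
 * The reference taken above accounts for the new entry, and is dropped again
 * if the insertion fails.
 */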
1749
1750 /*
1751  *      vm_map_findspace:
1752  *
1753  *      Find the first fit (lowest VM address) for "length" free bytes
1754  *      beginning at address >= start in the given map.
1755  *
1756  *      In a vm_map_entry, "max_free" is the maximum amount of
1757  *      contiguous free space between an entry in its subtree and a
1758  *      neighbor of that entry.  This allows finding a free region in
1759  *      one path down the tree, so O(log n) amortized with splay
1760  *      trees.
1761  *
1762  *      Requires that the map be locked, and leaves it so.
1763  *
1764  *      Returns: starting address if sufficient space,
1765  *               vm_map_max(map)-length+1 if insufficient space.
1766  */
1767 vm_offset_t
1768 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
1769 {
1770         vm_map_entry_t header, llist, rlist, root, y;
1771         vm_size_t left_length, max_free_left, max_free_right;
1772         vm_offset_t gap_end;
1773
1774         /*
1775          * Request must fit within min/max VM address and must avoid
1776          * address wrap.
1777          */
1778         start = MAX(start, vm_map_min(map));
1779         if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
1780                 return (vm_map_max(map) - length + 1);
1781
1782         /* Empty tree means wide open address space. */
1783         if (map->root == NULL)
1784                 return (start);
1785
1786         /*
1787          * After splay_split, if start is within an entry, push it to the start
1788          * of the following gap.  If rlist is at the end of the gap containing
1789          * start, save the end of that gap in gap_end to see if the gap is big
1790          * enough; otherwise set gap_end to start, to skip gap-checking and
1791          * move directly to a search of the right subtree.
1792          */
1793         header = &map->header;
1794         root = vm_map_splay_split(map, start, length, &llist, &rlist);
1795         gap_end = rlist->start;
1796         if (root != NULL) {
1797                 start = root->end;
1798                 if (root->right != rlist)
1799                         gap_end = start;
1800                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1801                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1802         } else if (rlist != header) {
1803                 root = rlist;
1804                 rlist = root->left;
1805                 max_free_left = vm_map_splay_merge_pred(header, root, llist);
1806                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1807         } else {
1808                 root = llist;
1809                 llist = root->right;
1810                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1811                 max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1812         }
1813         root->max_free = vm_size_max(max_free_left, max_free_right);
1814         map->root = root;
1815         VM_MAP_ASSERT_CONSISTENT(map);
1816         if (length <= gap_end - start)
1817                 return (start);
1818
1819         /* With max_free, can immediately tell if no solution. */
1820         if (root->right == header || length > root->right->max_free)
1821                 return (vm_map_max(map) - length + 1);
1822
1823         /*
1824          * Splay for the least large-enough gap in the right subtree.
1825          */
1826         llist = rlist = header;
1827         for (left_length = 0;;
1828             left_length = vm_map_entry_max_free_left(root, llist)) {
1829                 if (length <= left_length)
1830                         SPLAY_LEFT_STEP(root, y, llist, rlist,
1831                             length <= vm_map_entry_max_free_left(y, llist));
1832                 else
1833                         SPLAY_RIGHT_STEP(root, y, llist, rlist,
1834                             length > vm_map_entry_max_free_left(y, root));
1835                 if (root == NULL)
1836                         break;
1837         }
1838         root = llist;
1839         llist = root->right;
1840         max_free_left = vm_map_splay_merge_left(header, root, llist);
1841         if (rlist == header) {
1842                 root->max_free = vm_size_max(max_free_left,
1843                     vm_map_splay_merge_succ(header, root, rlist));
1844         } else {
1845                 y = rlist;
1846                 rlist = y->left;
1847                 y->max_free = vm_size_max(
1848                     vm_map_splay_merge_pred(root, y, root),
1849                     vm_map_splay_merge_right(header, y, rlist));
1850                 root->max_free = vm_size_max(max_free_left, y->max_free);
1851         }
1852         map->root = root;
1853         VM_MAP_ASSERT_CONSISTENT(map);
1854         return (root->end);
1855 }
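
/*
 * Illustrative sketch (not part of the original source): the documented
 * failure convention is "vm_map_max(map) - length + 1", so a caller checks
 * for success by testing whether the returned region still fits below the
 * map maximum, e.g.:
 *
 *      addr = vm_map_findspace(map, start, length);
 *      if (addr + length > vm_map_max(map))
 *              return (KERN_NO_SPACE);
 */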
1856
1857 int
1858 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1859     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1860     vm_prot_t max, int cow)
1861 {
1862         vm_offset_t end;
1863         int result;
1864
1865         end = start + length;
1866         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1867             object == NULL,
1868             ("vm_map_fixed: non-NULL backing object for stack"));
1869         vm_map_lock(map);
1870         VM_MAP_RANGE_CHECK(map, start, end);
1871         if ((cow & MAP_CHECK_EXCL) == 0)
1872                 vm_map_delete(map, start, end);
1873         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1874                 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1875                     prot, max, cow);
1876         } else {
1877                 result = vm_map_insert(map, object, offset, start, end,
1878                     prot, max, cow);
1879         }
1880         vm_map_unlock(map);
1881         return (result);
1882 }
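
/*
 * Illustrative sketch (not part of the original source): vm_map_fixed()
 * normally deletes any existing mappings in [start, start + length) before
 * inserting, while passing MAP_CHECK_EXCL instead makes the call fail with
 * KERN_NO_SPACE if the range is already in use; "obj", "addr" and "len" are
 * hypothetical:
 *
 *      rv = vm_map_fixed(map, obj, 0, addr, len, VM_PROT_READ,
 *          VM_PROT_ALL, MAP_CHECK_EXCL);
 */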
1883
1884 static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
1885 static const int aslr_pages_rnd_32[2] = {0x100, 0x4};
1886
1887 static int cluster_anon = 1;
1888 SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
1889     &cluster_anon, 0,
1890     "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");
1891
1892 static bool
1893 clustering_anon_allowed(vm_offset_t addr)
1894 {
1895
1896         switch (cluster_anon) {
1897         case 0:
1898                 return (false);
1899         case 1:
1900                 return (addr == 0);
1901         case 2:
1902         default:
1903                 return (true);
1904         }
1905 }
1906
1907 static long aslr_restarts;
1908 SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
1909     &aslr_restarts, 0,
1910     "Number of aslr failures");
1911
1912 #define MAP_32BIT_MAX_ADDR      ((vm_offset_t)1 << 31)
1913
1914 /*
1915  * Searches for the specified amount of free space in the given map with the
1916  * specified alignment.  Performs an address-ordered, first-fit search from
1917  * the given address "*addr", with an optional upper bound "max_addr".  If the
1918  * parameter "alignment" is zero, then the alignment is computed from the
1919  * given (object, offset) pair so as to enable the greatest possible use of
1920  * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
1921  * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
1922  *
1923  * The map must be locked.  Initially, there must be at least "length" bytes
1924  * of free space at the given address.
1925  */
1926 static int
1927 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1928     vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
1929     vm_offset_t alignment)
1930 {
1931         vm_offset_t aligned_addr, free_addr;
1932
1933         VM_MAP_ASSERT_LOCKED(map);
1934         free_addr = *addr;
1935         KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
1936             ("caller failed to provide space %#jx at address %p",
1937              (uintmax_t)length, (void *)free_addr));
1938         for (;;) {
1939                 /*
1940                  * At the start of every iteration, the free space at address
1941                  * "*addr" is at least "length" bytes.
1942                  */
1943                 if (alignment == 0)
1944                         pmap_align_superpage(object, offset, addr, length);
1945                 else if ((*addr & (alignment - 1)) != 0) {
1946                         *addr &= ~(alignment - 1);
1947                         *addr += alignment;
1948                 }
1949                 aligned_addr = *addr;
1950                 if (aligned_addr == free_addr) {
1951                         /*
1952                          * Alignment did not change "*addr", so "*addr" must
1953                          * still provide sufficient free space.
1954                          */
1955                         return (KERN_SUCCESS);
1956                 }
1957
1958                 /*
1959                  * Test for address wrap on "*addr".  A wrapped "*addr" could
1960                  * be a valid address, in which case vm_map_findspace() cannot
1961                  * be relied upon to fail.
1962                  */
1963                 if (aligned_addr < free_addr)
1964                         return (KERN_NO_SPACE);
1965                 *addr = vm_map_findspace(map, aligned_addr, length);
1966                 if (*addr + length > vm_map_max(map) ||
1967                     (max_addr != 0 && *addr + length > max_addr))
1968                         return (KERN_NO_SPACE);
1969                 free_addr = *addr;
1970                 if (free_addr == aligned_addr) {
1971                         /*
1972                          * If a successful call to vm_map_findspace() did not
1973                          * change "*addr", then "*addr" must still be aligned
1974                          * and provide sufficient free space.
1975                          */
1976                         return (KERN_SUCCESS);
1977                 }
1978         }
1979 }
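
/*
 * Worked example (not part of the original source): with a hypothetical
 * *addr of 0x12345 and an alignment of 0x1000, the adjustment in the loop
 * above first clears the low bits (0x12000) and then adds one alignment
 * unit, yielding 0x13000, the next aligned address above the original one;
 * the loop then calls vm_map_findspace() again to re-verify that this new
 * address still has "length" bytes of free space.
 */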
1980
1981 /*
1982  *      vm_map_find finds an unallocated region in the target address
1983  *      map with the given length.  The search is defined to be
1984  *      first-fit from the specified address; the region found is
1985  *      returned in the same parameter.
1986  *
1987  *      If object is non-NULL, its reference count must be bumped by the
1988  *      caller prior to making this call, to account for the new entry.
1989  */
1990 int
1991 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1992             vm_offset_t *addr,  /* IN/OUT */
1993             vm_size_t length, vm_offset_t max_addr, int find_space,
1994             vm_prot_t prot, vm_prot_t max, int cow)
1995 {
1996         vm_offset_t alignment, curr_min_addr, min_addr;
1997         int gap, pidx, rv, try;
1998         bool cluster, en_aslr, update_anon;
1999
2000         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
2001             object == NULL,
2002             ("vm_map_find: non-NULL backing object for stack"));
2003         MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
2004             (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
2005         if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
2006             (object->flags & OBJ_COLORED) == 0))
2007                 find_space = VMFS_ANY_SPACE;
2008         if (find_space >> 8 != 0) {
2009                 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
2010                 alignment = (vm_offset_t)1 << (find_space >> 8);
2011         } else
2012                 alignment = 0;
2013         en_aslr = (map->flags & MAP_ASLR) != 0;
2014         update_anon = cluster = clustering_anon_allowed(*addr) &&
2015             (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
2016             find_space != VMFS_NO_SPACE && object == NULL &&
2017             (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
2018             MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
2019         curr_min_addr = min_addr = *addr;
2020         if (en_aslr && min_addr == 0 && !cluster &&
2021             find_space != VMFS_NO_SPACE &&
2022             (map->flags & MAP_ASLR_IGNSTART) != 0)
2023                 curr_min_addr = min_addr = vm_map_min(map);
2024         try = 0;
2025         vm_map_lock(map);
2026         if (cluster) {
2027                 curr_min_addr = map->anon_loc;
2028                 if (curr_min_addr == 0)
2029                         cluster = false;
2030         }
2031         if (find_space != VMFS_NO_SPACE) {
2032                 KASSERT(find_space == VMFS_ANY_SPACE ||
2033                     find_space == VMFS_OPTIMAL_SPACE ||
2034                     find_space == VMFS_SUPER_SPACE ||
2035                     alignment != 0, ("unexpected VMFS flag"));
2036 again:
2037                 /*
2038                  * When creating an anonymous mapping, try clustering
2039                  * with an existing anonymous mapping first.
2040                  *
2041                  * We make up to two attempts to find address space
2042                  * for a given find_space value. The first attempt may
2043                  * apply randomization or may cluster with an existing
2044                  * anonymous mapping. If this first attempt fails,
2045                  * perform a first-fit search of the available address
2046                  * space.
2047                  *
2048                  * If all tries failed, and find_space is
2049                  * VMFS_OPTIMAL_SPACE, fallback to VMFS_ANY_SPACE.
2050                  * Again enable clustering and randomization.
2051                  */
2052                 try++;
2053                 MPASS(try <= 2);
2054
2055                 if (try == 2) {
2056                         /*
2057                          * Second try: we failed either to find a
2058                          * suitable region for randomizing the
2059                          * allocation, or to cluster with an existing
2060                          * mapping.  Retry with free run.
2061                          */
2062                         curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
2063                             vm_map_min(map) : min_addr;
2064                         atomic_add_long(&aslr_restarts, 1);
2065                 }
2066
2067                 if (try == 1 && en_aslr && !cluster) {
2068                         /*
2069                          * Find space for allocation, including
2070                          * gap needed for later randomization.
2071                          */
2072                         pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
2073                             (find_space == VMFS_SUPER_SPACE || find_space ==
2074                             VMFS_OPTIMAL_SPACE) ? 1 : 0;
2075                         gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
2076                             (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
2077                             aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
2078                         *addr = vm_map_findspace(map, curr_min_addr,
2079                             length + gap * pagesizes[pidx]);
2080                         if (*addr + length + gap * pagesizes[pidx] >
2081                             vm_map_max(map))
2082                                 goto again;
2083                         /* And randomize the start address. */
2084                         *addr += (arc4random() % gap) * pagesizes[pidx];
2085                         if (max_addr != 0 && *addr + length > max_addr)
2086                                 goto again;
2087                 } else {
2088                         *addr = vm_map_findspace(map, curr_min_addr, length);
2089                         if (*addr + length > vm_map_max(map) ||
2090                             (max_addr != 0 && *addr + length > max_addr)) {
2091                                 if (cluster) {
2092                                         cluster = false;
2093                                         MPASS(try == 1);
2094                                         goto again;
2095                                 }
2096                                 rv = KERN_NO_SPACE;
2097                                 goto done;
2098                         }
2099                 }
2100
2101                 if (find_space != VMFS_ANY_SPACE &&
2102                     (rv = vm_map_alignspace(map, object, offset, addr, length,
2103                     max_addr, alignment)) != KERN_SUCCESS) {
2104                         if (find_space == VMFS_OPTIMAL_SPACE) {
2105                                 find_space = VMFS_ANY_SPACE;
2106                                 curr_min_addr = min_addr;
2107                                 cluster = update_anon;
2108                                 try = 0;
2109                                 goto again;
2110                         }
2111                         goto done;
2112                 }
2113         } else if ((cow & MAP_REMAP) != 0) {
2114                 if (!vm_map_range_valid(map, *addr, *addr + length)) {
2115                         rv = KERN_INVALID_ADDRESS;
2116                         goto done;
2117                 }
2118                 vm_map_delete(map, *addr, *addr + length);
2119         }
2120         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
2121                 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
2122                     max, cow);
2123         } else {
2124                 rv = vm_map_insert(map, object, offset, *addr, *addr + length,
2125                     prot, max, cow);
2126         }
2127         if (rv == KERN_SUCCESS && update_anon)
2128                 map->anon_loc = *addr + length;
2129 done:
2130         vm_map_unlock(map);
2131         return (rv);
2132 }
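
/*
 * Illustrative sketch (not part of the original source): a first-fit
 * allocation of anonymous address space, starting the search at a hint;
 * "addr", "len" and "use_address" are hypothetical, and the map is locked
 * and unlocked by vm_map_find() itself:
 *
 *      addr = hint;
 *      rv = vm_map_find(map, NULL, 0, &addr, len, 0, VMFS_ANY_SPACE,
 *          VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *      if (rv == KERN_SUCCESS)
 *              use_address(addr);
 */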
2133
2134 /*
2135  *      vm_map_find_min() is a variant of vm_map_find() that takes an
2136  *      additional parameter (min_addr) and treats the given address
2137  *      (*addr) differently.  Specifically, it treats *addr as a hint
2138  *      and not as the minimum address where the mapping is created.
2139  *
2140  *      This function works in two phases.  First, it tries to
2141  *      allocate above the hint.  If that fails and the hint is
2142  *      greater than min_addr, it performs a second pass, replacing
2143  *      the hint with min_addr as the minimum address for the
2144  *      allocation.
2145  */
2146 int
2147 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2148     vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
2149     vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
2150     int cow)
2151 {
2152         vm_offset_t hint;
2153         int rv;
2154
2155         hint = *addr;
2156         for (;;) {
2157                 rv = vm_map_find(map, object, offset, addr, length, max_addr,
2158                     find_space, prot, max, cow);
2159                 if (rv == KERN_SUCCESS || min_addr >= hint)
2160                         return (rv);
2161                 *addr = hint = min_addr;
2162         }
2163 }
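
/*
 * Worked example (not part of the original source): with a hypothetical hint
 * of 0x40000000 and a min_addr of 0x10000000, the first vm_map_find() pass
 * searches upward from the hint; if that fails, the loop above retries once
 * with *addr reset to min_addr, so the hint never prevents an otherwise
 * possible allocation lower in the map.
 */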
2164
2165 /*
2166  * A map entry with any of the following flags set must not be merged with
2167  * another entry.
2168  */
2169 #define MAP_ENTRY_NOMERGE_MASK  (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
2170             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC)
2171
2172 static bool
2173 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
2174 {
2175
2176         KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
2177             (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
2178             ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
2179             prev, entry));
2180         return (prev->end == entry->start &&
2181             prev->object.vm_object == entry->object.vm_object &&
2182             (prev->object.vm_object == NULL ||
2183             prev->offset + (prev->end - prev->start) == entry->offset) &&
2184             prev->eflags == entry->eflags &&
2185             prev->protection == entry->protection &&
2186             prev->max_protection == entry->max_protection &&
2187             prev->inheritance == entry->inheritance &&
2188             prev->wired_count == entry->wired_count &&
2189             prev->cred == entry->cred);
2190 }
2191
2192 static void
2193 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
2194 {
2195
2196         /*
2197          * If the backing object is a vnode object, vm_object_deallocate()
2198          * calls vrele().  However, vrele() does not lock the vnode because
2199          * the vnode has additional references.  Thus, the map lock can be
2200          * kept without causing a lock-order reversal with the vnode lock.
2201          *
2202          * Since we count the number of virtual page mappings in
2203          * object->un_pager.vnp.writemappings, the writemappings value
2204          * should not be adjusted when the entry is disposed of.
2205          */
2206         if (entry->object.vm_object != NULL)
2207                 vm_object_deallocate(entry->object.vm_object);
2208         if (entry->cred != NULL)
2209                 crfree(entry->cred);
2210         vm_map_entry_dispose(map, entry);
2211 }
2212
2213 /*
2214  *      vm_map_try_merge_entries:
2215  *
2216  *      Compare the given map entry to its predecessor, and merge its predecessor
2217  *      into it if possible.  The entry remains valid, and may be extended.
2218  *      The predecessor may be deleted.
2219  *
2220  *      The map must be locked.
2221  */
2222 void
2223 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
2224     vm_map_entry_t entry)
2225 {
2226
2227         VM_MAP_ASSERT_LOCKED(map);
2228         if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
2229             vm_map_mergeable_neighbors(prev_entry, entry)) {
2230                 vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
2231                 vm_map_merged_neighbor_dispose(map, prev_entry);
2232         }
2233 }
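
/*
 * Illustrative sketch (not part of the original source): callers that clip
 * and modify a range typically follow the modification with a merge pass so
 * that entries made identical again are coalesced, roughly:
 *
 *      vm_map_clip_start(map, entry, start);
 *      vm_map_clip_end(map, entry, end);
 *      entry->inheritance = new_inheritance;
 *      vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
 *
 * Here "new_inheritance" is a hypothetical value; the merge, if it happens,
 * deletes the predecessor and leaves "entry" valid but possibly extended.
 */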
2234
2235 /*
2236  *      vm_map_entry_back:
2237  *
2238  *      Allocate an object to back a map entry.
2239  */
2240 static inline void
2241 vm_map_entry_back(vm_map_entry_t entry)
2242 {
2243         vm_object_t object;
2244
2245         KASSERT(entry->object.vm_object == NULL,
2246             ("map entry %p has backing object", entry));
2247         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2248             ("map entry %p is a submap", entry));
2249         object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL,
2250             entry->cred, entry->end - entry->start);
2251         entry->object.vm_object = object;
2252         entry->offset = 0;
2253         entry->cred = NULL;
2254 }
2255
2256 /*
2257  *      vm_map_entry_charge_object
2258  *
2259  *      If there is no object backing this entry, create one.  Otherwise, if
2260  *      the entry has cred, give it to the backing object.
2261  */
2262 static inline void
2263 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
2264 {
2265
2266         VM_MAP_ASSERT_LOCKED(map);
2267         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2268             ("map entry %p is a submap", entry));
2269         if (entry->object.vm_object == NULL && !map->system_map &&
2270             (entry->eflags & MAP_ENTRY_GUARD) == 0)
2271                 vm_map_entry_back(entry);
2272         else if (entry->object.vm_object != NULL &&
2273             ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2274             entry->cred != NULL) {
2275                 VM_OBJECT_WLOCK(entry->object.vm_object);
2276                 KASSERT(entry->object.vm_object->cred == NULL,
2277                     ("OVERCOMMIT: %s: both cred e %p", __func__, entry));
2278                 entry->object.vm_object->cred = entry->cred;
2279                 entry->object.vm_object->charge = entry->end - entry->start;
2280                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
2281                 entry->cred = NULL;
2282         }
2283 }
2284
2285 /*
2286  *      vm_map_entry_clone
2287  *
2288  *      Create a duplicate map entry for clipping.
2289  */
2290 static vm_map_entry_t
2291 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry)
2292 {
2293         vm_map_entry_t new_entry;
2294
2295         VM_MAP_ASSERT_LOCKED(map);
2296
2297         /*
2298          * Create a backing object now, if none exists, so that more individual
2299          * objects won't be created after the map entry is split.
2300          */
2301         vm_map_entry_charge_object(map, entry);
2302
2303         /* Clone the entry. */
2304         new_entry = vm_map_entry_create(map);
2305         *new_entry = *entry;
2306         if (new_entry->cred != NULL)
2307                 crhold(entry->cred);
2308         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2309                 vm_object_reference(new_entry->object.vm_object);
2310                 vm_map_entry_set_vnode_text(new_entry, true);
2311                 /*
2312                  * The object->un_pager.vnp.writemappings for the object of
2313                  * MAP_ENTRY_WRITECNT type entry shall be kept as is here.  The
2314                  * virtual pages are re-distributed among the clipped entries,
2315                  * so the sum is left the same.
2316                  */
2317         }
2318         return (new_entry);
2319 }
2320
2321 /*
2322  *      vm_map_clip_start:      [ internal use only ]
2323  *
2324  *      Asserts that the given entry begins at or after
2325  *      the specified address; if necessary,
2326  *      it splits the entry into two.
2327  */
2328 static inline void
2329 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
2330 {
2331         vm_map_entry_t new_entry;
2332
2333         if (!map->system_map)
2334                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2335                     "%s: map %p entry %p start 0x%jx", __func__, map, entry,
2336                     (uintmax_t)start);
2337
2338         if (start <= entry->start)
2339                 return;
2340
2341         VM_MAP_ASSERT_LOCKED(map);
2342         KASSERT(entry->end > start && entry->start < start,
2343             ("%s: invalid clip of entry %p", __func__, entry));
2344
2345         new_entry = vm_map_entry_clone(map, entry);
2346
2347         /*
2348          * Split off the front portion.  Insert the new entry BEFORE this one,
2349          * so that this entry has the specified starting address.
2350          */
2351         new_entry->end = start;
2352         vm_map_entry_link(map, new_entry);
2353 }
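
/*
 * Worked example (not part of the original source): clipping a hypothetical
 * entry covering [0x1000, 0x4000) at start 0x2000 leaves this entry as
 * [0x2000, 0x4000) and links a cloned entry covering [0x1000, 0x2000) before
 * it.  Both share the same backing object; the clone keeps the original
 * offset, and this entry's offset is advanced by 0x1000 when the tree is
 * relinked.
 */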
2354
2355 /*
2356  *      vm_map_lookup_clip_start:
2357  *
2358  *      Find the entry at or just after 'start', and clip it if 'start' is in
2359  *      the interior of the entry.  Return the entry at or after 'start', and
2360  *      in prev_entry set the entry before 'start'.
2361  */
2362 static inline vm_map_entry_t
2363 vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start,
2364     vm_map_entry_t *prev_entry)
2365 {
2366         vm_map_entry_t entry;
2367
2368         if (!map->system_map)
2369                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2370                     "%s: map %p start 0x%jx prev %p", __func__, map,
2371                     (uintmax_t)start, prev_entry);
2372
2373         if (vm_map_lookup_entry(map, start, prev_entry)) {
2374                 entry = *prev_entry;
2375                 vm_map_clip_start(map, entry, start);
2376                 *prev_entry = vm_map_entry_pred(entry);
2377         } else
2378                 entry = vm_map_entry_succ(*prev_entry);
2379         return (entry);
2380 }
2381
2382 /*
2383  *      vm_map_clip_end:        [ internal use only ]
2384  *
2385  *      Asserts that the given entry ends at or before
2386  *      the specified address; if necessary,
2387  *      it splits the entry into two.
2388  */
2389 static inline void
2390 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
2391 {
2392         vm_map_entry_t new_entry;
2393
2394         if (!map->system_map)
2395                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2396                     "%s: map %p entry %p end 0x%jx", __func__, map, entry,
2397                     (uintmax_t)end);
2398
2399         if (end >= entry->end)
2400                 return;
2401
2402         VM_MAP_ASSERT_LOCKED(map);
2403         KASSERT(entry->start < end && entry->end > end,
2404             ("%s: invalid clip of entry %p", __func__, entry));
2405
2406         new_entry = vm_map_entry_clone(map, entry);
2407
2408         /*
2409          * Split off the back portion.  Insert the new entry AFTER this one,
2410          * so that this entry has the specified ending address.
2411          */
2412         new_entry->start = end;
2413         vm_map_entry_link(map, new_entry);
2414 }
2415
2416 /*
2417  *      vm_map_submap:          [ kernel use only ]
2418  *
2419  *      Mark the given range as handled by a subordinate map.
2420  *
2421  *      This range must have been created with vm_map_find,
2422  *      and no other operations may have been performed on this
2423  *      range prior to calling vm_map_submap.
2424  *
2425  *      Only a limited number of operations can be performed
2426  *      within this range after calling vm_map_submap:
2427  *              vm_fault
2428  *      [Don't try vm_map_copy!]
2429  *
2430  *      To remove a submapping, one must first remove the
2431  *      range from the superior map, and then destroy the
2432  *      submap (if desired).  [Better yet, don't try it.]
2433  */
2434 int
2435 vm_map_submap(
2436         vm_map_t map,
2437         vm_offset_t start,
2438         vm_offset_t end,
2439         vm_map_t submap)
2440 {
2441         vm_map_entry_t entry;
2442         int result;
2443
2444         result = KERN_INVALID_ARGUMENT;
2445
2446         vm_map_lock(submap);
2447         submap->flags |= MAP_IS_SUB_MAP;
2448         vm_map_unlock(submap);
2449
2450         vm_map_lock(map);
2451         VM_MAP_RANGE_CHECK(map, start, end);
2452         if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end &&
2453             (entry->eflags & MAP_ENTRY_COW) == 0 &&
2454             entry->object.vm_object == NULL) {
2455                 vm_map_clip_start(map, entry, start);
2456                 vm_map_clip_end(map, entry, end);
2457                 entry->object.sub_map = submap;
2458                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2459                 result = KERN_SUCCESS;
2460         }
2461         vm_map_unlock(map);
2462
2463         if (result != KERN_SUCCESS) {
2464                 vm_map_lock(submap);
2465                 submap->flags &= ~MAP_IS_SUB_MAP;
2466                 vm_map_unlock(submap);
2467         }
2468         return (result);
2469 }
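
/*
 * Illustrative sketch (not part of the original source): a kernel-only
 * caller first reserves the range in the parent map with vm_map_find(), then
 * marks it as handled by a separate, subordinate map; "submap", "start" and
 * "end" are hypothetical:
 *
 *      submap = vm_map_create(vm_map_pmap(map), start, end);
 *      rv = vm_map_submap(map, start, end, submap);
 *
 * Per the comment above, the reserved range must not have been touched by
 * any other map operation before vm_map_submap() is called.
 */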
2470
2471 /*
2472  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2473  */
2474 #define MAX_INIT_PT     96
2475
2476 /*
2477  *      vm_map_pmap_enter:
2478  *
2479  *      Preload the specified map's pmap with mappings to the specified
2480  *      object's memory-resident pages.  No further physical pages are
2481  *      allocated, and no further virtual pages are retrieved from secondary
2482  *      storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
2483  *      limited number of page mappings are created at the low-end of the
2484  *      specified address range.  (For this purpose, a superpage mapping
2485  *      counts as one page mapping.)  Otherwise, all resident pages within
2486  *      the specified address range are mapped.
2487  */
2488 static void
2489 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
2490     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
2491 {
2492         vm_offset_t start;
2493         vm_page_t p, p_start;
2494         vm_pindex_t mask, psize, threshold, tmpidx;
2495
2496         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
2497                 return;
2498         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2499                 VM_OBJECT_WLOCK(object);
2500                 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2501                         pmap_object_init_pt(map->pmap, addr, object, pindex,
2502                             size);
2503                         VM_OBJECT_WUNLOCK(object);
2504                         return;
2505                 }
2506                 VM_OBJECT_LOCK_DOWNGRADE(object);
2507         } else
2508                 VM_OBJECT_RLOCK(object);
2509
2510         psize = atop(size);
2511         if (psize + pindex > object->size) {
2512                 if (pindex >= object->size) {
2513                         VM_OBJECT_RUNLOCK(object);
2514                         return;
2515                 }
2516                 psize = object->size - pindex;
2517         }
2518
2519         start = 0;
2520         p_start = NULL;
2521         threshold = MAX_INIT_PT;
2522
2523         p = vm_page_find_least(object, pindex);
2524         /*
2525          * Assert: the variable p is either (1) the page with the
2526          * least pindex greater than or equal to the parameter pindex
2527          * or (2) NULL.
2528          */
2529         for (;
2530              p != NULL && (tmpidx = p->pindex - pindex) < psize;
2531              p = TAILQ_NEXT(p, listq)) {
2532                 /*
2533                  * Don't allow madvise to blow away our really
2534                  * free pages by allocating pv entries.
2535                  */
2536                 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
2537                     vm_page_count_severe()) ||
2538                     ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
2539                     tmpidx >= threshold)) {
2540                         psize = tmpidx;
2541                         break;
2542                 }
2543                 if (vm_page_all_valid(p)) {
2544                         if (p_start == NULL) {
2545                                 start = addr + ptoa(tmpidx);
2546                                 p_start = p;
2547                         }
2548                         /* Jump ahead if a superpage mapping is possible. */
2549                         if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
2550                             (pagesizes[p->psind] - 1)) == 0) {
2551                                 mask = atop(pagesizes[p->psind]) - 1;
2552                                 if (tmpidx + mask < psize &&
2553                                     vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
2554                                         p += mask;
2555                                         threshold += mask;
2556                                 }
2557                         }
2558                 } else if (p_start != NULL) {
2559                         pmap_enter_object(map->pmap, start, addr +
2560                             ptoa(tmpidx), p_start, prot);
2561                         p_start = NULL;
2562                 }
2563         }
2564         if (p_start != NULL)
2565                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2566                     p_start, prot);
2567         VM_OBJECT_RUNLOCK(object);
2568 }
2569
2570 /*
2571  *      vm_map_protect:
2572  *
2573  *      Sets the protection of the specified address
2574  *      region in the target map.  If "set_max" is
2575  *      specified, the maximum protection is to be set;
2576  *      otherwise, only the current protection is affected.
2577  */
2578 int
2579 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2580                vm_prot_t new_prot, boolean_t set_max)
2581 {
2582         vm_map_entry_t entry, first_entry, in_tran, prev_entry;
2583         vm_object_t obj;
2584         struct ucred *cred;
2585         vm_prot_t old_prot;
2586         int rv;
2587
2588         if (start == end)
2589                 return (KERN_SUCCESS);
2590
2591 again:
2592         in_tran = NULL;
2593         vm_map_lock(map);
2594
2595         /*
2596          * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
2597          * need to fault pages into the map and will drop the map lock while
2598          * doing so, and the VM object may end up in an inconsistent state if we
2599          * update the protection on the map entry in between faults.
2600          */
2601         vm_map_wait_busy(map);
2602
2603         VM_MAP_RANGE_CHECK(map, start, end);
2604
2605         if (!vm_map_lookup_entry(map, start, &first_entry))
2606                 first_entry = vm_map_entry_succ(first_entry);
2607
2608         /*
2609          * Make a first pass to check for protection violations.
2610          */
2611         for (entry = first_entry; entry->start < end;
2612             entry = vm_map_entry_succ(entry)) {
2613                 if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
2614                         continue;
2615                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
2616                         vm_map_unlock(map);
2617                         return (KERN_INVALID_ARGUMENT);
2618                 }
2619                 if ((new_prot & entry->max_protection) != new_prot) {
2620                         vm_map_unlock(map);
2621                         return (KERN_PROTECTION_FAILURE);
2622                 }
2623                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
2624                         in_tran = entry;
2625         }
2626
2627         /*
2628          * Postpone the operation until all in-transition map entries have
2629          * stabilized.  An in-transition entry might already have its pages
2630          * wired and wired_count incremented, but not yet have its
2631          * MAP_ENTRY_USER_WIRED flag set, in which case we would fail to call
2632          * vm_fault_copy_entry() in the final loop below.
2633          */
2634         if (in_tran != NULL) {
2635                 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2636                 vm_map_unlock_and_wait(map, 0);
2637                 goto again;
2638         }
2639
2640         /*
2641          * Before changing the protections, try to reserve swap space for any
2642          * private (i.e., copy-on-write) mappings that are transitioning from
2643          * read-only to read/write access.  If a reservation fails, break out
2644          * of this loop early and let the next loop simplify the entries, since
2645          * some may now be mergeable.
2646          */
2647         rv = KERN_SUCCESS;
2648         vm_map_clip_start(map, first_entry, start);
2649         for (entry = first_entry; entry->start < end;
2650             entry = vm_map_entry_succ(entry)) {
2651                 vm_map_clip_end(map, entry, end);
2652
2653                 if (set_max ||
2654                     ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
2655                     ENTRY_CHARGED(entry) ||
2656                     (entry->eflags & MAP_ENTRY_GUARD) != 0) {
2657                         continue;
2658                 }
2659
2660                 cred = curthread->td_ucred;
2661                 obj = entry->object.vm_object;
2662
2663                 if (obj == NULL ||
2664                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
2665                         if (!swap_reserve(entry->end - entry->start)) {
2666                                 rv = KERN_RESOURCE_SHORTAGE;
2667                                 end = entry->end;
2668                                 break;
2669                         }
2670                         crhold(cred);
2671                         entry->cred = cred;
2672                         continue;
2673                 }
2674
2675                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP)
2676                         continue;
2677                 VM_OBJECT_WLOCK(obj);
2678                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
2679                         VM_OBJECT_WUNLOCK(obj);
2680                         continue;
2681                 }
2682
2683                 /*
2684                  * Charge for the whole object allocation now, since
2685                  * we cannot distinguish between non-charged and
2686                  * charged clipped mapping of the same object later.
2687                  */
2688                 KASSERT(obj->charge == 0,
2689                     ("vm_map_protect: object %p overcharged (entry %p)",
2690                     obj, entry));
2691                 if (!swap_reserve(ptoa(obj->size))) {
2692                         VM_OBJECT_WUNLOCK(obj);
2693                         rv = KERN_RESOURCE_SHORTAGE;
2694                         end = entry->end;
2695                         break;
2696                 }
2697
2698                 crhold(cred);
2699                 obj->cred = cred;
2700                 obj->charge = ptoa(obj->size);
2701                 VM_OBJECT_WUNLOCK(obj);
2702         }
2703
2704         /*
2705          * If enough swap space was available, go back and fix up protections.
2706          * Otherwise, just simplify entries, since some may have been modified.
2707          * [Note that clipping is not necessary the second time.]
2708          */
2709         for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry;
2710             entry->start < end;
2711             vm_map_try_merge_entries(map, prev_entry, entry),
2712             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
2713                 if (rv != KERN_SUCCESS ||
2714                     (entry->eflags & MAP_ENTRY_GUARD) != 0)
2715                         continue;
2716
2717                 old_prot = entry->protection;
2718
2719                 if (set_max)
2720                         entry->protection =
2721                             (entry->max_protection = new_prot) &
2722                             old_prot;
2723                 else
2724                         entry->protection = new_prot;
2725
2726                 /*
2727                  * For user wired map entries, the normal lazy evaluation of
2728                  * write access upgrades through soft page faults is
2729                  * undesirable.  Instead, immediately copy any pages that are
2730                  * copy-on-write and enable write access in the physical map.
2731                  */
2732                 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2733                     (entry->protection & VM_PROT_WRITE) != 0 &&
2734                     (old_prot & VM_PROT_WRITE) == 0)
2735                         vm_fault_copy_entry(map, map, entry, entry, NULL);
2736
2737                 /*
2738                  * When restricting access, update the physical map.  Worry
2739                  * about copy-on-write here.
2740                  */
2741                 if ((old_prot & ~entry->protection) != 0) {
2742 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2743                                                         VM_PROT_ALL)
2744                         pmap_protect(map->pmap, entry->start,
2745                             entry->end,
2746                             entry->protection & MASK(entry));
2747 #undef  MASK
2748                 }
2749         }
2750         vm_map_try_merge_entries(map, prev_entry, entry);
2751         vm_map_unlock(map);
2752         return (rv);
2753 }
2754
2755 /*
2756  *      vm_map_madvise:
2757  *
2758  *      This routine traverses a process's map, handling the madvise
2759  *      system call.  Advisories are classified as either those affecting
2760  *      the vm_map_entry structure or those affecting the underlying
2761  *      objects.
2762  */
2763 int
2764 vm_map_madvise(
2765         vm_map_t map,
2766         vm_offset_t start,
2767         vm_offset_t end,
2768         int behav)
2769 {
2770         vm_map_entry_t entry, prev_entry;
2771         bool modify_map;
2772
2773         /*
2774          * Some madvise calls directly modify the vm_map_entry, in which case
2775          * we need to use an exclusive lock on the map and we need to perform
2776          * various clipping operations.  Otherwise we only need a read-lock
2777          * on the map.
2778          */
2779         switch (behav) {
2780         case MADV_NORMAL:
2781         case MADV_SEQUENTIAL:
2782         case MADV_RANDOM:
2783         case MADV_NOSYNC:
2784         case MADV_AUTOSYNC:
2785         case MADV_NOCORE:
2786         case MADV_CORE:
2787                 if (start == end)
2788                         return (0);
2789                 modify_map = true;
2790                 vm_map_lock(map);
2791                 break;
2792         case MADV_WILLNEED:
2793         case MADV_DONTNEED:
2794         case MADV_FREE:
2795                 if (start == end)
2796                         return (0);
2797                 modify_map = false;
2798                 vm_map_lock_read(map);
2799                 break;
2800         default:
2801                 return (EINVAL);
2802         }
2803
2804         /*
2805          * Locate starting entry and clip if necessary.
2806          */
2807         VM_MAP_RANGE_CHECK(map, start, end);
2808
2809         if (modify_map) {
2810                 /*
2811                  * madvise behaviors that are implemented in the vm_map_entry.
2812                  *
2813                  * We clip the vm_map_entry so that behavioral changes are
2814                  * limited to the specified address range.
2815                  */
2816                 for (entry = vm_map_lookup_clip_start(map, start, &prev_entry);
2817                     entry->start < end;
2818                     prev_entry = entry, entry = vm_map_entry_succ(entry)) {
2819                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2820                                 continue;
2821
2822                         vm_map_clip_end(map, entry, end);
2823
2824                         switch (behav) {
2825                         case MADV_NORMAL:
2826                                 vm_map_entry_set_behavior(entry,
2827                                     MAP_ENTRY_BEHAV_NORMAL);
2828                                 break;
2829                         case MADV_SEQUENTIAL:
2830                                 vm_map_entry_set_behavior(entry,
2831                                     MAP_ENTRY_BEHAV_SEQUENTIAL);
2832                                 break;
2833                         case MADV_RANDOM:
2834                                 vm_map_entry_set_behavior(entry,
2835                                     MAP_ENTRY_BEHAV_RANDOM);
2836                                 break;
2837                         case MADV_NOSYNC:
2838                                 entry->eflags |= MAP_ENTRY_NOSYNC;
2839                                 break;
2840                         case MADV_AUTOSYNC:
2841                                 entry->eflags &= ~MAP_ENTRY_NOSYNC;
2842                                 break;
2843                         case MADV_NOCORE:
2844                                 entry->eflags |= MAP_ENTRY_NOCOREDUMP;
2845                                 break;
2846                         case MADV_CORE:
2847                                 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2848                                 break;
2849                         default:
2850                                 break;
2851                         }
2852                         vm_map_try_merge_entries(map, prev_entry, entry);
2853                 }
2854                 vm_map_try_merge_entries(map, prev_entry, entry);
2855                 vm_map_unlock(map);
2856         } else {
2857                 vm_pindex_t pstart, pend;
2858
2859                 /*
2860                  * madvise behaviors that are implemented in the underlying
2861                  * vm_object.
2862                  *
2863                  * Since we don't clip the vm_map_entry, we have to clip
2864                  * the vm_object pindex and count.
2865                  */
2866                 if (!vm_map_lookup_entry(map, start, &entry))
2867                         entry = vm_map_entry_succ(entry);
2868                 for (; entry->start < end;
2869                     entry = vm_map_entry_succ(entry)) {
2870                         vm_offset_t useEnd, useStart;
2871
2872                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2873                                 continue;
2874
2875                         /*
2876                          * MADV_FREE would otherwise rewind time to
2877                          * the creation of the shadow object.  Because
2878                          * we hold the VM map read-locked, neither the
2879                          * entry's object nor the presence of a
2880                          * backing object can change.
2881                          */
2882                         if (behav == MADV_FREE &&
2883                             entry->object.vm_object != NULL &&
2884                             entry->object.vm_object->backing_object != NULL)
2885                                 continue;
2886
2887                         pstart = OFF_TO_IDX(entry->offset);
2888                         pend = pstart + atop(entry->end - entry->start);
2889                         useStart = entry->start;
2890                         useEnd = entry->end;
2891
2892                         if (entry->start < start) {
2893                                 pstart += atop(start - entry->start);
2894                                 useStart = start;
2895                         }
2896                         if (entry->end > end) {
2897                                 pend -= atop(entry->end - end);
2898                                 useEnd = end;
2899                         }
2900
2901                         if (pstart >= pend)
2902                                 continue;
2903
2904                         /*
2905                          * Perform the pmap_advise() before clearing
2906                          * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2907                          * concurrent pmap operation, such as pmap_remove(),
2908                          * could clear a reference in the pmap and set
2909                          * PGA_REFERENCED on the page before the pmap_advise()
2910                          * had completed.  Consequently, the page would appear
2911                          * referenced based upon an old reference that
2912                          * occurred before this pmap_advise() ran.
2913                          */
2914                         if (behav == MADV_DONTNEED || behav == MADV_FREE)
2915                                 pmap_advise(map->pmap, useStart, useEnd,
2916                                     behav);
2917
2918                         vm_object_madvise(entry->object.vm_object, pstart,
2919                             pend, behav);
2920
2921                         /*
2922                          * Pre-populate paging structures in the
2923                          * WILLNEED case.  For wired entries, the
2924                          * paging structures are already populated.
2925                          */
2926                         if (behav == MADV_WILLNEED &&
2927                             entry->wired_count == 0) {
2928                                 vm_map_pmap_enter(map,
2929                                     useStart,
2930                                     entry->protection,
2931                                     entry->object.vm_object,
2932                                     pstart,
2933                                     ptoa(pend - pstart),
2934                                     MAP_PREFAULT_MADVISE
2935                                 );
2936                         }
2937                 }
2938                 vm_map_unlock_read(map);
2939         }
2940         return (0);
2941 }
2942
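/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * applying both classes of advice to the same range.  The MADV_* constants
 * and vm_map_madvise() are as defined above; the helper name is invented for
 * illustration only.
 */
static int
vm_map_madvise_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        int error;

        /* Entry-level advice: handled with the map exclusively locked. */
        error = vm_map_madvise(map, start, end, MADV_SEQUENTIAL);
        if (error != 0)
                return (error);
        /* Object-level advice: handled with only a read lock on the map. */
        return (vm_map_madvise(map, start, end, MADV_DONTNEED));
}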
2943
2944 /*
2945  *      vm_map_inherit:
2946  *
2947  *      Sets the inheritance of the specified address
2948  *      range in the target map.  Inheritance
2949  *      affects how the map will be shared with
2950  *      child maps at the time of vmspace_fork.
2951  */
2952 int
2953 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2954                vm_inherit_t new_inheritance)
2955 {
2956         vm_map_entry_t entry, prev_entry;
2957
2958         switch (new_inheritance) {
2959         case VM_INHERIT_NONE:
2960         case VM_INHERIT_COPY:
2961         case VM_INHERIT_SHARE:
2962         case VM_INHERIT_ZERO:
2963                 break;
2964         default:
2965                 return (KERN_INVALID_ARGUMENT);
2966         }
2967         if (start == end)
2968                 return (KERN_SUCCESS);
2969         vm_map_lock(map);
2970         VM_MAP_RANGE_CHECK(map, start, end);
2971         for (entry = vm_map_lookup_clip_start(map, start, &prev_entry);
2972             entry->start < end;
2973             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
2974                 vm_map_clip_end(map, entry, end);
2975                 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
2976                     new_inheritance != VM_INHERIT_ZERO)
2977                         entry->inheritance = new_inheritance;
2978                 vm_map_try_merge_entries(map, prev_entry, entry);
2979         }
2980         vm_map_try_merge_entries(map, prev_entry, entry);
2981         vm_map_unlock(map);
2982         return (KERN_SUCCESS);
2983 }
2984
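/*
 * Illustrative sketch (not part of the original file): marking a range so
 * that a later vmspace_fork() shares it with the child instead of copying
 * it.  The helper name is invented for illustration only.
 */
static int
vm_map_inherit_share_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_inherit(map, start, end, VM_INHERIT_SHARE));
}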
2985 /*
2986  *      vm_map_entry_in_transition:
2987  *
2988  *      Release the map lock, and sleep until the entry is no longer in
2989  *      transition.  Wake up and reacquire the map lock.  If the map changed
2990  *      while it was unlocked, look up a possibly-changed entry at or after
2991  *      the 'start' position of the old entry.
2992  */
2993 static vm_map_entry_t
2994 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start,
2995     vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry)
2996 {
2997         vm_map_entry_t entry;
2998         vm_offset_t start;
2999         u_int last_timestamp;
3000
3001         VM_MAP_ASSERT_LOCKED(map);
3002         KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3003             ("not in-transition map entry %p", in_entry));
3004         /*
3005          * We have not yet clipped the entry.
3006          */
3007         start = MAX(in_start, in_entry->start);
3008         in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3009         last_timestamp = map->timestamp;
3010         if (vm_map_unlock_and_wait(map, 0)) {
3011                 /*
3012                  * Allow interruption of user wiring/unwiring?
3013                  */
3014         }
3015         vm_map_lock(map);
3016         if (last_timestamp + 1 == map->timestamp)
3017                 return (in_entry);
3018
3019         /*
3020          * Look again for the entry because the map was modified while it was
3021          * unlocked.  Specifically, the entry may have been clipped, merged, or
3022          * deleted.
3023          */
3024         if (!vm_map_lookup_entry(map, start, &entry)) {
3025                 if (!holes_ok) {
3026                         *io_end = start;
3027                         return (NULL);
3028                 }
3029                 entry = vm_map_entry_succ(entry);
3030         }
3031         return (entry);
3032 }
3033
3034 /*
3035  *      vm_map_unwire:
3036  *
3037  *      Implements both kernel and user unwiring.
3038  */
3039 int
3040 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
3041     int flags)
3042 {
3043         vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3044         int rv;
3045         bool holes_ok, need_wakeup, user_unwire;
3046
3047         if (start == end)
3048                 return (KERN_SUCCESS);
3049         holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
3050         user_unwire = (flags & VM_MAP_WIRE_USER) != 0;
3051         vm_map_lock(map);
3052         VM_MAP_RANGE_CHECK(map, start, end);
3053         if (!vm_map_lookup_entry(map, start, &first_entry)) {
3054                 if (holes_ok)
3055                         first_entry = vm_map_entry_succ(first_entry);
3056                 else {
3057                         vm_map_unlock(map);
3058                         return (KERN_INVALID_ADDRESS);
3059                 }
3060         }
3061         rv = KERN_SUCCESS;
3062         for (entry = first_entry; entry->start < end; entry = next_entry) {
3063                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3064                         /*
3065                          * We have not yet clipped the entry.
3066                          */
3067                         next_entry = vm_map_entry_in_transition(map, start,
3068                             &end, holes_ok, entry);
3069                         if (next_entry == NULL) {
3070                                 if (entry == first_entry) {
3071                                         vm_map_unlock(map);
3072                                         return (KERN_INVALID_ADDRESS);
3073                                 }
3074                                 rv = KERN_INVALID_ADDRESS;
3075                                 break;
3076                         }
3077                         first_entry = (entry == first_entry) ?
3078                             next_entry : NULL;
3079                         continue;
3080                 }
3081                 vm_map_clip_start(map, entry, start);
3082                 vm_map_clip_end(map, entry, end);
3083                 /*
3084                  * Mark the entry in case the map lock is released.  (See
3085                  * above.)
3086                  */
3087                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3088                     entry->wiring_thread == NULL,
3089                     ("owned map entry %p", entry));
3090                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3091                 entry->wiring_thread = curthread;
3092                 next_entry = vm_map_entry_succ(entry);
3093                 /*
3094                  * Check the map for holes in the specified region.
3095                  * If holes_ok, skip this check.
3096                  */
3097                 if (!holes_ok &&
3098                     entry->end < end && next_entry->start > entry->end) {
3099                         end = entry->end;
3100                         rv = KERN_INVALID_ADDRESS;
3101                         break;
3102                 }
3103                 /*
3104                  * If system unwiring, require that the entry is system wired.
3105                  */
3106                 if (!user_unwire &&
3107                     vm_map_entry_system_wired_count(entry) == 0) {
3108                         end = entry->end;
3109                         rv = KERN_INVALID_ARGUMENT;
3110                         break;
3111                 }
3112         }
3113         need_wakeup = false;
3114         if (first_entry == NULL &&
3115             !vm_map_lookup_entry(map, start, &first_entry)) {
3116                 KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
3117                 prev_entry = first_entry;
3118                 entry = vm_map_entry_succ(first_entry);
3119         } else {
3120                 prev_entry = vm_map_entry_pred(first_entry);
3121                 entry = first_entry;
3122         }
3123         for (; entry->start < end;
3124             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3125                 /*
3126                  * If holes_ok was specified, an empty
3127                  * space in the unwired region could have been mapped
3128                  * while the map lock was dropped for draining
3129                  * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
3130                  * could be simultaneously wiring this new mapping
3131                  * entry.  Detect these cases and skip any entries
3132                  * not marked as in transition by us.
3133                  */
3134                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3135                     entry->wiring_thread != curthread) {
3136                         KASSERT(holes_ok,
3137                             ("vm_map_unwire: !HOLESOK and new/changed entry"));
3138                         continue;
3139                 }
3140
3141                 if (rv == KERN_SUCCESS && (!user_unwire ||
3142                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
3143                         if (entry->wired_count == 1)
3144                                 vm_map_entry_unwire(map, entry);
3145                         else
3146                                 entry->wired_count--;
3147                         if (user_unwire)
3148                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3149                 }
3150                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3151                     ("vm_map_unwire: in-transition flag missing %p", entry));
3152                 KASSERT(entry->wiring_thread == curthread,
3153                     ("vm_map_unwire: alien wire %p", entry));
3154                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
3155                 entry->wiring_thread = NULL;
3156                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3157                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3158                         need_wakeup = true;
3159                 }
3160                 vm_map_try_merge_entries(map, prev_entry, entry);
3161         }
3162         vm_map_try_merge_entries(map, prev_entry, entry);
3163         vm_map_unlock(map);
3164         if (need_wakeup)
3165                 vm_map_wakeup(map);
3166         return (rv);
3167 }
3168
3169 static void
3170 vm_map_wire_user_count_sub(u_long npages)
3171 {
3172
3173         atomic_subtract_long(&vm_user_wire_count, npages);
3174 }
3175
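/*
 * Attempt to charge npages against the system-wide limit on user-wired
 * pages.  The update is lock-free: atomic_fcmpset_long() reloads the current
 * count on failure, and the attempt is abandoned if the limit would be
 * exceeded.
 */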
3176 static bool
3177 vm_map_wire_user_count_add(u_long npages)
3178 {
3179         u_long wired;
3180
3181         wired = vm_user_wire_count;
3182         do {
3183                 if (npages + wired > vm_page_max_user_wired)
3184                         return (false);
3185         } while (!atomic_fcmpset_long(&vm_user_wire_count, &wired,
3186             npages + wired));
3187
3188         return (true);
3189 }
3190
3191 /*
3192  *      vm_map_wire_entry_failure:
3193  *
3194  *      Handle a wiring failure on the given entry.
3195  *
3196  *      The map should be locked.
3197  */
3198 static void
3199 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
3200     vm_offset_t failed_addr)
3201 {
3202
3203         VM_MAP_ASSERT_LOCKED(map);
3204         KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
3205             entry->wired_count == 1,
3206             ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
3207         KASSERT(failed_addr < entry->end,
3208             ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
3209
3210         /*
3211          * If any pages at the start of this entry were successfully wired,
3212          * then unwire them.
3213          */
3214         if (failed_addr > entry->start) {
3215                 pmap_unwire(map->pmap, entry->start, failed_addr);
3216                 vm_object_unwire(entry->object.vm_object, entry->offset,
3217                     failed_addr - entry->start, PQ_ACTIVE);
3218         }
3219
3220         /*
3221          * Assign an out-of-range value to represent the failure to wire this
3222          * entry.
3223          */
3224         entry->wired_count = -1;
3225 }
3226
3227 int
3228 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3229 {
3230         int rv;
3231
3232         vm_map_lock(map);
3233         rv = vm_map_wire_locked(map, start, end, flags);
3234         vm_map_unlock(map);
3235         return (rv);
3236 }
3237
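/*
 * Illustrative sketch (not part of the original file): wiring a user range
 * for the duration of an operation and unwiring it afterwards, with the
 * flags an mlock(2)-style caller would use.  The helper name is invented for
 * illustration only.
 */
static int
vm_map_wire_range_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        int rv;

        rv = vm_map_wire(map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        if (rv != KERN_SUCCESS)
                return (ENOMEM);
        /* ... operate on the wired range ... */
        (void)vm_map_unwire(map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        return (0);
}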
3238
3239 /*
3240  *      vm_map_wire_locked:
3241  *
3242  *      Implements both kernel and user wiring.  Returns with the map locked;
3243  *      the map lock may be dropped and reacquired during the call.
3244  */
3245 int
3246 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3247 {
3248         vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3249         vm_offset_t faddr, saved_end, saved_start;
3250         u_long npages;
3251         u_int last_timestamp;
3252         int rv;
3253         bool holes_ok, need_wakeup, user_wire;
3254         vm_prot_t prot;
3255
3256         VM_MAP_ASSERT_LOCKED(map);
3257
3258         if (start == end)
3259                 return (KERN_SUCCESS);
3260         prot = 0;
3261         if (flags & VM_MAP_WIRE_WRITE)
3262                 prot |= VM_PROT_WRITE;
3263         holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
3264         user_wire = (flags & VM_MAP_WIRE_USER) != 0;
3265         VM_MAP_RANGE_CHECK(map, start, end);
3266         if (!vm_map_lookup_entry(map, start, &first_entry)) {
3267                 if (holes_ok)
3268                         first_entry = vm_map_entry_succ(first_entry);
3269                 else
3270                         return (KERN_INVALID_ADDRESS);
3271         }
3272         for (entry = first_entry; entry->start < end; entry = next_entry) {
3273                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3274                         /*
3275                          * We have not yet clipped the entry.
3276                          */
3277                         next_entry = vm_map_entry_in_transition(map, start,
3278                             &end, holes_ok, entry);
3279                         if (next_entry == NULL) {
3280                                 if (entry == first_entry)
3281                                         return (KERN_INVALID_ADDRESS);
3282                                 rv = KERN_INVALID_ADDRESS;
3283                                 goto done;
3284                         }
3285                         first_entry = (entry == first_entry) ?
3286                             next_entry : NULL;
3287                         continue;
3288                 }
3289                 vm_map_clip_start(map, entry, start);
3290                 vm_map_clip_end(map, entry, end);
3291                 /*
3292                  * Mark the entry in case the map lock is released.  (See
3293                  * above.)
3294                  */
3295                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3296                     entry->wiring_thread == NULL,
3297                     ("owned map entry %p", entry));
3298                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3299                 entry->wiring_thread = curthread;
3300                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
3301                     || (entry->protection & prot) != prot) {
3302                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
3303                         if (!holes_ok) {
3304                                 end = entry->end;
3305                                 rv = KERN_INVALID_ADDRESS;
3306                                 goto done;
3307                         }
3308                 } else if (entry->wired_count == 0) {
3309                         entry->wired_count++;
3310
3311                         npages = atop(entry->end - entry->start);
3312                         if (user_wire && !vm_map_wire_user_count_add(npages)) {
3313                                 vm_map_wire_entry_failure(map, entry,
3314                                     entry->start);
3315                                 end = entry->end;
3316                                 rv = KERN_RESOURCE_SHORTAGE;
3317                                 goto done;
3318                         }
3319
3320                         /*
3321                          * Release the map lock, relying on the in-transition
3322                          * mark.  Mark the map busy for fork.
3323                          */
3324                         saved_start = entry->start;
3325                         saved_end = entry->end;
3326                         last_timestamp = map->timestamp;
3327                         vm_map_busy(map);
3328                         vm_map_unlock(map);
3329
3330                         faddr = saved_start;
3331                         do {
3332                                 /*
3333                                  * Simulate a fault to get the page and enter
3334                                  * it into the physical map.
3335                                  */
3336                                 if ((rv = vm_fault(map, faddr,
3337                                     VM_PROT_NONE, VM_FAULT_WIRE, NULL)) !=
3338                                     KERN_SUCCESS)
3339                                         break;
3340                         } while ((faddr += PAGE_SIZE) < saved_end);
3341                         vm_map_lock(map);
3342                         vm_map_unbusy(map);
3343                         if (last_timestamp + 1 != map->timestamp) {
3344                                 /*
3345                                  * Look again for the entry because the map was
3346                                  * modified while it was unlocked.  The entry
3347                                  * may have been clipped, but NOT merged or
3348                                  * deleted.
3349                                  */
3350                                 if (!vm_map_lookup_entry(map, saved_start,
3351                                     &next_entry))
3352                                         KASSERT(false,
3353                                             ("vm_map_wire: lookup failed"));
3354                                 first_entry = (entry == first_entry) ?
3355                                     next_entry : NULL;
3356                                 for (entry = next_entry; entry->end < saved_end;
3357                                     entry = vm_map_entry_succ(entry)) {
3358                                         /*
3359                                          * In case of failure, handle entries
3360                                          * that were not fully wired here;
3361                                          * fully wired entries are handled
3362                                          * later.
3363                                          */
3364                                         if (rv != KERN_SUCCESS &&
3365                                             faddr < entry->end)
3366                                                 vm_map_wire_entry_failure(map,
3367                                                     entry, faddr);
3368                                 }
3369                         }
3370                         if (rv != KERN_SUCCESS) {
3371                                 vm_map_wire_entry_failure(map, entry, faddr);
3372                                 if (user_wire)
3373                                         vm_map_wire_user_count_sub(npages);
3374                                 end = entry->end;
3375                                 goto done;
3376                         }
3377                 } else if (!user_wire ||
3378                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3379                         entry->wired_count++;
3380                 }
3381                 /*
3382                  * Check the map for holes in the specified region.
3383                  * If holes_ok was specified, skip this check.
3384                  */
3385                 next_entry = vm_map_entry_succ(entry);
3386                 if (!holes_ok &&
3387                     entry->end < end && next_entry->start > entry->end) {
3388                         end = entry->end;
3389                         rv = KERN_INVALID_ADDRESS;
3390                         goto done;
3391                 }
3392         }
3393         rv = KERN_SUCCESS;
3394 done:
3395         need_wakeup = false;
3396         if (first_entry == NULL &&
3397             !vm_map_lookup_entry(map, start, &first_entry)) {
3398                 KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
3399                 prev_entry = first_entry;
3400                 entry = vm_map_entry_succ(first_entry);
3401         } else {
3402                 prev_entry = vm_map_entry_pred(first_entry);
3403                 entry = first_entry;
3404         }
3405         for (; entry->start < end;
3406             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3407                 /*
3408                  * If holes_ok was specified, an empty
3409                  * space in the unwired region could have been mapped
3410                  * while the map lock was dropped for faulting in the
3411                  * pages or draining MAP_ENTRY_IN_TRANSITION.
3412                  * Moreover, another thread could be simultaneously
3413                  * wiring this new mapping entry.  Detect these cases
3414                  * and skip any entries not marked as in transition by us.
3415                  */
3416                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3417                     entry->wiring_thread != curthread) {
3418                         KASSERT(holes_ok,
3419                             ("vm_map_wire: !HOLESOK and new/changed entry"));
3420                         continue;
3421                 }
3422
3423                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) {
3424                         /* do nothing */
3425                 } else if (rv == KERN_SUCCESS) {
3426                         if (user_wire)
3427                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
3428                 } else if (entry->wired_count == -1) {
3429                         /*
3430                          * Wiring failed on this entry.  Thus, unwiring is
3431                          * unnecessary.
3432                          */
3433                         entry->wired_count = 0;
3434                 } else if (!user_wire ||
3435                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3436                         /*
3437                          * Undo the wiring.  Wiring succeeded on this entry
3438                          * but failed on a later entry.  
3439                          */
3440                         if (entry->wired_count == 1) {
3441                                 vm_map_entry_unwire(map, entry);
3442                                 if (user_wire)
3443                                         vm_map_wire_user_count_sub(
3444                                             atop(entry->end - entry->start));
3445                         } else
3446                                 entry->wired_count--;
3447                 }
3448                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3449                     ("vm_map_wire: in-transition flag missing %p", entry));
3450                 KASSERT(entry->wiring_thread == curthread,
3451                     ("vm_map_wire: alien wire %p", entry));
3452                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
3453                     MAP_ENTRY_WIRE_SKIPPED);
3454                 entry->wiring_thread = NULL;
3455                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3456                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3457                         need_wakeup = true;
3458                 }
3459                 vm_map_try_merge_entries(map, prev_entry, entry);
3460         }
3461         vm_map_try_merge_entries(map, prev_entry, entry);
3462         if (need_wakeup)
3463                 vm_map_wakeup(map);
3464         return (rv);
3465 }
3466
3467 /*
3468  * vm_map_sync
3469  *
3470  * Push any dirty cached pages in the address range to their pager.
3471  * If syncio is TRUE, dirty pages are written synchronously.
3472  * If invalidate is TRUE, any cached pages are freed as well.
3473  *
3474  * If the size of the region from start to end is zero, we are
3475  * supposed to flush all modified pages within the region containing
3476  * start.  Unfortunately, a region can be split or coalesced with
3477  * neighboring regions, making it difficult to determine what the
3478  * original region was.  Therefore, we approximate this requirement by
3479  * flushing the current region containing start.
3480  *
3481  * Returns an error if any part of the specified range is not mapped.
3482  */
3483 int
3484 vm_map_sync(
3485         vm_map_t map,
3486         vm_offset_t start,
3487         vm_offset_t end,
3488         boolean_t syncio,
3489         boolean_t invalidate)
3490 {
3491         vm_map_entry_t entry, first_entry, next_entry;
3492         vm_size_t size;
3493         vm_object_t object;
3494         vm_ooffset_t offset;
3495         unsigned int last_timestamp;
3496         boolean_t failed;
3497
3498         vm_map_lock_read(map);
3499         VM_MAP_RANGE_CHECK(map, start, end);
3500         if (!vm_map_lookup_entry(map, start, &first_entry)) {
3501                 vm_map_unlock_read(map);
3502                 return (KERN_INVALID_ADDRESS);
3503         } else if (start == end) {
3504                 start = first_entry->start;
3505                 end = first_entry->end;
3506         }
3507         /*
3508          * Make a first pass to check for user-wired memory and holes.
3509          */
3510         for (entry = first_entry; entry->start < end; entry = next_entry) {
3511                 if (invalidate &&
3512                     (entry->eflags & MAP_ENTRY_USER_WIRED) != 0) {
3513                         vm_map_unlock_read(map);
3514                         return (KERN_INVALID_ARGUMENT);
3515                 }
3516                 next_entry = vm_map_entry_succ(entry);
3517                 if (end > entry->end &&
3518                     entry->end != next_entry->start) {
3519                         vm_map_unlock_read(map);
3520                         return (KERN_INVALID_ADDRESS);
3521                 }
3522         }
3523
3524         if (invalidate)
3525                 pmap_remove(map->pmap, start, end);
3526         failed = FALSE;
3527
3528         /*
3529          * Make a second pass, cleaning/uncaching pages from the indicated
3530          * objects as we go.
3531          */
3532         for (entry = first_entry; entry->start < end;) {
3533                 offset = entry->offset + (start - entry->start);
3534                 size = (end <= entry->end ? end : entry->end) - start;
3535                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
3536                         vm_map_t smap;
3537                         vm_map_entry_t tentry;
3538                         vm_size_t tsize;
3539
3540                         smap = entry->object.sub_map;
3541                         vm_map_lock_read(smap);
3542                         (void) vm_map_lookup_entry(smap, offset, &tentry);
3543                         tsize = tentry->end - offset;
3544                         if (tsize < size)
3545                                 size = tsize;
3546                         object = tentry->object.vm_object;
3547                         offset = tentry->offset + (offset - tentry->start);
3548                         vm_map_unlock_read(smap);
3549                 } else {
3550                         object = entry->object.vm_object;
3551                 }
3552                 vm_object_reference(object);
3553                 last_timestamp = map->timestamp;
3554                 vm_map_unlock_read(map);
3555                 if (!vm_object_sync(object, offset, size, syncio, invalidate))
3556                         failed = TRUE;
3557                 start += size;
3558                 vm_object_deallocate(object);
3559                 vm_map_lock_read(map);
3560                 if (last_timestamp == map->timestamp ||
3561                     !vm_map_lookup_entry(map, start, &entry))
3562                         entry = vm_map_entry_succ(entry);
3563         }
3564
3565         vm_map_unlock_read(map);
3566         return (failed ? KERN_FAILURE : KERN_SUCCESS);
3567 }
3568
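/*
 * Illustrative sketch (not part of the original file): flushing a range
 * synchronously and invalidating clean cached pages, roughly what an
 * msync(MS_SYNC | MS_INVALIDATE) request resolves to.  The helper name is
 * invented for illustration only.
 */
static int
vm_map_sync_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_sync(map, start, end, TRUE, TRUE));
}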
3569 /*
3570  *      vm_map_entry_unwire:    [ internal use only ]
3571  *
3572  *      Make the region specified by this entry pageable.
3573  *
3574  *      The map in question should be locked.
3575  *      [This is the reason for this routine's existence.]
3576  */
3577 static void
3578 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3579 {
3580         vm_size_t size;
3581
3582         VM_MAP_ASSERT_LOCKED(map);
3583         KASSERT(entry->wired_count > 0,
3584             ("vm_map_entry_unwire: entry %p isn't wired", entry));
3585
3586         size = entry->end - entry->start;
3587         if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
3588                 vm_map_wire_user_count_sub(atop(size));
3589         pmap_unwire(map->pmap, entry->start, entry->end);
3590         vm_object_unwire(entry->object.vm_object, entry->offset, size,
3591             PQ_ACTIVE);
3592         entry->wired_count = 0;
3593 }
3594
3595 static void
3596 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
3597 {
3598
3599         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3600                 vm_object_deallocate(entry->object.vm_object);
3601         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
3602 }
3603
3604 /*
3605  *      vm_map_entry_delete:    [ internal use only ]
3606  *
3607  *      Deallocate the given entry from the target map.
3608  */
3609 static void
3610 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3611 {
3612         vm_object_t object;
3613         vm_pindex_t offidxstart, offidxend, size1;
3614         vm_size_t size;
3615
3616         vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
3617         object = entry->object.vm_object;
3618
3619         if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3620                 MPASS(entry->cred == NULL);
3621                 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3622                 MPASS(object == NULL);
3623                 vm_map_entry_deallocate(entry, map->system_map);
3624                 return;
3625         }
3626
3627         size = entry->end - entry->start;
3628         map->size -= size;
3629
3630         if (entry->cred != NULL) {
3631                 swap_release_by_cred(size, entry->cred);
3632                 crfree(entry->cred);
3633         }
3634
3635         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) {
3636                 entry->object.vm_object = NULL;
3637         } else if ((object->flags & OBJ_ANON) != 0 ||
3638             object == kernel_object) {
3639                 KASSERT(entry->cred == NULL || object->cred == NULL ||
3640                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3641                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
3642                 offidxstart = OFF_TO_IDX(entry->offset);
3643                 offidxend = offidxstart + atop(size);
3644                 VM_OBJECT_WLOCK(object);
3645                 if (object->ref_count != 1 &&
3646                     ((object->flags & OBJ_ONEMAPPING) != 0 ||
3647                     object == kernel_object)) {
3648                         vm_object_collapse(object);
3649
3650                         /*
3651                          * The option OBJPR_NOTMAPPED can be passed here
3652                          * because vm_map_delete() already performed
3653                          * pmap_remove() on the only mapping to this range
3654                          * of pages. 
3655                          */
3656                         vm_object_page_remove(object, offidxstart, offidxend,
3657                             OBJPR_NOTMAPPED);
3658                         if (offidxend >= object->size &&
3659                             offidxstart < object->size) {
3660                                 size1 = object->size;
3661                                 object->size = offidxstart;
3662                                 if (object->cred != NULL) {
3663                                         size1 -= object->size;
3664                                         KASSERT(object->charge >= ptoa(size1),
3665                                             ("object %p charge < 0", object));
3666                                         swap_release_by_cred(ptoa(size1),
3667                                             object->cred);
3668                                         object->charge -= ptoa(size1);
3669                                 }
3670                         }
3671                 }
3672                 VM_OBJECT_WUNLOCK(object);
3673         }
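        /*
         * System map entries are freed immediately.  User map entries are
         * queued on td_map_def_user so that vm_map_process_deferred() can
         * release them, and their object references, after the map lock has
         * been dropped.
         */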
3674         if (map->system_map)
3675                 vm_map_entry_deallocate(entry, TRUE);
3676         else {
3677                 entry->defer_next = curthread->td_map_def_user;
3678                 curthread->td_map_def_user = entry;
3679         }
3680 }
3681
3682 /*
3683  *      vm_map_delete:  [ internal use only ]
3684  *
3685  *      Deallocates the given address range from the target
3686  *      map.
3687  */
3688 int
3689 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
3690 {
3691         vm_map_entry_t entry, next_entry;
3692
3693         VM_MAP_ASSERT_LOCKED(map);
3694
3695         if (start == end)
3696                 return (KERN_SUCCESS);
3697
3698         /*
3699          * Find the start of the region, and clip it.
3700          * Step through all entries in this region.
3701          */
3702         for (entry = vm_map_lookup_clip_start(map, start, &entry);
3703             entry->start < end; entry = next_entry) {
3704                 /*
3705                  * Wait for wiring or unwiring of an entry to complete.
3706                  * Also wait for any system wirings to disappear on
3707                  * user maps.
3708                  */
3709                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
3710                     (vm_map_pmap(map) != kernel_pmap &&
3711                     vm_map_entry_system_wired_count(entry) != 0)) {
3712                         unsigned int last_timestamp;
3713                         vm_offset_t saved_start;
3714
3715                         saved_start = entry->start;
3716                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3717                         last_timestamp = map->timestamp;
3718                         (void) vm_map_unlock_and_wait(map, 0);
3719                         vm_map_lock(map);
3720                         if (last_timestamp + 1 != map->timestamp) {
3721                                 /*
3722                                  * Look again for the entry because the map was
3723                                  * modified while it was unlocked.
3724                                  * Specifically, the entry may have been
3725                                  * clipped, merged, or deleted.
3726                                  */
3727                                 next_entry = vm_map_lookup_clip_start(map,
3728                                     saved_start, &next_entry);
3729                         } else
3730                                 next_entry = entry;
3731                         continue;
3732                 }
3733                 vm_map_clip_end(map, entry, end);
3734                 next_entry = vm_map_entry_succ(entry);
3735
3736                 /*
3737                  * Unwire before removing addresses from the pmap; otherwise,
3738                  * unwiring will put the entries back in the pmap.
3739                  */
3740                 if (entry->wired_count != 0)
3741                         vm_map_entry_unwire(map, entry);
3742
3743                 /*
3744                  * Remove mappings for the pages, but only if the
3745                  * mappings could exist.  For instance, it does not
3746                  * make sense to call pmap_remove() for guard entries.
3747                  */
3748                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
3749                     entry->object.vm_object != NULL)
3750                         pmap_remove(map->pmap, entry->start, entry->end);
3751
3752                 if (entry->end == map->anon_loc)
3753                         map->anon_loc = entry->start;
3754
3755                 /*
3756                  * Delete the entry only after removing all pmap
3757                  * entries pointing to its pages.  (Otherwise, its
3758                  * page frames may be reallocated, and any modify bits
3759                  * will be set in the wrong object!)
3760                  */
3761                 vm_map_entry_delete(map, entry);
3762         }
3763         return (KERN_SUCCESS);
3764 }
3765
3766 /*
3767  *      vm_map_remove:
3768  *
3769  *      Remove the given address range from the target map.
3770  *      This is the exported form of vm_map_delete.
3771  */
3772 int
3773 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3774 {
3775         int result;
3776
3777         vm_map_lock(map);
3778         VM_MAP_RANGE_CHECK(map, start, end);
3779         result = vm_map_delete(map, start, end);
3780         vm_map_unlock(map);
3781         return (result);
3782 }
3783
3784 /*
3785  *      vm_map_check_protection:
3786  *
3787  *      Assert that the target map allows the specified privilege on the
3788  *      entire address region given.  The entire region must be allocated.
3789  *
3790  *      WARNING!  This code does not and should not check whether the
3791  *      contents of the region are accessible.  For example, a smaller file
3792  *      might be mapped into a larger address space.
3793  *
3794  *      NOTE!  This code is also called by munmap().
3795  *
3796  *      The map must be locked.  A read lock is sufficient.
3797  */
3798 boolean_t
3799 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3800                         vm_prot_t protection)
3801 {
3802         vm_map_entry_t entry;
3803         vm_map_entry_t tmp_entry;
3804
3805         if (!vm_map_lookup_entry(map, start, &tmp_entry))
3806                 return (FALSE);
3807         entry = tmp_entry;
3808
3809         while (start < end) {
3810                 /*
3811                  * No holes allowed!
3812                  */
3813                 if (start < entry->start)
3814                         return (FALSE);
3815                 /*
3816                  * Check protection associated with entry.
3817                  */
3818                 if ((entry->protection & protection) != protection)
3819                         return (FALSE);
3820                 /* go to next entry */
3821                 start = entry->end;
3822                 entry = vm_map_entry_succ(entry);
3823         }
3824         return (TRUE);
3825 }
3826
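/*
 * Illustrative sketch (not part of the original file): verifying that an
 * entire range is mapped with read permission before starting a copy, taking
 * the read lock that the function above requires.  The helper name is
 * invented for illustration only.
 */
static boolean_t
vm_map_range_readable_example(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        boolean_t ok;

        vm_map_lock_read(map);
        ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
        vm_map_unlock_read(map);
        return (ok);
}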
3827
3828 /*
3829  *
3830  *      vm_map_copy_swap_object:
3831  *
3832  *      Copies a swap-backed object from an existing map entry to a
3833  *      new one.  Carries forward the swap charge.  May change the
3834  *      src object on return.
3835  */
3836 static void
3837 vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry,
3838     vm_offset_t size, vm_ooffset_t *fork_charge)
3839 {
3840         vm_object_t src_object;
3841         struct ucred *cred;
3842         int charged;
3843
3844         src_object = src_entry->object.vm_object;
3845         charged = ENTRY_CHARGED(src_entry);
3846         if ((src_object->flags & OBJ_ANON) != 0) {
3847                 VM_OBJECT_WLOCK(src_object);
3848                 vm_object_collapse(src_object);
3849                 if ((src_object->flags & OBJ_ONEMAPPING) != 0) {
3850                         vm_object_split(src_entry);
3851                         src_object = src_entry->object.vm_object;
3852                 }
3853                 vm_object_reference_locked(src_object);
3854                 vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3855                 VM_OBJECT_WUNLOCK(src_object);
3856         } else
3857                 vm_object_reference(src_object);
3858         if (src_entry->cred != NULL &&
3859             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3860                 KASSERT(src_object->cred == NULL,
3861                     ("OVERCOMMIT: vm_map_copy_anon_entry: cred %p",
3862                      src_object));
3863                 src_object->cred = src_entry->cred;
3864                 src_object->charge = size;
3865         }
3866         dst_entry->object.vm_object = src_object;
3867         if (charged) {
3868                 cred = curthread->td_ucred;
3869                 crhold(cred);
3870                 dst_entry->cred = cred;
3871                 *fork_charge += size;
3872                 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3873                         crhold(cred);
3874                         src_entry->cred = cred;
3875                         *fork_charge += size;
3876                 }
3877         }
3878 }
3879
3880 /*
3881  *      vm_map_copy_entry:
3882  *
3883  *      Copies the contents of the source entry to the destination
3884  *      entry.  The entries *must* be aligned properly.
3885  */
3886 static void
3887 vm_map_copy_entry(
3888         vm_map_t src_map,
3889         vm_map_t dst_map,
3890         vm_map_entry_t src_entry,
3891         vm_map_entry_t dst_entry,
3892         vm_ooffset_t *fork_charge)
3893 {
3894         vm_object_t src_object;
3895         vm_map_entry_t fake_entry;
3896         vm_offset_t size;
3897
3898         VM_MAP_ASSERT_LOCKED(dst_map);
3899
3900         if ((dst_entry->eflags | src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3901                 return;
3902
3903         if (src_entry->wired_count == 0 ||
3904             (src_entry->protection & VM_PROT_WRITE) == 0) {
3905                 /*
3906                  * If the source entry is marked needs_copy, it is already
3907                  * write-protected.
3908                  */
3909                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3910                     (src_entry->protection & VM_PROT_WRITE) != 0) {
3911                         pmap_protect(src_map->pmap,
3912                             src_entry->start,
3913                             src_entry->end,
3914                             src_entry->protection & ~VM_PROT_WRITE);
3915                 }
3916
3917                 /*
3918                  * Make a copy of the object.
3919                  */
3920                 size = src_entry->end - src_entry->start;
3921                 if ((src_object = src_entry->object.vm_object) != NULL) {
3922                         if (src_object->type == OBJT_DEFAULT ||
3923                             src_object->type == OBJT_SWAP) {
3924                                 vm_map_copy_swap_object(src_entry, dst_entry,
3925                                     size, fork_charge);
3926                                 /* May have split/collapsed, reload obj. */
3927                                 src_object = src_entry->object.vm_object;
3928                         } else {
3929                                 vm_object_reference(src_object);
3930                                 dst_entry->object.vm_object = src_object;
3931                         }
3932                         src_entry->eflags |= MAP_ENTRY_COW |
3933                             MAP_ENTRY_NEEDS_COPY;
3934                         dst_entry->eflags |= MAP_ENTRY_COW |
3935                             MAP_ENTRY_NEEDS_COPY;
3936                         dst_entry->offset = src_entry->offset;
3937                         if (src_entry->eflags & MAP_ENTRY_WRITECNT) {
3938                                 /*
3939                                  * MAP_ENTRY_WRITECNT cannot
3940                                  * indicate write reference from
3941                                  * src_entry, since the entry is
3942                                  * marked as needs copy.  Allocate a
3943                                  * fake entry that is used to
3944                                  * decrement object->un_pager writecount
3945                                  * at the appropriate time.  Attach
3946                                  * fake_entry to the deferred list.
3947                                  */
3948                                 fake_entry = vm_map_entry_create(dst_map);
3949                                 fake_entry->eflags = MAP_ENTRY_WRITECNT;
3950                                 src_entry->eflags &= ~MAP_ENTRY_WRITECNT;
3951                                 vm_object_reference(src_object);
3952                                 fake_entry->object.vm_object = src_object;
3953                                 fake_entry->start = src_entry->start;
3954                                 fake_entry->end = src_entry->end;
3955                                 fake_entry->defer_next =
3956                                     curthread->td_map_def_user;
3957                                 curthread->td_map_def_user = fake_entry;
3958                         }
3959
3960                         pmap_copy(dst_map->pmap, src_map->pmap,
3961                             dst_entry->start, dst_entry->end - dst_entry->start,
3962                             src_entry->start);
3963                 } else {
3964                         dst_entry->object.vm_object = NULL;
3965                         dst_entry->offset = 0;
3966                         if (src_entry->cred != NULL) {
3967                                 dst_entry->cred = curthread->td_ucred;
3968                                 crhold(dst_entry->cred);
3969                                 *fork_charge += size;
3970                         }
3971                 }
3972         } else {
3973                 /*
3974                  * We don't want to make writeable wired pages copy-on-write.
3975                  * Immediately copy these pages into the new map by simulating
3976                  * page faults.  The new pages are pageable.
3977                  */
3978                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3979                     fork_charge);
3980         }
3981 }
3982
3983 /*
3984  * vmspace_map_entry_forked:
3985  * Update the newly-forked vmspace each time a map entry is inherited
3986  * or copied.  The values for vm_dsize and vm_tsize are approximate
3987  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3988  */
3989 static void
3990 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3991     vm_map_entry_t entry)
3992 {
3993         vm_size_t entrysize;
3994         vm_offset_t newend;
3995
3996         if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
3997                 return;
3998         entrysize = entry->end - entry->start;
3999         vm2->vm_map.size += entrysize;
4000         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
4001                 vm2->vm_ssize += btoc(entrysize);
4002         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
4003             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
4004                 newend = MIN(entry->end,
4005                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
4006                 vm2->vm_dsize += btoc(newend - entry->start);
4007         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
4008             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
4009                 newend = MIN(entry->end,
4010                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
4011                 vm2->vm_tsize += btoc(newend - entry->start);
4012         }
4013 }
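/*
 * Editorial sketch (not part of the original source): a worked example of the
 * vm_dsize accounting above.  Assume 4 KB pages, vm1->vm_daddr == 0x1000000
 * and vm1->vm_dsize == 4 pages, so the data segment covers
 * [0x1000000, 0x1004000).  For an inherited entry spanning
 * [0x1002000, 0x1008000), newend = MIN(0x1008000, 0x1004000) = 0x1004000 and
 * vm2->vm_dsize is increased by btoc(0x1004000 - 0x1002000) = 2 pages.
 */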
4014
4015 /*
4016  * vmspace_fork:
4017  * Create a new process vmspace structure and vm_map
4018  * based on those of an existing process.  The new map
4019  * is based on the old map, according to the inheritance
4020  * values on the regions in that map.
4021  *
4022  * XXX It might be worth coalescing the entries added to the new vmspace.
4023  *
4024  * The source map must not be locked.
4025  */
4026 struct vmspace *
4027 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
4028 {
4029         struct vmspace *vm2;
4030         vm_map_t new_map, old_map;
4031         vm_map_entry_t new_entry, old_entry;
4032         vm_object_t object;
4033         int error, locked;
4034         vm_inherit_t inh;
4035
4036         old_map = &vm1->vm_map;
4037         /* Copy immutable fields of vm1 to vm2. */
4038         vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
4039             pmap_pinit);
4040         if (vm2 == NULL)
4041                 return (NULL);
4042
4043         vm2->vm_taddr = vm1->vm_taddr;
4044         vm2->vm_daddr = vm1->vm_daddr;
4045         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
4046         vm_map_lock(old_map);
4047         if (old_map->busy)
4048                 vm_map_wait_busy(old_map);
4049         new_map = &vm2->vm_map;
4050         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
4051         KASSERT(locked, ("vmspace_fork: lock failed"));
4052
4053         error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
4054         if (error != 0) {
4055                 sx_xunlock(&old_map->lock);
4056                 sx_xunlock(&new_map->lock);
4057                 vm_map_process_deferred();
4058                 vmspace_free(vm2);
4059                 return (NULL);
4060         }
4061
4062         new_map->anon_loc = old_map->anon_loc;
4063         new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART);
4064
4065         VM_MAP_ENTRY_FOREACH(old_entry, old_map) {
4066                 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
4067                         panic("vm_map_fork: encountered a submap");
4068
4069                 inh = old_entry->inheritance;
4070                 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4071                     inh != VM_INHERIT_NONE)
4072                         inh = VM_INHERIT_COPY;
4073
4074                 switch (inh) {
4075                 case VM_INHERIT_NONE:
4076                         break;
4077
4078                 case VM_INHERIT_SHARE:
4079                         /*
4080                          * Clone the entry, creating the shared object if
4081                          * necessary.
4082                          */
4083                         object = old_entry->object.vm_object;
4084                         if (object == NULL) {
4085                                 vm_map_entry_back(old_entry);
4086                                 object = old_entry->object.vm_object;
4087                         }
4088
4089                         /*
4090                          * Add the reference before calling vm_object_shadow
4091                          * to insure that a shadow object is created.
4092                          */
4093                         vm_object_reference(object);
4094                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4095                                 vm_object_shadow(&old_entry->object.vm_object,
4096                                     &old_entry->offset,
4097                                     old_entry->end - old_entry->start,
4098                                     old_entry->cred,
4099                                     /* Transfer the second reference too. */
4100                                     true);
4101                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4102                                 old_entry->cred = NULL;
4103
4104                                 /*
4105                                  * As in vm_map_merged_neighbor_dispose(),
4106                                  * the vnode lock will not be acquired in
4107                                  * this call to vm_object_deallocate().
4108                                  */
4109                                 vm_object_deallocate(object);
4110                                 object = old_entry->object.vm_object;
4111                         } else {
4112                                 VM_OBJECT_WLOCK(object);
4113                                 vm_object_clear_flag(object, OBJ_ONEMAPPING);
4114                                 if (old_entry->cred != NULL) {
4115                                         KASSERT(object->cred == NULL,
4116                                             ("vmspace_fork both cred"));
4117                                         object->cred = old_entry->cred;
4118                                         object->charge = old_entry->end -
4119                                             old_entry->start;
4120                                         old_entry->cred = NULL;
4121                                 }
4122
4123                                 /*
4124                                  * Assert the correct state of the vnode
4125                                  * v_writecount while the object is locked, so
4126                                  * that it does not have to be relocked later
4127                                  * just for the assertion.
4128                                  */
4129                                 if (old_entry->eflags & MAP_ENTRY_WRITECNT &&
4130                                     object->type == OBJT_VNODE) {
4131                                         KASSERT(((struct vnode *)object->
4132                                             handle)->v_writecount > 0,
4133                                             ("vmspace_fork: v_writecount %p",
4134                                             object));
4135                                         KASSERT(object->un_pager.vnp.
4136                                             writemappings > 0,
4137                                             ("vmspace_fork: vnp.writecount %p",
4138                                             object));
4139                                 }
4140                                 VM_OBJECT_WUNLOCK(object);
4141                         }
4142
4143                         /*
4144                          * Clone the entry, referencing the shared object.
4145                          */
4146                         new_entry = vm_map_entry_create(new_map);
4147                         *new_entry = *old_entry;
4148                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4149                             MAP_ENTRY_IN_TRANSITION);
4150                         new_entry->wiring_thread = NULL;
4151                         new_entry->wired_count = 0;
4152                         if (new_entry->eflags & MAP_ENTRY_WRITECNT) {
4153                                 vm_pager_update_writecount(object,
4154                                     new_entry->start, new_entry->end);
4155                         }
4156                         vm_map_entry_set_vnode_text(new_entry, true);
4157
4158                         /*
4159                          * Insert the entry into the new map -- we know we're
4160                          * inserting at the end of the new map.
4161                          */
4162                         vm_map_entry_link(new_map, new_entry);
4163                         vmspace_map_entry_forked(vm1, vm2, new_entry);
4164
4165                         /*
4166                          * Update the physical map
4167                          */
4168                         pmap_copy(new_map->pmap, old_map->pmap,
4169                             new_entry->start,
4170                             (old_entry->end - old_entry->start),
4171                             old_entry->start);
4172                         break;
4173
4174                 case VM_INHERIT_COPY:
4175                         /*
4176                          * Clone the entry and link into the map.
4177                          */
4178                         new_entry = vm_map_entry_create(new_map);
4179                         *new_entry = *old_entry;
4180                         /*
4181                          * Copied entry is COW over the old object.
4182                          */
4183                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4184                             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT);
4185                         new_entry->wiring_thread = NULL;
4186                         new_entry->wired_count = 0;
4187                         new_entry->object.vm_object = NULL;
4188                         new_entry->cred = NULL;
4189                         vm_map_entry_link(new_map, new_entry);
4190                         vmspace_map_entry_forked(vm1, vm2, new_entry);
4191                         vm_map_copy_entry(old_map, new_map, old_entry,
4192                             new_entry, fork_charge);
4193                         vm_map_entry_set_vnode_text(new_entry, true);
4194                         break;
4195
4196                 case VM_INHERIT_ZERO:
4197                         /*
4198                          * Create a new anonymous mapping entry modelled on
4199                          * the old one.
4200                          */
4201                         new_entry = vm_map_entry_create(new_map);
4202                         memset(new_entry, 0, sizeof(*new_entry));
4203
4204                         new_entry->start = old_entry->start;
4205                         new_entry->end = old_entry->end;
4206                         new_entry->eflags = old_entry->eflags &
4207                             ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
4208                             MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC);
4209                         new_entry->protection = old_entry->protection;
4210                         new_entry->max_protection = old_entry->max_protection;
4211                         new_entry->inheritance = VM_INHERIT_ZERO;
4212
4213                         vm_map_entry_link(new_map, new_entry);
4214                         vmspace_map_entry_forked(vm1, vm2, new_entry);
4215
4216                         new_entry->cred = curthread->td_ucred;
4217                         crhold(new_entry->cred);
4218                         *fork_charge += (new_entry->end - new_entry->start);
4219
4220                         break;
4221                 }
4222         }
4223         /*
4224          * Use inlined vm_map_unlock() to postpone handling the deferred
4225          * map entries, which cannot be done until both old_map and
4226          * new_map locks are released.
4227          */
4228         sx_xunlock(&old_map->lock);
4229         sx_xunlock(&new_map->lock);
4230         vm_map_process_deferred();
4231
4232         return (vm2);
4233 }
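/*
 * Editorial sketch (not part of the original source): the expected caller
 * pattern for vmspace_fork(), mirroring vmspace_unshare() below.  The
 * accumulated fork_charge must be backed by a swap reservation against the
 * new owner's credentials; the process names are illustrative only.
 *
 *	vm_ooffset_t fork_charge = 0;
 *	struct vmspace *vm2;
 *
 *	vm2 = vmspace_fork(p1->p_vmspace, &fork_charge);
 *	if (vm2 == NULL)
 *		return (ENOMEM);
 *	if (!swap_reserve_by_cred(fork_charge, p2->p_ucred)) {
 *		vmspace_free(vm2);
 *		return (ENOMEM);
 *	}
 *	p2->p_vmspace = vm2;
 */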
4234
4235 /*
4236  * Create a process's stack for exec_new_vmspace().  This function is never
4237  * asked to wire the newly created stack.
4238  */
4239 int
4240 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4241     vm_prot_t prot, vm_prot_t max, int cow)
4242 {
4243         vm_size_t growsize, init_ssize;
4244         rlim_t vmemlim;
4245         int rv;
4246
4247         MPASS((map->flags & MAP_WIREFUTURE) == 0);
4248         growsize = sgrowsiz;
4249         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
4250         vm_map_lock(map);
4251         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4252         /* If we would blow our VMEM resource limit, no go */
4253         if (map->size + init_ssize > vmemlim) {
4254                 rv = KERN_NO_SPACE;
4255                 goto out;
4256         }
4257         rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
4258             max, cow);
4259 out:
4260         vm_map_unlock(map);
4261         return (rv);
4262 }
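/*
 * Editorial sketch (not part of the original source): a typical caller,
 * modelled on exec-time stack setup, places a downward-growing stack just
 * below the user stack ceiling.  "stack_top" and "maxssiz" are illustrative
 * names here.
 *
 *	rv = vm_map_stack(map, stack_top - maxssiz, maxssiz,
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
 *	if (rv != KERN_SUCCESS)
 *		return (vm_mmap_to_errno(rv));
 */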
4263
4264 static int stack_guard_page = 1;
4265 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
4266     &stack_guard_page, 0,
4267     "Specifies the number of guard pages for a stack that grows");
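/*
 * Editorial note (not part of the original source): because of CTLFLAG_RWTUN,
 * this knob is both a loader tunable and a writable sysctl named
 * security.bsd.stack_guard_page, e.g.:
 *
 *	sysctl security.bsd.stack_guard_page=2
 */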
4268
4269 static int
4270 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4271     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
4272 {
4273         vm_map_entry_t new_entry, prev_entry;
4274         vm_offset_t bot, gap_bot, gap_top, top;
4275         vm_size_t init_ssize, sgp;
4276         int orient, rv;
4277
4278         /*
4279          * The stack orientation is piggybacked with the cow argument.
4280          * Extract it into orient and mask the cow argument so that we
4281          * don't pass it around further.
4282          */
4283         orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
4284         KASSERT(orient != 0, ("No stack grow direction"));
4285         KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
4286             ("bi-dir stack"));
4287
4288         if (max_ssize == 0 ||
4289             !vm_map_range_valid(map, addrbos, addrbos + max_ssize))
4290                 return (KERN_INVALID_ADDRESS);
4291         sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4292             (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4293             (vm_size_t)stack_guard_page * PAGE_SIZE;
4294         if (sgp >= max_ssize)
4295                 return (KERN_INVALID_ARGUMENT);
4296
4297         init_ssize = growsize;
4298         if (max_ssize < init_ssize + sgp)
4299                 init_ssize = max_ssize - sgp;
4300
4301         /* If addr is already mapped, no go */
4302         if (vm_map_lookup_entry(map, addrbos, &prev_entry))
4303                 return (KERN_NO_SPACE);
4304
4305         /*
4306          * If we can't accommodate max_ssize in the current mapping, no go.
4307          */
4308         if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize)
4309                 return (KERN_NO_SPACE);
4310
4311         /*
4312          * We initially map a stack of only init_ssize.  We will grow as
4313          * needed later.  Depending on the orientation of the stack (i.e.
4314          * the grow direction) we either map at the top of the range, the
4315          * bottom of the range or in the middle.
4316          *
4317          * Note: we would normally expect prot and max to be VM_PROT_ALL,
4318          * and cow to be 0.  Possibly we should eliminate these as input
4319          * parameters, and just pass these values here in the insert call.
4320          */
4321         if (orient == MAP_STACK_GROWS_DOWN) {
4322                 bot = addrbos + max_ssize - init_ssize;
4323                 top = bot + init_ssize;
4324                 gap_bot = addrbos;
4325                 gap_top = bot;
4326         } else /* if (orient == MAP_STACK_GROWS_UP) */ {
4327                 bot = addrbos;
4328                 top = bot + init_ssize;
4329                 gap_bot = top;
4330                 gap_top = addrbos + max_ssize;
4331         }
4332         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
4333         if (rv != KERN_SUCCESS)
4334                 return (rv);
4335         new_entry = vm_map_entry_succ(prev_entry);
4336         KASSERT(new_entry->end == top || new_entry->start == bot,
4337             ("Bad entry start/end for new stack entry"));
4338         KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
4339             (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
4340             ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
4341         KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
4342             (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
4343             ("new entry lacks MAP_ENTRY_GROWS_UP"));
4344         if (gap_bot == gap_top)
4345                 return (KERN_SUCCESS);
4346         rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
4347             VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
4348             MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
4349         if (rv == KERN_SUCCESS) {
4350                 /*
4351                  * A gap can never successfully handle a fault, so
4352                  * read-ahead logic is never used for it.  Reuse the
4353                  * next_read field of the gap entry to store
4354                  * stack_guard_page for vm_map_growstack().
4355                  */
4356                 if (orient == MAP_STACK_GROWS_DOWN)
4357                         vm_map_entry_pred(new_entry)->next_read = sgp;
4358                 else
4359                         vm_map_entry_succ(new_entry)->next_read = sgp;
4360         } else {
4361                 (void)vm_map_delete(map, bot, top);
4362         }
4363         return (rv);
4364 }
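/*
 * Editorial sketch (not part of the original source): the layout produced by
 * vm_map_stack_locked() for MAP_STACK_GROWS_DOWN.  With addrbos = A,
 * max_ssize = M and init_ssize = I, the guard/grow gap occupies
 * [A, A + M - I) and the initially mapped stack occupies [A + M - I, A + M).
 * For example, assuming sgrowsiz is 128 KB, M = 8 MB yields an immediately
 * mapped top 128 KB with the remaining ~7.9 MB reserved by the
 * MAP_CREATE_STACK_GAP_DN entry.
 */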
4365
4366 /*
4367  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
4368  * successfully grow the stack.
4369  */
4370 static int
4371 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
4372 {
4373         vm_map_entry_t stack_entry;
4374         struct proc *p;
4375         struct vmspace *vm;
4376         struct ucred *cred;
4377         vm_offset_t gap_end, gap_start, grow_start;
4378         vm_size_t grow_amount, guard, max_grow;
4379         rlim_t lmemlim, stacklim, vmemlim;
4380         int rv, rv1;
4381         bool gap_deleted, grow_down, is_procstack;
4382 #ifdef notyet
4383         uint64_t limit;
4384 #endif
4385 #ifdef RACCT
4386         int error;
4387 #endif
4388
4389         p = curproc;
4390         vm = p->p_vmspace;
4391
4392         /*
4393          * Disallow stack growth when the access is performed by a
4394          * debugger or AIO daemon.  The reason is that the wrong
4395          * resource limits are applied.
4396          */
4397         if (p != initproc && (map != &p->p_vmspace->vm_map ||
4398             p->p_textvp == NULL))
4399                 return (KERN_FAILURE);
4400
4401         MPASS(!map->system_map);
4402
4403         lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
4404         stacklim = lim_cur(curthread, RLIMIT_STACK);
4405         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4406 retry:
4407         /* If addr is not in a hole for a stack grow area, no need to grow. */
4408         if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
4409                 return (KERN_FAILURE);
4410         if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
4411                 return (KERN_SUCCESS);
4412         if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
4413                 stack_entry = vm_map_entry_succ(gap_entry);
4414                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
4415                     stack_entry->start != gap_entry->end)
4416                         return (KERN_FAILURE);
4417                 grow_amount = round_page(stack_entry->start - addr);
4418                 grow_down = true;
4419         } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
4420                 stack_entry = vm_map_entry_pred(gap_entry);
4421                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
4422                     stack_entry->end != gap_entry->start)
4423                         return (KERN_FAILURE);
4424                 grow_amount = round_page(addr + 1 - stack_entry->end);
4425                 grow_down = false;
4426         } else {
4427                 return (KERN_FAILURE);
4428         }
4429         guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4430             (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4431             gap_entry->next_read;
4432         max_grow = gap_entry->end - gap_entry->start;
4433         if (guard > max_grow)
4434                 return (KERN_NO_SPACE);
4435         max_grow -= guard;
4436         if (grow_amount > max_grow)
4437                 return (KERN_NO_SPACE);
4438
4439         /*
4440          * If this is the main process stack, see if we're over the stack
4441          * limit.
4442          */
4443         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
4444             addr < (vm_offset_t)p->p_sysent->sv_usrstack;
4445         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
4446                 return (KERN_NO_SPACE);
4447
4448 #ifdef RACCT
4449         if (racct_enable) {
4450                 PROC_LOCK(p);
4451                 if (is_procstack && racct_set(p, RACCT_STACK,
4452                     ctob(vm->vm_ssize) + grow_amount)) {
4453                         PROC_UNLOCK(p);
4454                         return (KERN_NO_SPACE);
4455                 }
4456                 PROC_UNLOCK(p);
4457         }
4458 #endif
4459
4460         grow_amount = roundup(grow_amount, sgrowsiz);
4461         if (grow_amount > max_grow)
4462                 grow_amount = max_grow;
4463         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
4464                 grow_amount = trunc_page((vm_size_t)stacklim) -
4465                     ctob(vm->vm_ssize);
4466         }
4467
4468 #ifdef notyet
4469         PROC_LOCK(p);
4470         limit = racct_get_available(p, RACCT_STACK);
4471         PROC_UNLOCK(p);
4472         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
4473                 grow_amount = limit - ctob(vm->vm_ssize);
4474 #endif
4475
4476         if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
4477                 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
4478                         rv = KERN_NO_SPACE;
4479                         goto out;
4480                 }
4481 #ifdef RACCT
4482                 if (racct_enable) {
4483                         PROC_LOCK(p);
4484                         if (racct_set(p, RACCT_MEMLOCK,
4485                             ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
4486                                 PROC_UNLOCK(p);
4487                                 rv = KERN_NO_SPACE;
4488                                 goto out;
4489                         }
4490                         PROC_UNLOCK(p);
4491                 }
4492 #endif
4493         }
4494
4495         /* If we would blow our VMEM resource limit, no go */
4496         if (map->size + grow_amount > vmemlim) {
4497                 rv = KERN_NO_SPACE;
4498                 goto out;
4499         }
4500 #ifdef RACCT
4501         if (racct_enable) {
4502                 PROC_LOCK(p);
4503                 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
4504                         PROC_UNLOCK(p);
4505                         rv = KERN_NO_SPACE;
4506                         goto out;
4507                 }
4508                 PROC_UNLOCK(p);
4509         }
4510 #endif
4511
4512         if (vm_map_lock_upgrade(map)) {
4513                 gap_entry = NULL;
4514                 vm_map_lock_read(map);
4515                 goto retry;
4516         }
4517
4518         if (grow_down) {
4519                 grow_start = gap_entry->end - grow_amount;
4520                 if (gap_entry->start + grow_amount == gap_entry->end) {
4521                         gap_start = gap_entry->start;
4522                         gap_end = gap_entry->end;
4523                         vm_map_entry_delete(map, gap_entry);
4524                         gap_deleted = true;
4525                 } else {
4526                         MPASS(gap_entry->start < gap_entry->end - grow_amount);
4527                         vm_map_entry_resize(map, gap_entry, -grow_amount);
4528                         gap_deleted = false;
4529                 }
4530                 rv = vm_map_insert(map, NULL, 0, grow_start,
4531                     grow_start + grow_amount,
4532                     stack_entry->protection, stack_entry->max_protection,
4533                     MAP_STACK_GROWS_DOWN);
4534                 if (rv != KERN_SUCCESS) {
4535                         if (gap_deleted) {
4536                                 rv1 = vm_map_insert(map, NULL, 0, gap_start,
4537                                     gap_end, VM_PROT_NONE, VM_PROT_NONE,
4538                                     MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
4539                                 MPASS(rv1 == KERN_SUCCESS);
4540                         } else
4541                                 vm_map_entry_resize(map, gap_entry,
4542                                     grow_amount);
4543                 }
4544         } else {
4545                 grow_start = stack_entry->end;
4546                 cred = stack_entry->cred;
4547                 if (cred == NULL && stack_entry->object.vm_object != NULL)
4548                         cred = stack_entry->object.vm_object->cred;
4549                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
4550                         rv = KERN_NO_SPACE;
4551                 /* Grow the underlying object if applicable. */
4552                 else if (stack_entry->object.vm_object == NULL ||
4553                     vm_object_coalesce(stack_entry->object.vm_object,
4554                     stack_entry->offset,
4555                     (vm_size_t)(stack_entry->end - stack_entry->start),
4556                     grow_amount, cred != NULL)) {
4557                         if (gap_entry->start + grow_amount == gap_entry->end) {
4558                                 vm_map_entry_delete(map, gap_entry);
4559                                 vm_map_entry_resize(map, stack_entry,
4560                                     grow_amount);
4561                         } else {
4562                                 gap_entry->start += grow_amount;
4563                                 stack_entry->end += grow_amount;
4564                         }
4565                         map->size += grow_amount;
4566                         rv = KERN_SUCCESS;
4567                 } else
4568                         rv = KERN_FAILURE;
4569         }
4570         if (rv == KERN_SUCCESS && is_procstack)
4571                 vm->vm_ssize += btoc(grow_amount);
4572
4573         /*
4574          * Heed the MAP_WIREFUTURE flag if it was set for this process.
4575          */
4576         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
4577                 rv = vm_map_wire_locked(map, grow_start,
4578                     grow_start + grow_amount,
4579                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
4580         }
4581         vm_map_lock_downgrade(map);
4582
4583 out:
4584 #ifdef RACCT
4585         if (racct_enable && rv != KERN_SUCCESS) {
4586                 PROC_LOCK(p);
4587                 error = racct_set(p, RACCT_VMEM, map->size);
4588                 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
4589                 if (!old_mlock) {
4590                         error = racct_set(p, RACCT_MEMLOCK,
4591                             ptoa(pmap_wired_count(map->pmap)));
4592                         KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
4593                 }
4594                 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
4595                 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
4596                 PROC_UNLOCK(p);
4597         }
4598 #endif
4599
4600         return (rv);
4601 }
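/*
 * Editorial sketch (not part of the original source): a worked example of the
 * grow-amount computation above for a downward-growing stack.  Assume 4 KB
 * pages, sgrowsiz of 128 KB, a faulting address 5 KB below stack_entry->start,
 * and a gap entry 1 MB long whose next_read guard is one page.  Then
 * grow_amount = round_page(5 KB) = 8 KB and max_grow = 1 MB - 4 KB; after
 * roundup(grow_amount, sgrowsiz) the stack actually grows by 128 KB, unless
 * that would exceed RLIMIT_STACK, in which case it is truncated to the limit.
 */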
4602
4603 /*
4604  * Unshare the specified VM space for exec.  If other processes are
4605  * mapped to it, then create a new one.  The new vmspace starts out empty.
4606  */
4607 int
4608 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
4609 {
4610         struct vmspace *oldvmspace = p->p_vmspace;
4611         struct vmspace *newvmspace;
4612
4613         KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
4614             ("vmspace_exec recursed"));
4615         newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
4616         if (newvmspace == NULL)
4617                 return (ENOMEM);
4618         newvmspace->vm_swrss = oldvmspace->vm_swrss;
4619         /*
4620          * This code is written like this for prototype purposes.  The
4621          * goal is to avoid running down the vmspace here, but to let the
4622          * other processes that are still using the vmspace run it down
4623          * eventually.  Even though there is little or no chance of
4624          * blocking here, it is a good idea to keep this form for future mods.
4625          */
4626         PROC_VMSPACE_LOCK(p);
4627         p->p_vmspace = newvmspace;
4628         PROC_VMSPACE_UNLOCK(p);
4629         if (p == curthread->td_proc)
4630                 pmap_activate(curthread);
4631         curthread->td_pflags |= TDP_EXECVMSPC;
4632         return (0);
4633 }
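/*
 * Editorial sketch (not part of the original source): vmspace_exec() does not
 * release the old vmspace itself; the exec path is expected to remember it and
 * drop the reference once TDP_EXECVMSPC is observed, roughly (names such as
 * sv_minuser/sv_maxuser are illustrative here):
 *
 *	struct vmspace *oldvm = p->p_vmspace;
 *
 *	error = vmspace_exec(p, sv_minuser, sv_maxuser);
 *	if (error != 0)
 *		return (error);
 *	...
 *	if ((curthread->td_pflags & TDP_EXECVMSPC) != 0) {
 *		vmspace_free(oldvm);
 *		curthread->td_pflags &= ~TDP_EXECVMSPC;
 *	}
 */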
4634
4635 /*
4636  * Unshare the specified VM space for forcing COW.  This
4637  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
4638  */
4639 int
4640 vmspace_unshare(struct proc *p)
4641 {
4642         struct vmspace *oldvmspace = p->p_vmspace;
4643         struct vmspace *newvmspace;
4644         vm_ooffset_t fork_charge;
4645
4646         if (oldvmspace->vm_refcnt == 1)
4647                 return (0);
4648         fork_charge = 0;
4649         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
4650         if (newvmspace == NULL)
4651                 return (ENOMEM);
4652         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
4653                 vmspace_free(newvmspace);
4654                 return (ENOMEM);
4655         }
4656         PROC_VMSPACE_LOCK(p);
4657         p->p_vmspace = newvmspace;
4658         PROC_VMSPACE_UNLOCK(p);
4659         if (p == curthread->td_proc)
4660                 pmap_activate(curthread);
4661         vmspace_free(oldvmspace);
4662         return (0);
4663 }
4664
4665 /*
4666  *      vm_map_lookup:
4667  *
4668  *      Finds the VM object, offset, and
4669  *      protection for a given virtual address in the
4670  *      specified map, assuming a page fault of the
4671  *      type specified.
4672  *
4673  *      Leaves the map in question locked for read; return
4674  *      values are guaranteed until a vm_map_lookup_done
4675  *      call is performed.  Note that the map argument
4676  *      is in/out; the returned map must be used in
4677  *      the call to vm_map_lookup_done.
4678  *
4679  *      A handle (out_entry) is returned for use in
4680  *      vm_map_lookup_done, to make that fast.
4681  *
4682  *      If a lookup is requested with "write protection"
4683  *      specified, the map may be changed to perform virtual
4684  *      copying operations, although the data referenced will
4685  *      remain the same.
4686  */
4687 int
4688 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
4689               vm_offset_t vaddr,
4690               vm_prot_t fault_typea,
4691               vm_map_entry_t *out_entry,        /* OUT */
4692               vm_object_t *object,              /* OUT */
4693               vm_pindex_t *pindex,              /* OUT */
4694               vm_prot_t *out_prot,              /* OUT */
4695               boolean_t *wired)                 /* OUT */
4696 {
4697         vm_map_entry_t entry;
4698         vm_map_t map = *var_map;
4699         vm_prot_t prot;
4700         vm_prot_t fault_type;
4701         vm_object_t eobject;
4702         vm_size_t size;
4703         struct ucred *cred;
4704
4705 RetryLookup:
4706
4707         vm_map_lock_read(map);
4708
4709 RetryLookupLocked:
4710         /*
4711          * Lookup the faulting address.
4712          */
4713         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
4714                 vm_map_unlock_read(map);
4715                 return (KERN_INVALID_ADDRESS);
4716         }
4717
4718         entry = *out_entry;
4719
4720         /*
4721          * Handle submaps.
4722          */
4723         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4724                 vm_map_t old_map = map;
4725
4726                 *var_map = map = entry->object.sub_map;
4727                 vm_map_unlock_read(old_map);
4728                 goto RetryLookup;
4729         }
4730
4731         /*
4732          * Check whether this task is allowed to have this page.
4733          */
4734         prot = entry->protection;
4735         if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
4736                 fault_typea &= ~VM_PROT_FAULT_LOOKUP;
4737                 if (prot == VM_PROT_NONE && map != kernel_map &&
4738                     (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4739                     (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
4740                     MAP_ENTRY_STACK_GAP_UP)) != 0 &&
4741                     vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
4742                         goto RetryLookupLocked;
4743         }
4744         fault_type = fault_typea & VM_PROT_ALL;
4745         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4746                 vm_map_unlock_read(map);
4747                 return (KERN_PROTECTION_FAILURE);
4748         }
4749         KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4750             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4751             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4752             ("entry %p flags %x", entry, entry->eflags));
4753         if ((fault_typea & VM_PROT_COPY) != 0 &&
4754             (entry->max_protection & VM_PROT_WRITE) == 0 &&
4755             (entry->eflags & MAP_ENTRY_COW) == 0) {
4756                 vm_map_unlock_read(map);
4757                 return (KERN_PROTECTION_FAILURE);
4758         }
4759
4760         /*
4761          * If this page is not pageable, we have to get it for all possible
4762          * accesses.
4763          */
4764         *wired = (entry->wired_count != 0);
4765         if (*wired)
4766                 fault_type = entry->protection;
4767         size = entry->end - entry->start;
4768
4769         /*
4770          * If the entry was copy-on-write, we either ...
4771          */
4772         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4773                 /*
4774                  * If we want to write the page, we may as well handle that
4775                  * now since we've got the map locked.
4776                  *
4777                  * If we don't need to write the page, we just demote the
4778                  * permissions allowed.
4779                  */
4780                 if ((fault_type & VM_PROT_WRITE) != 0 ||
4781                     (fault_typea & VM_PROT_COPY) != 0) {
4782                         /*
4783                          * Make a new object, and place it in the object
4784                          * chain.  Note that no new references have appeared
4785                          * -- one just moved from the map to the new
4786                          * object.
4787                          */
4788                         if (vm_map_lock_upgrade(map))
4789                                 goto RetryLookup;
4790
4791                         if (entry->cred == NULL) {
4792                                 /*
4793                                  * The debugger owner is charged for
4794                                  * the memory.
4795                                  */
4796                                 cred = curthread->td_ucred;
4797                                 crhold(cred);
4798                                 if (!swap_reserve_by_cred(size, cred)) {
4799                                         crfree(cred);
4800                                         vm_map_unlock(map);
4801                                         return (KERN_RESOURCE_SHORTAGE);
4802                                 }
4803                                 entry->cred = cred;
4804                         }
4805                         eobject = entry->object.vm_object;
4806                         vm_object_shadow(&entry->object.vm_object,
4807                             &entry->offset, size, entry->cred, false);
4808                         if (eobject == entry->object.vm_object) {
4809                                 /*
4810                                  * The object was not shadowed.
4811                                  */
4812                                 swap_release_by_cred(size, entry->cred);
4813                                 crfree(entry->cred);
4814                         }
4815                         entry->cred = NULL;
4816                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4817
4818                         vm_map_lock_downgrade(map);
4819                 } else {
4820                         /*
4821                          * We're attempting to read a copy-on-write page --
4822                          * don't allow writes.
4823                          */
4824                         prot &= ~VM_PROT_WRITE;
4825                 }
4826         }
4827
4828         /*
4829          * Create an object if necessary.
4830          */
4831         if (entry->object.vm_object == NULL && !map->system_map) {
4832                 if (vm_map_lock_upgrade(map))
4833                         goto RetryLookup;
4834                 entry->object.vm_object = vm_object_allocate_anon(atop(size),
4835                     NULL, entry->cred, entry->cred != NULL ? size : 0);
4836                 entry->offset = 0;
4837                 entry->cred = NULL;
4838                 vm_map_lock_downgrade(map);
4839         }
4840
4841         /*
4842          * Return the object/offset from this entry.  If the entry was
4843          * copy-on-write or empty, it has been fixed up.
4844          */
4845         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4846         *object = entry->object.vm_object;
4847
4848         *out_prot = prot;
4849         return (KERN_SUCCESS);
4850 }
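/*
 * Editorial sketch (not part of the original source): the expected pairing of
 * vm_map_lookup() with vm_map_lookup_done(), fault-handler style.  Note that
 * the map argument is in/out and the possibly updated map must be passed back
 * to vm_map_lookup_done().
 *
 *	vm_map_entry_t entry;
 *	vm_object_t object;
 *	vm_pindex_t pindex;
 *	vm_prot_t prot;
 *	boolean_t wired;
 *	int rv;
 *
 *	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... use object/pindex while holding the read lock ...
 *	vm_map_lookup_done(map, entry);
 */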
4851
4852 /*
4853  *      vm_map_lookup_locked:
4854  *
4855  *      Lookup the faulting address.  A version of vm_map_lookup that returns 
4856  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
4857  */
4858 int
4859 vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
4860                      vm_offset_t vaddr,
4861                      vm_prot_t fault_typea,
4862                      vm_map_entry_t *out_entry, /* OUT */
4863                      vm_object_t *object,       /* OUT */
4864                      vm_pindex_t *pindex,       /* OUT */
4865                      vm_prot_t *out_prot,       /* OUT */
4866                      boolean_t *wired)          /* OUT */
4867 {
4868         vm_map_entry_t entry;
4869         vm_map_t map = *var_map;
4870         vm_prot_t prot;
4871         vm_prot_t fault_type = fault_typea;
4872
4873         /*
4874          * Lookup the faulting address.
4875          */
4876         if (!vm_map_lookup_entry(map, vaddr, out_entry))
4877                 return (KERN_INVALID_ADDRESS);
4878
4879         entry = *out_entry;
4880
4881         /*
4882          * Fail if the entry refers to a submap.
4883          */
4884         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4885                 return (KERN_FAILURE);
4886
4887         /*
4888          * Check whether this task is allowed to have this page.
4889          */
4890         prot = entry->protection;
4891         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4892         if ((fault_type & prot) != fault_type)
4893                 return (KERN_PROTECTION_FAILURE);
4894
4895         /*
4896          * If this page is not pageable, we have to get it for all possible
4897          * accesses.
4898          */
4899         *wired = (entry->wired_count != 0);
4900         if (*wired)
4901                 fault_type = entry->protection;
4902
4903         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4904                 /*
4905                  * Fail if the entry was copy-on-write for a write fault.
4906                  */
4907                 if (fault_type & VM_PROT_WRITE)
4908                         return (KERN_FAILURE);
4909                 /*
4910                  * We're attempting to read a copy-on-write page --
4911                  * don't allow writes.
4912                  */
4913                 prot &= ~VM_PROT_WRITE;
4914         }
4915
4916         /*
4917          * Fail if an object should be created.
4918          */
4919         if (entry->object.vm_object == NULL && !map->system_map)
4920                 return (KERN_FAILURE);
4921
4922         /*
4923          * Return the object/offset from this entry.  If the entry was
4924          * copy-on-write or empty, it has been fixed up.
4925          */
4926         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4927         *object = entry->object.vm_object;
4928
4929         *out_prot = prot;
4930         return (KERN_SUCCESS);
4931 }
4932
4933 /*
4934  *      vm_map_lookup_done:
4935  *
4936  *      Releases locks acquired by a vm_map_lookup
4937  *      (according to the handle returned by that lookup).
4938  */
4939 void
4940 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4941 {
4942         /*
4943          * Unlock the main-level map
4944          */
4945         vm_map_unlock_read(map);
4946 }
4947
4948 vm_offset_t
4949 vm_map_max_KBI(const struct vm_map *map)
4950 {
4951
4952         return (vm_map_max(map));
4953 }
4954
4955 vm_offset_t
4956 vm_map_min_KBI(const struct vm_map *map)
4957 {
4958
4959         return (vm_map_min(map));
4960 }
4961
4962 pmap_t
4963 vm_map_pmap_KBI(vm_map_t map)
4964 {
4965
4966         return (map->pmap);
4967 }
4968
4969 bool
4970 vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end)
4971 {
4972
4973         return (vm_map_range_valid(map, start, end));
4974 }
4975
4976 #ifdef INVARIANTS
4977 static void
4978 _vm_map_assert_consistent(vm_map_t map, int check)
4979 {
4980         vm_map_entry_t entry, prev;
4981         vm_map_entry_t cur, header, lbound, ubound;
4982         vm_size_t max_left, max_right;
4983
4984 #ifdef DIAGNOSTIC
4985         ++map->nupdates;
4986 #endif
4987         if (enable_vmmap_check != check)
4988                 return;
4989
4990         header = prev = &map->header;
4991         VM_MAP_ENTRY_FOREACH(entry, map) {
4992                 KASSERT(prev->end <= entry->start,
4993                     ("map %p prev->end = %jx, start = %jx", map,
4994                     (uintmax_t)prev->end, (uintmax_t)entry->start));
4995                 KASSERT(entry->start < entry->end,
4996                     ("map %p start = %jx, end = %jx", map,
4997                     (uintmax_t)entry->start, (uintmax_t)entry->end));
4998                 KASSERT(entry->left == header ||
4999                     entry->left->start < entry->start,
5000                     ("map %p left->start = %jx, start = %jx", map,
5001                     (uintmax_t)entry->left->start, (uintmax_t)entry->start));
5002                 KASSERT(entry->right == header ||
5003                     entry->start < entry->right->start,
5004                     ("map %p start = %jx, right->start = %jx", map,
5005                     (uintmax_t)entry->start, (uintmax_t)entry->right->start));
5006                 cur = map->root;
5007                 lbound = ubound = header;
5008                 for (;;) {
5009                         if (entry->start < cur->start) {
5010                                 ubound = cur;
5011                                 cur = cur->left;
5012                                 KASSERT(cur != lbound,
5013                                     ("map %p cannot find %jx",
5014                                     map, (uintmax_t)entry->start));
5015                         } else if (cur->end <= entry->start) {
5016                                 lbound = cur;
5017                                 cur = cur->right;
5018                                 KASSERT(cur != ubound,
5019                                     ("map %p cannot find %jx",
5020                                     map, (uintmax_t)entry->start));
5021                         } else {
5022                                 KASSERT(cur == entry,
5023                                     ("map %p cannot find %jx",
5024                                     map, (uintmax_t)entry->start));
5025                                 break;
5026                         }
5027                 }
5028                 max_left = vm_map_entry_max_free_left(entry, lbound);
5029                 max_right = vm_map_entry_max_free_right(entry, ubound);
5030                 KASSERT(entry->max_free == vm_size_max(max_left, max_right),
5031                     ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
5032                     (uintmax_t)entry->max_free,
5033                     (uintmax_t)max_left, (uintmax_t)max_right));
5034                 prev = entry;
5035         }
5036         KASSERT(prev->end <= entry->start,
5037             ("map %p prev->end = %jx, start = %jx", map,
5038             (uintmax_t)prev->end, (uintmax_t)entry->start));
5039 }
5040 #endif
5041
5042 #include "opt_ddb.h"
5043 #ifdef DDB
5044 #include <sys/kernel.h>
5045
5046 #include <ddb/ddb.h>
5047
5048 static void
5049 vm_map_print(vm_map_t map)
5050 {
5051         vm_map_entry_t entry, prev;
5052
5053         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
5054             (void *)map,
5055             (void *)map->pmap, map->nentries, map->timestamp);
5056
5057         db_indent += 2;
5058         prev = &map->header;
5059         VM_MAP_ENTRY_FOREACH(entry, map) {
5060                 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
5061                     (void *)entry, (void *)entry->start, (void *)entry->end,
5062                     entry->eflags);
5063                 {
5064                         static const char * const inheritance_name[4] =
5065                         {"share", "copy", "none", "donate_copy"};
5066
5067                         db_iprintf(" prot=%x/%x/%s",
5068                             entry->protection,
5069                             entry->max_protection,
5070                             inheritance_name[(int)(unsigned char)
5071                             entry->inheritance]);
5072                         if (entry->wired_count != 0)
5073                                 db_printf(", wired");
5074                 }
5075                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5076                         db_printf(", share=%p, offset=0x%jx\n",
5077                             (void *)entry->object.sub_map,
5078                             (uintmax_t)entry->offset);
5079                         if (prev == &map->header ||
5080                             prev->object.sub_map !=
5081                                 entry->object.sub_map) {
5082                                 db_indent += 2;
5083                                 vm_map_print((vm_map_t)entry->object.sub_map);
5084                                 db_indent -= 2;
5085                         }
5086                 } else {
5087                         if (entry->cred != NULL)
5088                                 db_printf(", ruid %d", entry->cred->cr_ruid);
5089                         db_printf(", object=%p, offset=0x%jx",
5090                             (void *)entry->object.vm_object,
5091                             (uintmax_t)entry->offset);
5092                         if (entry->object.vm_object && entry->object.vm_object->cred)
5093                                 db_printf(", obj ruid %d charge %jx",
5094                                     entry->object.vm_object->cred->cr_ruid,
5095                                     (uintmax_t)entry->object.vm_object->charge);
5096                         if (entry->eflags & MAP_ENTRY_COW)
5097                                 db_printf(", copy (%s)",
5098                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
5099                         db_printf("\n");
5100
5101                         if (prev == &map->header ||
5102                             prev->object.vm_object !=
5103                                 entry->object.vm_object) {
5104                                 db_indent += 2;
5105                                 vm_object_print((db_expr_t)(intptr_t)
5106                                                 entry->object.vm_object,
5107                                                 0, 0, (char *)0);
5108                                 db_indent -= 2;
5109                         }
5110                 }
5111                 prev = entry;
5112         }
5113         db_indent -= 2;
5114 }
5115
5116 DB_SHOW_COMMAND(map, map)
5117 {
5118
5119         if (!have_addr) {
5120                 db_printf("usage: show map <addr>\n");
5121                 return;
5122         }
5123         vm_map_print((vm_map_t)addr);
5124 }
5125
5126 DB_SHOW_COMMAND(procvm, procvm)
5127 {
5128         struct proc *p;
5129
5130         if (have_addr) {
5131                 p = db_lookup_proc(addr);
5132         } else {
5133                 p = curproc;
5134         }
5135
5136         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
5137             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
5138             (void *)vmspace_pmap(p->p_vmspace));
5139
5140         vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
5141 }
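/*
 * Editorial note (not part of the original source): from the in-kernel
 * debugger the commands above are invoked as, e.g.:
 *
 *	db> show map <map addr>
 *	db> show procvm [<proc addr>]
 */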
5142
5143 #endif /* DDB */