1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  */
62
63 /*
64  *      Virtual memory mapping module.
65  */
66
67 #include <sys/cdefs.h>
68 __FBSDID("$FreeBSD$");
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/elf.h>
73 #include <sys/kernel.h>
74 #include <sys/ktr.h>
75 #include <sys/lock.h>
76 #include <sys/mutex.h>
77 #include <sys/proc.h>
78 #include <sys/vmmeter.h>
79 #include <sys/mman.h>
80 #include <sys/vnode.h>
81 #include <sys/racct.h>
82 #include <sys/resourcevar.h>
83 #include <sys/rwlock.h>
84 #include <sys/file.h>
85 #include <sys/sysctl.h>
86 #include <sys/sysent.h>
87 #include <sys/shm.h>
88
89 #include <vm/vm.h>
90 #include <vm/vm_param.h>
91 #include <vm/pmap.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/vm_object.h>
96 #include <vm/vm_pager.h>
97 #include <vm/vm_kern.h>
98 #include <vm/vm_extern.h>
99 #include <vm/vnode_pager.h>
100 #include <vm/swap_pager.h>
101 #include <vm/uma.h>
102
103 /*
104  *      Virtual memory maps provide for the mapping, protection,
105  *      and sharing of virtual memory objects.  In addition,
106  *      this module provides for an efficient virtual copy of
107  *      memory from one map to another.
108  *
109  *      Synchronization is required prior to most operations.
110  *
111  *      Maps consist of an ordered doubly-linked list of simple
112  *      entries; a self-adjusting binary search tree of these
113  *      entries is used to speed up lookups.
114  *
115  *      Since portions of maps are specified by start/end addresses,
116  *      which may not align with existing map entries, all
117  *      routines merely "clip" entries to these start/end values.
118  *      [That is, an entry is split into two, bordering at a
119  *      start or end value.]  Note that these clippings may not
120  *      always be necessary (as the two resulting entries are then
121  *      not changed); however, the clipping is done for convenience.
122  *
123  *      As mentioned above, virtual copy operations are performed
124  *      by copying VM object references from one map to
125  *      another, and then marking both regions as copy-on-write.
126  */
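/*
 *	An illustrative sketch of typical use (assuming the vm_map_lock_read()
 *	and vm_map_unlock_read() wrapper macros from vm_map.h, which front the
 *	_vm_map_lock_read() and _vm_map_unlock_read() functions below): a
 *	consumer read-locks a map, looks up the entry covering an address,
 *	and drops the lock when done, e.g.:
 *
 *		vm_map_lock_read(map);
 *		if (vm_map_lookup_entry(map, addr, &entry)) {
 *			... examine *entry while the lock is held ...
 *		}
 *		vm_map_unlock_read(map);
 */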
127
128 static struct mtx map_sleep_mtx;
129 static uma_zone_t mapentzone;
130 static uma_zone_t kmapentzone;
131 static uma_zone_t vmspace_zone;
132 static int vmspace_zinit(void *mem, int size, int flags);
133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
134     vm_offset_t max);
135 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
136 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
137 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
138 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
139     vm_map_entry_t gap_entry);
140 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
141     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
142 #ifdef INVARIANTS
143 static void vmspace_zdtor(void *mem, int size, void *arg);
144 #endif
145 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
146     vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
147     int cow);
148 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
149     vm_offset_t failed_addr);
150
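/*
 * An entry is considered charged when its backing pages are accounted to a
 * credential: either the entry itself holds a cred, or its object does and
 * the entry does not still need its own copy of that object.
 */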
151 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
152     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
153      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
154
155 /* 
156  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
157  * stable.
158  */
159 #define PROC_VMSPACE_LOCK(p) do { } while (0)
160 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
161
162 /*
163  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
164  *
165  *      Asserts that the starting and ending region
166  *      addresses fall within the valid range of the map.
167  */
168 #define VM_MAP_RANGE_CHECK(map, start, end)             \
169                 {                                       \
170                 if (start < vm_map_min(map))            \
171                         start = vm_map_min(map);        \
172                 if (end > vm_map_max(map))              \
173                         end = vm_map_max(map);          \
174                 if (start > end)                        \
175                         start = end;                    \
176                 }
177
178 /*
179  *      vm_map_startup:
180  *
181  *      Initialize the vm_map module.  Must be called before
182  *      any other vm_map routines.
183  *
184  *      Map and entry structures are allocated from the general
185  *      purpose memory pool with some exceptions:
186  *
187  *      - The kernel map and kmem submap are allocated statically.
188  *      - Kernel map entries are allocated out of a static pool.
189  *
190  *      These restrictions are necessary since malloc() uses the
191  *      maps and requires map entries.
192  */
193
194 void
195 vm_map_startup(void)
196 {
197         mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
198         kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
199             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
200             UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
201         mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
202             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
203         vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
204 #ifdef INVARIANTS
205             vmspace_zdtor,
206 #else
207             NULL,
208 #endif
209             vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
210 }
211
212 static int
213 vmspace_zinit(void *mem, int size, int flags)
214 {
215         struct vmspace *vm;
216         vm_map_t map;
217
218         vm = (struct vmspace *)mem;
219         map = &vm->vm_map;
220
221         memset(map, 0, sizeof(*map));
222         mtx_init(&map->system_mtx, "vm map (system)", NULL,
223             MTX_DEF | MTX_DUPOK);
224         sx_init(&map->lock, "vm map (user)");
225         PMAP_LOCK_INIT(vmspace_pmap(vm));
226         return (0);
227 }
228
229 #ifdef INVARIANTS
230 static void
231 vmspace_zdtor(void *mem, int size, void *arg)
232 {
233         struct vmspace *vm;
234
235         vm = (struct vmspace *)mem;
236         KASSERT(vm->vm_map.nentries == 0,
237             ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
238         KASSERT(vm->vm_map.size == 0,
239             ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
240 }
241 #endif  /* INVARIANTS */
242
243 /*
244  * Allocate a vmspace structure, including a vm_map and pmap,
245  * and initialize those structures.  The refcnt is set to 1.
246  */
247 struct vmspace *
248 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
249 {
250         struct vmspace *vm;
251
252         vm = uma_zalloc(vmspace_zone, M_WAITOK);
253         KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
254         if (!pinit(vmspace_pmap(vm))) {
255                 uma_zfree(vmspace_zone, vm);
256                 return (NULL);
257         }
258         CTR1(KTR_VM, "vmspace_alloc: %p", vm);
259         _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
260         vm->vm_refcnt = 1;
261         vm->vm_shm = NULL;
262         vm->vm_swrss = 0;
263         vm->vm_tsize = 0;
264         vm->vm_dsize = 0;
265         vm->vm_ssize = 0;
266         vm->vm_taddr = 0;
267         vm->vm_daddr = 0;
268         vm->vm_maxsaddr = 0;
269         return (vm);
270 }
271
272 #ifdef RACCT
273 static void
274 vmspace_container_reset(struct proc *p)
275 {
276
277         PROC_LOCK(p);
278         racct_set(p, RACCT_DATA, 0);
279         racct_set(p, RACCT_STACK, 0);
280         racct_set(p, RACCT_RSS, 0);
281         racct_set(p, RACCT_MEMLOCK, 0);
282         racct_set(p, RACCT_VMEM, 0);
283         PROC_UNLOCK(p);
284 }
285 #endif
286
287 static inline void
288 vmspace_dofree(struct vmspace *vm)
289 {
290
291         CTR1(KTR_VM, "vmspace_free: %p", vm);
292
293         /*
294          * Make sure any SysV shm is freed; it might not have been
295          * freed in exit1().
296          */
297         shmexit(vm);
298
299         /*
300          * Lock the map, to wait out all other references to it.
301          * Delete all of the mappings and pages they hold, then call
302          * the pmap module to reclaim anything left.
303          */
304         (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
305             vm_map_max(&vm->vm_map));
306
307         pmap_release(vmspace_pmap(vm));
308         vm->vm_map.pmap = NULL;
309         uma_zfree(vmspace_zone, vm);
310 }
311
312 void
313 vmspace_free(struct vmspace *vm)
314 {
315
316         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
317             "vmspace_free() called");
318
319         if (vm->vm_refcnt == 0)
320                 panic("vmspace_free: attempt to free already freed vmspace");
321
322         if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
323                 vmspace_dofree(vm);
324 }
325
326 void
327 vmspace_exitfree(struct proc *p)
328 {
329         struct vmspace *vm;
330
331         PROC_VMSPACE_LOCK(p);
332         vm = p->p_vmspace;
333         p->p_vmspace = NULL;
334         PROC_VMSPACE_UNLOCK(p);
335         KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
336         vmspace_free(vm);
337 }
338
339 void
340 vmspace_exit(struct thread *td)
341 {
342         int refcnt;
343         struct vmspace *vm;
344         struct proc *p;
345
346         /*
347          * Release user portion of address space.
348          * This releases references to vnodes,
349          * which could cause I/O if the file has been unlinked.
350          * Need to do this early enough that we can still sleep.
351          *
352          * The last exiting process to reach this point releases as
353          * much of the environment as it can. vmspace_dofree() is the
354          * slower fallback in case another process had a temporary
355          * reference to the vmspace.
356          */
357
358         p = td->td_proc;
359         vm = p->p_vmspace;
360         atomic_add_int(&vmspace0.vm_refcnt, 1);
361         refcnt = vm->vm_refcnt;
362         do {
363                 if (refcnt > 1 && p->p_vmspace != &vmspace0) {
364                         /* Switch now since other proc might free vmspace */
365                         PROC_VMSPACE_LOCK(p);
366                         p->p_vmspace = &vmspace0;
367                         PROC_VMSPACE_UNLOCK(p);
368                         pmap_activate(td);
369                 }
370         } while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt - 1));
371         if (refcnt == 1) {
372                 if (p->p_vmspace != vm) {
373                         /* vmspace not yet freed, switch back */
374                         PROC_VMSPACE_LOCK(p);
375                         p->p_vmspace = vm;
376                         PROC_VMSPACE_UNLOCK(p);
377                         pmap_activate(td);
378                 }
379                 pmap_remove_pages(vmspace_pmap(vm));
380                 /* Switch now since this proc will free vmspace */
381                 PROC_VMSPACE_LOCK(p);
382                 p->p_vmspace = &vmspace0;
383                 PROC_VMSPACE_UNLOCK(p);
384                 pmap_activate(td);
385                 vmspace_dofree(vm);
386         }
387 #ifdef RACCT
388         if (racct_enable)
389                 vmspace_container_reset(p);
390 #endif
391 }
392
393 /* Acquire reference to vmspace owned by another process. */
394
395 struct vmspace *
396 vmspace_acquire_ref(struct proc *p)
397 {
398         struct vmspace *vm;
399         int refcnt;
400
401         PROC_VMSPACE_LOCK(p);
402         vm = p->p_vmspace;
403         if (vm == NULL) {
404                 PROC_VMSPACE_UNLOCK(p);
405                 return (NULL);
406         }
407         refcnt = vm->vm_refcnt;
408         do {
409                 if (refcnt <= 0) {      /* Avoid 0->1 transition */
410                         PROC_VMSPACE_UNLOCK(p);
411                         return (NULL);
412                 }
413         } while (!atomic_fcmpset_int(&vm->vm_refcnt, &refcnt, refcnt + 1));
414         if (vm != p->p_vmspace) {
415                 PROC_VMSPACE_UNLOCK(p);
416                 vmspace_free(vm);
417                 return (NULL);
418         }
419         PROC_VMSPACE_UNLOCK(p);
420         return (vm);
421 }
422
423 /*
424  * Switch between vmspaces in an AIO kernel process.
425  *
426  * The new vmspace is either the vmspace of a user process obtained
427  * from an active AIO request or the initial vmspace of the AIO kernel
428  * process (when it is idling).  Because user processes will block to
429  * drain any active AIO requests before proceeding in exit() or
430  * execve(), the reference count for vmspaces from AIO requests can
431  * never be 0.  Similarly, AIO kernel processes hold an extra
432  * reference on their initial vmspace for the life of the process.  As
433  * a result, the 'newvm' vmspace always has a non-zero reference
434  * count.  This permits an additional reference on 'newvm' to be
435  * acquired via a simple atomic increment rather than the loop in
436  * vmspace_acquire_ref() above.
437  */
438 void
439 vmspace_switch_aio(struct vmspace *newvm)
440 {
441         struct vmspace *oldvm;
442
443         /* XXX: Need some way to assert that this is an aio daemon. */
444
445         KASSERT(newvm->vm_refcnt > 0,
446             ("vmspace_switch_aio: newvm unreferenced"));
447
448         oldvm = curproc->p_vmspace;
449         if (oldvm == newvm)
450                 return;
451
452         /*
453          * Point to the new address space and refer to it.
454          */
455         curproc->p_vmspace = newvm;
456         atomic_add_int(&newvm->vm_refcnt, 1);
457
458         /* Activate the new mapping. */
459         pmap_activate(curthread);
460
461         vmspace_free(oldvm);
462 }
463
464 void
465 _vm_map_lock(vm_map_t map, const char *file, int line)
466 {
467
468         if (map->system_map)
469                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
470         else
471                 sx_xlock_(&map->lock, file, line);
472         map->timestamp++;
473 }
474
475 void
476 vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
477 {
478         vm_object_t object;
479         struct vnode *vp;
480         bool vp_held;
481
482         if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
483                 return;
484         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
485             ("Submap with execs"));
486         object = entry->object.vm_object;
487         KASSERT(object != NULL, ("No object for text, entry %p", entry));
488         if ((object->flags & OBJ_ANON) != 0)
489                 object = object->handle;
490         else
491                 KASSERT(object->backing_object == NULL,
492                     ("non-anon object %p shadows", object));
493         KASSERT(object != NULL, ("No content object for text, entry %p obj %p",
494             entry, entry->object.vm_object));
495
496         /*
497          * Mostly, we do not lock the backing object.  It is
498          * referenced by the entry we are processing, so it cannot go
499          * away.
500          */
501         vp = NULL;
502         vp_held = false;
503         if (object->type == OBJT_DEAD) {
504                 /*
505                  * For OBJT_DEAD objects, v_writecount was handled in
506                  * vnode_pager_dealloc().
507                  */
508         } else if (object->type == OBJT_VNODE) {
509                 vp = object->handle;
510         } else if (object->type == OBJT_SWAP) {
511                 KASSERT((object->flags & OBJ_TMPFS_NODE) != 0,
512                     ("vm_map_entry_set_vnode_text: swap and !TMPFS "
513                     "entry %p, object %p, add %d", entry, object, add));
514                 /*
515                  * Tmpfs VREG node, which was reclaimed, has
516                  * OBJ_TMPFS_NODE flag set, but not OBJ_TMPFS.  In
517                  * this case there is no v_writecount to adjust.
518                  */
519                 VM_OBJECT_RLOCK(object);
520                 if ((object->flags & OBJ_TMPFS) != 0) {
521                         vp = object->un_pager.swp.swp_tmpfs;
522                         if (vp != NULL) {
523                                 vhold(vp);
524                                 vp_held = true;
525                         }
526                 }
527                 VM_OBJECT_RUNLOCK(object);
528         } else {
529                 KASSERT(0,
530                     ("vm_map_entry_set_vnode_text: wrong object type, "
531                     "entry %p, object %p, add %d", entry, object, add));
532         }
533         if (vp != NULL) {
534                 if (add) {
535                         VOP_SET_TEXT_CHECKED(vp);
536                 } else {
537                         vn_lock(vp, LK_SHARED | LK_RETRY);
538                         VOP_UNSET_TEXT_CHECKED(vp);
539                         VOP_UNLOCK(vp);
540                 }
541                 if (vp_held)
542                         vdrop(vp);
543         }
544 }
545
546 /*
547  * Use a different name for this vm_map_entry field when its use
548  * is not consistent with its use as part of an ordered search tree.
549  */
550 #define defer_next right
551
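/*
 *	vm_map_process_deferred:
 *
 *	Deallocate the map entries queued on the current thread for deferred
 *	disposal.  The entries are linked through the defer_next field and are
 *	processed after the map lock has been dropped, since releasing
 *	writecounts and text references may need to acquire vnode locks.
 */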
552 static void
553 vm_map_process_deferred(void)
554 {
555         struct thread *td;
556         vm_map_entry_t entry, next;
557         vm_object_t object;
558
559         td = curthread;
560         entry = td->td_map_def_user;
561         td->td_map_def_user = NULL;
562         while (entry != NULL) {
563                 next = entry->defer_next;
564                 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
565                     MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
566                     MAP_ENTRY_VN_EXEC));
567                 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
568                         /*
569                          * Decrement the object's writemappings and
570                          * possibly the vnode's v_writecount.
571                          */
572                         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
573                             ("Submap with writecount"));
574                         object = entry->object.vm_object;
575                         KASSERT(object != NULL, ("No object for writecount"));
576                         vm_pager_release_writecount(object, entry->start,
577                             entry->end);
578                 }
579                 vm_map_entry_set_vnode_text(entry, false);
580                 vm_map_entry_deallocate(entry, FALSE);
581                 entry = next;
582         }
583 }
584
585 #ifdef INVARIANTS
586 static void
587 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
588 {
589
590         if (map->system_map)
591                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
592         else
593                 sx_assert_(&map->lock, SA_XLOCKED, file, line);
594 }
595
596 #define VM_MAP_ASSERT_LOCKED(map) \
597     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
598
599 enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL };
600 #ifdef DIAGNOSTIC
601 static int enable_vmmap_check = VMMAP_CHECK_UNLOCK;
602 #else
603 static int enable_vmmap_check = VMMAP_CHECK_NONE;
604 #endif
605 SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
606     &enable_vmmap_check, 0, "Enable vm map consistency checking");
607
608 static void _vm_map_assert_consistent(vm_map_t map, int check);
609
610 #define VM_MAP_ASSERT_CONSISTENT(map) \
611     _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
612 #ifdef DIAGNOSTIC
613 #define VM_MAP_UNLOCK_CONSISTENT(map) do {                              \
614         if (map->nupdates > map->nentries) {                            \
615                 _vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK);     \
616                 map->nupdates = 0;                                      \
617         }                                                               \
618 } while (0)
619 #else
620 #define VM_MAP_UNLOCK_CONSISTENT(map)
621 #endif
622 #else
623 #define VM_MAP_ASSERT_LOCKED(map)
624 #define VM_MAP_ASSERT_CONSISTENT(map)
625 #define VM_MAP_UNLOCK_CONSISTENT(map)
626 #endif /* INVARIANTS */
627
628 void
629 _vm_map_unlock(vm_map_t map, const char *file, int line)
630 {
631
632         VM_MAP_UNLOCK_CONSISTENT(map);
633         if (map->system_map)
634                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
635         else {
636                 sx_xunlock_(&map->lock, file, line);
637                 vm_map_process_deferred();
638         }
639 }
640
641 void
642 _vm_map_lock_read(vm_map_t map, const char *file, int line)
643 {
644
645         if (map->system_map)
646                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
647         else
648                 sx_slock_(&map->lock, file, line);
649 }
650
651 void
652 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
653 {
654
655         if (map->system_map)
656                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
657         else {
658                 sx_sunlock_(&map->lock, file, line);
659                 vm_map_process_deferred();
660         }
661 }
662
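/*
 *	_vm_map_trylock{,_read}:
 *
 *	Try to acquire the map lock without sleeping.  Returns a non-zero
 *	value if the lock was acquired and zero otherwise.
 */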
663 int
664 _vm_map_trylock(vm_map_t map, const char *file, int line)
665 {
666         int error;
667
668         error = map->system_map ?
669             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
670             !sx_try_xlock_(&map->lock, file, line);
671         if (error == 0)
672                 map->timestamp++;
673         return (error == 0);
674 }
675
676 int
677 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
678 {
679         int error;
680
681         error = map->system_map ?
682             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
683             !sx_try_slock_(&map->lock, file, line);
684         return (error == 0);
685 }
686
687 /*
688  *      _vm_map_lock_upgrade:   [ internal use only ]
689  *
690  *      Tries to upgrade a read (shared) lock on the specified map to a write
691  *      (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
692  *      non-zero value if the upgrade fails.  If the upgrade fails, the map is
693  *      returned without a read or write lock held.
694  *
695  *      Requires that the map be read locked.
696  */
697 int
698 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
699 {
700         unsigned int last_timestamp;
701
702         if (map->system_map) {
703                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
704         } else {
705                 if (!sx_try_upgrade_(&map->lock, file, line)) {
706                         last_timestamp = map->timestamp;
707                         sx_sunlock_(&map->lock, file, line);
708                         vm_map_process_deferred();
709                         /*
710                          * If the map's timestamp does not change while the
711                          * map is unlocked, then the upgrade succeeds.
712                          */
713                         sx_xlock_(&map->lock, file, line);
714                         if (last_timestamp != map->timestamp) {
715                                 sx_xunlock_(&map->lock, file, line);
716                                 return (1);
717                         }
718                 }
719         }
720         map->timestamp++;
721         return (0);
722 }
723
724 void
725 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
726 {
727
728         if (map->system_map) {
729                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
730         } else {
731                 VM_MAP_UNLOCK_CONSISTENT(map);
732                 sx_downgrade_(&map->lock, file, line);
733         }
734 }
735
736 /*
737  *      vm_map_locked:
738  *
739  *      Returns a non-zero value if the caller holds a write (exclusive) lock
740  *      on the specified map and the value "0" otherwise.
741  */
742 int
743 vm_map_locked(vm_map_t map)
744 {
745
746         if (map->system_map)
747                 return (mtx_owned(&map->system_mtx));
748         else
749                 return (sx_xlocked(&map->lock));
750 }
751
752 /*
753  *      _vm_map_unlock_and_wait:
754  *
755  *      Atomically releases the lock on the specified map and puts the calling
756  *      thread to sleep.  The calling thread will remain asleep until either
757  *      vm_map_wakeup() is performed on the map or the specified timeout is
758  *      exceeded.
759  *
760  *      WARNING!  This function does not perform deferred deallocations of
761  *      objects and map entries.  Therefore, the calling thread is expected to
762  *      reacquire the map lock after reawakening and later perform an ordinary
763  *      unlock operation, such as vm_map_unlock(), before completing its
764  *      operation on the map.
765  */
766 int
767 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
768 {
769
770         VM_MAP_UNLOCK_CONSISTENT(map);
771         mtx_lock(&map_sleep_mtx);
772         if (map->system_map)
773                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
774         else
775                 sx_xunlock_(&map->lock, file, line);
776         return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
777             timo));
778 }
779
780 /*
781  *      vm_map_wakeup:
782  *
783  *      Awaken any threads that have slept on the map using
784  *      vm_map_unlock_and_wait().
785  */
786 void
787 vm_map_wakeup(vm_map_t map)
788 {
789
790         /*
791          * Acquire and release map_sleep_mtx to prevent a wakeup()
792          * from being performed (and lost) between the map unlock
793          * and the msleep() in _vm_map_unlock_and_wait().
794          */
795         mtx_lock(&map_sleep_mtx);
796         mtx_unlock(&map_sleep_mtx);
797         wakeup(&map->root);
798 }
799
800 void
801 vm_map_busy(vm_map_t map)
802 {
803
804         VM_MAP_ASSERT_LOCKED(map);
805         map->busy++;
806 }
807
808 void
809 vm_map_unbusy(vm_map_t map)
810 {
811
812         VM_MAP_ASSERT_LOCKED(map);
813         KASSERT(map->busy, ("vm_map_unbusy: not busy"));
814         if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
815                 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
816                 wakeup(&map->busy);
817         }
818 }
819
820 void 
821 vm_map_wait_busy(vm_map_t map)
822 {
823
824         VM_MAP_ASSERT_LOCKED(map);
825         while (map->busy) {
826                 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
827                 if (map->system_map)
828                         msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
829                 else
830                         sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
831         }
832         map->timestamp++;
833 }
834
835 long
836 vmspace_resident_count(struct vmspace *vmspace)
837 {
838         return pmap_resident_count(vmspace_pmap(vmspace));
839 }
840
841 /*
842  * Initialize an existing vm_map structure
843  * such as that in the vmspace structure.
844  */
845 static void
846 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
847 {
848
849         map->header.eflags = MAP_ENTRY_HEADER;
850         map->needs_wakeup = FALSE;
851         map->system_map = 0;
852         map->pmap = pmap;
853         map->header.end = min;
854         map->header.start = max;
855         map->flags = 0;
856         map->header.left = map->header.right = &map->header;
857         map->root = NULL;
858         map->timestamp = 0;
859         map->busy = 0;
860         map->anon_loc = 0;
861 #ifdef DIAGNOSTIC
862         map->nupdates = 0;
863 #endif
864 }
865
866 void
867 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
868 {
869
870         _vm_map_init(map, pmap, min, max);
871         mtx_init(&map->system_mtx, "vm map (system)", NULL,
872             MTX_DEF | MTX_DUPOK);
873         sx_init(&map->lock, "vm map (user)");
874 }
875
876 /*
877  *      vm_map_entry_dispose:   [ internal use only ]
878  *
879  *      Inverse of vm_map_entry_create.
880  */
881 static void
882 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
883 {
884         uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
885 }
886
887 /*
888  *      vm_map_entry_create:    [ internal use only ]
889  *
890  *      Allocates a VM map entry for insertion.
891  *      No entry fields are filled in.
892  */
893 static vm_map_entry_t
894 vm_map_entry_create(vm_map_t map)
895 {
896         vm_map_entry_t new_entry;
897
898         if (map->system_map)
899                 new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
900         else
901                 new_entry = uma_zalloc(mapentzone, M_WAITOK);
902         if (new_entry == NULL)
903                 panic("vm_map_entry_create: kernel resources exhausted");
904         return (new_entry);
905 }
906
907 /*
908  *      vm_map_entry_set_behavior:
909  *
910  *      Set the expected access behavior, either normal, random, or
911  *      sequential.
912  */
913 static inline void
914 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
915 {
916         entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
917             (behavior & MAP_ENTRY_BEHAV_MASK);
918 }
919
920 /*
921  *      vm_map_entry_max_free_{left,right}:
922  *
923  *      Compute the size of the largest free gap between two entries,
924  *      one the root of a tree and the other the ancestor of that root
925  *      that is the least or greatest ancestor found on the search path.
926  */
927 static inline vm_size_t
928 vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
929 {
930
931         return (root->left != left_ancestor ?
932             root->left->max_free : root->start - left_ancestor->end);
933 }
934
935 static inline vm_size_t
936 vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
937 {
938
939         return (root->right != right_ancestor ?
940             root->right->max_free : right_ancestor->start - root->end);
941 }
942
943 /*
944  *      vm_map_entry_{pred,succ}:
945  *
946  *      Find the {predecessor, successor} of the entry by taking one step
947  *      in the appropriate direction and backtracking as much as necessary.
948  *      vm_map_entry_succ is defined in vm_map.h.
949  */
950 static inline vm_map_entry_t
951 vm_map_entry_pred(vm_map_entry_t entry)
952 {
953         vm_map_entry_t prior;
954
955         prior = entry->left;
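        /*
         * In the threaded tree, entry->left is either entry's left child or,
         * if entry has no left subtree, a thread to entry's predecessor.  In
         * the former case, descend along right pointers to the rightmost
         * node of the left subtree, stopping when a right pointer threads
         * back to entry.
         */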
956         if (prior->right->start < entry->start) {
957                 do
958                         prior = prior->right;
959                 while (prior->right != entry);
960         }
961         return (prior);
962 }
963
964 static inline vm_size_t
965 vm_size_max(vm_size_t a, vm_size_t b)
966 {
967
968         return (a > b ? a : b);
969 }
970
971 #define SPLAY_LEFT_STEP(root, y, llist, rlist, test) do {               \
972         vm_map_entry_t z;                                               \
973         vm_size_t max_free;                                             \
974                                                                         \
975         /*                                                              \
976          * Infer root->right->max_free == root->max_free when           \
977          * y->max_free < root->max_free || root->max_free == 0.         \
978          * Otherwise, look right to find it.                            \
979          */                                                             \
980         y = root->left;                                                 \
981         max_free = root->max_free;                                      \
982         KASSERT(max_free == vm_size_max(                                \
983             vm_map_entry_max_free_left(root, llist),                    \
984             vm_map_entry_max_free_right(root, rlist)),                  \
985             ("%s: max_free invariant fails", __func__));                \
986         if (max_free - 1 < vm_map_entry_max_free_left(root, llist))     \
987                 max_free = vm_map_entry_max_free_right(root, rlist);    \
988         if (y != llist && (test)) {                                     \
989                 /* Rotate right and make y root. */                     \
990                 z = y->right;                                           \
991                 if (z != root) {                                        \
992                         root->left = z;                                 \
993                         y->right = root;                                \
994                         if (max_free < y->max_free)                     \
995                             root->max_free = max_free =                 \
996                             vm_size_max(max_free, z->max_free);         \
997                 } else if (max_free < y->max_free)                      \
998                         root->max_free = max_free =                     \
999                             vm_size_max(max_free, root->start - y->end);\
1000                 root = y;                                               \
1001                 y = root->left;                                         \
1002         }                                                               \
1003         /* Copy right->max_free.  Put root on rlist. */                 \
1004         root->max_free = max_free;                                      \
1005         KASSERT(max_free == vm_map_entry_max_free_right(root, rlist),   \
1006             ("%s: max_free not copied from right", __func__));          \
1007         root->left = rlist;                                             \
1008         rlist = root;                                                   \
1009         root = y != llist ? y : NULL;                                   \
1010 } while (0)
1011
1012 #define SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do {              \
1013         vm_map_entry_t z;                                               \
1014         vm_size_t max_free;                                             \
1015                                                                         \
1016         /*                                                              \
1017          * Infer root->left->max_free == root->max_free when            \
1018          * y->max_free < root->max_free || root->max_free == 0.         \
1019          * Otherwise, look left to find it.                             \
1020          */                                                             \
1021         y = root->right;                                                \
1022         max_free = root->max_free;                                      \
1023         KASSERT(max_free == vm_size_max(                                \
1024             vm_map_entry_max_free_left(root, llist),                    \
1025             vm_map_entry_max_free_right(root, rlist)),                  \
1026             ("%s: max_free invariant fails", __func__));                \
1027         if (max_free - 1 < vm_map_entry_max_free_right(root, rlist))    \
1028                 max_free = vm_map_entry_max_free_left(root, llist);     \
1029         if (y != rlist && (test)) {                                     \
1030                 /* Rotate left and make y root. */                      \
1031                 z = y->left;                                            \
1032                 if (z != root) {                                        \
1033                         root->right = z;                                \
1034                         y->left = root;                                 \
1035                         if (max_free < y->max_free)                     \
1036                             root->max_free = max_free =                 \
1037                             vm_size_max(max_free, z->max_free);         \
1038                 } else if (max_free < y->max_free)                      \
1039                         root->max_free = max_free =                     \
1040                             vm_size_max(max_free, y->start - root->end);\
1041                 root = y;                                               \
1042                 y = root->right;                                        \
1043         }                                                               \
1044         /* Copy left->max_free.  Put root on llist. */                  \
1045         root->max_free = max_free;                                      \
1046         KASSERT(max_free == vm_map_entry_max_free_left(root, llist),    \
1047             ("%s: max_free not copied from left", __func__));           \
1048         root->right = llist;                                            \
1049         llist = root;                                                   \
1050         root = y != rlist ? y : NULL;                                   \
1051 } while (0)
1052
1053 /*
1054  * Walk down the tree until we find addr or a gap where addr would go, breaking
1055  * off left and right subtrees of nodes less than, or greater than addr.  Treat
1056  * subtrees with root->max_free < length as empty trees.  llist and rlist are
1057  * the two sides in reverse order (bottom-up), with llist linked by the right
1058  * pointer and rlist linked by the left pointer in the vm_map_entry, and both
1059  * lists terminated by &map->header.  This function, and the subsequent call to
1060  * vm_map_splay_merge_{left,right,pred,succ}, rely on the start and end address
1061  * values in &map->header.
1062  */
1063 static __always_inline vm_map_entry_t
1064 vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
1065     vm_map_entry_t *llist, vm_map_entry_t *rlist)
1066 {
1067         vm_map_entry_t left, right, root, y;
1068
1069         left = right = &map->header;
1070         root = map->root;
1071         while (root != NULL && root->max_free >= length) {
1072                 KASSERT(left->end <= root->start &&
1073                     root->end <= right->start,
1074                     ("%s: root not within tree bounds", __func__));
1075                 if (addr < root->start) {
1076                         SPLAY_LEFT_STEP(root, y, left, right,
1077                             y->max_free >= length && addr < y->start);
1078                 } else if (addr >= root->end) {
1079                         SPLAY_RIGHT_STEP(root, y, left, right,
1080                             y->max_free >= length && addr >= y->end);
1081                 } else
1082                         break;
1083         }
1084         *llist = left;
1085         *rlist = right;
1086         return (root);
1087 }
1088
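/*
 *	vm_map_splay_findnext, vm_map_splay_findprev:
 *
 *	Push the nodes of root's right (respectively, left) subtree onto the
 *	rlist (llist) spine, so that root's successor (predecessor) ends up
 *	at the head of that list.
 */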
1089 static __always_inline void
1090 vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *rlist)
1091 {
1092         vm_map_entry_t hi, right, y;
1093
1094         right = *rlist;
1095         hi = root->right == right ? NULL : root->right;
1096         if (hi == NULL)
1097                 return;
1098         do
1099                 SPLAY_LEFT_STEP(hi, y, root, right, true);
1100         while (hi != NULL);
1101         *rlist = right;
1102 }
1103
1104 static __always_inline void
1105 vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *llist)
1106 {
1107         vm_map_entry_t left, lo, y;
1108
1109         left = *llist;
1110         lo = root->left == left ? NULL : root->left;
1111         if (lo == NULL)
1112                 return;
1113         do
1114                 SPLAY_RIGHT_STEP(lo, y, left, root, true);
1115         while (lo != NULL);
1116         *llist = left;
1117 }
1118
1119 static inline void
1120 vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
1121 {
1122         vm_map_entry_t tmp;
1123
1124         tmp = *b;
1125         *b = *a;
1126         *a = tmp;
1127 }
1128
1129 /*
1130  * Walk back up the two spines, flip the pointers and set max_free.  The
1131  * subtrees of the root go at the bottom of llist and rlist.
1132  */
1133 static vm_size_t
1134 vm_map_splay_merge_left_walk(vm_map_entry_t header, vm_map_entry_t root,
1135     vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist)
1136 {
1137         do {
1138                 /*
1139                  * The max_free values of the children of llist are in
1140                  * llist->max_free and max_free.  Update with the
1141                  * max value.
1142                  */
1143                 llist->max_free = max_free =
1144                     vm_size_max(llist->max_free, max_free);
1145                 vm_map_entry_swap(&llist->right, &tail);
1146                 vm_map_entry_swap(&tail, &llist);
1147         } while (llist != header);
1148         root->left = tail;
1149         return (max_free);
1150 }
1151
1152 /*
1153  * When llist is known to be the predecessor of root.
1154  */
1155 static inline vm_size_t
1156 vm_map_splay_merge_pred(vm_map_entry_t header, vm_map_entry_t root,
1157     vm_map_entry_t llist)
1158 {
1159         vm_size_t max_free;
1160
1161         max_free = root->start - llist->end;
1162         if (llist != header) {
1163                 max_free = vm_map_splay_merge_left_walk(header, root,
1164                     root, max_free, llist);
1165         } else {
1166                 root->left = header;
1167                 header->right = root;
1168         }
1169         return (max_free);
1170 }
1171
1172 /*
1173  * When llist may or may not be the predecessor of root.
1174  */
1175 static inline vm_size_t
1176 vm_map_splay_merge_left(vm_map_entry_t header, vm_map_entry_t root,
1177     vm_map_entry_t llist)
1178 {
1179         vm_size_t max_free;
1180
1181         max_free = vm_map_entry_max_free_left(root, llist);
1182         if (llist != header) {
1183                 max_free = vm_map_splay_merge_left_walk(header, root,
1184                     root->left == llist ? root : root->left,
1185                     max_free, llist);
1186         }
1187         return (max_free);
1188 }
1189
1190 static vm_size_t
1191 vm_map_splay_merge_right_walk(vm_map_entry_t header, vm_map_entry_t root,
1192     vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist)
1193 {
1194         do {
1195                 /*
1196                  * The max_free values of the children of rlist are in
1197                  * rlist->max_free and max_free.  Update with the
1198                  * max value.
1199                  */
1200                 rlist->max_free = max_free =
1201                     vm_size_max(rlist->max_free, max_free);
1202                 vm_map_entry_swap(&rlist->left, &tail);
1203                 vm_map_entry_swap(&tail, &rlist);
1204         } while (rlist != header);
1205         root->right = tail;
1206         return (max_free);
1207 }
1208
1209 /*
1210  * When rlist is known to be the successor of root.
1211  */
1212 static inline vm_size_t
1213 vm_map_splay_merge_succ(vm_map_entry_t header, vm_map_entry_t root,
1214     vm_map_entry_t rlist)
1215 {
1216         vm_size_t max_free;
1217
1218         max_free = rlist->start - root->end;
1219         if (rlist != header) {
1220                 max_free = vm_map_splay_merge_right_walk(header, root,
1221                     root, max_free, rlist);
1222         } else {
1223                 root->right = header;
1224                 header->left = root;
1225         }
1226         return (max_free);
1227 }
1228
1229 /*
1230  * When rlist may or may not be the successor of root.
1231  */
1232 static inline vm_size_t
1233 vm_map_splay_merge_right(vm_map_entry_t header, vm_map_entry_t root,
1234     vm_map_entry_t rlist)
1235 {
1236         vm_size_t max_free;
1237
1238         max_free = vm_map_entry_max_free_right(root, rlist);
1239         if (rlist != header) {
1240                 max_free = vm_map_splay_merge_right_walk(header, root,
1241                     root->right == rlist ? root : root->right,
1242                     max_free, rlist);
1243         }
1244         return (max_free);
1245 }
1246
1247 /*
1248  *      vm_map_splay:
1249  *
1250  *      The Sleator and Tarjan top-down splay algorithm with the
1251  *      following variation.  Max_free must be computed bottom-up, so
1252  *      on the downward pass, maintain the left and right spines in
1253  *      reverse order.  Then, make a second pass up each side to fix
1254  *      the pointers and compute max_free.  The time bound is O(log n)
1255  *      amortized.
1256  *
1257  *      The tree is threaded, which means that there are no null pointers.
1258  *      When a node has no left child, its left pointer points to its
1259  *      predecessor, which the last ancestor on the search path from the root
1260  * predecessor, which is the last ancestor on the search path from the root
1261  *      child, its right pointer points to its successor.  The map header node
1262  *      is the predecessor of the first map entry, and the successor of the
1263  *      last.
1264  *
1265  *      The new root is the vm_map_entry containing "addr", or else an
1266  *      adjacent entry (lower if possible) if addr is not in the tree.
1267  *
1268  *      The map must be locked, and leaves it so.
1269  *
1270  *      Returns: the new root.
1271  */
1272 static vm_map_entry_t
1273 vm_map_splay(vm_map_t map, vm_offset_t addr)
1274 {
1275         vm_map_entry_t header, llist, rlist, root;
1276         vm_size_t max_free_left, max_free_right;
1277
1278         header = &map->header;
1279         root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
1280         if (root != NULL) {
1281                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1282                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1283         } else if (llist != header) {
1284                 /*
1285                  * Recover the greatest node in the left
1286                  * subtree and make it the root.
1287                  */
1288                 root = llist;
1289                 llist = root->right;
1290                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1291                 max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1292         } else if (rlist != header) {
1293                 /*
1294                  * Recover the least node in the right
1295                  * subtree and make it the root.
1296                  */
1297                 root = rlist;
1298                 rlist = root->left;
1299                 max_free_left = vm_map_splay_merge_pred(header, root, llist);
1300                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1301         } else {
1302                 /* There is no root. */
1303                 return (NULL);
1304         }
1305         root->max_free = vm_size_max(max_free_left, max_free_right);
1306         map->root = root;
1307         VM_MAP_ASSERT_CONSISTENT(map);
1308         return (root);
1309 }
1310
1311 /*
1312  *      vm_map_entry_{un,}link:
1313  *
1314  *      Insert/remove entries from maps.  On linking, if new entry clips
1315  *      existing entry, trim existing entry to avoid overlap, and manage
1316  *      offsets.  On unlinking, merge disappearing entry with neighbor, if
1317  *      called for, and manage offsets.  Callers should not modify fields in
1318  *      entries already mapped.
1319  */
1320 static void
1321 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
1322 {
1323         vm_map_entry_t header, llist, rlist, root;
1324         vm_size_t max_free_left, max_free_right;
1325
1326         CTR3(KTR_VM,
1327             "vm_map_entry_link: map %p, nentries %d, entry %p", map,
1328             map->nentries, entry);
1329         VM_MAP_ASSERT_LOCKED(map);
1330         map->nentries++;
1331         header = &map->header;
1332         root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1333         if (root == NULL) {
1334                 /*
1335                  * The new entry does not overlap any existing entry in the
1336                  * map, so it becomes the new root of the map tree.
1337                  */
1338                 max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1339                 max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1340         } else if (entry->start == root->start) {
1341                 /*
1342                  * The new entry is a clone of root, with only the end field
1343                  * changed.  The root entry will be shrunk to abut the new
1344                  * entry, and will be the right child of the new root entry in
1345                  * the modified map.
1346                  */
1347                 KASSERT(entry->end < root->end,
1348                     ("%s: clip_start not within entry", __func__));
1349                 vm_map_splay_findprev(root, &llist);
1350                 root->offset += entry->end - root->start;
1351                 root->start = entry->end;
1352                 max_free_left = vm_map_splay_merge_pred(header, entry, llist);
1353                 max_free_right = root->max_free = vm_size_max(
1354                     vm_map_splay_merge_pred(entry, root, entry),
1355                     vm_map_splay_merge_right(header, root, rlist));
1356         } else {
1357                 /*
1358                  * The new entry is a clone of root, with only the start field
1359                  * changed.  The root entry will be shrunk to abut the new
1360                  * entry, and will be the left child of the new root entry in
1361                  * the modified map.
1362                  */
1363                 KASSERT(entry->end == root->end,
1364                     ("%s: clip_start not within entry", __func__));
1365                 vm_map_splay_findnext(root, &rlist);
1366                 entry->offset += entry->start - root->start;
1367                 root->end = entry->start;
1368                 max_free_left = root->max_free = vm_size_max(
1369                     vm_map_splay_merge_left(header, root, llist),
1370                     vm_map_splay_merge_succ(entry, root, entry));
1371                 max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
1372         }
1373         entry->max_free = vm_size_max(max_free_left, max_free_right);
1374         map->root = entry;
1375         VM_MAP_ASSERT_CONSISTENT(map);
1376 }
1377
1378 enum unlink_merge_type {
1379         UNLINK_MERGE_NONE,
1380         UNLINK_MERGE_NEXT
1381 };
1382
1383 static void
1384 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
1385     enum unlink_merge_type op)
1386 {
1387         vm_map_entry_t header, llist, rlist, root;
1388         vm_size_t max_free_left, max_free_right;
1389
1390         VM_MAP_ASSERT_LOCKED(map);
1391         header = &map->header;
1392         root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1393         KASSERT(root != NULL,
1394             ("vm_map_entry_unlink: unlink object not mapped"));
1395
1396         vm_map_splay_findprev(root, &llist);
1397         vm_map_splay_findnext(root, &rlist);
1398         if (op == UNLINK_MERGE_NEXT) {
1399                 rlist->start = root->start;
1400                 rlist->offset = root->offset;
1401         }
1402         if (llist != header) {
1403                 root = llist;
1404                 llist = root->right;
1405                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1406                 max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1407         } else if (rlist != header) {
1408                 root = rlist;
1409                 rlist = root->left;
1410                 max_free_left = vm_map_splay_merge_pred(header, root, llist);
1411                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1412         } else {
1413                 header->left = header->right = header;
1414                 root = NULL;
1415         }
1416         if (root != NULL)
1417                 root->max_free = vm_size_max(max_free_left, max_free_right);
1418         map->root = root;
1419         VM_MAP_ASSERT_CONSISTENT(map);
1420         map->nentries--;
1421         CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1422             map->nentries, entry);
1423 }
1424
1425 /*
1426  *      vm_map_entry_resize:
1427  *
1428  *      Resize a vm_map_entry, recompute the amount of free space that
1429  *      follows it and propagate that value up the tree.
1430  *
1431  *      The map must be locked, and leaves it so.
1432  */
1433 static void
1434 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
1435 {
1436         vm_map_entry_t header, llist, rlist, root;
1437
1438         VM_MAP_ASSERT_LOCKED(map);
1439         header = &map->header;
1440         root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1441         KASSERT(root != NULL, ("%s: resize object not mapped", __func__));
1442         vm_map_splay_findnext(root, &rlist);
1443         entry->end += grow_amount;
1444         root->max_free = vm_size_max(
1445             vm_map_splay_merge_left(header, root, llist),
1446             vm_map_splay_merge_succ(header, root, rlist));
1447         map->root = root;
1448         VM_MAP_ASSERT_CONSISTENT(map);
1449         CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
1450             __func__, map, map->nentries, entry);
1451 }
1452
1453 /*
1454  *      vm_map_lookup_entry:    [ internal use only ]
1455  *
1456  *      Finds the map entry containing (or
1457  *      immediately preceding) the specified address
1458  *      in the given map; the entry is returned
1459  *      in the "entry" parameter.  The boolean
1460  *      result indicates whether the address is
1461  *      actually contained in the map.
1462  */
1463 boolean_t
1464 vm_map_lookup_entry(
1465         vm_map_t map,
1466         vm_offset_t address,
1467         vm_map_entry_t *entry)  /* OUT */
1468 {
1469         vm_map_entry_t cur, header, lbound, ubound;
1470         boolean_t locked;
1471
1472         /*
1473          * If the map is empty, then the map entry immediately preceding
1474          * "address" is the map's header.
1475          */
1476         header = &map->header;
1477         cur = map->root;
1478         if (cur == NULL) {
1479                 *entry = header;
1480                 return (FALSE);
1481         }
1482         if (address >= cur->start && cur->end > address) {
1483                 *entry = cur;
1484                 return (TRUE);
1485         }
1486         if ((locked = vm_map_locked(map)) ||
1487             sx_try_upgrade(&map->lock)) {
1488                 /*
1489                  * Splay requires a write lock on the map.  However, it only
1490                  * restructures the binary search tree; it does not otherwise
1491                  * change the map.  Thus, the map's timestamp need not change
1492                  * on a temporary upgrade.
1493                  */
1494                 cur = vm_map_splay(map, address);
1495                 if (!locked) {
1496                         VM_MAP_UNLOCK_CONSISTENT(map);
1497                         sx_downgrade(&map->lock);
1498                 }
1499
1500                 /*
1501                  * If "address" is contained within a map entry, the new root
1502                  * is that map entry.  Otherwise, the new root is a map entry
1503                  * immediately before or after "address".
1504                  */
1505                 if (address < cur->start) {
1506                         *entry = header;
1507                         return (FALSE);
1508                 }
1509                 *entry = cur;
1510                 return (address < cur->end);
1511         }
1512         /*
1513          * Since the map is only locked for read access, perform a
1514          * standard binary search tree lookup for "address".
1515          */
1516         lbound = ubound = header;
1517         for (;;) {
1518                 if (address < cur->start) {
1519                         ubound = cur;
1520                         cur = cur->left;
1521                         if (cur == lbound)
1522                                 break;
1523                 } else if (cur->end <= address) {
1524                         lbound = cur;
1525                         cur = cur->right;
1526                         if (cur == ubound)
1527                                 break;
1528                 } else {
1529                         *entry = cur;
1530                         return (TRUE);
1531                 }
1532         }
1533         *entry = lbound;
1534         return (FALSE);
1535 }
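
/*
 * Illustrative caller sketch (editorial addition, not upstream code):
 * with the map locked for at least read access, a typical lookup is
 *
 *      vm_map_entry_t entry;
 *
 *      if (vm_map_lookup_entry(map, addr, &entry)) {
 *              // addr is mapped: entry->start <= addr < entry->end
 *      } else {
 *              // entry is the entry (or &map->header) preceding addr
 *      }
 */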
1536
1537 /*
1538  *      vm_map_insert:
1539  *
1540  *      Inserts the given whole VM object into the target
1541  *      map at the specified address range.  The object's
1542  *      size should match that of the address range.
1543  *
1544  *      Requires that the map be locked, and leaves it so.
1545  *
1546  *      If object is non-NULL, ref count must be bumped by caller
1547  *      prior to making call to account for the new entry.
1548  */
1549 int
1550 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1551     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
1552 {
1553         vm_map_entry_t new_entry, next_entry, prev_entry;
1554         struct ucred *cred;
1555         vm_eflags_t protoeflags;
1556         vm_inherit_t inheritance;
1557         u_long bdry;
1558         u_int bidx;
1559
1560         VM_MAP_ASSERT_LOCKED(map);
1561         KASSERT(object != kernel_object ||
1562             (cow & MAP_COPY_ON_WRITE) == 0,
1563             ("vm_map_insert: kernel object and COW"));
1564         KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0 ||
1565             (cow & MAP_SPLIT_BOUNDARY_MASK) != 0,
1566             ("vm_map_insert: paradoxical MAP_NOFAULT request, obj %p cow %#x",
1567             object, cow));
1568         KASSERT((prot & ~max) == 0,
1569             ("prot %#x is not subset of max_prot %#x", prot, max));
1570
1571         /*
1572          * Check that the start and end points are not bogus.
1573          */
1574         if (start == end || !vm_map_range_valid(map, start, end))
1575                 return (KERN_INVALID_ADDRESS);
1576
1577         /*
1578          * Find the entry prior to the proposed starting address; if it's part
1579          * of an existing entry, this range is bogus.
1580          */
1581         if (vm_map_lookup_entry(map, start, &prev_entry))
1582                 return (KERN_NO_SPACE);
1583
1584         /*
1585          * Check that the next entry doesn't overlap the end point.
1586          */
1587         next_entry = vm_map_entry_succ(prev_entry);
1588         if (next_entry->start < end)
1589                 return (KERN_NO_SPACE);
1590
1591         if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
1592             max != VM_PROT_NONE))
1593                 return (KERN_INVALID_ARGUMENT);
1594
1595         protoeflags = 0;
1596         if (cow & MAP_COPY_ON_WRITE)
1597                 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
1598         if (cow & MAP_NOFAULT)
1599                 protoeflags |= MAP_ENTRY_NOFAULT;
1600         if (cow & MAP_DISABLE_SYNCER)
1601                 protoeflags |= MAP_ENTRY_NOSYNC;
1602         if (cow & MAP_DISABLE_COREDUMP)
1603                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
1604         if (cow & MAP_STACK_GROWS_DOWN)
1605                 protoeflags |= MAP_ENTRY_GROWS_DOWN;
1606         if (cow & MAP_STACK_GROWS_UP)
1607                 protoeflags |= MAP_ENTRY_GROWS_UP;
1608         if (cow & MAP_WRITECOUNT)
1609                 protoeflags |= MAP_ENTRY_WRITECNT;
1610         if (cow & MAP_VN_EXEC)
1611                 protoeflags |= MAP_ENTRY_VN_EXEC;
1612         if ((cow & MAP_CREATE_GUARD) != 0)
1613                 protoeflags |= MAP_ENTRY_GUARD;
1614         if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
1615                 protoeflags |= MAP_ENTRY_STACK_GAP_DN;
1616         if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
1617                 protoeflags |= MAP_ENTRY_STACK_GAP_UP;
1618         if (cow & MAP_INHERIT_SHARE)
1619                 inheritance = VM_INHERIT_SHARE;
1620         else
1621                 inheritance = VM_INHERIT_DEFAULT;
1622         if ((cow & MAP_SPLIT_BOUNDARY_MASK) != 0) {
1623                 /* This magically ignores index 0, for usual page size. */
1624                 bidx = (cow & MAP_SPLIT_BOUNDARY_MASK) >>
1625                     MAP_SPLIT_BOUNDARY_SHIFT;
1626                 if (bidx >= MAXPAGESIZES)
1627                         return (KERN_INVALID_ARGUMENT);
1628                 bdry = pagesizes[bidx] - 1;
1629                 if ((start & bdry) != 0 || (end & bdry) != 0)
1630                         return (KERN_INVALID_ARGUMENT);
1631                 protoeflags |= bidx << MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
1632         }
1633
1634         cred = NULL;
1635         if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
1636                 goto charged;
1637         if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1638             ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1639                 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1640                         return (KERN_RESOURCE_SHORTAGE);
1641                 KASSERT(object == NULL ||
1642                     (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
1643                     object->cred == NULL,
1644                     ("overcommit: vm_map_insert o %p", object));
1645                 cred = curthread->td_ucred;
1646         }
1647
1648 charged:
1649         /* Expand the kernel pmap, if necessary. */
1650         if (map == kernel_map && end > kernel_vm_end)
1651                 pmap_growkernel(end);
1652         if (object != NULL) {
1653                 /*
1654                  * OBJ_ONEMAPPING must be cleared unless this mapping
1655                  * is trivially proven to be the only mapping for any
1656                  * of the object's pages.  (Object granularity
1657                  * reference counting is insufficient to recognize
1658                  * aliases with precision.)
1659                  */
1660                 if ((object->flags & OBJ_ANON) != 0) {
1661                         VM_OBJECT_WLOCK(object);
1662                         if (object->ref_count > 1 || object->shadow_count != 0)
1663                                 vm_object_clear_flag(object, OBJ_ONEMAPPING);
1664                         VM_OBJECT_WUNLOCK(object);
1665                 }
1666         } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
1667             protoeflags &&
1668             (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
1669             MAP_VN_EXEC)) == 0 &&
1670             prev_entry->end == start && (prev_entry->cred == cred ||
1671             (prev_entry->object.vm_object != NULL &&
1672             prev_entry->object.vm_object->cred == cred)) &&
1673             vm_object_coalesce(prev_entry->object.vm_object,
1674             prev_entry->offset,
1675             (vm_size_t)(prev_entry->end - prev_entry->start),
1676             (vm_size_t)(end - prev_entry->end), cred != NULL &&
1677             (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
1678                 /*
1679                  * We were able to extend the object.  Determine if we
1680                  * can extend the previous map entry to include the
1681                  * new range as well.
1682                  */
1683                 if (prev_entry->inheritance == inheritance &&
1684                     prev_entry->protection == prot &&
1685                     prev_entry->max_protection == max &&
1686                     prev_entry->wired_count == 0) {
1687                         KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
1688                             0, ("prev_entry %p has incoherent wiring",
1689                             prev_entry));
1690                         if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
1691                                 map->size += end - prev_entry->end;
1692                         vm_map_entry_resize(map, prev_entry,
1693                             end - prev_entry->end);
1694                         vm_map_try_merge_entries(map, prev_entry, next_entry);
1695                         return (KERN_SUCCESS);
1696                 }
1697
1698                 /*
1699                  * If we can extend the object but cannot extend the
1700                  * map entry, we have to create a new map entry.  We
1701                  * must bump the ref count on the extended object to
1702                  * account for it.  object may be NULL.
1703                  */
1704                 object = prev_entry->object.vm_object;
1705                 offset = prev_entry->offset +
1706                     (prev_entry->end - prev_entry->start);
1707                 vm_object_reference(object);
1708                 if (cred != NULL && object != NULL && object->cred != NULL &&
1709                     !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1710                         /* Object already accounts for this uid. */
1711                         cred = NULL;
1712                 }
1713         }
1714         if (cred != NULL)
1715                 crhold(cred);
1716
1717         /*
1718          * Create a new entry
1719          */
1720         new_entry = vm_map_entry_create(map);
1721         new_entry->start = start;
1722         new_entry->end = end;
1723         new_entry->cred = NULL;
1724
1725         new_entry->eflags = protoeflags;
1726         new_entry->object.vm_object = object;
1727         new_entry->offset = offset;
1728
1729         new_entry->inheritance = inheritance;
1730         new_entry->protection = prot;
1731         new_entry->max_protection = max;
1732         new_entry->wired_count = 0;
1733         new_entry->wiring_thread = NULL;
1734         new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1735         new_entry->next_read = start;
1736
1737         KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1738             ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
1739         new_entry->cred = cred;
1740
1741         /*
1742          * Insert the new entry into the list
1743          */
1744         vm_map_entry_link(map, new_entry);
1745         if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
1746                 map->size += new_entry->end - new_entry->start;
1747
1748         /*
1749          * Try to coalesce the new entry with both the previous and next
1750          * entries in the list.  Previously, we only attempted to coalesce
1751          * with the previous entry when object is NULL.  Here, we handle the
1752          * other cases, which are less common.
1753          */
1754         vm_map_try_merge_entries(map, prev_entry, new_entry);
1755         vm_map_try_merge_entries(map, new_entry, next_entry);
1756
1757         if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
1758                 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
1759                     end - start, cow & MAP_PREFAULT_PARTIAL);
1760         }
1761
1762         return (KERN_SUCCESS);
1763 }
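
/*
 * Illustrative caller sketch (editorial addition, not upstream code):
 * per the header comment above, a caller passing a non-NULL object must
 * hold the map lock and bump the object's reference count beforehand,
 * dropping that reference again if the insert fails, e.g.
 *
 *      vm_object_reference(obj);
 *      rv = vm_map_insert(map, obj, 0, start, end, VM_PROT_RW,
 *          VM_PROT_RW, 0);
 *      if (rv != KERN_SUCCESS)
 *              vm_object_deallocate(obj);
 */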
1764
1765 /*
1766  *      vm_map_findspace:
1767  *
1768  *      Find the first fit (lowest VM address) for "length" free bytes
1769  *      beginning at address >= start in the given map.
1770  *
1771  *      In a vm_map_entry, "max_free" is the maximum amount of
1772  *      contiguous free space between an entry in its subtree and a
1773  *      neighbor of that entry.  This allows finding a free region in
1774  *      one path down the tree, so O(log n) amortized with splay
1775  *      trees.
1776  *
1777  *      The map must be locked, and is left locked.
1778  *
1779  *      Returns: starting address if sufficient space,
1780  *               vm_map_max(map)-length+1 if insufficient space.
1781  */
1782 vm_offset_t
1783 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
1784 {
1785         vm_map_entry_t header, llist, rlist, root, y;
1786         vm_size_t left_length, max_free_left, max_free_right;
1787         vm_offset_t gap_end;
1788
1789         /*
1790          * Request must fit within min/max VM address and must avoid
1791          * address wrap.
1792          */
1793         start = MAX(start, vm_map_min(map));
1794         if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
1795                 return (vm_map_max(map) - length + 1);
1796
1797         /* Empty tree means wide open address space. */
1798         if (map->root == NULL)
1799                 return (start);
1800
1801         /*
1802          * After splay_split, if start is within an entry, push it to the start
1803          * of the following gap.  If rlist is at the end of the gap containing
1804          * start, save the end of that gap in gap_end to see if the gap is big
1805                  * enough; otherwise set gap_end to start, skip gap-checking, and move
1806          * directly to a search of the right subtree.
1807          */
1808         header = &map->header;
1809         root = vm_map_splay_split(map, start, length, &llist, &rlist);
1810         gap_end = rlist->start;
1811         if (root != NULL) {
1812                 start = root->end;
1813                 if (root->right != rlist)
1814                         gap_end = start;
1815                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1816                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1817         } else if (rlist != header) {
1818                 root = rlist;
1819                 rlist = root->left;
1820                 max_free_left = vm_map_splay_merge_pred(header, root, llist);
1821                 max_free_right = vm_map_splay_merge_right(header, root, rlist);
1822         } else {
1823                 root = llist;
1824                 llist = root->right;
1825                 max_free_left = vm_map_splay_merge_left(header, root, llist);
1826                 max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1827         }
1828         root->max_free = vm_size_max(max_free_left, max_free_right);
1829         map->root = root;
1830         VM_MAP_ASSERT_CONSISTENT(map);
1831         if (length <= gap_end - start)
1832                 return (start);
1833
1834         /* With max_free, can immediately tell if no solution. */
1835         if (root->right == header || length > root->right->max_free)
1836                 return (vm_map_max(map) - length + 1);
1837
1838         /*
1839          * Splay for the least large-enough gap in the right subtree.
1840          */
1841         llist = rlist = header;
1842         for (left_length = 0;;
1843             left_length = vm_map_entry_max_free_left(root, llist)) {
1844                 if (length <= left_length)
1845                         SPLAY_LEFT_STEP(root, y, llist, rlist,
1846                             length <= vm_map_entry_max_free_left(y, llist));
1847                 else
1848                         SPLAY_RIGHT_STEP(root, y, llist, rlist,
1849                             length > vm_map_entry_max_free_left(y, root));
1850                 if (root == NULL)
1851                         break;
1852         }
1853         root = llist;
1854         llist = root->right;
1855         max_free_left = vm_map_splay_merge_left(header, root, llist);
1856         if (rlist == header) {
1857                 root->max_free = vm_size_max(max_free_left,
1858                     vm_map_splay_merge_succ(header, root, rlist));
1859         } else {
1860                 y = rlist;
1861                 rlist = y->left;
1862                 y->max_free = vm_size_max(
1863                     vm_map_splay_merge_pred(root, y, root),
1864                     vm_map_splay_merge_right(header, y, rlist));
1865                 root->max_free = vm_size_max(max_free_left, y->max_free);
1866         }
1867         map->root = root;
1868         VM_MAP_ASSERT_CONSISTENT(map);
1869         return (root->end);
1870 }
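
/*
 * Illustrative caller sketch (editorial addition): given the return
 * convention documented above, failure is detected as
 *
 *      addr = vm_map_findspace(map, start, length);
 *      if (addr + length > vm_map_max(map))
 *              return (KERN_NO_SPACE);
 *
 * which is essentially the test vm_map_find_aligned() below performs.
 */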
1871
1872 int
1873 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1874     vm_offset_t start, vm_size_t length, vm_prot_t prot,
1875     vm_prot_t max, int cow)
1876 {
1877         vm_offset_t end;
1878         int result;
1879
1880         end = start + length;
1881         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1882             object == NULL,
1883             ("vm_map_fixed: non-NULL backing object for stack"));
1884         vm_map_lock(map);
1885         VM_MAP_RANGE_CHECK(map, start, end);
1886         if ((cow & MAP_CHECK_EXCL) == 0) {
1887                 result = vm_map_delete(map, start, end);
1888                 if (result != KERN_SUCCESS)
1889                         goto out;
1890         }
1891         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1892                 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1893                     prot, max, cow);
1894         } else {
1895                 result = vm_map_insert(map, object, offset, start, end,
1896                     prot, max, cow);
1897         }
1898 out:
1899         vm_map_unlock(map);
1900         return (result);
1901 }
1902
1903 static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
1904 static const int aslr_pages_rnd_32[2] = {0x100, 0x4};
1905
1906 static int cluster_anon = 1;
1907 SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
1908     &cluster_anon, 0,
1909     "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");
1910
1911 static bool
1912 clustering_anon_allowed(vm_offset_t addr)
1913 {
1914
1915         switch (cluster_anon) {
1916         case 0:
1917                 return (false);
1918         case 1:
1919                 return (addr == 0);
1920         case 2:
1921         default:
1922                 return (true);
1923         }
1924 }
1925
1926 static long aslr_restarts;
1927 SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
1928     &aslr_restarts, 0,
1929     "Number of aslr failures");
1930
1931 /*
1932  * Searches for the specified amount of free space in the given map with the
1933  * specified alignment.  Performs an address-ordered, first-fit search from
1934  * the given address "*addr", with an optional upper bound "max_addr".  If the
1935  * parameter "alignment" is zero, then the alignment is computed from the
1936  * given (object, offset) pair so as to enable the greatest possible use of
1937  * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
1938  * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
1939  *
1940  * The map must be locked.  Initially, there must be at least "length" bytes
1941  * of free space at the given address.
1942  */
1943 static int
1944 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1945     vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
1946     vm_offset_t alignment)
1947 {
1948         vm_offset_t aligned_addr, free_addr;
1949
1950         VM_MAP_ASSERT_LOCKED(map);
1951         free_addr = *addr;
1952         KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
1953             ("caller failed to provide space %#jx at address %p",
1954              (uintmax_t)length, (void *)free_addr));
1955         for (;;) {
1956                 /*
1957                  * At the start of every iteration, the free space at address
1958                  * "*addr" is at least "length" bytes.
1959                  */
1960                 if (alignment == 0)
1961                         pmap_align_superpage(object, offset, addr, length);
1962                 else if ((*addr & (alignment - 1)) != 0) {
1963                         *addr &= ~(alignment - 1);
1964                         *addr += alignment;
1965                 }
1966                 aligned_addr = *addr;
1967                 if (aligned_addr == free_addr) {
1968                         /*
1969                          * Alignment did not change "*addr", so "*addr" must
1970                          * still provide sufficient free space.
1971                          */
1972                         return (KERN_SUCCESS);
1973                 }
1974
1975                 /*
1976                  * Test for address wrap on "*addr".  A wrapped "*addr" could
1977                  * be a valid address, in which case vm_map_findspace() cannot
1978                  * be relied upon to fail.
1979                  */
1980                 if (aligned_addr < free_addr)
1981                         return (KERN_NO_SPACE);
1982                 *addr = vm_map_findspace(map, aligned_addr, length);
1983                 if (*addr + length > vm_map_max(map) ||
1984                     (max_addr != 0 && *addr + length > max_addr))
1985                         return (KERN_NO_SPACE);
1986                 free_addr = *addr;
1987                 if (free_addr == aligned_addr) {
1988                         /*
1989                          * If a successful call to vm_map_findspace() did not
1990                          * change "*addr", then "*addr" must still be aligned
1991                          * and provide sufficient free space.
1992                          */
1993                         return (KERN_SUCCESS);
1994                 }
1995         }
1996 }
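
/*
 * Illustrative note (editorial addition): for a power-of-two "alignment",
 * the round-up performed in the loop above is
 *
 *      addr = (addr & ~(alignment - 1)) + alignment;
 *
 * e.g. an address of 0x12345 with alignment 0x1000 becomes 0x13000.
 */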
1997
1998 int
1999 vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length,
2000     vm_offset_t max_addr, vm_offset_t alignment)
2001 {
2002         /* XXXKIB ASLR eh ? */
2003         *addr = vm_map_findspace(map, *addr, length);
2004         if (*addr + length > vm_map_max(map) ||
2005             (max_addr != 0 && *addr + length > max_addr))
2006                 return (KERN_NO_SPACE);
2007         return (vm_map_alignspace(map, NULL, 0, addr, length, max_addr,
2008             alignment));
2009 }
2010
2011 /*
2012  *      vm_map_find finds an unallocated region in the target address
2013  *      map with the given length.  The search is defined to be
2014  *      first-fit from the specified address; the region found is
2015  *      returned in the same parameter.
2016  *
2017  *      If object is non-NULL, ref count must be bumped by caller
2018  *      prior to making call to account for the new entry.
2019  */
2020 int
2021 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2022             vm_offset_t *addr,  /* IN/OUT */
2023             vm_size_t length, vm_offset_t max_addr, int find_space,
2024             vm_prot_t prot, vm_prot_t max, int cow)
2025 {
2026         vm_offset_t alignment, curr_min_addr, min_addr;
2027         int gap, pidx, rv, try;
2028         bool cluster, en_aslr, update_anon;
2029
2030         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
2031             object == NULL,
2032             ("vm_map_find: non-NULL backing object for stack"));
2033         MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
2034             (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
2035         if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
2036             (object->flags & OBJ_COLORED) == 0))
2037                 find_space = VMFS_ANY_SPACE;
2038         if (find_space >> 8 != 0) {
2039                 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
2040                 alignment = (vm_offset_t)1 << (find_space >> 8);
2041         } else
2042                 alignment = 0;
2043         en_aslr = (map->flags & MAP_ASLR) != 0;
2044         update_anon = cluster = clustering_anon_allowed(*addr) &&
2045             (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
2046             find_space != VMFS_NO_SPACE && object == NULL &&
2047             (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
2048             MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
2049         curr_min_addr = min_addr = *addr;
2050         if (en_aslr && min_addr == 0 && !cluster &&
2051             find_space != VMFS_NO_SPACE &&
2052             (map->flags & MAP_ASLR_IGNSTART) != 0)
2053                 curr_min_addr = min_addr = vm_map_min(map);
2054         try = 0;
2055         vm_map_lock(map);
2056         if (cluster) {
2057                 curr_min_addr = map->anon_loc;
2058                 if (curr_min_addr == 0)
2059                         cluster = false;
2060         }
2061         if (find_space != VMFS_NO_SPACE) {
2062                 KASSERT(find_space == VMFS_ANY_SPACE ||
2063                     find_space == VMFS_OPTIMAL_SPACE ||
2064                     find_space == VMFS_SUPER_SPACE ||
2065                     alignment != 0, ("unexpected VMFS flag"));
2066 again:
2067                 /*
2068                  * When creating an anonymous mapping, try clustering
2069                  * with an existing anonymous mapping first.
2070                  *
2071                  * We make up to two attempts to find address space
2072                  * for a given find_space value. The first attempt may
2073                  * apply randomization or may cluster with an existing
2074                  * anonymous mapping. If this first attempt fails,
2075                  * perform a first-fit search of the available address
2076                  * space.
2077                  *
2078                  * If both tries fail and find_space is
2079                  * VMFS_OPTIMAL_SPACE, fall back to VMFS_ANY_SPACE and
2080                  * again enable clustering and randomization.
2081                  */
2082                 try++;
2083                 MPASS(try <= 2);
2084
2085                 if (try == 2) {
2086                         /*
2087                          * Second try: we failed either to find a
2088                          * suitable region for randomizing the
2089                          * allocation, or to cluster with an existing
2090                          * mapping.  Retry with free run.
2091                          */
2092                         curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
2093                             vm_map_min(map) : min_addr;
2094                         atomic_add_long(&aslr_restarts, 1);
2095                 }
2096
2097                 if (try == 1 && en_aslr && !cluster) {
2098                         /*
2099                          * Find space for allocation, including
2100                          * gap needed for later randomization.
2101                          */
2102                         pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
2103                             (find_space == VMFS_SUPER_SPACE || find_space ==
2104                             VMFS_OPTIMAL_SPACE) ? 1 : 0;
2105                         gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
2106                             (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
2107                             aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
2108                         *addr = vm_map_findspace(map, curr_min_addr,
2109                             length + gap * pagesizes[pidx]);
2110                         if (*addr + length + gap * pagesizes[pidx] >
2111                             vm_map_max(map))
2112                                 goto again;
2113                         /* And randomize the start address. */
2114                         *addr += (arc4random() % gap) * pagesizes[pidx];
2115                         if (max_addr != 0 && *addr + length > max_addr)
2116                                 goto again;
2117                 } else {
2118                         *addr = vm_map_findspace(map, curr_min_addr, length);
2119                         if (*addr + length > vm_map_max(map) ||
2120                             (max_addr != 0 && *addr + length > max_addr)) {
2121                                 if (cluster) {
2122                                         cluster = false;
2123                                         MPASS(try == 1);
2124                                         goto again;
2125                                 }
2126                                 rv = KERN_NO_SPACE;
2127                                 goto done;
2128                         }
2129                 }
2130
2131                 if (find_space != VMFS_ANY_SPACE &&
2132                     (rv = vm_map_alignspace(map, object, offset, addr, length,
2133                     max_addr, alignment)) != KERN_SUCCESS) {
2134                         if (find_space == VMFS_OPTIMAL_SPACE) {
2135                                 find_space = VMFS_ANY_SPACE;
2136                                 curr_min_addr = min_addr;
2137                                 cluster = update_anon;
2138                                 try = 0;
2139                                 goto again;
2140                         }
2141                         goto done;
2142                 }
2143         } else if ((cow & MAP_REMAP) != 0) {
2144                 if (!vm_map_range_valid(map, *addr, *addr + length)) {
2145                         rv = KERN_INVALID_ADDRESS;
2146                         goto done;
2147                 }
2148                 rv = vm_map_delete(map, *addr, *addr + length);
2149                 if (rv != KERN_SUCCESS)
2150                         goto done;
2151         }
2152         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
2153                 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
2154                     max, cow);
2155         } else {
2156                 rv = vm_map_insert(map, object, offset, *addr, *addr + length,
2157                     prot, max, cow);
2158         }
2159         if (rv == KERN_SUCCESS && update_anon)
2160                 map->anon_loc = *addr + length;
2161 done:
2162         vm_map_unlock(map);
2163         return (rv);
2164 }
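
/*
 * Illustrative caller sketch (editorial addition, not upstream code):
 * unlike vm_map_insert(), vm_map_find() takes the map lock itself, so
 * an unlocked caller mapping an object anywhere might look like
 *
 *      vm_offset_t addr = 0;
 *
 *      vm_object_reference(obj);
 *      if (vm_map_find(map, obj, 0, &addr, size, 0, VMFS_OPTIMAL_SPACE,
 *          VM_PROT_RW, VM_PROT_RW, 0) != KERN_SUCCESS)
 *              vm_object_deallocate(obj);
 */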
2165
2166 /*
2167  *      vm_map_find_min() is a variant of vm_map_find() that takes an
2168  *      additional parameter (min_addr) and treats the given address
2169  *      (*addr) differently.  Specifically, it treats *addr as a hint
2170  *      and not as the minimum address where the mapping is created.
2171  *
2172  *      This function works in two phases.  First, it tries to
2173  *      allocate above the hint.  If that fails and the hint is
2174  *      greater than min_addr, it performs a second pass, replacing
2175  *      the hint with min_addr as the minimum address for the
2176  *      allocation.
2177  */
2178 int
2179 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2180     vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
2181     vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
2182     int cow)
2183 {
2184         vm_offset_t hint;
2185         int rv;
2186
2187         hint = *addr;
2188         for (;;) {
2189                 rv = vm_map_find(map, object, offset, addr, length, max_addr,
2190                     find_space, prot, max, cow);
2191                 if (rv == KERN_SUCCESS || min_addr >= hint)
2192                         return (rv);
2193                 *addr = hint = min_addr;
2194         }
2195 }
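
/*
 * Illustrative note (editorial addition): when the hint exceeds min_addr,
 * the loop above calls vm_map_find() at most twice: once starting at the
 * hint and, if that fails, once more starting at min_addr.
 */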
2196
2197 /*
2198  * A map entry with any of the following flags set must not be merged with
2199  * another entry.
2200  */
2201 #define MAP_ENTRY_NOMERGE_MASK  (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
2202             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC)
2203
2204 static bool
2205 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
2206 {
2207
2208         KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
2209             (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
2210             ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
2211             prev, entry));
2212         return (prev->end == entry->start &&
2213             prev->object.vm_object == entry->object.vm_object &&
2214             (prev->object.vm_object == NULL ||
2215             prev->offset + (prev->end - prev->start) == entry->offset) &&
2216             prev->eflags == entry->eflags &&
2217             prev->protection == entry->protection &&
2218             prev->max_protection == entry->max_protection &&
2219             prev->inheritance == entry->inheritance &&
2220             prev->wired_count == entry->wired_count &&
2221             prev->cred == entry->cred);
2222 }
2223
2224 static void
2225 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
2226 {
2227
2228         /*
2229          * If the backing object is a vnode object, vm_object_deallocate()
2230          * calls vrele().  However, vrele() does not lock the vnode because
2231          * the vnode has additional references.  Thus, the map lock can be
2232          * kept without causing a lock-order reversal with the vnode lock.
2233          *
2234          * Since we count the number of virtual page mappings in
2235          * object->un_pager.vnp.writemappings, the writemappings value
2236          * should not be adjusted when the entry is disposed of.
2237          */
2238         if (entry->object.vm_object != NULL)
2239                 vm_object_deallocate(entry->object.vm_object);
2240         if (entry->cred != NULL)
2241                 crfree(entry->cred);
2242         vm_map_entry_dispose(map, entry);
2243 }
2244
2245 /*
2246  *      vm_map_try_merge_entries:
2247  *
2248  *      Compare the given map entry to its predecessor, and merge its predecessor
2249  *      into it if possible.  The entry remains valid, and may be extended.
2250  *      The predecessor may be deleted.
2251  *
2252  *      The map must be locked.
2253  */
2254 void
2255 vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
2256     vm_map_entry_t entry)
2257 {
2258
2259         VM_MAP_ASSERT_LOCKED(map);
2260         if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
2261             vm_map_mergeable_neighbors(prev_entry, entry)) {
2262                 vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
2263                 vm_map_merged_neighbor_dispose(map, prev_entry);
2264         }
2265 }
2266
2267 /*
2268  *      vm_map_entry_back:
2269  *
2270  *      Allocate an object to back a map entry.
2271  */
2272 static inline void
2273 vm_map_entry_back(vm_map_entry_t entry)
2274 {
2275         vm_object_t object;
2276
2277         KASSERT(entry->object.vm_object == NULL,
2278             ("map entry %p has backing object", entry));
2279         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2280             ("map entry %p is a submap", entry));
2281         object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL,
2282             entry->cred, entry->end - entry->start);
2283         entry->object.vm_object = object;
2284         entry->offset = 0;
2285         entry->cred = NULL;
2286 }
2287
2288 /*
2289  *      vm_map_entry_charge_object
2290  *
2291  *      If there is no object backing this entry, create one.  Otherwise, if
2292  *      the entry has cred, give it to the backing object.
2293  */
2294 static inline void
2295 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
2296 {
2297
2298         VM_MAP_ASSERT_LOCKED(map);
2299         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2300             ("map entry %p is a submap", entry));
2301         if (entry->object.vm_object == NULL && !map->system_map &&
2302             (entry->eflags & MAP_ENTRY_GUARD) == 0)
2303                 vm_map_entry_back(entry);
2304         else if (entry->object.vm_object != NULL &&
2305             ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2306             entry->cred != NULL) {
2307                 VM_OBJECT_WLOCK(entry->object.vm_object);
2308                 KASSERT(entry->object.vm_object->cred == NULL,
2309                     ("OVERCOMMIT: %s: both cred e %p", __func__, entry));
2310                 entry->object.vm_object->cred = entry->cred;
2311                 entry->object.vm_object->charge = entry->end - entry->start;
2312                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
2313                 entry->cred = NULL;
2314         }
2315 }
2316
2317 /*
2318  *      vm_map_entry_clone
2319  *
2320  *      Create a duplicate map entry for clipping.
2321  */
2322 static vm_map_entry_t
2323 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry)
2324 {
2325         vm_map_entry_t new_entry;
2326
2327         VM_MAP_ASSERT_LOCKED(map);
2328
2329         /*
2330          * Create a backing object now, if none exists, so that more individual
2331          * objects won't be created after the map entry is split.
2332          */
2333         vm_map_entry_charge_object(map, entry);
2334
2335         /* Clone the entry. */
2336         new_entry = vm_map_entry_create(map);
2337         *new_entry = *entry;
2338         if (new_entry->cred != NULL)
2339                 crhold(entry->cred);
2340         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2341                 vm_object_reference(new_entry->object.vm_object);
2342                 vm_map_entry_set_vnode_text(new_entry, true);
2343                 /*
2344                  * The object->un_pager.vnp.writemappings for the object of
2345                  * MAP_ENTRY_WRITECNT type entry shall be kept as is here.  The
2346                  * virtual pages are re-distributed among the clipped entries,
2347                  * so the sum is left the same.
2348                  */
2349         }
2350         return (new_entry);
2351 }
2352
2353 /*
2354  *      vm_map_clip_start:      [ internal use only ]
2355  *
2356  *      Asserts that the given entry begins at or after
2357  *      the specified address; if necessary,
2358  *      it splits the entry into two.
2359  */
2360 static int
2361 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr)
2362 {
2363         vm_map_entry_t new_entry;
2364         int bdry_idx;
2365
2366         if (!map->system_map)
2367                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2368                     "%s: map %p entry %p start 0x%jx", __func__, map, entry,
2369                     (uintmax_t)startaddr);
2370
2371         if (startaddr <= entry->start)
2372                 return (KERN_SUCCESS);
2373
2374         VM_MAP_ASSERT_LOCKED(map);
2375         KASSERT(entry->end > startaddr && entry->start < startaddr,
2376             ("%s: invalid clip of entry %p", __func__, entry));
2377
2378         bdry_idx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
2379             MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
2380         if (bdry_idx != 0) {
2381                 if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0)
2382                         return (KERN_INVALID_ARGUMENT);
2383         }
2384
2385         new_entry = vm_map_entry_clone(map, entry);
2386
2387         /*
2388          * Split off the front portion.  Insert the new entry BEFORE this one,
2389          * so that this entry has the specified starting address.
2390          */
2391         new_entry->end = startaddr;
2392         vm_map_entry_link(map, new_entry);
2393         return (KERN_SUCCESS);
2394 }
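
/*
 * Illustrative note (editorial addition): clipping an entry covering
 * [A, C) at startaddr B, with A < B < C, links a cloned entry covering
 * [A, B) before this one and leaves this entry covering [B, C); the
 * linking code adjusts offsets so both halves still map the same pages
 * of the backing object.
 */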
2395
2396 /*
2397  *      vm_map_lookup_clip_start:
2398  *
2399  *      Find the entry at or just after 'start', and clip it if 'start' is in
2400  *      the interior of the entry.  Return in *res_entry the entry at or
2401  *      after 'start', and in *prev_entry the entry before 'start'.
2402  */
2403 static int
2404 vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start,
2405     vm_map_entry_t *res_entry, vm_map_entry_t *prev_entry)
2406 {
2407         vm_map_entry_t entry;
2408         int rv;
2409
2410         if (!map->system_map)
2411                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2412                     "%s: map %p start 0x%jx prev %p", __func__, map,
2413                     (uintmax_t)start, prev_entry);
2414
2415         if (vm_map_lookup_entry(map, start, prev_entry)) {
2416                 entry = *prev_entry;
2417                 rv = vm_map_clip_start(map, entry, start);
2418                 if (rv != KERN_SUCCESS)
2419                         return (rv);
2420                 *prev_entry = vm_map_entry_pred(entry);
2421         } else
2422                 entry = vm_map_entry_succ(*prev_entry);
2423         *res_entry = entry;
2424         return (KERN_SUCCESS);
2425 }
2426
2427 /*
2428  *      vm_map_clip_end:        [ internal use only ]
2429  *
2430  *      Asserts that the given entry ends at or before
2431  *      the specified address; if necessary,
2432  *      it splits the entry into two.
2433  */
2434 static int
2435 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr)
2436 {
2437         vm_map_entry_t new_entry;
2438         int bdry_idx;
2439
2440         if (!map->system_map)
2441                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2442                     "%s: map %p entry %p end 0x%jx", __func__, map, entry,
2443                     (uintmax_t)endaddr);
2444
2445         if (endaddr >= entry->end)
2446                 return (KERN_SUCCESS);
2447
2448         VM_MAP_ASSERT_LOCKED(map);
2449         KASSERT(entry->start < endaddr && entry->end > endaddr,
2450             ("%s: invalid clip of entry %p", __func__, entry));
2451
2452         bdry_idx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
2453             MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
2454         if (bdry_idx != 0) {
2455                 if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0)
2456                         return (KERN_INVALID_ARGUMENT);
2457         }
2458
2459         new_entry = vm_map_entry_clone(map, entry);
2460
2461         /*
2462          * Split off the back portion.  Insert the new entry AFTER this one,
2463          * so that this entry has the specified ending address.
2464          */
2465         new_entry->start = endaddr;
2466         vm_map_entry_link(map, new_entry);
2467
2468         return (KERN_SUCCESS);
2469 }
2470
2471 /*
2472  *      vm_map_submap:          [ kernel use only ]
2473  *
2474  *      Mark the given range as handled by a subordinate map.
2475  *
2476  *      This range must have been created with vm_map_find,
2477  *      and no other operations may have been performed on this
2478  *      range prior to calling vm_map_submap.
2479  *
2480  *      Only a limited number of operations can be performed
2481  *      within this range after calling vm_map_submap:
2482  *              vm_fault
2483  *      [Don't try vm_map_copy!]
2484  *
2485  *      To remove a submapping, one must first remove the
2486  *      range from the superior map, and then destroy the
2487  *      submap (if desired).  [Better yet, don't try it.]
2488  */
2489 int
2490 vm_map_submap(
2491         vm_map_t map,
2492         vm_offset_t start,
2493         vm_offset_t end,
2494         vm_map_t submap)
2495 {
2496         vm_map_entry_t entry;
2497         int result;
2498
2499         result = KERN_INVALID_ARGUMENT;
2500
2501         vm_map_lock(submap);
2502         submap->flags |= MAP_IS_SUB_MAP;
2503         vm_map_unlock(submap);
2504
2505         vm_map_lock(map);
2506         VM_MAP_RANGE_CHECK(map, start, end);
2507         if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end &&
2508             (entry->eflags & MAP_ENTRY_COW) == 0 &&
2509             entry->object.vm_object == NULL) {
2510                 result = vm_map_clip_start(map, entry, start);
2511                 if (result != KERN_SUCCESS)
2512                         goto unlock;
2513                 result = vm_map_clip_end(map, entry, end);
2514                 if (result != KERN_SUCCESS)
2515                         goto unlock;
2516                 entry->object.sub_map = submap;
2517                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2518                 result = KERN_SUCCESS;
2519         }
2520 unlock:
2521         vm_map_unlock(map);
2522
2523         if (result != KERN_SUCCESS) {
2524                 vm_map_lock(submap);
2525                 submap->flags &= ~MAP_IS_SUB_MAP;
2526                 vm_map_unlock(submap);
2527         }
2528         return (result);
2529 }
2530
2531 /*
2532  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2533  */
2534 #define MAX_INIT_PT     96
2535
2536 /*
2537  *      vm_map_pmap_enter:
2538  *
2539  *      Preload the specified map's pmap with mappings to the specified
2540  *      object's memory-resident pages.  No further physical pages are
2541  *      allocated, and no further virtual pages are retrieved from secondary
2542  *      storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
2543  *      limited number of page mappings are created at the low-end of the
2544  *      specified address range.  (For this purpose, a superpage mapping
2545  *      counts as one page mapping.)  Otherwise, all resident pages within
2546  *      the specified address range are mapped.
2547  */
2548 static void
2549 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
2550     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
2551 {
2552         vm_offset_t start;
2553         vm_page_t p, p_start;
2554         vm_pindex_t mask, psize, threshold, tmpidx;
2555
2556         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
2557                 return;
2558         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2559                 VM_OBJECT_WLOCK(object);
2560                 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2561                         pmap_object_init_pt(map->pmap, addr, object, pindex,
2562                             size);
2563                         VM_OBJECT_WUNLOCK(object);
2564                         return;
2565                 }
2566                 VM_OBJECT_LOCK_DOWNGRADE(object);
2567         } else
2568                 VM_OBJECT_RLOCK(object);
2569
2570         psize = atop(size);
2571         if (psize + pindex > object->size) {
2572                 if (pindex >= object->size) {
2573                         VM_OBJECT_RUNLOCK(object);
2574                         return;
2575                 }
2576                 psize = object->size - pindex;
2577         }
2578
2579         start = 0;
2580         p_start = NULL;
2581         threshold = MAX_INIT_PT;
2582
2583         p = vm_page_find_least(object, pindex);
2584         /*
2585          * Assert: the variable p is either (1) the page with the
2586          * least pindex greater than or equal to the parameter pindex
2587          * or (2) NULL.
2588          */
2589         for (;
2590              p != NULL && (tmpidx = p->pindex - pindex) < psize;
2591              p = TAILQ_NEXT(p, listq)) {
2592                 /*
2593                  * don't allow a madvise to blow away our really
2594                  * free pages by allocating pv entries.
2595                  */
2596                 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
2597                     vm_page_count_severe()) ||
2598                     ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
2599                     tmpidx >= threshold)) {
2600                         psize = tmpidx;
2601                         break;
2602                 }
2603                 if (vm_page_all_valid(p)) {
2604                         if (p_start == NULL) {
2605                                 start = addr + ptoa(tmpidx);
2606                                 p_start = p;
2607                         }
2608                         /* Jump ahead if a superpage mapping is possible. */
2609                         if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
2610                             (pagesizes[p->psind] - 1)) == 0) {
2611                                 mask = atop(pagesizes[p->psind]) - 1;
2612                                 if (tmpidx + mask < psize &&
2613                                     vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
2614                                         p += mask;
2615                                         threshold += mask;
2616                                 }
2617                         }
2618                 } else if (p_start != NULL) {
2619                         pmap_enter_object(map->pmap, start, addr +
2620                             ptoa(tmpidx), p_start, prot);
2621                         p_start = NULL;
2622                 }
2623         }
2624         if (p_start != NULL)
2625                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2626                     p_start, prot);
2627         VM_OBJECT_RUNLOCK(object);
2628 }
2629
2630 /*
2631  *      vm_map_protect:
2632  *
2633  *      Sets the protection of the specified address
2634  *      region in the target map.  If "set_max" is
2635  *      specified, the maximum protection is to be set;
2636  *      otherwise, only the current protection is affected.
2637  */
2638 int
2639 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2640                vm_prot_t new_prot, boolean_t set_max)
2641 {
2642         vm_map_entry_t entry, first_entry, in_tran, prev_entry;
2643         vm_object_t obj;
2644         struct ucred *cred;
2645         vm_prot_t old_prot;
2646         int rv;
2647
2648         if (start == end)
2649                 return (KERN_SUCCESS);
2650
2651 again:
2652         in_tran = NULL;
2653         vm_map_lock(map);
2654
2655         /*
2656          * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
2657          * need to fault pages into the map and will drop the map lock while
2658          * doing so, and the VM object may end up in an inconsistent state if we
2659          * update the protection on the map entry in between faults.
2660          */
2661         vm_map_wait_busy(map);
2662
2663         VM_MAP_RANGE_CHECK(map, start, end);
2664
2665         if (!vm_map_lookup_entry(map, start, &first_entry))
2666                 first_entry = vm_map_entry_succ(first_entry);
2667
2668         /*
2669          * Make a first pass to check for protection violations.
2670          */
2671         for (entry = first_entry; entry->start < end;
2672             entry = vm_map_entry_succ(entry)) {
2673                 if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
2674                         continue;
2675                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
2676                         vm_map_unlock(map);
2677                         return (KERN_INVALID_ARGUMENT);
2678                 }
2679                 if ((new_prot & entry->max_protection) != new_prot) {
2680                         vm_map_unlock(map);
2681                         return (KERN_PROTECTION_FAILURE);
2682                 }
2683                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
2684                         in_tran = entry;
2685         }
2686
2687         /*
2688          * Postpone the operation until all in-transition map entries have
2689          * stabilized.  An in-transition entry might already have its pages
2690          * wired and wired_count incremented, but not yet have its
2691          * MAP_ENTRY_USER_WIRED flag set.  In which case, we would fail to call
2692          * vm_fault_copy_entry() in the final loop below.
2693          */
2694         if (in_tran != NULL) {
2695                 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2696                 vm_map_unlock_and_wait(map, 0);
2697                 goto again;
2698         }
2699
2700         /*
2701          * Before changing the protections, try to reserve swap space for any
2702          * private (i.e., copy-on-write) mappings that are transitioning from
2703          * read-only to read/write access.  If a reservation fails, break out
2704          * of this loop early and let the next loop simplify the entries, since
2705          * some may now be mergeable.
2706          */
2707         rv = vm_map_clip_start(map, first_entry, start);
2708         if (rv != KERN_SUCCESS) {
2709                 vm_map_unlock(map);
2710                 return (rv);
2711         }
2712         for (entry = first_entry; entry->start < end;
2713             entry = vm_map_entry_succ(entry)) {
2714                 rv = vm_map_clip_end(map, entry, end);
2715                 if (rv != KERN_SUCCESS) {
2716                         vm_map_unlock(map);
2717                         return (rv);
2718                 }
2719
2720                 if (set_max ||
2721                     ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
2722                     ENTRY_CHARGED(entry) ||
2723                     (entry->eflags & MAP_ENTRY_GUARD) != 0) {
2724                         continue;
2725                 }
2726
2727                 cred = curthread->td_ucred;
2728                 obj = entry->object.vm_object;
2729
2730                 if (obj == NULL ||
2731                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
2732                         if (!swap_reserve(entry->end - entry->start)) {
2733                                 rv = KERN_RESOURCE_SHORTAGE;
2734                                 end = entry->end;
2735                                 break;
2736                         }
2737                         crhold(cred);
2738                         entry->cred = cred;
2739                         continue;
2740                 }
2741
2742                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP)
2743                         continue;
2744                 VM_OBJECT_WLOCK(obj);
2745                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
2746                         VM_OBJECT_WUNLOCK(obj);
2747                         continue;
2748                 }
2749
2750                 /*
2751                  * Charge for the whole object allocation now, since
2752                  * we cannot distinguish between non-charged and
2753                  * charged clipped mappings of the same object later.
2754                  */
2755                 KASSERT(obj->charge == 0,
2756                     ("vm_map_protect: object %p overcharged (entry %p)",
2757                     obj, entry));
2758                 if (!swap_reserve(ptoa(obj->size))) {
2759                         VM_OBJECT_WUNLOCK(obj);
2760                         rv = KERN_RESOURCE_SHORTAGE;
2761                         end = entry->end;
2762                         break;
2763                 }
2764
2765                 crhold(cred);
2766                 obj->cred = cred;
2767                 obj->charge = ptoa(obj->size);
2768                 VM_OBJECT_WUNLOCK(obj);
2769         }
2770
2771         /*
2772          * If enough swap space was available, go back and fix up protections.
2773          * Otherwise, just simplify entries, since some may have been modified.
2774          * [Note that clipping is not necessary the second time.]
2775          */
2776         for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry;
2777             entry->start < end;
2778             vm_map_try_merge_entries(map, prev_entry, entry),
2779             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
2780                 if (rv != KERN_SUCCESS ||
2781                     (entry->eflags & MAP_ENTRY_GUARD) != 0)
2782                         continue;
2783
2784                 old_prot = entry->protection;
2785
2786                 if (set_max)
2787                         entry->protection =
2788                             (entry->max_protection = new_prot) &
2789                             old_prot;
2790                 else
2791                         entry->protection = new_prot;
2792
2793                 /*
2794                  * For user wired map entries, the normal lazy evaluation of
2795                  * write access upgrades through soft page faults is
2796                  * undesirable.  Instead, immediately copy any pages that are
2797                  * copy-on-write and enable write access in the physical map.
2798                  */
2799                 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2800                     (entry->protection & VM_PROT_WRITE) != 0 &&
2801                     (old_prot & VM_PROT_WRITE) == 0)
2802                         vm_fault_copy_entry(map, map, entry, entry, NULL);
2803
2804                 /*
2805                  * When restricting access, update the physical map.  Worry
2806                  * about copy-on-write here.
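                 * MASK() withholds VM_PROT_WRITE from copy-on-write
                 * entries so that the first write still faults and
                 * triggers the copy.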
2807                  */
2808                 if ((old_prot & ~entry->protection) != 0) {
2809 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2810                                                         VM_PROT_ALL)
2811                         pmap_protect(map->pmap, entry->start,
2812                             entry->end,
2813                             entry->protection & MASK(entry));
2814 #undef  MASK
2815                 }
2816         }
2817         vm_map_try_merge_entries(map, prev_entry, entry);
2818         vm_map_unlock(map);
2819         return (rv);
2820 }
2821
2822 /*
2823  *      vm_map_madvise:
2824  *
2825  *      This routine traverses a process's map handling the madvise
2826  *      system call.  Advisories are classified as either those affecting
2827  *      the vm_map_entry structure or those affecting the underlying
2828  *      objects.
2829  */
2830 int
2831 vm_map_madvise(
2832         vm_map_t map,
2833         vm_offset_t start,
2834         vm_offset_t end,
2835         int behav)
2836 {
2837         vm_map_entry_t entry, prev_entry;
2838         int rv;
2839         bool modify_map;
2840
2841         /*
2842          * Some madvise calls directly modify the vm_map_entry, in which case
2843          * we need to use an exclusive lock on the map and we need to perform
2844          * various clipping operations.  Otherwise we only need a read-lock
2845          * on the map.
2846          */
2847         switch (behav) {
2848         case MADV_NORMAL:
2849         case MADV_SEQUENTIAL:
2850         case MADV_RANDOM:
2851         case MADV_NOSYNC:
2852         case MADV_AUTOSYNC:
2853         case MADV_NOCORE:
2854         case MADV_CORE:
2855                 if (start == end)
2856                         return (0);
2857                 modify_map = true;
2858                 vm_map_lock(map);
2859                 break;
2860         case MADV_WILLNEED:
2861         case MADV_DONTNEED:
2862         case MADV_FREE:
2863                 if (start == end)
2864                         return (0);
2865                 modify_map = false;
2866                 vm_map_lock_read(map);
2867                 break;
2868         default:
2869                 return (EINVAL);
2870         }
2871
2872         /*
2873          * Locate starting entry and clip if necessary.
2874          */
2875         VM_MAP_RANGE_CHECK(map, start, end);
2876
2877         if (modify_map) {
2878                 /*
2879                  * madvise behaviors that are implemented in the vm_map_entry.
2880                  *
2881                  * We clip the vm_map_entry so that behavioral changes are
2882                  * limited to the specified address range.
2883                  */
2884                 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry);
2885                 if (rv != KERN_SUCCESS) {
2886                         vm_map_unlock(map);
2887                         return (vm_mmap_to_errno(rv));
2888                 }
2889
2890                 for (; entry->start < end; prev_entry = entry,
2891                     entry = vm_map_entry_succ(entry)) {
2892                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2893                                 continue;
2894
2895                         rv = vm_map_clip_end(map, entry, end);
2896                         if (rv != KERN_SUCCESS) {
2897                                 vm_map_unlock(map);
2898                                 return (vm_mmap_to_errno(rv));
2899                         }
2900
2901                         switch (behav) {
2902                         case MADV_NORMAL:
2903                                 vm_map_entry_set_behavior(entry,
2904                                     MAP_ENTRY_BEHAV_NORMAL);
2905                                 break;
2906                         case MADV_SEQUENTIAL:
2907                                 vm_map_entry_set_behavior(entry,
2908                                     MAP_ENTRY_BEHAV_SEQUENTIAL);
2909                                 break;
2910                         case MADV_RANDOM:
2911                                 vm_map_entry_set_behavior(entry,
2912                                     MAP_ENTRY_BEHAV_RANDOM);
2913                                 break;
2914                         case MADV_NOSYNC:
2915                                 entry->eflags |= MAP_ENTRY_NOSYNC;
2916                                 break;
2917                         case MADV_AUTOSYNC:
2918                                 entry->eflags &= ~MAP_ENTRY_NOSYNC;
2919                                 break;
2920                         case MADV_NOCORE:
2921                                 entry->eflags |= MAP_ENTRY_NOCOREDUMP;
2922                                 break;
2923                         case MADV_CORE:
2924                                 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2925                                 break;
2926                         default:
2927                                 break;
2928                         }
2929                         vm_map_try_merge_entries(map, prev_entry, entry);
2930                 }
2931                 vm_map_try_merge_entries(map, prev_entry, entry);
2932                 vm_map_unlock(map);
2933         } else {
2934                 vm_pindex_t pstart, pend;
2935
2936                 /*
2937                  * madvise behaviors that are implemented in the underlying
2938                  * vm_object.
2939                  *
2940                  * Since we don't clip the vm_map_entry, we have to clip
2941                  * the vm_object pindex and count.
2942                  */
2943                 if (!vm_map_lookup_entry(map, start, &entry))
2944                         entry = vm_map_entry_succ(entry);
2945                 for (; entry->start < end;
2946                     entry = vm_map_entry_succ(entry)) {
2947                         vm_offset_t useEnd, useStart;
2948
2949                         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2950                                 continue;
2951
2952                         /*
2953                          * MADV_FREE would otherwise rewind time to
2954                          * the creation of the shadow object.  Because
2955                          * we hold the VM map read-locked, neither the
2956                          * entry's object nor the presence of a
2957                          * backing object can change.
2958                          */
2959                         if (behav == MADV_FREE &&
2960                             entry->object.vm_object != NULL &&
2961                             entry->object.vm_object->backing_object != NULL)
2962                                 continue;
2963
2964                         pstart = OFF_TO_IDX(entry->offset);
2965                         pend = pstart + atop(entry->end - entry->start);
2966                         useStart = entry->start;
2967                         useEnd = entry->end;
2968
2969                         if (entry->start < start) {
2970                                 pstart += atop(start - entry->start);
2971                                 useStart = start;
2972                         }
2973                         if (entry->end > end) {
2974                                 pend -= atop(entry->end - end);
2975                                 useEnd = end;
2976                         }
2977
2978                         if (pstart >= pend)
2979                                 continue;
2980
2981                         /*
2982                          * Perform the pmap_advise() before clearing
2983                          * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2984                          * concurrent pmap operation, such as pmap_remove(),
2985                          * could clear a reference in the pmap and set
2986                          * PGA_REFERENCED on the page before the pmap_advise()
2987                          * had completed.  Consequently, the page would appear
2988                          * referenced based upon an old reference that
2989                          * occurred before this pmap_advise() ran.
2990                          */
2991                         if (behav == MADV_DONTNEED || behav == MADV_FREE)
2992                                 pmap_advise(map->pmap, useStart, useEnd,
2993                                     behav);
2994
2995                         vm_object_madvise(entry->object.vm_object, pstart,
2996                             pend, behav);
2997
2998                         /*
2999                          * Pre-populate paging structures in the
3000                          * WILLNEED case.  For wired entries, the
3001                          * paging structures are already populated.
3002                          */
3003                         if (behav == MADV_WILLNEED &&
3004                             entry->wired_count == 0) {
3005                                 vm_map_pmap_enter(map,
3006                                     useStart,
3007                                     entry->protection,
3008                                     entry->object.vm_object,
3009                                     pstart,
3010                                     ptoa(pend - pstart),
3011                                     MAP_PREFAULT_MADVISE
3012                                 );
3013                         }
3014                 }
3015                 vm_map_unlock_read(map);
3016         }
3017         return (0);
3018 }
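
/*
 * Illustrative use (a sketch, not taken from this file): the madvise(2)
 * path is expected to call this routine on the calling process's map,
 * roughly as
 *
 *	error = vm_map_madvise(&td->td_proc->p_vmspace->vm_map,
 *	    addr, addr + len, behav);
 *
 * after the address range has been validated and page-aligned.  Note
 * that, unlike most vm_map routines, this one returns errno values.
 */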
3019
3020 /*
3021  *      vm_map_inherit:
3022  *
3023  *      Sets the inheritance of the specified address
3024  *      range in the target map.  Inheritance
3025  *      affects how the map will be shared with
3026  *      child maps at the time of vmspace_fork.
3027  */
3028 int
3029 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
3030                vm_inherit_t new_inheritance)
3031 {
3032         vm_map_entry_t entry, lentry, prev_entry, start_entry;
3033         int rv;
3034
3035         switch (new_inheritance) {
3036         case VM_INHERIT_NONE:
3037         case VM_INHERIT_COPY:
3038         case VM_INHERIT_SHARE:
3039         case VM_INHERIT_ZERO:
3040                 break;
3041         default:
3042                 return (KERN_INVALID_ARGUMENT);
3043         }
3044         if (start == end)
3045                 return (KERN_SUCCESS);
3046         vm_map_lock(map);
3047         VM_MAP_RANGE_CHECK(map, start, end);
3048         rv = vm_map_lookup_clip_start(map, start, &start_entry, &prev_entry);
3049         if (rv != KERN_SUCCESS)
3050                 goto unlock;
3051         if (vm_map_lookup_entry(map, end - 1, &lentry)) {
3052                 rv = vm_map_clip_end(map, lentry, end);
3053                 if (rv != KERN_SUCCESS)
3054                         goto unlock;
3055         }
3056         if (new_inheritance == VM_INHERIT_COPY) {
3057                 for (entry = start_entry; entry->start < end;
3058                     prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3059                         if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK)
3060                             != 0) {
3061                                 rv = KERN_INVALID_ARGUMENT;
3062                                 goto unlock;
3063                         }
3064                 }
3065         }
3066         for (entry = start_entry; entry->start < end; prev_entry = entry,
3067             entry = vm_map_entry_succ(entry)) {
3068                 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx",
3069                     entry, (uintmax_t)entry->end, (uintmax_t)end));
3070                 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
3071                     new_inheritance != VM_INHERIT_ZERO)
3072                         entry->inheritance = new_inheritance;
3073                 vm_map_try_merge_entries(map, prev_entry, entry);
3074         }
3075         vm_map_try_merge_entries(map, prev_entry, entry);
3076 unlock:
3077         vm_map_unlock(map);
3078         return (rv);
3079 }
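
/*
 * Illustrative use (a sketch, not taken from this file): minherit(2) is
 * expected to call this routine roughly as
 *
 *	rv = vm_map_inherit(&td->td_proc->p_vmspace->vm_map,
 *	    addr, addr + size, inherit);
 *	error = vm_mmap_to_errno(rv);
 *
 * with addr and size already page-aligned.
 */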
3080
3081 /*
3082  *      vm_map_entry_in_transition:
3083  *
3084  *      Release the map lock and sleep until the entry is no longer in
3085  *      transition.  Wake up and reacquire the map lock.  If the map changed
3086  *      while another thread held the lock, look up a possibly-changed entry
3087  *      at or after the 'start' position of the old entry.
3088  */
3089 static vm_map_entry_t
3090 vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start,
3091     vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry)
3092 {
3093         vm_map_entry_t entry;
3094         vm_offset_t start;
3095         u_int last_timestamp;
3096
3097         VM_MAP_ASSERT_LOCKED(map);
3098         KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3099             ("not in-transition map entry %p", in_entry));
3100         /*
3101          * We have not yet clipped the entry.
3102          */
3103         start = MAX(in_start, in_entry->start);
3104         in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3105         last_timestamp = map->timestamp;
3106         if (vm_map_unlock_and_wait(map, 0)) {
3107                 /*
3108                  * Allow interruption of user wiring/unwiring?
3109                  */
3110         }
3111         vm_map_lock(map);
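        /*
         * The timestamp is advanced on every write-lock acquisition, so
         * finding it advanced by exactly one means that only our own
         * relock occurred and in_entry is still valid.
         */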
3112         if (last_timestamp + 1 == map->timestamp)
3113                 return (in_entry);
3114
3115         /*
3116          * Look again for the entry because the map was modified while it was
3117          * unlocked.  Specifically, the entry may have been clipped, merged, or
3118          * deleted.
3119          */
3120         if (!vm_map_lookup_entry(map, start, &entry)) {
3121                 if (!holes_ok) {
3122                         *io_end = start;
3123                         return (NULL);
3124                 }
3125                 entry = vm_map_entry_succ(entry);
3126         }
3127         return (entry);
3128 }
3129
3130 /*
3131  *      vm_map_unwire:
3132  *
3133  *      Implements both kernel and user unwiring.
3134  */
3135 int
3136 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
3137     int flags)
3138 {
3139         vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3140         int rv;
3141         bool holes_ok, need_wakeup, user_unwire;
3142
3143         if (start == end)
3144                 return (KERN_SUCCESS);
3145         holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
3146         user_unwire = (flags & VM_MAP_WIRE_USER) != 0;
3147         vm_map_lock(map);
3148         VM_MAP_RANGE_CHECK(map, start, end);
3149         if (!vm_map_lookup_entry(map, start, &first_entry)) {
3150                 if (holes_ok)
3151                         first_entry = vm_map_entry_succ(first_entry);
3152                 else {
3153                         vm_map_unlock(map);
3154                         return (KERN_INVALID_ADDRESS);
3155                 }
3156         }
3157         rv = KERN_SUCCESS;
3158         for (entry = first_entry; entry->start < end; entry = next_entry) {
3159                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3160                         /*
3161                          * We have not yet clipped the entry.
3162                          */
3163                         next_entry = vm_map_entry_in_transition(map, start,
3164                             &end, holes_ok, entry);
3165                         if (next_entry == NULL) {
3166                                 if (entry == first_entry) {
3167                                         vm_map_unlock(map);
3168                                         return (KERN_INVALID_ADDRESS);
3169                                 }
3170                                 rv = KERN_INVALID_ADDRESS;
3171                                 break;
3172                         }
3173                         first_entry = (entry == first_entry) ?
3174                             next_entry : NULL;
3175                         continue;
3176                 }
3177                 rv = vm_map_clip_start(map, entry, start);
3178                 if (rv != KERN_SUCCESS)
3179                         break;
3180                 rv = vm_map_clip_end(map, entry, end);
3181                 if (rv != KERN_SUCCESS)
3182                         break;
3183
3184                 /*
3185                  * Mark the entry in case the map lock is released.  (See
3186                  * above.)
3187                  */
3188                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3189                     entry->wiring_thread == NULL,
3190                     ("owned map entry %p", entry));
3191                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3192                 entry->wiring_thread = curthread;
3193                 next_entry = vm_map_entry_succ(entry);
3194                 /*
3195                  * Check the map for holes in the specified region.
3196                  * If holes_ok, skip this check.
3197                  */
3198                 if (!holes_ok &&
3199                     entry->end < end && next_entry->start > entry->end) {
3200                         end = entry->end;
3201                         rv = KERN_INVALID_ADDRESS;
3202                         break;
3203                 }
3204                 /*
3205                  * If system unwiring, require that the entry is system wired.
3206                  */
3207                 if (!user_unwire &&
3208                     vm_map_entry_system_wired_count(entry) == 0) {
3209                         end = entry->end;
3210                         rv = KERN_INVALID_ARGUMENT;
3211                         break;
3212                 }
3213         }
3214         need_wakeup = false;
3215         if (first_entry == NULL &&
3216             !vm_map_lookup_entry(map, start, &first_entry)) {
3217                 KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
3218                 prev_entry = first_entry;
3219                 entry = vm_map_entry_succ(first_entry);
3220         } else {
3221                 prev_entry = vm_map_entry_pred(first_entry);
3222                 entry = first_entry;
3223         }
3224         for (; entry->start < end;
3225             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3226                 /*
3227                  * If holes_ok was specified, an empty
3228                  * space in the unwired region could have been mapped
3229                  * while the map lock was dropped for draining
3230                  * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
3231                  * could be simultaneously wiring this new mapping
3232                  * entry.  Detect these cases and skip any entries
3233                  * not marked as in transition by us.
3234                  */
3235                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3236                     entry->wiring_thread != curthread) {
3237                         KASSERT(holes_ok,
3238                             ("vm_map_unwire: !HOLESOK and new/changed entry"));
3239                         continue;
3240                 }
3241
3242                 if (rv == KERN_SUCCESS && (!user_unwire ||
3243                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
3244                         if (entry->wired_count == 1)
3245                                 vm_map_entry_unwire(map, entry);
3246                         else
3247                                 entry->wired_count--;
3248                         if (user_unwire)
3249                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3250                 }
3251                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3252                     ("vm_map_unwire: in-transition flag missing %p", entry));
3253                 KASSERT(entry->wiring_thread == curthread,
3254                     ("vm_map_unwire: alien wire %p", entry));
3255                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
3256                 entry->wiring_thread = NULL;
3257                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3258                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3259                         need_wakeup = true;
3260                 }
3261                 vm_map_try_merge_entries(map, prev_entry, entry);
3262         }
3263         vm_map_try_merge_entries(map, prev_entry, entry);
3264         vm_map_unlock(map);
3265         if (need_wakeup)
3266                 vm_map_wakeup(map);
3267         return (rv);
3268 }
3269
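/*
 * Release npages of previously charged user-wired pages from the global
 * count.
 */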
3270 static void
3271 vm_map_wire_user_count_sub(u_long npages)
3272 {
3273
3274         atomic_subtract_long(&vm_user_wire_count, npages);
3275 }
3276
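/*
 * Try to charge npages against the global limit on user-wired pages
 * (vm_page_max_user_wired).  Returns false, without changing the count,
 * if the limit would be exceeded.
 */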
3277 static bool
3278 vm_map_wire_user_count_add(u_long npages)
3279 {
3280         u_long wired;
3281
3282         wired = vm_user_wire_count;
3283         do {
3284                 if (npages + wired > vm_page_max_user_wired)
3285                         return (false);
3286         } while (!atomic_fcmpset_long(&vm_user_wire_count, &wired,
3287             npages + wired));
3288
3289         return (true);
3290 }
3291
3292 /*
3293  *      vm_map_wire_entry_failure:
3294  *
3295  *      Handle a wiring failure on the given entry.
3296  *
3297  *      The map should be locked.
3298  */
3299 static void
3300 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
3301     vm_offset_t failed_addr)
3302 {
3303
3304         VM_MAP_ASSERT_LOCKED(map);
3305         KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
3306             entry->wired_count == 1,
3307             ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
3308         KASSERT(failed_addr < entry->end,
3309             ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
3310
3311         /*
3312          * If any pages at the start of this entry were successfully wired,
3313          * then unwire them.
3314          */
3315         if (failed_addr > entry->start) {
3316                 pmap_unwire(map->pmap, entry->start, failed_addr);
3317                 vm_object_unwire(entry->object.vm_object, entry->offset,
3318                     failed_addr - entry->start, PQ_ACTIVE);
3319         }
3320
3321         /*
3322          * Assign an out-of-range value to represent the failure to wire this
3323          * entry.
3324          */
3325         entry->wired_count = -1;
3326 }
3327
3328 int
3329 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3330 {
3331         int rv;
3332
3333         vm_map_lock(map);
3334         rv = vm_map_wire_locked(map, start, end, flags);
3335         vm_map_unlock(map);
3336         return (rv);
3337 }
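
/*
 * Illustrative use (a sketch, not taken from this file): mlock(2)-style
 * wiring of a user range would look roughly like
 *
 *	rv = vm_map_wire(map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *
 * with the corresponding unwiring done through vm_map_unwire() and the
 * same flags.
 */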
3338
3339 /*
3340  *      vm_map_wire_locked:
3341  *
3342  *      Implements both kernel and user wiring.  Returns with the map locked;
3343  *      the map lock may be dropped and reacquired during the call.
3344  */
3345 int
3346 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3347 {
3348         vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3349         vm_offset_t faddr, saved_end, saved_start;
3350         u_long incr, npages;
3351         u_int bidx, last_timestamp;
3352         int rv;
3353         bool holes_ok, need_wakeup, user_wire;
3354         vm_prot_t prot;
3355
3356         VM_MAP_ASSERT_LOCKED(map);
3357
3358         if (start == end)
3359                 return (KERN_SUCCESS);
3360         prot = 0;
3361         if (flags & VM_MAP_WIRE_WRITE)
3362                 prot |= VM_PROT_WRITE;
3363         holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
3364         user_wire = (flags & VM_MAP_WIRE_USER) != 0;
3365         VM_MAP_RANGE_CHECK(map, start, end);
3366         if (!vm_map_lookup_entry(map, start, &first_entry)) {
3367                 if (holes_ok)
3368                         first_entry = vm_map_entry_succ(first_entry);
3369                 else
3370                         return (KERN_INVALID_ADDRESS);
3371         }
3372         for (entry = first_entry; entry->start < end; entry = next_entry) {
3373                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3374                         /*
3375                          * We have not yet clipped the entry.
3376                          */
3377                         next_entry = vm_map_entry_in_transition(map, start,
3378                             &end, holes_ok, entry);
3379                         if (next_entry == NULL) {
3380                                 if (entry == first_entry)
3381                                         return (KERN_INVALID_ADDRESS);
3382                                 rv = KERN_INVALID_ADDRESS;
3383                                 goto done;
3384                         }
3385                         first_entry = (entry == first_entry) ?
3386                             next_entry : NULL;
3387                         continue;
3388                 }
3389                 rv = vm_map_clip_start(map, entry, start);
3390                 if (rv != KERN_SUCCESS)
3391                         goto done;
3392                 rv = vm_map_clip_end(map, entry, end);
3393                 if (rv != KERN_SUCCESS)
3394                         goto done;
3395
3396                 /*
3397                  * Mark the entry in case the map lock is released.  (See
3398                  * above.)
3399                  */
3400                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3401                     entry->wiring_thread == NULL,
3402                     ("owned map entry %p", entry));
3403                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3404                 entry->wiring_thread = curthread;
3405                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
3406                     || (entry->protection & prot) != prot) {
3407                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
3408                         if (!holes_ok) {
3409                                 end = entry->end;
3410                                 rv = KERN_INVALID_ADDRESS;
3411                                 goto done;
3412                         }
3413                 } else if (entry->wired_count == 0) {
3414                         entry->wired_count++;
3415
3416                         npages = atop(entry->end - entry->start);
3417                         if (user_wire && !vm_map_wire_user_count_add(npages)) {
3418                                 vm_map_wire_entry_failure(map, entry,
3419                                     entry->start);
3420                                 end = entry->end;
3421                                 rv = KERN_RESOURCE_SHORTAGE;
3422                                 goto done;
3423                         }
3424
3425                         /*
3426                          * Release the map lock, relying on the in-transition
3427                          * mark.  Mark the map busy for fork.
3428                          */
3429                         saved_start = entry->start;
3430                         saved_end = entry->end;
3431                         last_timestamp = map->timestamp;
3432                         bidx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK)
3433                             >> MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
3434                         incr = pagesizes[bidx];
3435                         vm_map_busy(map);
3436                         vm_map_unlock(map);
3437
3438                         for (faddr = saved_start; faddr < saved_end;
3439                             faddr += incr) {
3440                                 /*
3441                                  * Simulate a fault to get the page and enter
3442                                  * it into the physical map.
3443                                  */
3444                                 rv = vm_fault(map, faddr, VM_PROT_NONE,
3445                                     VM_FAULT_WIRE, NULL);
3446                                 if (rv != KERN_SUCCESS)
3447                                         break;
3448                         }
3449                         vm_map_lock(map);
3450                         vm_map_unbusy(map);
3451                         if (last_timestamp + 1 != map->timestamp) {
3452                                 /*
3453                                  * Look again for the entry because the map was
3454                                  * modified while it was unlocked.  The entry
3455                                  * may have been clipped, but NOT merged or
3456                                  * deleted.
3457                                  */
3458                                 if (!vm_map_lookup_entry(map, saved_start,
3459                                     &next_entry))
3460                                         KASSERT(false,
3461                                             ("vm_map_wire: lookup failed"));
3462                                 first_entry = (entry == first_entry) ?
3463                                     next_entry : NULL;
3464                                 for (entry = next_entry; entry->end < saved_end;
3465                                     entry = vm_map_entry_succ(entry)) {
3466                                         /*
3467                                          * In case of failure, handle entries
3468                                          * that were not fully wired here;
3469                                          * fully wired entries are handled
3470                                          * later.
3471                                          */
3472                                         if (rv != KERN_SUCCESS &&
3473                                             faddr < entry->end)
3474                                                 vm_map_wire_entry_failure(map,
3475                                                     entry, faddr);
3476                                 }
3477                         }
3478                         if (rv != KERN_SUCCESS) {
3479                                 vm_map_wire_entry_failure(map, entry, faddr);
3480                                 if (user_wire)
3481                                         vm_map_wire_user_count_sub(npages);
3482                                 end = entry->end;
3483                                 goto done;
3484                         }
3485                 } else if (!user_wire ||
3486                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3487                         entry->wired_count++;
3488                 }
3489                 /*
3490                  * Check the map for holes in the specified region.
3491                  * If holes_ok was specified, skip this check.
3492                  */
3493                 next_entry = vm_map_entry_succ(entry);
3494                 if (!holes_ok &&
3495                     entry->end < end && next_entry->start > entry->end) {
3496                         end = entry->end;
3497                         rv = KERN_INVALID_ADDRESS;
3498                         goto done;
3499                 }
3500         }
3501         rv = KERN_SUCCESS;
3502 done:
3503         need_wakeup = false;
3504         if (first_entry == NULL &&
3505             !vm_map_lookup_entry(map, start, &first_entry)) {
3506                 KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
3507                 prev_entry = first_entry;
3508                 entry = vm_map_entry_succ(first_entry);
3509         } else {
3510                 prev_entry = vm_map_entry_pred(first_entry);
3511                 entry = first_entry;
3512         }
3513         for (; entry->start < end;
3514             prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3515                 /*
3516                  * If holes_ok was specified, an empty
3517                  * space in the unwired region could have been mapped
3518                  * while the map lock was dropped for faulting in the
3519                  * pages or draining MAP_ENTRY_IN_TRANSITION.
3520                  * Moreover, another thread could be simultaneously
3521                  * wiring this new mapping entry.  Detect these cases
3522                  * and skip any entries not marked as in transition by us.
3523                  *
3524                  * Another way to get an entry not marked with
3525                  * MAP_ENTRY_IN_TRANSITION is after failed clipping,
3526                  * which set rv to KERN_INVALID_ARGUMENT.
3527                  */
3528                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3529                     entry->wiring_thread != curthread) {
3530                         KASSERT(holes_ok || rv == KERN_INVALID_ARGUMENT,
3531                             ("vm_map_wire: !HOLESOK and new/changed entry"));
3532                         continue;
3533                 }
3534
3535                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) {
3536                         /* do nothing */
3537                 } else if (rv == KERN_SUCCESS) {
3538                         if (user_wire)
3539                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
3540                 } else if (entry->wired_count == -1) {
3541                         /*
3542                          * Wiring failed on this entry.  Thus, unwiring is
3543                          * unnecessary.
3544                          */
3545                         entry->wired_count = 0;
3546                 } else if (!user_wire ||
3547                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3548                         /*
3549                          * Undo the wiring.  Wiring succeeded on this entry
3550                          * but failed on a later entry.  
3551                          */
3552                         if (entry->wired_count == 1) {
3553                                 vm_map_entry_unwire(map, entry);
3554                                 if (user_wire)
3555                                         vm_map_wire_user_count_sub(
3556                                             atop(entry->end - entry->start));
3557                         } else
3558                                 entry->wired_count--;
3559                 }
3560                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3561                     ("vm_map_wire: in-transition flag missing %p", entry));
3562                 KASSERT(entry->wiring_thread == curthread,
3563                     ("vm_map_wire: alien wire %p", entry));
3564                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
3565                     MAP_ENTRY_WIRE_SKIPPED);
3566                 entry->wiring_thread = NULL;
3567                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3568                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3569                         need_wakeup = true;
3570                 }
3571                 vm_map_try_merge_entries(map, prev_entry, entry);
3572         }
3573         vm_map_try_merge_entries(map, prev_entry, entry);
3574         if (need_wakeup)
3575                 vm_map_wakeup(map);
3576         return (rv);
3577 }
3578
3579 /*
3580  * vm_map_sync
3581  *
3582  * Push any dirty cached pages in the address range to their pager.
3583  * If syncio is TRUE, dirty pages are written synchronously.
3584  * If invalidate is TRUE, any cached pages are freed as well.
3585  *
3586  * If the size of the region from start to end is zero, we are
3587  * supposed to flush all modified pages within the region containing
3588  * start.  Unfortunately, a region can be split or coalesced with
3589  * neighboring regions, making it difficult to determine what the
3590  * original region was.  Therefore, we approximate this requirement by
3591  * flushing the current region containing start.
3592  *
3593  * Returns an error if any part of the specified range is not mapped.
3594  */
3595 int
3596 vm_map_sync(
3597         vm_map_t map,
3598         vm_offset_t start,
3599         vm_offset_t end,
3600         boolean_t syncio,
3601         boolean_t invalidate)
3602 {
3603         vm_map_entry_t entry, first_entry, next_entry;
3604         vm_size_t size;
3605         vm_object_t object;
3606         vm_ooffset_t offset;
3607         unsigned int last_timestamp;
3608         int bdry_idx;
3609         boolean_t failed;
3610
3611         vm_map_lock_read(map);
3612         VM_MAP_RANGE_CHECK(map, start, end);
3613         if (!vm_map_lookup_entry(map, start, &first_entry)) {
3614                 vm_map_unlock_read(map);
3615                 return (KERN_INVALID_ADDRESS);
3616         } else if (start == end) {
3617                 start = first_entry->start;
3618                 end = first_entry->end;
3619         }
3620
3621         /*
3622          * Make a first pass to check for user-wired memory, holes,
3623          * and partial invalidation of largepage mappings.
3624          */
3625         for (entry = first_entry; entry->start < end; entry = next_entry) {
3626                 if (invalidate) {
3627                         if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) {
3628                                 vm_map_unlock_read(map);
3629                                 return (KERN_INVALID_ARGUMENT);
3630                         }
3631                         bdry_idx = (entry->eflags &
3632                             MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
3633                             MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
3634                         if (bdry_idx != 0 &&
3635                             ((start & (pagesizes[bdry_idx] - 1)) != 0 ||
3636                             (end & (pagesizes[bdry_idx] - 1)) != 0)) {
3637                                 vm_map_unlock_read(map);
3638                                 return (KERN_INVALID_ARGUMENT);
3639                         }
3640                 }
3641                 next_entry = vm_map_entry_succ(entry);
3642                 if (end > entry->end &&
3643                     entry->end != next_entry->start) {
3644                         vm_map_unlock_read(map);
3645                         return (KERN_INVALID_ADDRESS);
3646                 }
3647         }
3648
3649         if (invalidate)
3650                 pmap_remove(map->pmap, start, end);
3651         failed = FALSE;
3652
3653         /*
3654          * Make a second pass, cleaning/uncaching pages from the indicated
3655          * objects as we go.
3656          */
3657         for (entry = first_entry; entry->start < end;) {
3658                 offset = entry->offset + (start - entry->start);
3659                 size = (end <= entry->end ? end : entry->end) - start;
3660                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
3661                         vm_map_t smap;
3662                         vm_map_entry_t tentry;
3663                         vm_size_t tsize;
3664
3665                         smap = entry->object.sub_map;
3666                         vm_map_lock_read(smap);
3667                         (void) vm_map_lookup_entry(smap, offset, &tentry);
3668                         tsize = tentry->end - offset;
3669                         if (tsize < size)
3670                                 size = tsize;
3671                         object = tentry->object.vm_object;
3672                         offset = tentry->offset + (offset - tentry->start);
3673                         vm_map_unlock_read(smap);
3674                 } else {
3675                         object = entry->object.vm_object;
3676                 }
3677                 vm_object_reference(object);
3678                 last_timestamp = map->timestamp;
3679                 vm_map_unlock_read(map);
3680                 if (!vm_object_sync(object, offset, size, syncio, invalidate))
3681                         failed = TRUE;
3682                 start += size;
3683                 vm_object_deallocate(object);
3684                 vm_map_lock_read(map);
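                /*
                 * If no writer modified the map while it was unlocked,
                 * the current entry is still valid and we advance to its
                 * successor.  Otherwise, look up the entry containing the
                 * updated start; if start now falls in a hole, take the
                 * successor of the preceding entry.
                 */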
3685                 if (last_timestamp == map->timestamp ||
3686                     !vm_map_lookup_entry(map, start, &entry))
3687                         entry = vm_map_entry_succ(entry);
3688         }
3689
3690         vm_map_unlock_read(map);
3691         return (failed ? KERN_FAILURE : KERN_SUCCESS);
3692 }
3693
3694 /*
3695  *      vm_map_entry_unwire:    [ internal use only ]
3696  *
3697  *      Make the region specified by this entry pageable.
3698  *
3699  *      The map in question should be locked.
3700  *      [This is the reason for this routine's existence.]
3701  */
3702 static void
3703 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3704 {
3705         vm_size_t size;
3706
3707         VM_MAP_ASSERT_LOCKED(map);
3708         KASSERT(entry->wired_count > 0,
3709             ("vm_map_entry_unwire: entry %p isn't wired", entry));
3710
3711         size = entry->end - entry->start;
3712         if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
3713                 vm_map_wire_user_count_sub(atop(size));
3714         pmap_unwire(map->pmap, entry->start, entry->end);
3715         vm_object_unwire(entry->object.vm_object, entry->offset, size,
3716             PQ_ACTIVE);
3717         entry->wired_count = 0;
3718 }
3719
3720 static void
3721 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
3722 {
3723
3724         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3725                 vm_object_deallocate(entry->object.vm_object);
3726         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
3727 }
3728
3729 /*
3730  *      vm_map_entry_delete:    [ internal use only ]
3731  *
3732  *      Deallocate the given entry from the target map.
3733  */
3734 static void
3735 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3736 {
3737         vm_object_t object;
3738         vm_pindex_t offidxstart, offidxend, size1;
3739         vm_size_t size;
3740
3741         vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
3742         object = entry->object.vm_object;
3743
3744         if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3745                 MPASS(entry->cred == NULL);
3746                 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3747                 MPASS(object == NULL);
3748                 vm_map_entry_deallocate(entry, map->system_map);
3749                 return;
3750         }
3751
3752         size = entry->end - entry->start;
3753         map->size -= size;
3754
3755         if (entry->cred != NULL) {
3756                 swap_release_by_cred(size, entry->cred);
3757                 crfree(entry->cred);
3758         }
3759
3760         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) {
3761                 entry->object.vm_object = NULL;
3762         } else if ((object->flags & OBJ_ANON) != 0 ||
3763             object == kernel_object) {
3764                 KASSERT(entry->cred == NULL || object->cred == NULL ||
3765                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3766                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
3767                 offidxstart = OFF_TO_IDX(entry->offset);
3768                 offidxend = offidxstart + atop(size);
3769                 VM_OBJECT_WLOCK(object);
3770                 if (object->ref_count != 1 &&
3771                     ((object->flags & OBJ_ONEMAPPING) != 0 ||
3772                     object == kernel_object)) {
3773                         vm_object_collapse(object);
3774
3775                         /*
3776                          * The option OBJPR_NOTMAPPED can be passed here
3777                          * because vm_map_delete() already performed
3778                          * pmap_remove() on the only mapping to this range
3779                          * of pages. 
3780                          */
3781                         vm_object_page_remove(object, offidxstart, offidxend,
3782                             OBJPR_NOTMAPPED);
3783                         if (offidxend >= object->size &&
3784                             offidxstart < object->size) {
3785                                 size1 = object->size;
3786                                 object->size = offidxstart;
3787                                 if (object->cred != NULL) {
3788                                         size1 -= object->size;
3789                                         KASSERT(object->charge >= ptoa(size1),
3790                                             ("object %p charge < 0", object));
3791                                         swap_release_by_cred(ptoa(size1),
3792                                             object->cred);
3793                                         object->charge -= ptoa(size1);
3794                                 }
3795                         }
3796                 }
3797                 VM_OBJECT_WUNLOCK(object);
3798         }
3799         if (map->system_map)
3800                 vm_map_entry_deallocate(entry, TRUE);
3801         else {
3802                 entry->defer_next = curthread->td_map_def_user;
3803                 curthread->td_map_def_user = entry;
3804         }
3805 }
3806
3807 /*
3808  *      vm_map_delete:  [ internal use only ]
3809  *
3810  *      Deallocates the given address range from the target
3811  *      map.
3812  */
3813 int
3814 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
3815 {
3816         vm_map_entry_t entry, next_entry, scratch_entry;
3817         int rv;
3818
3819         VM_MAP_ASSERT_LOCKED(map);
3820
3821         if (start == end)
3822                 return (KERN_SUCCESS);
3823
3824         /*
3825          * Find the start of the region, and clip it.
3826          * Step through all entries in this region.
3827          */
3828         rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry);
3829         if (rv != KERN_SUCCESS)
3830                 return (rv);
3831         for (; entry->start < end; entry = next_entry) {
3832                 /*
3833                  * Wait for wiring or unwiring of an entry to complete.
3834                  * Also wait for any system wirings to disappear on
3835                  * user maps.
3836                  */
3837                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
3838                     (vm_map_pmap(map) != kernel_pmap &&
3839                     vm_map_entry_system_wired_count(entry) != 0)) {
3840                         unsigned int last_timestamp;
3841                         vm_offset_t saved_start;
3842
3843                         saved_start = entry->start;
3844                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3845                         last_timestamp = map->timestamp;
3846                         (void) vm_map_unlock_and_wait(map, 0);
3847                         vm_map_lock(map);
3848                         if (last_timestamp + 1 != map->timestamp) {
3849                                 /*
3850                                  * Look again for the entry because the map was
3851                                  * modified while it was unlocked.
3852                                  * Specifically, the entry may have been
3853                                  * clipped, merged, or deleted.
3854                                  */
3855                                 rv = vm_map_lookup_clip_start(map, saved_start,
3856                                     &next_entry, &scratch_entry);
3857                                 if (rv != KERN_SUCCESS)
3858                                         break;
3859                         } else
3860                                 next_entry = entry;
3861                         continue;
3862                 }
3863
3864                 /* XXXKIB or delete to the upper superpage boundary ? */
3865                 rv = vm_map_clip_end(map, entry, end);
3866                 if (rv != KERN_SUCCESS)
3867                         break;
3868                 next_entry = vm_map_entry_succ(entry);
3869
3870                 /*
3871                  * Unwire before removing addresses from the pmap; otherwise,
3872                  * unwiring will put the entries back in the pmap.
3873                  */
3874                 if (entry->wired_count != 0)
3875                         vm_map_entry_unwire(map, entry);
3876
3877                 /*
3878                  * Remove mappings for the pages, but only if the
3879                  * mappings could exist.  For instance, it does not
3880                  * make sense to call pmap_remove() for guard entries.
3881                  */
3882                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
3883                     entry->object.vm_object != NULL)
3884                         pmap_remove(map->pmap, entry->start, entry->end);
3885
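                /*
                 * If the deleted range ends at the placement hint for
                 * anonymous mappings, pull the hint back to the start of
                 * the freed range.
                 */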
3886                 if (entry->end == map->anon_loc)
3887                         map->anon_loc = entry->start;
3888
3889                 /*
3890                  * Delete the entry only after removing all pmap
3891                  * entries pointing to its pages.  (Otherwise, its
3892                  * page frames may be reallocated, and any modify bits
3893                  * will be set in the wrong object!)
3894                  */
3895                 vm_map_entry_delete(map, entry);
3896         }
3897         return (rv);
3898 }
3899
3900 /*
3901  *      vm_map_remove:
3902  *
3903  *      Remove the given address range from the target map.
3904  *      This is the exported form of vm_map_delete.
3905  */
3906 int
3907 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3908 {
3909         int result;
3910
3911         vm_map_lock(map);
3912         VM_MAP_RANGE_CHECK(map, start, end);
3913         result = vm_map_delete(map, start, end);
3914         vm_map_unlock(map);
3915         return (result);
3916 }
3917
3918 /*
3919  *      vm_map_check_protection:
3920  *
3921  *      Assert that the target map allows the specified privilege on the
3922  *      entire address region given.  The entire region must be allocated.
3923  *
3924  *      WARNING!  This code does not and should not check whether the
3925  *      contents of the region are accessible.  For example, a smaller file
3926  *      might be mapped into a larger address space.
3927  *
3928  *      NOTE!  This code is also called by munmap().
3929  *
3930  *      The map must be locked.  A read lock is sufficient.
3931  */
3932 boolean_t
3933 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3934                         vm_prot_t protection)
3935 {
3936         vm_map_entry_t entry;
3937         vm_map_entry_t tmp_entry;
3938
3939         if (!vm_map_lookup_entry(map, start, &tmp_entry))
3940                 return (FALSE);
3941         entry = tmp_entry;
3942
3943         while (start < end) {
3944                 /*
3945                  * No holes allowed!
3946                  */
3947                 if (start < entry->start)
3948                         return (FALSE);
3949                 /*
3950                  * Check protection associated with entry.
3951                  */
3952                 if ((entry->protection & protection) != protection)
3953                         return (FALSE);
3954                 /* go to next entry */
3955                 start = entry->end;
3956                 entry = vm_map_entry_succ(entry);
3957         }
3958         return (TRUE);
3959 }
3960
3961 /*
3962  *
3963  *      vm_map_copy_swap_object:
3964  *
3965  *      Copies a swap-backed object from an existing map entry to a
3966  *      new one.  Carries forward the swap charge.  May change the
3967  *      src object on return.
3968  */
3969 static void
3970 vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry,
3971     vm_offset_t size, vm_ooffset_t *fork_charge)
3972 {
3973         vm_object_t src_object;
3974         struct ucred *cred;
3975         int charged;
3976
3977         src_object = src_entry->object.vm_object;
3978         charged = ENTRY_CHARGED(src_entry);
3979         if ((src_object->flags & OBJ_ANON) != 0) {
3980                 VM_OBJECT_WLOCK(src_object);
3981                 vm_object_collapse(src_object);
3982                 if ((src_object->flags & OBJ_ONEMAPPING) != 0) {
3983                         vm_object_split(src_entry);
3984                         src_object = src_entry->object.vm_object;
3985                 }
3986                 vm_object_reference_locked(src_object);
3987                 vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3988                 VM_OBJECT_WUNLOCK(src_object);
3989         } else
3990                 vm_object_reference(src_object);
3991         if (src_entry->cred != NULL &&
3992             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3993                 KASSERT(src_object->cred == NULL,
3994                     ("OVERCOMMIT: vm_map_copy_swap_object: cred %p",
3995                      src_object));
3996                 src_object->cred = src_entry->cred;
3997                 src_object->charge = size;
3998         }
3999         dst_entry->object.vm_object = src_object;
4000         if (charged) {
4001                 cred = curthread->td_ucred;
4002                 crhold(cred);
4003                 dst_entry->cred = cred;
4004                 *fork_charge += size;
4005                 if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
4006                         crhold(cred);
4007                         src_entry->cred = cred;
4008                         *fork_charge += size;
4009                 }
4010         }
4011 }
4012
4013 /*
4014  *      vm_map_copy_entry:
4015  *
4016  *      Copies the contents of the source entry to the destination
4017  *      entry.  The entries *must* be aligned properly.
4018  */
4019 static void
4020 vm_map_copy_entry(
4021         vm_map_t src_map,
4022         vm_map_t dst_map,
4023         vm_map_entry_t src_entry,
4024         vm_map_entry_t dst_entry,
4025         vm_ooffset_t *fork_charge)
4026 {
4027         vm_object_t src_object;
4028         vm_map_entry_t fake_entry;
4029         vm_offset_t size;
4030
4031         VM_MAP_ASSERT_LOCKED(dst_map);
4032
4033         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
4034                 return;
4035
4036         if (src_entry->wired_count == 0 ||
4037             (src_entry->protection & VM_PROT_WRITE) == 0) {
4038                 /*
4039                  * If the source entry is marked needs_copy, it is already
4040                  * write-protected.
4041                  */
4042                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
4043                     (src_entry->protection & VM_PROT_WRITE) != 0) {
4044                         pmap_protect(src_map->pmap,
4045                             src_entry->start,
4046                             src_entry->end,
4047                             src_entry->protection & ~VM_PROT_WRITE);
4048                 }
4049
4050                 /*
4051                  * Make a copy of the object.
4052                  */
4053                 size = src_entry->end - src_entry->start;
4054                 if ((src_object = src_entry->object.vm_object) != NULL) {
4055                         if (src_object->type == OBJT_DEFAULT ||
4056                             src_object->type == OBJT_SWAP) {
4057                                 vm_map_copy_swap_object(src_entry, dst_entry,
4058                                     size, fork_charge);
4059                                 /* May have split/collapsed, reload obj. */
4060                                 src_object = src_entry->object.vm_object;
4061                         } else {
4062                                 vm_object_reference(src_object);
4063                                 dst_entry->object.vm_object = src_object;
4064                         }
4065                         src_entry->eflags |= MAP_ENTRY_COW |
4066                             MAP_ENTRY_NEEDS_COPY;
4067                         dst_entry->eflags |= MAP_ENTRY_COW |
4068                             MAP_ENTRY_NEEDS_COPY;
4069                         dst_entry->offset = src_entry->offset;
4070                         if (src_entry->eflags & MAP_ENTRY_WRITECNT) {
4071                                 /*
4072                                  * MAP_ENTRY_WRITECNT cannot
4073                                  * indicate write reference from
4074                                  * src_entry, since the entry is
4075                                  * marked as needs copy.  Allocate a
4076                                  * fake entry that is used to
4077                                  * decrement object->un_pager writecount
4078                                  * at the appropriate time.  Attach
4079                                  * fake_entry to the deferred list.
4080                                  */
4081                                 fake_entry = vm_map_entry_create(dst_map);
4082                                 fake_entry->eflags = MAP_ENTRY_WRITECNT;
4083                                 src_entry->eflags &= ~MAP_ENTRY_WRITECNT;
4084                                 vm_object_reference(src_object);
4085                                 fake_entry->object.vm_object = src_object;
4086                                 fake_entry->start = src_entry->start;
4087                                 fake_entry->end = src_entry->end;
4088                                 fake_entry->defer_next =
4089                                     curthread->td_map_def_user;
4090                                 curthread->td_map_def_user = fake_entry;
4091                         }
4092
4093                         pmap_copy(dst_map->pmap, src_map->pmap,
4094                             dst_entry->start, dst_entry->end - dst_entry->start,
4095                             src_entry->start);
4096                 } else {
4097                         dst_entry->object.vm_object = NULL;
4098                         dst_entry->offset = 0;
4099                         if (src_entry->cred != NULL) {
4100                                 dst_entry->cred = curthread->td_ucred;
4101                                 crhold(dst_entry->cred);
4102                                 *fork_charge += size;
4103                         }
4104                 }
4105         } else {
4106                 /*
4107                  * We don't want to make writeable wired pages copy-on-write.
4108                  * Immediately copy these pages into the new map by simulating
4109                  * page faults.  The new pages are pageable.
4110                  */
4111                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
4112                     fork_charge);
4113         }
4114 }
4115
4116 /*
4117  * vmspace_map_entry_forked:
4118  * Update the newly-forked vmspace each time a map entry is inherited
4119  * or copied.  The values for vm_dsize and vm_tsize are approximate
4120  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
4121  */
4122 static void
4123 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
4124     vm_map_entry_t entry)
4125 {
4126         vm_size_t entrysize;
4127         vm_offset_t newend;
4128
4129         if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
4130                 return;
4131         entrysize = entry->end - entry->start;
4132         vm2->vm_map.size += entrysize;
4133         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
4134                 vm2->vm_ssize += btoc(entrysize);
4135         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
4136             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
4137                 newend = MIN(entry->end,
4138                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
4139                 vm2->vm_dsize += btoc(newend - entry->start);
4140         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
4141             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
4142                 newend = MIN(entry->end,
4143                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
4144                 vm2->vm_tsize += btoc(newend - entry->start);
4145         }
4146 }
4147
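/*
 * Worked example for the accounting above (illustrative figures): with
 * vm1->vm_daddr = D and vm1->vm_dsize = 16 pages, an inherited entry
 * spanning [D + 8 pages, D + 32 pages) starts inside the data segment,
 * so newend clamps to D + ctob(16) and only the overlapping portion is
 * counted:
 *
 *      newend = MIN(entry->end, D + ctob(16)) = D + ctob(16)
 *      vm2->vm_dsize += btoc(newend - entry->start) = 8 pages
 *
 * vm2->vm_map.size still grows by the full 24-page entry size.
 */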
4148 /*
4149  * vmspace_fork:
4150  * Create a new process vmspace structure and vm_map
4151  * based on those of an existing process.  The new map
4152  * is based on the old map, according to the inheritance
4153  * values on the regions in that map.
4154  *
4155  * XXX It might be worth coalescing the entries added to the new vmspace.
4156  *
4157  * The source map must not be locked.
4158  */
4159 struct vmspace *
4160 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
4161 {
4162         struct vmspace *vm2;
4163         vm_map_t new_map, old_map;
4164         vm_map_entry_t new_entry, old_entry;
4165         vm_object_t object;
4166         int error, locked;
4167         vm_inherit_t inh;
4168
4169         old_map = &vm1->vm_map;
4170         /* Copy immutable fields of vm1 to vm2. */
4171         vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
4172             pmap_pinit);
4173         if (vm2 == NULL)
4174                 return (NULL);
4175
4176         vm2->vm_taddr = vm1->vm_taddr;
4177         vm2->vm_daddr = vm1->vm_daddr;
4178         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
4179         vm_map_lock(old_map);
4180         if (old_map->busy)
4181                 vm_map_wait_busy(old_map);
4182         new_map = &vm2->vm_map;
4183         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
4184         KASSERT(locked, ("vmspace_fork: lock failed"));
4185
4186         error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
4187         if (error != 0) {
4188                 sx_xunlock(&old_map->lock);
4189                 sx_xunlock(&new_map->lock);
4190                 vm_map_process_deferred();
4191                 vmspace_free(vm2);
4192                 return (NULL);
4193         }
4194
4195         new_map->anon_loc = old_map->anon_loc;
4196         new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART);
4197
4198         VM_MAP_ENTRY_FOREACH(old_entry, old_map) {
4199                 if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
4200                         panic("vmspace_fork: encountered a submap");
4201
4202                 inh = old_entry->inheritance;
4203                 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4204                     inh != VM_INHERIT_NONE)
4205                         inh = VM_INHERIT_COPY;
4206
4207                 switch (inh) {
4208                 case VM_INHERIT_NONE:
4209                         break;
4210
4211                 case VM_INHERIT_SHARE:
4212                         /*
4213                          * Clone the entry, creating the shared object if
4214                          * necessary.
4215                          */
4216                         object = old_entry->object.vm_object;
4217                         if (object == NULL) {
4218                                 vm_map_entry_back(old_entry);
4219                                 object = old_entry->object.vm_object;
4220                         }
4221
4222                         /*
4223                          * Add the reference before calling vm_object_shadow
4224                          * to ensure that a shadow object is created.
4225                          */
4226                         vm_object_reference(object);
4227                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4228                                 vm_object_shadow(&old_entry->object.vm_object,
4229                                     &old_entry->offset,
4230                                     old_entry->end - old_entry->start,
4231                                     old_entry->cred,
4232                                     /* Transfer the second reference too. */
4233                                     true);
4234                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4235                                 old_entry->cred = NULL;
4236
4237                                 /*
4238                                  * As in vm_map_merged_neighbor_dispose(),
4239                                  * the vnode lock will not be acquired in
4240                                  * this call to vm_object_deallocate().
4241                                  */
4242                                 vm_object_deallocate(object);
4243                                 object = old_entry->object.vm_object;
4244                         } else {
4245                                 VM_OBJECT_WLOCK(object);
4246                                 vm_object_clear_flag(object, OBJ_ONEMAPPING);
4247                                 if (old_entry->cred != NULL) {
4248                                         KASSERT(object->cred == NULL,
4249                                             ("vmspace_fork both cred"));
4250                                         object->cred = old_entry->cred;
4251                                         object->charge = old_entry->end -
4252                                             old_entry->start;
4253                                         old_entry->cred = NULL;
4254                                 }
4255
4256                                 /*
4257                                  * Assert the correct state of the vnode
4258                                  * v_writecount while the object is locked, to
4259                                  * not relock it later for the assertion
4260                                  * correctness.
4261                                  */
4262                                 if (old_entry->eflags & MAP_ENTRY_WRITECNT &&
4263                                     object->type == OBJT_VNODE) {
4264                                         KASSERT(((struct vnode *)object->
4265                                             handle)->v_writecount > 0,
4266                                             ("vmspace_fork: v_writecount %p",
4267                                             object));
4268                                         KASSERT(object->un_pager.vnp.
4269                                             writemappings > 0,
4270                                             ("vmspace_fork: vnp.writecount %p",
4271                                             object));
4272                                 }
4273                                 VM_OBJECT_WUNLOCK(object);
4274                         }
4275
4276                         /*
4277                          * Clone the entry, referencing the shared object.
4278                          */
4279                         new_entry = vm_map_entry_create(new_map);
4280                         *new_entry = *old_entry;
4281                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4282                             MAP_ENTRY_IN_TRANSITION);
4283                         new_entry->wiring_thread = NULL;
4284                         new_entry->wired_count = 0;
4285                         if (new_entry->eflags & MAP_ENTRY_WRITECNT) {
4286                                 vm_pager_update_writecount(object,
4287                                     new_entry->start, new_entry->end);
4288                         }
4289                         vm_map_entry_set_vnode_text(new_entry, true);
4290
4291                         /*
4292                          * Insert the entry into the new map -- we know we're
4293                          * inserting at the end of the new map.
4294                          */
4295                         vm_map_entry_link(new_map, new_entry);
4296                         vmspace_map_entry_forked(vm1, vm2, new_entry);
4297
4298                         /*
4299                          * Update the physical map
4300                          */
4301                         pmap_copy(new_map->pmap, old_map->pmap,
4302                             new_entry->start,
4303                             (old_entry->end - old_entry->start),
4304                             old_entry->start);
4305                         break;
4306
4307                 case VM_INHERIT_COPY:
4308                         /*
4309                          * Clone the entry and link into the map.
4310                          */
4311                         new_entry = vm_map_entry_create(new_map);
4312                         *new_entry = *old_entry;
4313                         /*
4314                          * Copied entry is COW over the old object.
4315                          */
4316                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4317                             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT);
4318                         new_entry->wiring_thread = NULL;
4319                         new_entry->wired_count = 0;
4320                         new_entry->object.vm_object = NULL;
4321                         new_entry->cred = NULL;
4322                         vm_map_entry_link(new_map, new_entry);
4323                         vmspace_map_entry_forked(vm1, vm2, new_entry);
4324                         vm_map_copy_entry(old_map, new_map, old_entry,
4325                             new_entry, fork_charge);
4326                         vm_map_entry_set_vnode_text(new_entry, true);
4327                         break;
4328
4329                 case VM_INHERIT_ZERO:
4330                         /*
4331                          * Create a new anonymous mapping entry modelled from
4332                          * the old one.
4333                          */
4334                         new_entry = vm_map_entry_create(new_map);
4335                         memset(new_entry, 0, sizeof(*new_entry));
4336
4337                         new_entry->start = old_entry->start;
4338                         new_entry->end = old_entry->end;
4339                         new_entry->eflags = old_entry->eflags &
4340                             ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
4341                             MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC |
4342                             MAP_ENTRY_SPLIT_BOUNDARY_MASK);
4343                         new_entry->protection = old_entry->protection;
4344                         new_entry->max_protection = old_entry->max_protection;
4345                         new_entry->inheritance = VM_INHERIT_ZERO;
4346
4347                         vm_map_entry_link(new_map, new_entry);
4348                         vmspace_map_entry_forked(vm1, vm2, new_entry);
4349
4350                         new_entry->cred = curthread->td_ucred;
4351                         crhold(new_entry->cred);
4352                         *fork_charge += (new_entry->end - new_entry->start);
4353
4354                         break;
4355                 }
4356         }
4357         /*
4358          * Use inlined vm_map_unlock() to postpone handling the deferred
4359          * map entries, which cannot be done until both old_map and
4360          * new_map locks are released.
4361          */
4362         sx_xunlock(&old_map->lock);
4363         sx_xunlock(&new_map->lock);
4364         vm_map_process_deferred();
4365
4366         return (vm2);
4367 }
4368
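/*
 * Caller-side sketch: the accumulated *fork_charge is expected to be
 * reserved against the parent's credential before the child may run;
 * vmspace_unshare() later in this file follows the same pattern:
 *
 *      fork_charge = 0;
 *      vm2 = vmspace_fork(vm1, &fork_charge);
 *      if (vm2 == NULL)
 *              return (ENOMEM);
 *      if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
 *              vmspace_free(vm2);
 *              return (ENOMEM);
 *      }
 */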
4369 /*
4370  * Create a process's stack for exec_new_vmspace().  This function is never
4371  * asked to wire the newly created stack.
4372  */
4373 int
4374 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4375     vm_prot_t prot, vm_prot_t max, int cow)
4376 {
4377         vm_size_t growsize, init_ssize;
4378         rlim_t vmemlim;
4379         int rv;
4380
4381         MPASS((map->flags & MAP_WIREFUTURE) == 0);
4382         growsize = sgrowsiz;
4383         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
4384         vm_map_lock(map);
4385         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4386         /* If we would blow our VMEM resource limit, no go */
4387         if (map->size + init_ssize > vmemlim) {
4388                 rv = KERN_NO_SPACE;
4389                 goto out;
4390         }
4391         rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
4392             max, cow);
4393 out:
4394         vm_map_unlock(map);
4395         return (rv);
4396 }
4397
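/*
 * Hypothetical exec-time sketch (everything except vm_map_stack() is an
 * assumption here): the initial stack is created top-down with the full
 * maximum size reserved up front, and later growth happens through
 * faults on the guard gap rather than through this call.
 *
 *      rv = vm_map_stack(map, addrbos, maxssiz, VM_PROT_ALL, VM_PROT_ALL,
 *          MAP_STACK_GROWS_DOWN);
 *      if (rv != KERN_SUCCESS)
 *              return (vm_mmap_to_errno(rv));
 */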
4398 static int stack_guard_page = 1;
4399 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
4400     &stack_guard_page, 0,
4401     "Specifies the number of guard pages for a stack that grows");
4402
4403 static int
4404 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4405     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
4406 {
4407         vm_map_entry_t new_entry, prev_entry;
4408         vm_offset_t bot, gap_bot, gap_top, top;
4409         vm_size_t init_ssize, sgp;
4410         int orient, rv;
4411
4412         /*
4413          * The stack orientation is piggybacked with the cow argument.
4414          * Extract it into orient and mask the cow argument so that we
4415          * don't pass it around further.
4416          */
4417         orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
4418         KASSERT(orient != 0, ("No stack grow direction"));
4419         KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
4420             ("bi-dir stack"));
4421
4422         if (max_ssize == 0 ||
4423             !vm_map_range_valid(map, addrbos, addrbos + max_ssize))
4424                 return (KERN_INVALID_ADDRESS);
4425         sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4426             (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4427             (vm_size_t)stack_guard_page * PAGE_SIZE;
4428         if (sgp >= max_ssize)
4429                 return (KERN_INVALID_ARGUMENT);
4430
4431         init_ssize = growsize;
4432         if (max_ssize < init_ssize + sgp)
4433                 init_ssize = max_ssize - sgp;
4434
4435         /* If addr is already mapped, no go */
4436         if (vm_map_lookup_entry(map, addrbos, &prev_entry))
4437                 return (KERN_NO_SPACE);
4438
4439         /*
4440          * If we can't accommodate max_ssize in the current mapping, no go.
4441          */
4442         if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize)
4443                 return (KERN_NO_SPACE);
4444
4445         /*
4446          * We initially map a stack of only init_ssize.  We will grow as
4447          * needed later.  Depending on the orientation of the stack (i.e.
4448          * the grow direction) we either map at the top of the range, the
4449          * bottom of the range or in the middle.
4450          *
4451          * Note: we would normally expect prot and max to be VM_PROT_ALL,
4452          * and cow to be 0.  Possibly we should eliminate these as input
4453          * parameters, and just pass these values here in the insert call.
4454          */
4455         if (orient == MAP_STACK_GROWS_DOWN) {
4456                 bot = addrbos + max_ssize - init_ssize;
4457                 top = bot + init_ssize;
4458                 gap_bot = addrbos;
4459                 gap_top = bot;
4460         } else /* if (orient == MAP_STACK_GROWS_UP) */ {
4461                 bot = addrbos;
4462                 top = bot + init_ssize;
4463                 gap_bot = top;
4464                 gap_top = addrbos + max_ssize;
4465         }
4466         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
4467         if (rv != KERN_SUCCESS)
4468                 return (rv);
4469         new_entry = vm_map_entry_succ(prev_entry);
4470         KASSERT(new_entry->end == top || new_entry->start == bot,
4471             ("Bad entry start/end for new stack entry"));
4472         KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
4473             (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
4474             ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
4475         KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
4476             (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
4477             ("new entry lacks MAP_ENTRY_GROWS_UP"));
4478         if (gap_bot == gap_top)
4479                 return (KERN_SUCCESS);
4480         rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
4481             VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
4482             MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
4483         if (rv == KERN_SUCCESS) {
4484                 /*
4485                  * Gap can never successfully handle a fault, so
4486                  * read-ahead logic is never used for it.  Re-use
4487                  * next_read of the gap entry to store
4488                  * stack_guard_page for vm_map_growstack().
4489                  */
4490                 if (orient == MAP_STACK_GROWS_DOWN)
4491                         vm_map_entry_pred(new_entry)->next_read = sgp;
4492                 else
4493                         vm_map_entry_succ(new_entry)->next_read = sgp;
4494         } else {
4495                 (void)vm_map_delete(map, bot, top);
4496         }
4497         return (rv);
4498 }
4499
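/*
 * Layout sketch for MAP_STACK_GROWS_DOWN with illustrative numbers:
 * addrbos = A, max_ssize = 64 pages, growsize = 16 pages, sgp = 1 page.
 * init_ssize stays at 16 pages (since 64 >= 16 + 1), so the two entries
 * inserted above are:
 *
 *      stack entry:  [A + 48 pages, A + 64 pages)   prot/max, cow flags
 *      gap entry:    [A,            A + 48 pages)   VM_PROT_NONE, guard
 *
 * The gap entry's next_read field is reused to remember sgp so that
 * vm_map_growstack() can keep one guard page unmapped as it grows.
 */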
4500 /*
4501  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
4502  * successfully grow the stack.
4503  */
4504 static int
4505 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
4506 {
4507         vm_map_entry_t stack_entry;
4508         struct proc *p;
4509         struct vmspace *vm;
4510         struct ucred *cred;
4511         vm_offset_t gap_end, gap_start, grow_start;
4512         vm_size_t grow_amount, guard, max_grow;
4513         rlim_t lmemlim, stacklim, vmemlim;
4514         int rv, rv1;
4515         bool gap_deleted, grow_down, is_procstack;
4516 #ifdef notyet
4517         uint64_t limit;
4518 #endif
4519 #ifdef RACCT
4520         int error;
4521 #endif
4522
4523         p = curproc;
4524         vm = p->p_vmspace;
4525
4526         /*
4527          * Disallow stack growth when the access is performed by a
4528          * debugger or AIO daemon.  The reason is that the wrong
4529          * resource limits are applied.
4530          */
4531         if (p != initproc && (map != &p->p_vmspace->vm_map ||
4532             p->p_textvp == NULL))
4533                 return (KERN_FAILURE);
4534
4535         MPASS(!map->system_map);
4536
4537         lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
4538         stacklim = lim_cur(curthread, RLIMIT_STACK);
4539         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4540 retry:
4541         /* If addr is not in a hole for a stack grow area, no need to grow. */
4542         if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
4543                 return (KERN_FAILURE);
4544         if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
4545                 return (KERN_SUCCESS);
4546         if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
4547                 stack_entry = vm_map_entry_succ(gap_entry);
4548                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
4549                     stack_entry->start != gap_entry->end)
4550                         return (KERN_FAILURE);
4551                 grow_amount = round_page(stack_entry->start - addr);
4552                 grow_down = true;
4553         } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
4554                 stack_entry = vm_map_entry_pred(gap_entry);
4555                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
4556                     stack_entry->end != gap_entry->start)
4557                         return (KERN_FAILURE);
4558                 grow_amount = round_page(addr + 1 - stack_entry->end);
4559                 grow_down = false;
4560         } else {
4561                 return (KERN_FAILURE);
4562         }
4563         guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4564             (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4565             gap_entry->next_read;
4566         max_grow = gap_entry->end - gap_entry->start;
4567         if (guard > max_grow)
4568                 return (KERN_NO_SPACE);
4569         max_grow -= guard;
4570         if (grow_amount > max_grow)
4571                 return (KERN_NO_SPACE);
4572
4573         /*
4574          * If this is the main process stack, see if we're over the stack
4575          * limit.
4576          */
4577         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
4578             addr < (vm_offset_t)p->p_sysent->sv_usrstack;
4579         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
4580                 return (KERN_NO_SPACE);
4581
4582 #ifdef RACCT
4583         if (racct_enable) {
4584                 PROC_LOCK(p);
4585                 if (is_procstack && racct_set(p, RACCT_STACK,
4586                     ctob(vm->vm_ssize) + grow_amount)) {
4587                         PROC_UNLOCK(p);
4588                         return (KERN_NO_SPACE);
4589                 }
4590                 PROC_UNLOCK(p);
4591         }
4592 #endif
4593
4594         grow_amount = roundup(grow_amount, sgrowsiz);
4595         if (grow_amount > max_grow)
4596                 grow_amount = max_grow;
4597         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
4598                 grow_amount = trunc_page((vm_size_t)stacklim) -
4599                     ctob(vm->vm_ssize);
4600         }
4601
4602 #ifdef notyet
4603         PROC_LOCK(p);
4604         limit = racct_get_available(p, RACCT_STACK);
4605         PROC_UNLOCK(p);
4606         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
4607                 grow_amount = limit - ctob(vm->vm_ssize);
4608 #endif
4609
4610         if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
4611                 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
4612                         rv = KERN_NO_SPACE;
4613                         goto out;
4614                 }
4615 #ifdef RACCT
4616                 if (racct_enable) {
4617                         PROC_LOCK(p);
4618                         if (racct_set(p, RACCT_MEMLOCK,
4619                             ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
4620                                 PROC_UNLOCK(p);
4621                                 rv = KERN_NO_SPACE;
4622                                 goto out;
4623                         }
4624                         PROC_UNLOCK(p);
4625                 }
4626 #endif
4627         }
4628
4629         /* If we would blow our VMEM resource limit, no go */
4630         if (map->size + grow_amount > vmemlim) {
4631                 rv = KERN_NO_SPACE;
4632                 goto out;
4633         }
4634 #ifdef RACCT
4635         if (racct_enable) {
4636                 PROC_LOCK(p);
4637                 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
4638                         PROC_UNLOCK(p);
4639                         rv = KERN_NO_SPACE;
4640                         goto out;
4641                 }
4642                 PROC_UNLOCK(p);
4643         }
4644 #endif
4645
4646         if (vm_map_lock_upgrade(map)) {
4647                 gap_entry = NULL;
4648                 vm_map_lock_read(map);
4649                 goto retry;
4650         }
4651
4652         if (grow_down) {
4653                 grow_start = gap_entry->end - grow_amount;
4654                 if (gap_entry->start + grow_amount == gap_entry->end) {
4655                         gap_start = gap_entry->start;
4656                         gap_end = gap_entry->end;
4657                         vm_map_entry_delete(map, gap_entry);
4658                         gap_deleted = true;
4659                 } else {
4660                         MPASS(gap_entry->start < gap_entry->end - grow_amount);
4661                         vm_map_entry_resize(map, gap_entry, -grow_amount);
4662                         gap_deleted = false;
4663                 }
4664                 rv = vm_map_insert(map, NULL, 0, grow_start,
4665                     grow_start + grow_amount,
4666                     stack_entry->protection, stack_entry->max_protection,
4667                     MAP_STACK_GROWS_DOWN);
4668                 if (rv != KERN_SUCCESS) {
4669                         if (gap_deleted) {
4670                                 rv1 = vm_map_insert(map, NULL, 0, gap_start,
4671                                     gap_end, VM_PROT_NONE, VM_PROT_NONE,
4672                                     MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
4673                                 MPASS(rv1 == KERN_SUCCESS);
4674                         } else
4675                                 vm_map_entry_resize(map, gap_entry,
4676                                     grow_amount);
4677                 }
4678         } else {
4679                 grow_start = stack_entry->end;
4680                 cred = stack_entry->cred;
4681                 if (cred == NULL && stack_entry->object.vm_object != NULL)
4682                         cred = stack_entry->object.vm_object->cred;
4683                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
4684                         rv = KERN_NO_SPACE;
4685                 /* Grow the underlying object if applicable. */
4686                 else if (stack_entry->object.vm_object == NULL ||
4687                     vm_object_coalesce(stack_entry->object.vm_object,
4688                     stack_entry->offset,
4689                     (vm_size_t)(stack_entry->end - stack_entry->start),
4690                     grow_amount, cred != NULL)) {
4691                         if (gap_entry->start + grow_amount == gap_entry->end) {
4692                                 vm_map_entry_delete(map, gap_entry);
4693                                 vm_map_entry_resize(map, stack_entry,
4694                                     grow_amount);
4695                         } else {
4696                                 gap_entry->start += grow_amount;
4697                                 stack_entry->end += grow_amount;
4698                         }
4699                         map->size += grow_amount;
4700                         rv = KERN_SUCCESS;
4701                 } else
4702                         rv = KERN_FAILURE;
4703         }
4704         if (rv == KERN_SUCCESS && is_procstack)
4705                 vm->vm_ssize += btoc(grow_amount);
4706
4707         /*
4708          * Heed the MAP_WIREFUTURE flag if it was set for this process.
4709          */
4710         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
4711                 rv = vm_map_wire_locked(map, grow_start,
4712                     grow_start + grow_amount,
4713                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
4714         }
4715         vm_map_lock_downgrade(map);
4716
4717 out:
4718 #ifdef RACCT
4719         if (racct_enable && rv != KERN_SUCCESS) {
4720                 PROC_LOCK(p);
4721                 error = racct_set(p, RACCT_VMEM, map->size);
4722                 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
4723                 if (!old_mlock) {
4724                         error = racct_set(p, RACCT_MEMLOCK,
4725                             ptoa(pmap_wired_count(map->pmap)));
4726                         KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
4727                 }
4728                 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
4729                 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
4730                 PROC_UNLOCK(p);
4731         }
4732 #endif
4733
4734         return (rv);
4735 }
4736
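/*
 * Growth-sizing sketch (illustrative numbers): for a downward-growing
 * stack, a fault at "addr" inside the gap first requests just enough
 * pages, then rounds up to the preferred growth increment and clamps
 * to the space left in the gap minus the guard:
 *
 *      grow_amount = round_page(stack_entry->start - addr);
 *      ...
 *      grow_amount = roundup(grow_amount, sgrowsiz);
 *      if (grow_amount > max_grow)         (gap size minus guard)
 *              grow_amount = max_grow;
 *
 * E.g. a 4 KB fault with sgrowsiz = 128 KB and 96 KB of usable gap
 * grows the stack by 96 KB, assuming the stack rlimit and RACCT do not
 * clamp it first.
 */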
4737 /*
4738  * Unshare the specified VM space for exec.  If other processes are
4739  * mapped to it, then create a new one.  The new vmspace is empty.
4740  */
4741 int
4742 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
4743 {
4744         struct vmspace *oldvmspace = p->p_vmspace;
4745         struct vmspace *newvmspace;
4746
4747         KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
4748             ("vmspace_exec recursed"));
4749         newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
4750         if (newvmspace == NULL)
4751                 return (ENOMEM);
4752         newvmspace->vm_swrss = oldvmspace->vm_swrss;
4753         /*
4754          * This code is written this way for prototype purposes.  The
4755          * goal is to avoid running down the vmspace here, but let the
4756          * other processes that are still using the vmspace finally
4757          * run it down.  Even though there is little or no chance of blocking
4758          * here, it is a good idea to keep this form for future mods.
4759          */
4760         PROC_VMSPACE_LOCK(p);
4761         p->p_vmspace = newvmspace;
4762         PROC_VMSPACE_UNLOCK(p);
4763         if (p == curthread->td_proc)
4764                 pmap_activate(curthread);
4765         curthread->td_pflags |= TDP_EXECVMSPC;
4766         return (0);
4767 }
4768
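/*
 * Caller-side sketch (the surrounding exec logic is simplified and the
 * sv_minuser/sv_maxuser names stand in for the image's address bounds):
 * vmspace_exec() deliberately leaves the old vmspace alive and sets
 * TDP_EXECVMSPC so the exec path can drop the reference once the switch
 * is known to be permanent.
 *
 *      error = vmspace_exec(p, sv_minuser, sv_maxuser);
 *      if (error != 0)
 *              return (error);
 *      ...
 *      if ((curthread->td_pflags & TDP_EXECVMSPC) != 0) {
 *              curthread->td_pflags &= ~TDP_EXECVMSPC;
 *              vmspace_free(oldvmspace);
 *      }
 */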
4769 /*
4770  * Unshare the specified VM space for forcing COW.  This
4771  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
4772  */
4773 int
4774 vmspace_unshare(struct proc *p)
4775 {
4776         struct vmspace *oldvmspace = p->p_vmspace;
4777         struct vmspace *newvmspace;
4778         vm_ooffset_t fork_charge;
4779
4780         if (oldvmspace->vm_refcnt == 1)
4781                 return (0);
4782         fork_charge = 0;
4783         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
4784         if (newvmspace == NULL)
4785                 return (ENOMEM);
4786         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
4787                 vmspace_free(newvmspace);
4788                 return (ENOMEM);
4789         }
4790         PROC_VMSPACE_LOCK(p);
4791         p->p_vmspace = newvmspace;
4792         PROC_VMSPACE_UNLOCK(p);
4793         if (p == curthread->td_proc)
4794                 pmap_activate(curthread);
4795         vmspace_free(oldvmspace);
4796         return (0);
4797 }
4798
4799 /*
4800  *      vm_map_lookup:
4801  *
4802  *      Finds the VM object, offset, and
4803  *      protection for a given virtual address in the
4804  *      specified map, assuming a page fault of the
4805  *      type specified.
4806  *
4807  *      Leaves the map in question locked for read; return
4808  *      values are guaranteed until a vm_map_lookup_done
4809  *      call is performed.  Note that the map argument
4810  *      is in/out; the returned map must be used in
4811  *      the call to vm_map_lookup_done.
4812  *
4813  *      A handle (out_entry) is returned for use in
4814  *      vm_map_lookup_done, to make that fast.
4815  *
4816  *      If a lookup is requested with "write protection"
4817  *      specified, the map may be changed to perform virtual
4818  *      copying operations, although the data referenced will
4819  *      remain the same.
4820  */
4821 int
4822 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
4823               vm_offset_t vaddr,
4824               vm_prot_t fault_typea,
4825               vm_map_entry_t *out_entry,        /* OUT */
4826               vm_object_t *object,              /* OUT */
4827               vm_pindex_t *pindex,              /* OUT */
4828               vm_prot_t *out_prot,              /* OUT */
4829               boolean_t *wired)                 /* OUT */
4830 {
4831         vm_map_entry_t entry;
4832         vm_map_t map = *var_map;
4833         vm_prot_t prot;
4834         vm_prot_t fault_type;
4835         vm_object_t eobject;
4836         vm_size_t size;
4837         struct ucred *cred;
4838
4839 RetryLookup:
4840
4841         vm_map_lock_read(map);
4842
4843 RetryLookupLocked:
4844         /*
4845          * Lookup the faulting address.
4846          */
4847         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
4848                 vm_map_unlock_read(map);
4849                 return (KERN_INVALID_ADDRESS);
4850         }
4851
4852         entry = *out_entry;
4853
4854         /*
4855          * Handle submaps.
4856          */
4857         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4858                 vm_map_t old_map = map;
4859
4860                 *var_map = map = entry->object.sub_map;
4861                 vm_map_unlock_read(old_map);
4862                 goto RetryLookup;
4863         }
4864
4865         /*
4866          * Check whether this task is allowed to have this page.
4867          */
4868         prot = entry->protection;
4869         if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
4870                 fault_typea &= ~VM_PROT_FAULT_LOOKUP;
4871                 if (prot == VM_PROT_NONE && map != kernel_map &&
4872                     (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4873                     (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
4874                     MAP_ENTRY_STACK_GAP_UP)) != 0 &&
4875                     vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
4876                         goto RetryLookupLocked;
4877         }
4878         fault_type = fault_typea & VM_PROT_ALL;
4879         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4880                 vm_map_unlock_read(map);
4881                 return (KERN_PROTECTION_FAILURE);
4882         }
4883         KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4884             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4885             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4886             ("entry %p flags %x", entry, entry->eflags));
4887         if ((fault_typea & VM_PROT_COPY) != 0 &&
4888             (entry->max_protection & VM_PROT_WRITE) == 0 &&
4889             (entry->eflags & MAP_ENTRY_COW) == 0) {
4890                 vm_map_unlock_read(map);
4891                 return (KERN_PROTECTION_FAILURE);
4892         }
4893
4894         /*
4895          * If this page is not pageable, we have to get it for all possible
4896          * accesses.
4897          */
4898         *wired = (entry->wired_count != 0);
4899         if (*wired)
4900                 fault_type = entry->protection;
4901         size = entry->end - entry->start;
4902
4903         /*
4904          * If the entry was copy-on-write, we either shadow it or demote access.
4905          */
4906         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4907                 /*
4908                  * If we want to write the page, we may as well handle that
4909                  * now since we've got the map locked.
4910                  *
4911                  * If we don't need to write the page, we just demote the
4912                  * permissions allowed.
4913                  */
4914                 if ((fault_type & VM_PROT_WRITE) != 0 ||
4915                     (fault_typea & VM_PROT_COPY) != 0) {
4916                         /*
4917                          * Make a new object, and place it in the object
4918                          * chain.  Note that no new references have appeared
4919                          * -- one just moved from the map to the new
4920                          * object.
4921                          */
4922                         if (vm_map_lock_upgrade(map))
4923                                 goto RetryLookup;
4924
4925                         if (entry->cred == NULL) {
4926                                 /*
4927                                  * The debugger owner is charged for
4928                                  * the memory.
4929                                  */
4930                                 cred = curthread->td_ucred;
4931                                 crhold(cred);
4932                                 if (!swap_reserve_by_cred(size, cred)) {
4933                                         crfree(cred);
4934                                         vm_map_unlock(map);
4935                                         return (KERN_RESOURCE_SHORTAGE);
4936                                 }
4937                                 entry->cred = cred;
4938                         }
4939                         eobject = entry->object.vm_object;
4940                         vm_object_shadow(&entry->object.vm_object,
4941                             &entry->offset, size, entry->cred, false);
4942                         if (eobject == entry->object.vm_object) {
4943                                 /*
4944                                  * The object was not shadowed.
4945                                  */
4946                                 swap_release_by_cred(size, entry->cred);
4947                                 crfree(entry->cred);
4948                         }
4949                         entry->cred = NULL;
4950                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4951
4952                         vm_map_lock_downgrade(map);
4953                 } else {
4954                         /*
4955                          * We're attempting to read a copy-on-write page --
4956                          * don't allow writes.
4957                          */
4958                         prot &= ~VM_PROT_WRITE;
4959                 }
4960         }
4961
4962         /*
4963          * Create an object if necessary.
4964          */
4965         if (entry->object.vm_object == NULL && !map->system_map) {
4966                 if (vm_map_lock_upgrade(map))
4967                         goto RetryLookup;
4968                 entry->object.vm_object = vm_object_allocate_anon(atop(size),
4969                     NULL, entry->cred, entry->cred != NULL ? size : 0);
4970                 entry->offset = 0;
4971                 entry->cred = NULL;
4972                 vm_map_lock_downgrade(map);
4973         }
4974
4975         /*
4976          * Return the object/offset from this entry.  If the entry was
4977          * copy-on-write or empty, it has been fixed up.
4978          */
4979         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4980         *object = entry->object.vm_object;
4981
4982         *out_prot = prot;
4983         return (KERN_SUCCESS);
4984 }
4985
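/*
 * Lookup/teardown sketch in the style of a fault handler (illustrative,
 * not the actual vm_fault() code): the map comes back read-locked and
 * possibly different from the one passed in, so the returned map and
 * entry must be handed to vm_map_lookup_done() once the caller is
 * finished with the object and offset.
 *
 *      rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *          &pindex, &prot, &wired);
 *      if (rv != KERN_SUCCESS)
 *              return (rv);
 *      ... operate on object/pindex under the read-locked map ...
 *      vm_map_lookup_done(map, entry);
 */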
4986 /*
4987  *      vm_map_lookup_locked:
4988  *
4989  *      Lookup the faulting address.  A version of vm_map_lookup that returns 
4990  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
4991  */
4992 int
4993 vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
4994                      vm_offset_t vaddr,
4995                      vm_prot_t fault_typea,
4996                      vm_map_entry_t *out_entry, /* OUT */
4997                      vm_object_t *object,       /* OUT */
4998                      vm_pindex_t *pindex,       /* OUT */
4999                      vm_prot_t *out_prot,       /* OUT */
5000                      boolean_t *wired)          /* OUT */
5001 {
5002         vm_map_entry_t entry;
5003         vm_map_t map = *var_map;
5004         vm_prot_t prot;
5005         vm_prot_t fault_type = fault_typea;
5006
5007         /*
5008          * Lookup the faulting address.
5009          */
5010         if (!vm_map_lookup_entry(map, vaddr, out_entry))
5011                 return (KERN_INVALID_ADDRESS);
5012
5013         entry = *out_entry;
5014
5015         /*
5016          * Fail if the entry refers to a submap.
5017          */
5018         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
5019                 return (KERN_FAILURE);
5020
5021         /*
5022          * Check whether this task is allowed to have this page.
5023          */
5024         prot = entry->protection;
5025         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
5026         if ((fault_type & prot) != fault_type)
5027                 return (KERN_PROTECTION_FAILURE);
5028
5029         /*
5030          * If this page is not pageable, we have to get it for all possible
5031          * accesses.
5032          */
5033         *wired = (entry->wired_count != 0);
5034         if (*wired)
5035                 fault_type = entry->protection;
5036
5037         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5038                 /*
5039                  * Fail if the entry was copy-on-write for a write fault.
5040                  */
5041                 if (fault_type & VM_PROT_WRITE)
5042                         return (KERN_FAILURE);
5043                 /*
5044                  * We're attempting to read a copy-on-write page --
5045                  * don't allow writes.
5046                  */
5047                 prot &= ~VM_PROT_WRITE;
5048         }
5049
5050         /*
5051          * Fail if an object should be created.
5052          */
5053         if (entry->object.vm_object == NULL && !map->system_map)
5054                 return (KERN_FAILURE);
5055
5056         /*
5057          * Return the object/offset from this entry.  If the entry was
5058          * copy-on-write or empty, it has been fixed up.
5059          */
5060         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5061         *object = entry->object.vm_object;
5062
5063         *out_prot = prot;
5064         return (KERN_SUCCESS);
5065 }
5066
5067 /*
5068  *      vm_map_lookup_done:
5069  *
5070  *      Releases locks acquired by a vm_map_lookup
5071  *      (according to the handle returned by that lookup).
5072  */
5073 void
5074 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
5075 {
5076         /*
5077          * Unlock the main-level map
5078          */
5079         vm_map_unlock_read(map);
5080 }
5081
5082 vm_offset_t
5083 vm_map_max_KBI(const struct vm_map *map)
5084 {
5085
5086         return (vm_map_max(map));
5087 }
5088
5089 vm_offset_t
5090 vm_map_min_KBI(const struct vm_map *map)
5091 {
5092
5093         return (vm_map_min(map));
5094 }
5095
5096 pmap_t
5097 vm_map_pmap_KBI(vm_map_t map)
5098 {
5099
5100         return (map->pmap);
5101 }
5102
5103 bool
5104 vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end)
5105 {
5106
5107         return (vm_map_range_valid(map, start, end));
5108 }
5109
5110 #ifdef INVARIANTS
5111 static void
5112 _vm_map_assert_consistent(vm_map_t map, int check)
5113 {
5114         vm_map_entry_t entry, prev;
5115         vm_map_entry_t cur, header, lbound, ubound;
5116         vm_size_t max_left, max_right;
5117
5118 #ifdef DIAGNOSTIC
5119         ++map->nupdates;
5120 #endif
5121         if (enable_vmmap_check != check)
5122                 return;
5123
5124         header = prev = &map->header;
5125         VM_MAP_ENTRY_FOREACH(entry, map) {
5126                 KASSERT(prev->end <= entry->start,
5127                     ("map %p prev->end = %jx, start = %jx", map,
5128                     (uintmax_t)prev->end, (uintmax_t)entry->start));
5129                 KASSERT(entry->start < entry->end,
5130                     ("map %p start = %jx, end = %jx", map,
5131                     (uintmax_t)entry->start, (uintmax_t)entry->end));
5132                 KASSERT(entry->left == header ||
5133                     entry->left->start < entry->start,
5134                     ("map %p left->start = %jx, start = %jx", map,
5135                     (uintmax_t)entry->left->start, (uintmax_t)entry->start));
5136                 KASSERT(entry->right == header ||
5137                     entry->start < entry->right->start,
5138                     ("map %p start = %jx, right->start = %jx", map,
5139                     (uintmax_t)entry->start, (uintmax_t)entry->right->start));
5140                 cur = map->root;
5141                 lbound = ubound = header;
5142                 for (;;) {
5143                         if (entry->start < cur->start) {
5144                                 ubound = cur;
5145                                 cur = cur->left;
5146                                 KASSERT(cur != lbound,
5147                                     ("map %p cannot find %jx",
5148                                     map, (uintmax_t)entry->start));
5149                         } else if (cur->end <= entry->start) {
5150                                 lbound = cur;
5151                                 cur = cur->right;
5152                                 KASSERT(cur != ubound,
5153                                     ("map %p cannot find %jx",
5154                                     map, (uintmax_t)entry->start));
5155                         } else {
5156                                 KASSERT(cur == entry,
5157                                     ("map %p cannot find %jx",
5158                                     map, (uintmax_t)entry->start));
5159                                 break;
5160                         }
5161                 }
5162                 max_left = vm_map_entry_max_free_left(entry, lbound);
5163                 max_right = vm_map_entry_max_free_right(entry, ubound);
5164                 KASSERT(entry->max_free == vm_size_max(max_left, max_right),
5165                     ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
5166                     (uintmax_t)entry->max_free,
5167                     (uintmax_t)max_left, (uintmax_t)max_right));
5168                 prev = entry;
5169         }
5170         KASSERT(prev->end <= entry->start,
5171             ("map %p prev->end = %jx, start = %jx", map,
5172             (uintmax_t)prev->end, (uintmax_t)entry->start));
5173 }
5174 #endif
5175
5176 #include "opt_ddb.h"
5177 #ifdef DDB
5178 #include <sys/kernel.h>
5179
5180 #include <ddb/ddb.h>
5181
5182 static void
5183 vm_map_print(vm_map_t map)
5184 {
5185         vm_map_entry_t entry, prev;
5186
5187         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
5188             (void *)map,
5189             (void *)map->pmap, map->nentries, map->timestamp);
5190
5191         db_indent += 2;
5192         prev = &map->header;
5193         VM_MAP_ENTRY_FOREACH(entry, map) {
5194                 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
5195                     (void *)entry, (void *)entry->start, (void *)entry->end,
5196                     entry->eflags);
5197                 {
5198                         static const char * const inheritance_name[4] =
5199                         {"share", "copy", "none", "donate_copy"};
5200
5201                         db_iprintf(" prot=%x/%x/%s",
5202                             entry->protection,
5203                             entry->max_protection,
5204                             inheritance_name[(int)(unsigned char)
5205                             entry->inheritance]);
5206                         if (entry->wired_count != 0)
5207                                 db_printf(", wired");
5208                 }
5209                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5210                         db_printf(", share=%p, offset=0x%jx\n",
5211                             (void *)entry->object.sub_map,
5212                             (uintmax_t)entry->offset);
5213                         if (prev == &map->header ||
5214                             prev->object.sub_map !=
5215                                 entry->object.sub_map) {
5216                                 db_indent += 2;
5217                                 vm_map_print((vm_map_t)entry->object.sub_map);
5218                                 db_indent -= 2;
5219                         }
5220                 } else {
5221                         if (entry->cred != NULL)
5222                                 db_printf(", ruid %d", entry->cred->cr_ruid);
5223                         db_printf(", object=%p, offset=0x%jx",
5224                             (void *)entry->object.vm_object,
5225                             (uintmax_t)entry->offset);
5226                         if (entry->object.vm_object && entry->object.vm_object->cred)
5227                                 db_printf(", obj ruid %d charge %jx",
5228                                     entry->object.vm_object->cred->cr_ruid,
5229                                     (uintmax_t)entry->object.vm_object->charge);
5230                         if (entry->eflags & MAP_ENTRY_COW)
5231                                 db_printf(", copy (%s)",
5232                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
5233                         db_printf("\n");
5234
5235                         if (prev == &map->header ||
5236                             prev->object.vm_object !=
5237                                 entry->object.vm_object) {
5238                                 db_indent += 2;
5239                                 vm_object_print((db_expr_t)(intptr_t)
5240                                                 entry->object.vm_object,
5241                                                 0, 0, (char *)0);
5242                                 db_indent -= 2;
5243                         }
5244                 }
5245                 prev = entry;
5246         }
5247         db_indent -= 2;
5248 }
5249
5250 DB_SHOW_COMMAND(map, map)
5251 {
5252
5253         if (!have_addr) {
5254                 db_printf("usage: show map <addr>\n");
5255                 return;
5256         }
5257         vm_map_print((vm_map_t)addr);
5258 }
5259
5260 DB_SHOW_COMMAND(procvm, procvm)
5261 {
5262         struct proc *p;
5263
5264         if (have_addr) {
5265                 p = db_lookup_proc(addr);
5266         } else {
5267                 p = curproc;
5268         }
5269
5270         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
5271             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
5272             (void *)vmspace_pmap(p->p_vmspace));
5273
5274         vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
5275 }
5276
5277 #endif /* DDB */