1 /*-
2  * Copyright (c) 1991, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  */
60
61 /*
62  *      Virtual memory mapping module.
63  */
64
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD$");
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/ktr.h>
71 #include <sys/lock.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/vmmeter.h>
75 #include <sys/mman.h>
76 #include <sys/vnode.h>
77 #include <sys/resourcevar.h>
78 #include <sys/file.h>
79 #include <sys/sysent.h>
80 #include <sys/shm.h>
81
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_pager.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_extern.h>
91 #include <vm/swap_pager.h>
92 #include <vm/uma.h>
93
94 /*
95  *      Virtual memory maps provide for the mapping, protection,
96  *      and sharing of virtual memory objects.  In addition,
97  *      this module provides for an efficient virtual copy of
98  *      memory from one map to another.
99  *
100  *      Synchronization is required prior to most operations.
101  *
102  *      Maps consist of an ordered doubly-linked list of simple
103  *      entries; a single hint is used to speed up lookups.
104  *
105  *      Since portions of maps are specified by start/end addresses,
106  *      which may not align with existing map entries, all
107  *      routines merely "clip" entries to these start/end values.
108  *      [That is, an entry is split into two, bordering at a
109  *      start or end value.]  Note that these clippings may not
110  *      always be necessary (as the two resulting entries are then
111  *      not changed); however, the clipping is done for convenience.
112  *
113  *      As mentioned above, virtual copy operations are performed
114  *      by copying VM object references from one map to
115  *      another, and then marking both regions as copy-on-write.
116  */
117
118 /*
119  *      vm_map_startup:
120  *
121  *      Initialize the vm_map module.  Must be called before
122  *      any other vm_map routines.
123  *
124  *      Map and entry structures are allocated from the general
125  *      purpose memory pool with some exceptions:
126  *
127  *      - The kernel map and kmem submap are allocated statically.
128  *      - Kernel map entries are allocated out of a static pool.
129  *
130  *      These restrictions are necessary since malloc() uses the
131  *      maps and requires map entries.
132  */
133
134 static struct mtx map_sleep_mtx;
135 static uma_zone_t mapentzone;
136 static uma_zone_t kmapentzone;
137 static uma_zone_t mapzone;
138 static uma_zone_t vmspace_zone;
139 static struct vm_object kmapentobj;
140 static int vmspace_zinit(void *mem, int size, int flags);
141 static void vmspace_zfini(void *mem, int size);
142 static int vm_map_zinit(void *mem, int size, int flags);
143 static void vm_map_zfini(void *mem, int size);
144 static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);
145
146 #ifdef INVARIANTS
147 static void vm_map_zdtor(void *mem, int size, void *arg);
148 static void vmspace_zdtor(void *mem, int size, void *arg);
149 #endif
150
151 /* 
152  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
153  * stable.
154  */
155 #define PROC_VMSPACE_LOCK(p) do { } while (0)
156 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
157
158 /*
159  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
160  *
161  *      Asserts that the starting and ending region
162  *      addresses fall within the valid range of the map.
163  */
164 #define VM_MAP_RANGE_CHECK(map, start, end)             \
165                 {                                       \
166                 if (start < vm_map_min(map))            \
167                         start = vm_map_min(map);        \
168                 if (end > vm_map_max(map))              \
169                         end = vm_map_max(map);          \
170                 if (start > end)                        \
171                         start = end;                    \
172                 }
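/*
 * Usage sketch, for illustration only (example_range_op() is a
 * hypothetical caller): the macro clamps the range in place, so start
 * and end must be modifiable lvalues.  Afterwards start <= end and both
 * fall within [vm_map_min(map), vm_map_max(map)].
 */
#if 0
static void
example_range_op(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (start < end) {
		/* Operate on the clamped range. */
	}
	vm_map_unlock(map);
}
#endif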
173
174 void
175 vm_map_startup(void)
176 {
177         mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
178         mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
179 #ifdef INVARIANTS
180             vm_map_zdtor,
181 #else
182             NULL,
183 #endif
184             vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
185         uma_prealloc(mapzone, MAX_KMAP);
186         kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
187             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
188             UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
189         uma_prealloc(kmapentzone, MAX_KMAPENT);
190         mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
191             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
192         uma_prealloc(mapentzone, MAX_MAPENT);
193 }
194
195 static void
196 vmspace_zfini(void *mem, int size)
197 {
198         struct vmspace *vm;
199
200         vm = (struct vmspace *)mem;
201         pmap_release(vmspace_pmap(vm));
202         vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
203 }
204
205 static int
206 vmspace_zinit(void *mem, int size, int flags)
207 {
208         struct vmspace *vm;
209
210         vm = (struct vmspace *)mem;
211
212         (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
213         pmap_pinit(vmspace_pmap(vm));
214         return (0);
215 }
216
217 static void
218 vm_map_zfini(void *mem, int size)
219 {
220         vm_map_t map;
221
222         map = (vm_map_t)mem;
223         mtx_destroy(&map->system_mtx);
224         sx_destroy(&map->lock);
225 }
226
227 static int
228 vm_map_zinit(void *mem, int size, int flags)
229 {
230         vm_map_t map;
231
232         map = (vm_map_t)mem;
233         map->nentries = 0;
234         map->size = 0;
235         mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
236         sx_init(&map->lock, "user map");
237         return (0);
238 }
239
240 #ifdef INVARIANTS
241 static void
242 vmspace_zdtor(void *mem, int size, void *arg)
243 {
244         struct vmspace *vm;
245
246         vm = (struct vmspace *)mem;
247
248         vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
249 }
250 static void
251 vm_map_zdtor(void *mem, int size, void *arg)
252 {
253         vm_map_t map;
254
255         map = (vm_map_t)mem;
256         KASSERT(map->nentries == 0,
257             ("map %p nentries == %d on free.",
258             map, map->nentries));
259         KASSERT(map->size == 0,
260             ("map %p size == %lu on free.",
261             map, (unsigned long)map->size));
262 }
263 #endif  /* INVARIANTS */
264
265 /*
266  * Allocate a vmspace structure, including a vm_map and pmap,
267  * and initialize those structures.  The refcnt is set to 1.
268  */
269 struct vmspace *
270 vmspace_alloc(min, max)
271         vm_offset_t min, max;
272 {
273         struct vmspace *vm;
274
275         vm = uma_zalloc(vmspace_zone, M_WAITOK);
276         CTR1(KTR_VM, "vmspace_alloc: %p", vm);
277         _vm_map_init(&vm->vm_map, min, max);
278         vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
279         vm->vm_refcnt = 1;
280         vm->vm_shm = NULL;
281         vm->vm_swrss = 0;
282         vm->vm_tsize = 0;
283         vm->vm_dsize = 0;
284         vm->vm_ssize = 0;
285         vm->vm_taddr = 0;
286         vm->vm_daddr = 0;
287         vm->vm_maxsaddr = 0;
288         return (vm);
289 }
290
291 void
292 vm_init2(void)
293 {
294         uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
295             (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 +
296              maxproc * 2 + maxfiles);
297         vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
298 #ifdef INVARIANTS
299             vmspace_zdtor,
300 #else
301             NULL,
302 #endif
303             vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
304         pmap_init2();
305 }
306
307 static __inline void
308 vmspace_dofree(struct vmspace *vm)
309 {
310         CTR1(KTR_VM, "vmspace_free: %p", vm);
311
312         /*
313          * Make sure any SysV shm is freed, it might not have been in
314          * exit1().
315          */
316         shmexit(vm);
317
318         /*
319          * Lock the map, to wait out all other references to it.
320          * Delete all of the mappings and pages they hold, then call
321          * the pmap module to reclaim anything left.
322          */
323         vm_map_lock(&vm->vm_map);
324         (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
325             vm->vm_map.max_offset);
326         vm_map_unlock(&vm->vm_map);
327
328         uma_zfree(vmspace_zone, vm);
329 }
330
331 void
332 vmspace_free(struct vmspace *vm)
333 {
334         int refcnt;
335
336         if (vm->vm_refcnt == 0)
337                 panic("vmspace_free: attempt to free already freed vmspace");
338
339         do
340                 refcnt = vm->vm_refcnt;
341         while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
342         if (refcnt == 1)
343                 vmspace_dofree(vm);
344 }
345
346 void
347 vmspace_exitfree(struct proc *p)
348 {
349         struct vmspace *vm;
350
351         PROC_VMSPACE_LOCK(p);
352         vm = p->p_vmspace;
353         p->p_vmspace = NULL;
354         PROC_VMSPACE_UNLOCK(p);
355         KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
356         vmspace_free(vm);
357 }
358
359 void
360 vmspace_exit(struct thread *td)
361 {
362         int refcnt;
363         struct vmspace *vm;
364         struct proc *p;
365
366         /*
367          * Release user portion of address space.
368          * This releases references to vnodes,
369          * which could cause I/O if the file has been unlinked.
370          * Need to do this early enough that we can still sleep.
371          *
372          * The last exiting process to reach this point releases as
373          * much of the environment as it can. vmspace_dofree() is the
374          * slower fallback in case another process had a temporary
375          * reference to the vmspace.
376          */
377
378         p = td->td_proc;
379         vm = p->p_vmspace;
380         atomic_add_int(&vmspace0.vm_refcnt, 1);
381         do {
382                 refcnt = vm->vm_refcnt;
383                 if (refcnt > 1 && p->p_vmspace != &vmspace0) {
384                         /* Switch now since other proc might free vmspace */
385                         PROC_VMSPACE_LOCK(p);
386                         p->p_vmspace = &vmspace0;
387                         PROC_VMSPACE_UNLOCK(p);
388                         pmap_activate(td);
389                 }
390         } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
391         if (refcnt == 1) {
392                 if (p->p_vmspace != vm) {
393                         /* vmspace not yet freed, switch back */
394                         PROC_VMSPACE_LOCK(p);
395                         p->p_vmspace = vm;
396                         PROC_VMSPACE_UNLOCK(p);
397                         pmap_activate(td);
398                 }
399                 pmap_remove_pages(vmspace_pmap(vm),
400                                   vm_map_min(&vm->vm_map),
401                                   vm_map_max(&vm->vm_map));
402                 /* Switch now since this proc will free vmspace */
403                 PROC_VMSPACE_LOCK(p);
404                 p->p_vmspace = &vmspace0;
405                 PROC_VMSPACE_UNLOCK(p);
406                 pmap_activate(td);
407                 vmspace_dofree(vm);
408         }
409 }
410
411 /* Acquire reference to vmspace owned by another process. */
412
413 struct vmspace *
414 vmspace_acquire_ref(struct proc *p)
415 {
416         struct vmspace *vm;
417         int refcnt;
418
419         PROC_VMSPACE_LOCK(p);
420         vm = p->p_vmspace;
421         if (vm == NULL) {
422                 PROC_VMSPACE_UNLOCK(p);
423                 return (NULL);
424         }
425         do {
426                 refcnt = vm->vm_refcnt;
427                 if (refcnt <= 0) {      /* Avoid 0->1 transition */
428                         PROC_VMSPACE_UNLOCK(p);
429                         return (NULL);
430                 }
431         } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
432         if (vm != p->p_vmspace) {
433                 PROC_VMSPACE_UNLOCK(p);
434                 vmspace_free(vm);
435                 return (NULL);
436         }
437         PROC_VMSPACE_UNLOCK(p);
438         return (vm);
439 }
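/*
 * Pairing sketch, for illustration only (example_vm_usage() is a
 * hypothetical caller): a successful vmspace_acquire_ref() must be
 * balanced by vmspace_free(), and the vmspace may only be used while
 * that reference is held.
 */
#if 0
static long
example_vm_usage(struct proc *p)
{
	struct vmspace *vm;
	long pages;

	vm = vmspace_acquire_ref(p);
	if (vm == NULL)
		return (-1);	/* No vmspace; the process may be exiting. */
	pages = vmspace_resident_count(vm);
	vmspace_free(vm);
	return (pages);
}
#endif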
440
441 void
442 _vm_map_lock(vm_map_t map, const char *file, int line)
443 {
444
445         if (map->system_map)
446                 _mtx_lock_flags(&map->system_mtx, 0, file, line);
447         else
448                 (void) _sx_xlock(&map->lock, 0, file, line);
449         map->timestamp++;
450 }
451
452 void
453 _vm_map_unlock(vm_map_t map, const char *file, int line)
454 {
455
456         if (map->system_map)
457                 _mtx_unlock_flags(&map->system_mtx, 0, file, line);
458         else
459                 _sx_xunlock(&map->lock, file, line);
460 }
461
462 void
463 _vm_map_lock_read(vm_map_t map, const char *file, int line)
464 {
465
466         if (map->system_map)
467                 _mtx_lock_flags(&map->system_mtx, 0, file, line);
468         else
469                 (void) _sx_xlock(&map->lock, 0, file, line);
470 }
471
472 void
473 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
474 {
475
476         if (map->system_map)
477                 _mtx_unlock_flags(&map->system_mtx, 0, file, line);
478         else
479                 _sx_xunlock(&map->lock, file, line);
480 }
481
482 int
483 _vm_map_trylock(vm_map_t map, const char *file, int line)
484 {
485         int error;
486
487         error = map->system_map ?
488             !_mtx_trylock(&map->system_mtx, 0, file, line) :
489             !_sx_try_xlock(&map->lock, file, line);
490         if (error == 0)
491                 map->timestamp++;
492         return (error == 0);
493 }
494
495 int
496 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
497 {
498         int error;
499
500         error = map->system_map ?
501             !_mtx_trylock(&map->system_mtx, 0, file, line) :
502             !_sx_try_xlock(&map->lock, file, line);
503         return (error == 0);
504 }
505
506 int
507 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
508 {
509
510 #ifdef INVARIANTS
511         if (map->system_map) {
512                 _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
513         } else
514                 _sx_assert(&map->lock, SX_XLOCKED, file, line);
515 #endif
516         map->timestamp++;
517         return (0);
518 }
519
520 void
521 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
522 {
523
524 #ifdef INVARIANTS
525         if (map->system_map) {
526                 _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
527         } else
528                 _sx_assert(&map->lock, SX_XLOCKED, file, line);
529 #endif
530 }
531
532 /*
533  *      vm_map_unlock_and_wait:
534  */
535 int
536 vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait)
537 {
538
539         mtx_lock(&map_sleep_mtx);
540         vm_map_unlock(map);
541         return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0));
542 }
543
544 /*
545  *      vm_map_wakeup:
546  */
547 void
548 vm_map_wakeup(vm_map_t map)
549 {
550
551         /*
552          * Acquire and release map_sleep_mtx to prevent a wakeup()
553          * from being performed (and lost) between the vm_map_unlock()
554          * and the msleep() in vm_map_unlock_and_wait().
555          */
556         mtx_lock(&map_sleep_mtx);
557         mtx_unlock(&map_sleep_mtx);
558         wakeup(&map->root);
559 }
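/*
 * Protocol sketch, for illustration only (example_wait_transition() is
 * hypothetical, modeled on the wiring code): the waiter drops the lock
 * and sleeps atomically via vm_map_unlock_and_wait(), then must restart
 * its lookup, since the map may have changed while it slept.
 */
#if 0
static void
example_wait_transition(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t entry;

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, addr, &entry) &&
	    (entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
		map->needs_wakeup = TRUE;
		(void) vm_map_unlock_and_wait(map, FALSE);
		vm_map_lock(map);
	}
	vm_map_unlock(map);
}
#endif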
560
561 long
562 vmspace_resident_count(struct vmspace *vmspace)
563 {
564         return pmap_resident_count(vmspace_pmap(vmspace));
565 }
566
567 long
568 vmspace_wired_count(struct vmspace *vmspace)
569 {
570         return pmap_wired_count(vmspace_pmap(vmspace));
571 }
572
573 /*
574  *      vm_map_create:
575  *
576  *      Creates and returns a new empty VM map with
577  *      the given physical map structure, and having
578  *      the given lower and upper address bounds.
579  */
580 vm_map_t
581 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
582 {
583         vm_map_t result;
584
585         result = uma_zalloc(mapzone, M_WAITOK);
586         CTR1(KTR_VM, "vm_map_create: %p", result);
587         _vm_map_init(result, min, max);
588         result->pmap = pmap;
589         return (result);
590 }
591
592 /*
593  * Initialize an existing vm_map structure
594  * such as that in the vmspace structure.
595  * The pmap is set elsewhere.
596  */
597 static void
598 _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
599 {
600
601         map->header.next = map->header.prev = &map->header;
602         map->needs_wakeup = FALSE;
603         map->system_map = 0;
604         map->min_offset = min;
605         map->max_offset = max;
606         map->flags = 0;
607         map->root = NULL;
608         map->timestamp = 0;
609 }
610
611 void
612 vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
613 {
614         _vm_map_init(map, min, max);
615         mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
616         sx_init(&map->lock, "user map");
617 }
618
619 /*
620  *      vm_map_entry_dispose:   [ internal use only ]
621  *
622  *      Inverse of vm_map_entry_create.
623  */
624 static void
625 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
626 {
627         uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
628 }
629
630 /*
631  *      vm_map_entry_create:    [ internal use only ]
632  *
633  *      Allocates a VM map entry for insertion.
634  *      No entry fields are filled in.
635  */
636 static vm_map_entry_t
637 vm_map_entry_create(vm_map_t map)
638 {
639         vm_map_entry_t new_entry;
640
641         if (map->system_map)
642                 new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
643         else
644                 new_entry = uma_zalloc(mapentzone, M_WAITOK);
645         if (new_entry == NULL)
646                 panic("vm_map_entry_create: kernel resources exhausted");
647         return (new_entry);
648 }
649
650 /*
651  *      vm_map_entry_set_behavior:
652  *
653  *      Set the expected access behavior, either normal, random, or
654  *      sequential.
655  */
656 static __inline void
657 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
658 {
659         entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
660             (behavior & MAP_ENTRY_BEHAV_MASK);
661 }
662
663 /*
664  *      vm_map_entry_set_max_free:
665  *
666  *      Set the max_free field in a vm_map_entry.
667  */
668 static __inline void
669 vm_map_entry_set_max_free(vm_map_entry_t entry)
670 {
671
672         entry->max_free = entry->adj_free;
673         if (entry->left != NULL && entry->left->max_free > entry->max_free)
674                 entry->max_free = entry->left->max_free;
675         if (entry->right != NULL && entry->right->max_free > entry->max_free)
676                 entry->max_free = entry->right->max_free;
677 }
678
679 /*
680  *      vm_map_entry_splay:
681  *
682  *      The Sleator and Tarjan top-down splay algorithm with the
683  *      following variation.  Max_free must be computed bottom-up, so
684  *      on the downward pass, maintain the left and right spines in
685  *      reverse order.  Then, make a second pass up each side to fix
686  *      the pointers and compute max_free.  The time bound is O(log n)
687  *      amortized.
688  *
689  *      The new root is the vm_map_entry containing "addr", or else an
690  *      adjacent entry (lower or higher) if addr is not in the tree.
691  *
692  *      The map must be locked, and leaves it so.
693  *
694  *      Returns: the new root.
695  */
696 static vm_map_entry_t
697 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
698 {
699         vm_map_entry_t llist, rlist;
700         vm_map_entry_t ltree, rtree;
701         vm_map_entry_t y;
702
703         /* Special case of empty tree. */
704         if (root == NULL)
705                 return (root);
706
707         /*
708          * Pass One: Splay down the tree until we find addr or a NULL
709          * pointer where addr would go.  llist and rlist are the two
710          * sides in reverse order (bottom-up), with llist linked by
711          * the right pointer and rlist linked by the left pointer in
712          * the vm_map_entry.  Wait until Pass Two to set max_free on
713          * the two spines.
714          */
715         llist = NULL;
716         rlist = NULL;
717         for (;;) {
718                 /* root is never NULL in here. */
719                 if (addr < root->start) {
720                         y = root->left;
721                         if (y == NULL)
722                                 break;
723                         if (addr < y->start && y->left != NULL) {
724                                 /* Rotate right and put y on rlist. */
725                                 root->left = y->right;
726                                 y->right = root;
727                                 vm_map_entry_set_max_free(root);
728                                 root = y->left;
729                                 y->left = rlist;
730                                 rlist = y;
731                         } else {
732                                 /* Put root on rlist. */
733                                 root->left = rlist;
734                                 rlist = root;
735                                 root = y;
736                         }
737                 } else {
738                         y = root->right;
739                         if (addr < root->end || y == NULL)
740                                 break;
741                         if (addr >= y->end && y->right != NULL) {
742                                 /* Rotate left and put y on llist. */
743                                 root->right = y->left;
744                                 y->left = root;
745                                 vm_map_entry_set_max_free(root);
746                                 root = y->right;
747                                 y->right = llist;
748                                 llist = y;
749                         } else {
750                                 /* Put root on llist. */
751                                 root->right = llist;
752                                 llist = root;
753                                 root = y;
754                         }
755                 }
756         }
757
758         /*
759          * Pass Two: Walk back up the two spines, flip the pointers
760          * and set max_free.  The subtrees of the root go at the
761          * bottom of llist and rlist.
762          */
763         ltree = root->left;
764         while (llist != NULL) {
765                 y = llist->right;
766                 llist->right = ltree;
767                 vm_map_entry_set_max_free(llist);
768                 ltree = llist;
769                 llist = y;
770         }
771         rtree = root->right;
772         while (rlist != NULL) {
773                 y = rlist->left;
774                 rlist->left = rtree;
775                 vm_map_entry_set_max_free(rlist);
776                 rtree = rlist;
777                 rlist = y;
778         }
779
780         /*
781          * Final assembly: add ltree and rtree as subtrees of root.
782          */
783         root->left = ltree;
784         root->right = rtree;
785         vm_map_entry_set_max_free(root);
786
787         return (root);
788 }
789
790 /*
791  *      vm_map_entry_{un,}link:
792  *
793  *      Insert/remove entries from maps.
794  */
795 static void
796 vm_map_entry_link(vm_map_t map,
797                   vm_map_entry_t after_where,
798                   vm_map_entry_t entry)
799 {
800
801         CTR4(KTR_VM,
802             "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
803             map->nentries, entry, after_where);
804         map->nentries++;
805         entry->prev = after_where;
806         entry->next = after_where->next;
807         entry->next->prev = entry;
808         after_where->next = entry;
809
810         if (after_where != &map->header) {
811                 if (after_where != map->root)
812                         vm_map_entry_splay(after_where->start, map->root);
813                 entry->right = after_where->right;
814                 entry->left = after_where;
815                 after_where->right = NULL;
816                 after_where->adj_free = entry->start - after_where->end;
817                 vm_map_entry_set_max_free(after_where);
818         } else {
819                 entry->right = map->root;
820                 entry->left = NULL;
821         }
822         entry->adj_free = (entry->next == &map->header ? map->max_offset :
823             entry->next->start) - entry->end;
824         vm_map_entry_set_max_free(entry);
825         map->root = entry;
826 }
827
828 static void
829 vm_map_entry_unlink(vm_map_t map,
830                     vm_map_entry_t entry)
831 {
832         vm_map_entry_t next, prev, root;
833
834         if (entry != map->root)
835                 vm_map_entry_splay(entry->start, map->root);
836         if (entry->left == NULL)
837                 root = entry->right;
838         else {
839                 root = vm_map_entry_splay(entry->start, entry->left);
840                 root->right = entry->right;
841                 root->adj_free = (entry->next == &map->header ? map->max_offset :
842                     entry->next->start) - root->end;
843                 vm_map_entry_set_max_free(root);
844         }
845         map->root = root;
846
847         prev = entry->prev;
848         next = entry->next;
849         next->prev = prev;
850         prev->next = next;
851         map->nentries--;
852         CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
853             map->nentries, entry);
854 }
855
856 /*
857  *      vm_map_entry_resize_free:
858  *
859  *      Recompute the amount of free space following a vm_map_entry
860  *      and propagate that value up the tree.  Call this function after
861  *      resizing a map entry in-place, that is, without a call to
862  *      vm_map_entry_link() or _unlink().
863  *
864  *      The map must be locked, and leaves it so.
865  */
866 static void
867 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
868 {
869
870         /*
871          * Using splay trees without parent pointers, propagating
872          * max_free up the tree is done by moving the entry to the
873          * root and making the change there.
874          */
875         if (entry != map->root)
876                 map->root = vm_map_entry_splay(entry->start, map->root);
877
878         entry->adj_free = (entry->next == &map->header ? map->max_offset :
879             entry->next->start) - entry->end;
880         vm_map_entry_set_max_free(entry);
881 }
882
883 /*
884  *      vm_map_lookup_entry:    [ internal use only ]
885  *
886  *      Finds the map entry containing (or
887  *      immediately preceding) the specified address
888  *      in the given map; the entry is returned
889  *      in the "entry" parameter.  The boolean
890  *      result indicates whether the address is
891  *      actually contained in the map.
892  */
893 boolean_t
894 vm_map_lookup_entry(
895         vm_map_t map,
896         vm_offset_t address,
897         vm_map_entry_t *entry)  /* OUT */
898 {
899         vm_map_entry_t cur;
900
901         cur = vm_map_entry_splay(address, map->root);
902         if (cur == NULL)
903                 *entry = &map->header;
904         else {
905                 map->root = cur;
906
907                 if (address >= cur->start) {
908                         *entry = cur;
909                         if (cur->end > address)
910                                 return (TRUE);
911                 } else
912                         *entry = cur->prev;
913         }
914         return (FALSE);
915 }
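/*
 * Result-handling sketch, for illustration only (example_is_mapped() is
 * hypothetical): TRUE means *entry contains the address; FALSE means
 * *entry is the immediately preceding entry, possibly &map->header,
 * which is exactly the form the clipping and insertion code expects.
 */
#if 0
static boolean_t
example_is_mapped(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t entry;
	boolean_t mapped;

	vm_map_lock_read(map);
	mapped = vm_map_lookup_entry(map, addr, &entry);
	vm_map_unlock_read(map);
	return (mapped);
}
#endif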
916
917 /*
918  *      vm_map_insert:
919  *
920  *      Inserts the given whole VM object into the target
921  *      map at the specified address range.  The object's
922  *      size should match that of the address range.
923  *
924  *      Requires that the map be locked, and leaves it so.
925  *
926  *      If object is non-NULL, its ref count must be bumped by the
927  *      caller prior to making the call, to account for the new entry.
928  */
929 int
930 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
931               vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
932               int cow)
933 {
934         vm_map_entry_t new_entry;
935         vm_map_entry_t prev_entry;
936         vm_map_entry_t temp_entry;
937         vm_eflags_t protoeflags;
938
939         /*
940          * Check that the start and end points are not bogus.
941          */
942         if ((start < map->min_offset) || (end > map->max_offset) ||
943             (start >= end))
944                 return (KERN_INVALID_ADDRESS);
945
946         /*
947          * Find the entry prior to the proposed starting address; if it's part
948          * of an existing entry, this range is bogus.
949          */
950         if (vm_map_lookup_entry(map, start, &temp_entry))
951                 return (KERN_NO_SPACE);
952
953         prev_entry = temp_entry;
954
955         /*
956          * Assert that the next entry doesn't overlap the end point.
957          */
958         if ((prev_entry->next != &map->header) &&
959             (prev_entry->next->start < end))
960                 return (KERN_NO_SPACE);
961
962         protoeflags = 0;
963
964         if (cow & MAP_COPY_ON_WRITE)
965                 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
966
967         if (cow & MAP_NOFAULT) {
968                 protoeflags |= MAP_ENTRY_NOFAULT;
969
970                 KASSERT(object == NULL,
971                         ("vm_map_insert: paradoxical MAP_NOFAULT request"));
972         }
973         if (cow & MAP_DISABLE_SYNCER)
974                 protoeflags |= MAP_ENTRY_NOSYNC;
975         if (cow & MAP_DISABLE_COREDUMP)
976                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
977
978         if (object != NULL) {
979                 /*
980                  * OBJ_ONEMAPPING must be cleared unless this mapping
981                  * is trivially proven to be the only mapping for any
982                  * of the object's pages.  (Object granularity
983                  * reference counting is insufficient to recognize
984                  * aliases with precision.)
985                  */
986                 VM_OBJECT_LOCK(object);
987                 if (object->ref_count > 1 || object->shadow_count != 0)
988                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
989                 VM_OBJECT_UNLOCK(object);
990         }
991         else if ((prev_entry != &map->header) &&
992                  (prev_entry->eflags == protoeflags) &&
993                  (prev_entry->end == start) &&
994                  (prev_entry->wired_count == 0) &&
995                  ((prev_entry->object.vm_object == NULL) ||
996                   vm_object_coalesce(prev_entry->object.vm_object,
997                                      prev_entry->offset,
998                                      (vm_size_t)(prev_entry->end - prev_entry->start),
999                                      (vm_size_t)(end - prev_entry->end)))) {
1000                 /*
1001                  * We were able to extend the object.  Determine if we
1002                  * can extend the previous map entry to include the
1003                  * new range as well.
1004                  */
1005                 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
1006                     (prev_entry->protection == prot) &&
1007                     (prev_entry->max_protection == max)) {
1008                         map->size += (end - prev_entry->end);
1009                         prev_entry->end = end;
1010                         vm_map_entry_resize_free(map, prev_entry);
1011                         vm_map_simplify_entry(map, prev_entry);
1012                         return (KERN_SUCCESS);
1013                 }
1014
1015                 /*
1016                  * If we can extend the object but cannot extend the
1017                  * map entry, we have to create a new map entry.  We
1018                  * must bump the ref count on the extended object to
1019                  * account for it.  object may be NULL.
1020                  */
1021                 object = prev_entry->object.vm_object;
1022                 offset = prev_entry->offset +
1023                         (prev_entry->end - prev_entry->start);
1024                 vm_object_reference(object);
1025         }
1026
1027         /*
1028          * NOTE: if conditionals fail, object can be NULL here.  This occurs
1029          * in things like the buffer map where we manage kva but do not manage
1030          * backing objects.
1031          */
1032
1033         /*
1034          * Create a new entry
1035          */
1036         new_entry = vm_map_entry_create(map);
1037         new_entry->start = start;
1038         new_entry->end = end;
1039
1040         new_entry->eflags = protoeflags;
1041         new_entry->object.vm_object = object;
1042         new_entry->offset = offset;
1043         new_entry->avail_ssize = 0;
1044
1045         new_entry->inheritance = VM_INHERIT_DEFAULT;
1046         new_entry->protection = prot;
1047         new_entry->max_protection = max;
1048         new_entry->wired_count = 0;
1049
1050         /*
1051          * Insert the new entry into the list
1052          */
1053         vm_map_entry_link(map, prev_entry, new_entry);
1054         map->size += new_entry->end - new_entry->start;
1055
1056 #if 0
1057         /*
1058          * Temporarily removed to avoid MAP_STACK panic, due to
1059          * MAP_STACK being a huge hack.  Will be added back in
1060          * when MAP_STACK (and the user stack mapping) is fixed.
1061          */
1062         /*
1063          * It may be possible to simplify the entry
1064          */
1065         vm_map_simplify_entry(map, new_entry);
1066 #endif
1067
1068         if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
1069                 vm_map_pmap_enter(map, start, prot,
1070                                     object, OFF_TO_IDX(offset), end - start,
1071                                     cow & MAP_PREFAULT_PARTIAL);
1072         }
1073
1074         return (KERN_SUCCESS);
1075 }
1076
1077 /*
1078  *      vm_map_findspace:
1079  *
1080  *      Find the first fit (lowest VM address) for "length" free bytes
1081  *      beginning at address >= start in the given map.
1082  *
1083  *      In a vm_map_entry, "adj_free" is the amount of free space
1084  *      adjacent (higher address) to this entry, and "max_free" is the
1085  *      maximum amount of contiguous free space in its subtree.  This
1086  *      allows finding a free region in one path down the tree, so
1087  *      O(log n) amortized with splay trees.
1088  *
1089  *      The map must be locked, and leaves it so.
1090  *
1091  *      Returns: 0 on success, and starting address in *addr,
1092  *               1 if insufficient space.
1093  */
1094 int
1095 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1096     vm_offset_t *addr)  /* OUT */
1097 {
1098         vm_map_entry_t entry;
1099         vm_offset_t end, st;
1100
1101         /*
1102          * Request must fit within min/max VM address and must avoid
1103          * address wrap.
1104          */
1105         if (start < map->min_offset)
1106                 start = map->min_offset;
1107         if (start + length > map->max_offset || start + length < start)
1108                 return (1);
1109
1110         /* Empty tree means wide open address space. */
1111         if (map->root == NULL) {
1112                 *addr = start;
1113                 goto found;
1114         }
1115
1116         /*
1117          * After splay, if start comes before root node, then there
1118          * must be a gap from start to the root.
1119          */
1120         map->root = vm_map_entry_splay(start, map->root);
1121         if (start + length <= map->root->start) {
1122                 *addr = start;
1123                 goto found;
1124         }
1125
1126         /*
1127          * Root is the last node that might begin its gap before
1128          * start, and this is the last comparison where address
1129          * wrap might be a problem.
1130          */
1131         st = (start > map->root->end) ? start : map->root->end;
1132         if (length <= map->root->end + map->root->adj_free - st) {
1133                 *addr = st;
1134                 goto found;
1135         }
1136
1137         /* With max_free, can immediately tell if no solution. */
1138         entry = map->root->right;
1139         if (entry == NULL || length > entry->max_free)
1140                 return (1);
1141
1142         /*
1143          * Search the right subtree in the order: left subtree, root,
1144          * right subtree (first fit).  The previous splay implies that
1145          * all regions in the right subtree have addresses > start.
1146          */
1147         while (entry != NULL) {
1148                 if (entry->left != NULL && entry->left->max_free >= length)
1149                         entry = entry->left;
1150                 else if (entry->adj_free >= length) {
1151                         *addr = entry->end;
1152                         goto found;
1153                 } else
1154                         entry = entry->right;
1155         }
1156
1157         /* Can't get here, so panic if we do. */
1158         panic("vm_map_findspace: max_free corrupt");
1159
1160 found:
1161         /* Expand the kernel pmap, if necessary. */
1162         if (map == kernel_map) {
1163                 end = round_page(*addr + length);
1164                 if (end > kernel_vm_end)
1165                         pmap_growkernel(end);
1166         }
1167         return (0);
1168 }
1169
1170 int
1171 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1172     vm_offset_t *addr /* IN/OUT */, vm_size_t length, vm_prot_t prot,
1173     vm_prot_t max, int cow)
1174 {
1175         vm_offset_t start, end;
1176         int result;
1177
1178         start = *addr;
1179         vm_map_lock(map);
1180         end = start + length;
1181         VM_MAP_RANGE_CHECK(map, start, end);
1182         (void) vm_map_delete(map, start, end);
1183         result = vm_map_insert(map, object, offset, start, end, prot,
1184             max, cow);
1185         vm_map_unlock(map);
1186         return (result);
1187 }
1188
1189 /*
1190  *      vm_map_find finds an unallocated region in the target address
1191  *      map with the given length.  The search is defined to be
1192  *      first-fit from the specified address; the region found is
1193  *      returned in the same parameter.
1194  *
1195  *      If object is non-NULL, its ref count must be bumped by the
1196  *      caller prior to making the call, to account for the new entry.
1197  */
1198 int
1199 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1200             vm_offset_t *addr,  /* IN/OUT */
1201             vm_size_t length, boolean_t find_space, vm_prot_t prot,
1202             vm_prot_t max, int cow)
1203 {
1204         vm_offset_t start;
1205         int result;
1206
1207         start = *addr;
1208         vm_map_lock(map);
1209         if (find_space) {
1210                 if (vm_map_findspace(map, start, length, addr)) {
1211                         vm_map_unlock(map);
1212                         return (KERN_NO_SPACE);
1213                 }
1214                 start = *addr;
1215         }
1216         result = vm_map_insert(map, object, offset,
1217                 start, start + length, prot, max, cow);
1218         vm_map_unlock(map);
1219         return (result);
1220 }
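/*
 * Usage sketch, for illustration only (example_reserve_kva() is
 * hypothetical): with find_space = TRUE the first-fit address at or
 * above *addr is chosen and returned in *addr; a NULL object gives an
 * unmanaged range, so no reference bump is needed.
 */
#if 0
static int
example_reserve_kva(vm_offset_t *addr, vm_size_t size)
{

	return (vm_map_find(kernel_map, NULL, 0, addr, size, TRUE,
	    VM_PROT_ALL, VM_PROT_ALL, 0));
}
#endif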
1221
1222 /*
1223  *      vm_map_simplify_entry:
1224  *
1225  *      Simplify the given map entry by merging with either neighbor.  This
1226  *      routine also has the ability to merge with both neighbors.
1227  *
1228  *      The map must be locked.
1229  *
1230  *      This routine guarantees that the passed entry remains valid (though
1231  *      possibly extended).  When merging, this routine may delete one or
1232  *      both neighbors.
1233  */
1234 void
1235 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1236 {
1237         vm_map_entry_t next, prev;
1238         vm_size_t prevsize, esize;
1239
1240         if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
1241                 return;
1242
1243         prev = entry->prev;
1244         if (prev != &map->header) {
1245                 prevsize = prev->end - prev->start;
1246                 if ( (prev->end == entry->start) &&
1247                      (prev->object.vm_object == entry->object.vm_object) &&
1248                      (!prev->object.vm_object ||
1249                         (prev->offset + prevsize == entry->offset)) &&
1250                      (prev->eflags == entry->eflags) &&
1251                      (prev->protection == entry->protection) &&
1252                      (prev->max_protection == entry->max_protection) &&
1253                      (prev->inheritance == entry->inheritance) &&
1254                      (prev->wired_count == entry->wired_count)) {
1255                         vm_map_entry_unlink(map, prev);
1256                         entry->start = prev->start;
1257                         entry->offset = prev->offset;
1258                         if (entry->prev != &map->header)
1259                                 vm_map_entry_resize_free(map, entry->prev);
1260                         if (prev->object.vm_object)
1261                                 vm_object_deallocate(prev->object.vm_object);
1262                         vm_map_entry_dispose(map, prev);
1263                 }
1264         }
1265
1266         next = entry->next;
1267         if (next != &map->header) {
1268                 esize = entry->end - entry->start;
1269                 if ((entry->end == next->start) &&
1270                     (next->object.vm_object == entry->object.vm_object) &&
1271                      (!entry->object.vm_object ||
1272                         (entry->offset + esize == next->offset)) &&
1273                     (next->eflags == entry->eflags) &&
1274                     (next->protection == entry->protection) &&
1275                     (next->max_protection == entry->max_protection) &&
1276                     (next->inheritance == entry->inheritance) &&
1277                     (next->wired_count == entry->wired_count)) {
1278                         vm_map_entry_unlink(map, next);
1279                         entry->end = next->end;
1280                         vm_map_entry_resize_free(map, entry);
1281                         if (next->object.vm_object)
1282                                 vm_object_deallocate(next->object.vm_object);
1283                         vm_map_entry_dispose(map, next);
1284                 }
1285         }
1286 }
1287 /*
1288  *      vm_map_clip_start:      [ internal use only ]
1289  *
1290  *      Asserts that the given entry begins at or after
1291  *      the specified address; if necessary,
1292  *      it splits the entry into two.
1293  */
1294 #define vm_map_clip_start(map, entry, startaddr) \
1295 { \
1296         if (startaddr > entry->start) \
1297                 _vm_map_clip_start(map, entry, startaddr); \
1298 }
1299
1300 /*
1301  *      This routine is called only when it is known that
1302  *      the entry must be split.
1303  */
1304 static void
1305 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1306 {
1307         vm_map_entry_t new_entry;
1308
1309         /*
1310          * Split off the front portion -- note that we must insert the new
1311          * entry BEFORE this one, so that this entry has the specified
1312          * starting address.
1313          */
1314         vm_map_simplify_entry(map, entry);
1315
1316         /*
1317          * If there is no object backing this entry, we might as well create
1318          * one now.  If we defer it, an object can get created after the map
1319          * is clipped, and individual objects will be created for the split-up
1320          * map.  This is a bit of a hack, but is also about the best place to
1321          * put this improvement.
1322          */
1323         if (entry->object.vm_object == NULL && !map->system_map) {
1324                 vm_object_t object;
1325                 object = vm_object_allocate(OBJT_DEFAULT,
1326                                 atop(entry->end - entry->start));
1327                 entry->object.vm_object = object;
1328                 entry->offset = 0;
1329         }
1330
1331         new_entry = vm_map_entry_create(map);
1332         *new_entry = *entry;
1333
1334         new_entry->end = start;
1335         entry->offset += (start - entry->start);
1336         entry->start = start;
1337
1338         vm_map_entry_link(map, entry->prev, new_entry);
1339
1340         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1341                 vm_object_reference(new_entry->object.vm_object);
1342         }
1343 }
1344
1345 /*
1346  *      vm_map_clip_end:        [ internal use only ]
1347  *
1348  *      Asserts that the given entry ends at or before
1349  *      the specified address; if necessary,
1350  *      it splits the entry into two.
1351  */
1352 #define vm_map_clip_end(map, entry, endaddr) \
1353 { \
1354         if ((endaddr) < (entry->end)) \
1355                 _vm_map_clip_end((map), (entry), (endaddr)); \
1356 }
1357
1358 /*
1359  *      This routine is called only when it is known that
1360  *      the entry must be split.
1361  */
1362 static void
1363 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1364 {
1365         vm_map_entry_t new_entry;
1366
1367         /*
1368          * If there is no object backing this entry, we might as well create
1369          * one now.  If we defer it, an object can get created after the map
1370          * is clipped, and individual objects will be created for the split-up
1371          * map.  This is a bit of a hack, but is also about the best place to
1372          * put this improvement.
1373          */
1374         if (entry->object.vm_object == NULL && !map->system_map) {
1375                 vm_object_t object;
1376                 object = vm_object_allocate(OBJT_DEFAULT,
1377                                 atop(entry->end - entry->start));
1378                 entry->object.vm_object = object;
1379                 entry->offset = 0;
1380         }
1381
1382         /*
1383          * Create a new entry and insert it AFTER the specified entry
1384          */
1385         new_entry = vm_map_entry_create(map);
1386         *new_entry = *entry;
1387
1388         new_entry->start = entry->end = end;
1389         new_entry->offset += (end - entry->start);
1390
1391         vm_map_entry_link(map, entry, new_entry);
1392
1393         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1394                 vm_object_reference(new_entry->object.vm_object);
1395         }
1396 }
1397
1398 /*
1399  *      vm_map_submap:          [ kernel use only ]
1400  *
1401  *      Mark the given range as handled by a subordinate map.
1402  *
1403  *      This range must have been created with vm_map_find,
1404  *      and no other operations may have been performed on this
1405  *      range prior to calling vm_map_submap.
1406  *
1407  *      Only a limited number of operations can be performed
1408  *      within this range after calling vm_map_submap:
1409  *              vm_fault
1410  *      [Don't try vm_map_copy!]
1411  *
1412  *      To remove a submapping, one must first remove the
1413  *      range from the superior map, and then destroy the
1414  *      submap (if desired).  [Better yet, don't try it.]
1415  */
1416 int
1417 vm_map_submap(
1418         vm_map_t map,
1419         vm_offset_t start,
1420         vm_offset_t end,
1421         vm_map_t submap)
1422 {
1423         vm_map_entry_t entry;
1424         int result = KERN_INVALID_ARGUMENT;
1425
1426         vm_map_lock(map);
1427
1428         VM_MAP_RANGE_CHECK(map, start, end);
1429
1430         if (vm_map_lookup_entry(map, start, &entry)) {
1431                 vm_map_clip_start(map, entry, start);
1432         } else
1433                 entry = entry->next;
1434
1435         vm_map_clip_end(map, entry, end);
1436
1437         if ((entry->start == start) && (entry->end == end) &&
1438             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1439             (entry->object.vm_object == NULL)) {
1440                 entry->object.sub_map = submap;
1441                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1442                 result = KERN_SUCCESS;
1443         }
1444         vm_map_unlock(map);
1445
1446         return (result);
1447 }
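/*
 * Setup sketch, for illustration only (example_suballoc() is
 * hypothetical, mirroring the shape of kmem_suballoc()): reserve the
 * range in the parent, create a map over the same bounds, then install
 * it with vm_map_submap().
 */
#if 0
static vm_map_t
example_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size)
{
	vm_map_t result;

	*min = vm_map_min(parent);
	if (vm_map_find(parent, NULL, 0, min, size, TRUE, VM_PROT_ALL,
	    VM_PROT_ALL, 0) != KERN_SUCCESS)
		return (NULL);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		return (NULL);
	return (result);
}
#endif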
1448
1449 /*
1450  * The maximum number of pages to map
1451  */
1452 #define MAX_INIT_PT     96
1453
1454 /*
1455  *      vm_map_pmap_enter:
1456  *
1457  *      Preload read-only mappings for the given object into the specified
1458  *      map.  This eliminates the soft faults on process startup and
1459  *      immediately after an mmap(2).
1460  */
1461 void
1462 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1463     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1464 {
1465         vm_offset_t start;
1466         vm_page_t p, p_start;
1467         vm_pindex_t psize, tmpidx;
1468         boolean_t are_queues_locked;
1469
1470         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1471                 return;
1472         VM_OBJECT_LOCK(object);
1473         if (object->type == OBJT_DEVICE) {
1474                 pmap_object_init_pt(map->pmap, addr, object, pindex, size);
1475                 goto unlock_return;
1476         }
1477
1478         psize = atop(size);
1479
1480         if (object->type != OBJT_VNODE ||
1481             ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
1482              (object->resident_page_count > MAX_INIT_PT))) {
1483                 goto unlock_return;
1484         }
1485
1486         if (psize + pindex > object->size) {
1487                 if (object->size < pindex)
1488                         goto unlock_return;
1489                 psize = object->size - pindex;
1490         }
1491
1492         are_queues_locked = FALSE;
1493         start = 0;
1494         p_start = NULL;
1495
1496         if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
1497                 if (p->pindex < pindex) {
1498                         p = vm_page_splay(pindex, object->root);
1499                         if ((object->root = p)->pindex < pindex)
1500                                 p = TAILQ_NEXT(p, listq);
1501                 }
1502         }
1503         /*
1504          * Assert: the variable p is either (1) the page with the
1505          * least pindex greater than or equal to the parameter pindex
1506          * or (2) NULL.
1507          */
1508         for (;
1509              p != NULL && (tmpidx = p->pindex - pindex) < psize;
1510              p = TAILQ_NEXT(p, listq)) {
1511                 /*
1512                  * Don't allow a madvise to blow away our really
1513                  * free pages by allocating pv entries.
1514                  */
1515                 if ((flags & MAP_PREFAULT_MADVISE) &&
1516                     cnt.v_free_count < cnt.v_free_reserved) {
1517                         psize = tmpidx;
1518                         break;
1519                 }
1520                 if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
1521                     (p->busy == 0)) {
1522                         if (p_start == NULL) {
1523                                 start = addr + ptoa(tmpidx);
1524                                 p_start = p;
1525                         }
1526                         if (!are_queues_locked) {
1527                                 are_queues_locked = TRUE;
1528                                 vm_page_lock_queues();
1529                         }
1530                         if ((p->queue - p->pc) == PQ_CACHE)
1531                                 vm_page_deactivate(p);
1532                 } else if (p_start != NULL) {
1533                         pmap_enter_object(map->pmap, start, addr +
1534                             ptoa(tmpidx), p_start, prot);
1535                         p_start = NULL;
1536                 }
1537         }
1538         if (p_start != NULL)
1539                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1540                     p_start, prot);
1541         if (are_queues_locked)
1542                 vm_page_unlock_queues();
1543 unlock_return:
1544         VM_OBJECT_UNLOCK(object);
1545 }
1546
1547 /*
1548  *      vm_map_protect:
1549  *
1550  *      Sets the protection of the specified address
1551  *      region in the target map.  If "set_max" is
1552  *      specified, the maximum protection is to be set;
1553  *      otherwise, only the current protection is affected.
1554  */
1555 int
1556 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1557                vm_prot_t new_prot, boolean_t set_max)
1558 {
1559         vm_map_entry_t current;
1560         vm_map_entry_t entry;
1561
1562         vm_map_lock(map);
1563
1564         VM_MAP_RANGE_CHECK(map, start, end);
1565
1566         if (vm_map_lookup_entry(map, start, &entry)) {
1567                 vm_map_clip_start(map, entry, start);
1568         } else {
1569                 entry = entry->next;
1570         }
1571
1572         /*
1573          * Make a first pass to check for protection violations.
1574          */
1575         current = entry;
1576         while ((current != &map->header) && (current->start < end)) {
1577                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1578                         vm_map_unlock(map);
1579                         return (KERN_INVALID_ARGUMENT);
1580                 }
1581                 if ((new_prot & current->max_protection) != new_prot) {
1582                         vm_map_unlock(map);
1583                         return (KERN_PROTECTION_FAILURE);
1584                 }
1585                 current = current->next;
1586         }
1587
1588         /*
1589          * Go back and fix up protections. [Note that clipping is not
1590          * necessary the second time.]
1591          */
1592         current = entry;
1593         while ((current != &map->header) && (current->start < end)) {
1594                 vm_prot_t old_prot;
1595
1596                 vm_map_clip_end(map, current, end);
1597
1598                 old_prot = current->protection;
1599                 if (set_max)
1600                         current->protection =
1601                             (current->max_protection = new_prot) &
1602                             old_prot;
1603                 else
1604                         current->protection = new_prot;
1605
1606                 /*
1607                  * Update physical map if necessary. Worry about copy-on-write
1608                  * here -- CHECK THIS XXX
1609                  */
1610                 if (current->protection != old_prot) {
1611 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1612                                                         VM_PROT_ALL)
1613                         pmap_protect(map->pmap, current->start,
1614                             current->end,
1615                             current->protection & MASK(current));
1616 #undef  MASK
1617                 }
1618                 vm_map_simplify_entry(map, current);
1619                 current = current->next;
1620         }
1621         vm_map_unlock(map);
1622         return (KERN_SUCCESS);
1623 }
1624
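/*
 * Usage sketch (added commentary, not part of the original source): the
 * mprotect(2) implementation reduces to a single vm_map_protect() call
 * on the current process map, roughly:
 *
 *      switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map,
 *          trunc_page(addr), round_page(addr + size), prot, FALSE)) {
 *      case KERN_SUCCESS:
 *              return (0);
 *      case KERN_PROTECTION_FAILURE:
 *              return (EACCES);
 *      }
 *      return (EINVAL);
 */
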
1625 /*
1626  *      vm_map_madvise:
1627  *
1628  *      This routine traverses a process's map handling the madvise
1629  *      system call.  Advisories are classified as either those affecting
1630  *      the vm_map_entry structure, or those affecting the underlying
1631  *      objects.
1632  */
1633 int
1634 vm_map_madvise(
1635         vm_map_t map,
1636         vm_offset_t start,
1637         vm_offset_t end,
1638         int behav)
1639 {
1640         vm_map_entry_t current, entry;
1641         int modify_map = 0;
1642
1643         /*
1644          * Some madvise calls directly modify the vm_map_entry, in which case
1645          * we need to use an exclusive lock on the map and we need to perform
1646          * various clipping operations.  Otherwise we only need a read-lock
1647          * on the map.
1648          */
1649         switch(behav) {
1650         case MADV_NORMAL:
1651         case MADV_SEQUENTIAL:
1652         case MADV_RANDOM:
1653         case MADV_NOSYNC:
1654         case MADV_AUTOSYNC:
1655         case MADV_NOCORE:
1656         case MADV_CORE:
1657                 modify_map = 1;
1658                 vm_map_lock(map);
1659                 break;
1660         case MADV_WILLNEED:
1661         case MADV_DONTNEED:
1662         case MADV_FREE:
1663                 vm_map_lock_read(map);
1664                 break;
1665         default:
1666                 return (KERN_INVALID_ARGUMENT);
1667         }
1668
1669         /*
1670          * Locate starting entry and clip if necessary.
1671          */
1672         VM_MAP_RANGE_CHECK(map, start, end);
1673
1674         if (vm_map_lookup_entry(map, start, &entry)) {
1675                 if (modify_map)
1676                         vm_map_clip_start(map, entry, start);
1677         } else {
1678                 entry = entry->next;
1679         }
1680
1681         if (modify_map) {
1682                 /*
1683                  * madvise behaviors that are implemented in the vm_map_entry.
1684                  *
1685                  * We clip the vm_map_entry so that behavioral changes are
1686                  * limited to the specified address range.
1687                  */
1688                 for (current = entry;
1689                      (current != &map->header) && (current->start < end);
1690                      current = current->next
1691                 ) {
1692                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1693                                 continue;
1694
1695                         vm_map_clip_end(map, current, end);
1696
1697                         switch (behav) {
1698                         case MADV_NORMAL:
1699                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
1700                                 break;
1701                         case MADV_SEQUENTIAL:
1702                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
1703                                 break;
1704                         case MADV_RANDOM:
1705                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
1706                                 break;
1707                         case MADV_NOSYNC:
1708                                 current->eflags |= MAP_ENTRY_NOSYNC;
1709                                 break;
1710                         case MADV_AUTOSYNC:
1711                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
1712                                 break;
1713                         case MADV_NOCORE:
1714                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
1715                                 break;
1716                         case MADV_CORE:
1717                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
1718                                 break;
1719                         default:
1720                                 break;
1721                         }
1722                         vm_map_simplify_entry(map, current);
1723                 }
1724                 vm_map_unlock(map);
1725         } else {
1726                 vm_pindex_t pindex;
1727                 int count;
1728
1729                 /*
1730                  * madvise behaviors that are implemented in the underlying
1731                  * vm_object.
1732                  *
1733                  * Since we don't clip the vm_map_entry, we have to clip
1734                  * the vm_object pindex and count.
1735                  */
1736                 for (current = entry;
1737                      (current != &map->header) && (current->start < end);
1738                      current = current->next
1739                 ) {
1740                         vm_offset_t useStart;
1741
1742                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
1743                                 continue;
1744
1745                         pindex = OFF_TO_IDX(current->offset);
1746                         count = atop(current->end - current->start);
1747                         useStart = current->start;
1748
1749                         if (current->start < start) {
1750                                 pindex += atop(start - current->start);
1751                                 count -= atop(start - current->start);
1752                                 useStart = start;
1753                         }
1754                         if (current->end > end)
1755                                 count -= atop(current->end - end);
1756
1757                         if (count <= 0)
1758                                 continue;
1759
1760                         vm_object_madvise(current->object.vm_object,
1761                                           pindex, count, behav);
1762                         if (behav == MADV_WILLNEED) {
1763                                 vm_map_pmap_enter(map,
1764                                     useStart,
1765                                     current->protection,
1766                                     current->object.vm_object,
1767                                     pindex,
1768                                     (count << PAGE_SHIFT),
1769                                     MAP_PREFAULT_MADVISE
1770                                 );
1771                         }
1772                 }
1773                 vm_map_unlock_read(map);
1774         }
1775         return (0);
1776 }
1777
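/*
 * Usage sketch (added commentary, not part of the original source): the
 * madvise(2) implementation passes the page-rounded range straight
 * through, roughly:
 *
 *      return (vm_map_madvise(&td->td_proc->p_vmspace->vm_map,
 *          trunc_page(addr), round_page(addr + len),
 *          behav) ? EINVAL : 0);
 */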
1778
1779 /*
1780  *      vm_map_inherit:
1781  *
1782  *      Sets the inheritance of the specified address
1783  *      range in the target map.  Inheritance
1784  *      affects how the map will be shared with
1785  *      child maps at the time of vm_map_fork.
1786  */
1787 int
1788 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1789                vm_inherit_t new_inheritance)
1790 {
1791         vm_map_entry_t entry;
1792         vm_map_entry_t temp_entry;
1793
1794         switch (new_inheritance) {
1795         case VM_INHERIT_NONE:
1796         case VM_INHERIT_COPY:
1797         case VM_INHERIT_SHARE:
1798                 break;
1799         default:
1800                 return (KERN_INVALID_ARGUMENT);
1801         }
1802         vm_map_lock(map);
1803         VM_MAP_RANGE_CHECK(map, start, end);
1804         if (vm_map_lookup_entry(map, start, &temp_entry)) {
1805                 entry = temp_entry;
1806                 vm_map_clip_start(map, entry, start);
1807         } else
1808                 entry = temp_entry->next;
1809         while ((entry != &map->header) && (entry->start < end)) {
1810                 vm_map_clip_end(map, entry, end);
1811                 entry->inheritance = new_inheritance;
1812                 vm_map_simplify_entry(map, entry);
1813                 entry = entry->next;
1814         }
1815         vm_map_unlock(map);
1816         return (KERN_SUCCESS);
1817 }
1818
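/*
 * Usage sketch (added commentary, not part of the original source):
 * minherit(2) is a thin wrapper, roughly:
 *
 *      if (vm_map_inherit(&td->td_proc->p_vmspace->vm_map,
 *          trunc_page(addr), round_page(addr + len),
 *          inherit) != KERN_SUCCESS)
 *              return (EINVAL);
 *      return (0);
 */
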
1819 /*
1820  *      vm_map_unwire:
1821  *
1822  *      Implements both kernel and user unwiring.
1823  */
1824 int
1825 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
1826     int flags)
1827 {
1828         vm_map_entry_t entry, first_entry, tmp_entry;
1829         vm_offset_t saved_start;
1830         unsigned int last_timestamp;
1831         int rv;
1832         boolean_t need_wakeup, result, user_unwire;
1833
1834         user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
1835         vm_map_lock(map);
1836         VM_MAP_RANGE_CHECK(map, start, end);
1837         if (!vm_map_lookup_entry(map, start, &first_entry)) {
1838                 if (flags & VM_MAP_WIRE_HOLESOK)
1839                         first_entry = first_entry->next;
1840                 else {
1841                         vm_map_unlock(map);
1842                         return (KERN_INVALID_ADDRESS);
1843                 }
1844         }
1845         last_timestamp = map->timestamp;
1846         entry = first_entry;
1847         while (entry != &map->header && entry->start < end) {
1848                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1849                         /*
1850                          * We have not yet clipped the entry.
1851                          */
1852                         saved_start = (start >= entry->start) ? start :
1853                             entry->start;
1854                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1855                         if (vm_map_unlock_and_wait(map, user_unwire)) {
1856                                 /*
1857                                  * Allow interruption of user unwiring?
1858                                  */
1859                         }
1860                         vm_map_lock(map);
1861                         if (last_timestamp+1 != map->timestamp) {
1862                                 /*
1863                                  * Look again for the entry because the map was
1864                                  * modified while it was unlocked.
1865                                  * Specifically, the entry may have been
1866                                  * clipped, merged, or deleted.
1867                                  */
1868                                 if (!vm_map_lookup_entry(map, saved_start,
1869                                     &tmp_entry)) {
1870                                         if (flags & VM_MAP_WIRE_HOLESOK)
1871                                                 tmp_entry = tmp_entry->next;
1872                                         else {
1873                                                 if (saved_start == start) {
1874                                                         /*
1875                                                          * first_entry has been deleted.
1876                                                          */
1877                                                         vm_map_unlock(map);
1878                                                         return (KERN_INVALID_ADDRESS);
1879                                                 }
1880                                                 end = saved_start;
1881                                                 rv = KERN_INVALID_ADDRESS;
1882                                                 goto done;
1883                                         }
1884                                 }
1885                                 if (entry == first_entry)
1886                                         first_entry = tmp_entry;
1887                                 else
1888                                         first_entry = NULL;
1889                                 entry = tmp_entry;
1890                         }
1891                         last_timestamp = map->timestamp;
1892                         continue;
1893                 }
1894                 vm_map_clip_start(map, entry, start);
1895                 vm_map_clip_end(map, entry, end);
1896                 /*
1897                  * Mark the entry in case the map lock is released.  (See
1898                  * above.)
1899                  */
1900                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1901                 /*
1902                  * Check the map for holes in the specified region.
1903                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
1904                  */
1905                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
1906                     (entry->end < end && (entry->next == &map->header ||
1907                     entry->next->start > entry->end))) {
1908                         end = entry->end;
1909                         rv = KERN_INVALID_ADDRESS;
1910                         goto done;
1911                 }
1912                 /*
1913                  * If system unwiring, require that the entry is system wired.
1914                  */
1915                 if (!user_unwire &&
1916                     vm_map_entry_system_wired_count(entry) == 0) {
1917                         end = entry->end;
1918                         rv = KERN_INVALID_ARGUMENT;
1919                         goto done;
1920                 }
1921                 entry = entry->next;
1922         }
1923         rv = KERN_SUCCESS;
1924 done:
1925         need_wakeup = FALSE;
1926         if (first_entry == NULL) {
1927                 result = vm_map_lookup_entry(map, start, &first_entry);
1928                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
1929                         first_entry = first_entry->next;
1930                 else
1931                         KASSERT(result, ("vm_map_unwire: lookup failed"));
1932         }
1933         entry = first_entry;
1934         while (entry != &map->header && entry->start < end) {
1935                 if (rv == KERN_SUCCESS && (!user_unwire ||
1936                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
1937                         if (user_unwire)
1938                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1939                         entry->wired_count--;
1940                         if (entry->wired_count == 0) {
1941                                 /*
1942                                  * Retain the map lock.
1943                                  */
1944                                 vm_fault_unwire(map, entry->start, entry->end,
1945                                     entry->object.vm_object != NULL &&
1946                                     entry->object.vm_object->type == OBJT_DEVICE);
1947                         }
1948                 }
1949                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1950                         ("vm_map_unwire: in-transition flag missing"));
1951                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1952                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1953                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1954                         need_wakeup = TRUE;
1955                 }
1956                 vm_map_simplify_entry(map, entry);
1957                 entry = entry->next;
1958         }
1959         vm_map_unlock(map);
1960         if (need_wakeup)
1961                 vm_map_wakeup(map);
1962         return (rv);
1963 }
1964
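/*
 * Usage sketch (added commentary, not part of the original source):
 * munlock(2) performs a user unwire over the page-rounded range and
 * reports any failure as ENOMEM, roughly:
 *
 *      error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map,
 *          trunc_page(addr), round_page(addr + len),
 *          VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *      return (error == KERN_SUCCESS ? 0 : ENOMEM);
 */
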
1965 /*
1966  *      vm_map_wire:
1967  *
1968  *      Implements both kernel and user wiring.
1969  */
1970 int
1971 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
1972     int flags)
1973 {
1974         vm_map_entry_t entry, first_entry, tmp_entry;
1975         vm_offset_t saved_end, saved_start;
1976         unsigned int last_timestamp;
1977         int rv;
1978         boolean_t fictitious, need_wakeup, result, user_wire;
1979
1980         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
1981         vm_map_lock(map);
1982         VM_MAP_RANGE_CHECK(map, start, end);
1983         if (!vm_map_lookup_entry(map, start, &first_entry)) {
1984                 if (flags & VM_MAP_WIRE_HOLESOK)
1985                         first_entry = first_entry->next;
1986                 else {
1987                         vm_map_unlock(map);
1988                         return (KERN_INVALID_ADDRESS);
1989                 }
1990         }
1991         last_timestamp = map->timestamp;
1992         entry = first_entry;
1993         while (entry != &map->header && entry->start < end) {
1994                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1995                         /*
1996                          * We have not yet clipped the entry.
1997                          */
1998                         saved_start = (start >= entry->start) ? start :
1999                             entry->start;
2000                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2001                         if (vm_map_unlock_and_wait(map, user_wire)) {
2002                                 /*
2003                                  * Allow interruption of user wiring?
2004                                  */
2005                         }
2006                         vm_map_lock(map);
2007                         if (last_timestamp + 1 != map->timestamp) {
2008                                 /*
2009                                  * Look again for the entry because the map was
2010                                  * modified while it was unlocked.
2011                                  * Specifically, the entry may have been
2012                                  * clipped, merged, or deleted.
2013                                  */
2014                                 if (!vm_map_lookup_entry(map, saved_start,
2015                                     &tmp_entry)) {
2016                                         if (flags & VM_MAP_WIRE_HOLESOK)
2017                                                 tmp_entry = tmp_entry->next;
2018                                         else {
2019                                                 if (saved_start == start) {
2020                                                         /*
2021                                                          * first_entry has been deleted.
2022                                                          */
2023                                                         vm_map_unlock(map);
2024                                                         return (KERN_INVALID_ADDRESS);
2025                                                 }
2026                                                 end = saved_start;
2027                                                 rv = KERN_INVALID_ADDRESS;
2028                                                 goto done;
2029                                         }
2030                                 }
2031                                 if (entry == first_entry)
2032                                         first_entry = tmp_entry;
2033                                 else
2034                                         first_entry = NULL;
2035                                 entry = tmp_entry;
2036                         }
2037                         last_timestamp = map->timestamp;
2038                         continue;
2039                 }
2040                 vm_map_clip_start(map, entry, start);
2041                 vm_map_clip_end(map, entry, end);
2042                 /*
2043                  * Mark the entry in case the map lock is released.  (See
2044                  * above.)
2045                  */
2046                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2047         /*
2048          * Wire the entry if it is not already wired.
2049          */
2050                 if (entry->wired_count == 0) {
2051                         entry->wired_count++;
2052                         saved_start = entry->start;
2053                         saved_end = entry->end;
2054                         fictitious = entry->object.vm_object != NULL &&
2055                             entry->object.vm_object->type == OBJT_DEVICE;
2056                         /*
2057                          * Release the map lock, relying on the in-transition
2058                          * mark.
2059                          */
2060                         vm_map_unlock(map);
2061                         rv = vm_fault_wire(map, saved_start, saved_end,
2062                             user_wire, fictitious);
2063                         vm_map_lock(map);
2064                         if (last_timestamp + 1 != map->timestamp) {
2065                                 /*
2066                                  * Look again for the entry because the map was
2067                                  * modified while it was unlocked.  The entry
2068                                  * may have been clipped, but NOT merged or
2069                                  * deleted.
2070                                  */
2071                                 result = vm_map_lookup_entry(map, saved_start,
2072                                     &tmp_entry);
2073                                 KASSERT(result, ("vm_map_wire: lookup failed"));
2074                                 if (entry == first_entry)
2075                                         first_entry = tmp_entry;
2076                                 else
2077                                         first_entry = NULL;
2078                                 entry = tmp_entry;
2079                                 while (entry->end < saved_end) {
2080                                         if (rv != KERN_SUCCESS) {
2081                                                 KASSERT(entry->wired_count == 1,
2082                                                     ("vm_map_wire: bad count"));
2083                                                 entry->wired_count = -1;
2084                                         }
2085                                         entry = entry->next;
2086                                 }
2087                         }
2088                         last_timestamp = map->timestamp;
2089                         if (rv != KERN_SUCCESS) {
2090                                 KASSERT(entry->wired_count == 1,
2091                                     ("vm_map_wire: bad count"));
2092                                 /*
2093                                  * Assign an out-of-range value to represent
2094                                  * the failure to wire this entry.
2095                                  */
2096                                 entry->wired_count = -1;
2097                                 end = entry->end;
2098                                 goto done;
2099                         }
2100                 } else if (!user_wire ||
2101                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2102                         entry->wired_count++;
2103                 }
2104                 /*
2105                  * Check the map for holes in the specified region.
2106                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2107                  */
2108                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2109                     (entry->end < end && (entry->next == &map->header ||
2110                     entry->next->start > entry->end))) {
2111                         end = entry->end;
2112                         rv = KERN_INVALID_ADDRESS;
2113                         goto done;
2114                 }
2115                 entry = entry->next;
2116         }
2117         rv = KERN_SUCCESS;
2118 done:
2119         need_wakeup = FALSE;
2120         if (first_entry == NULL) {
2121                 result = vm_map_lookup_entry(map, start, &first_entry);
2122                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2123                         first_entry = first_entry->next;
2124                 else
2125                         KASSERT(result, ("vm_map_wire: lookup failed"));
2126         }
2127         entry = first_entry;
2128         while (entry != &map->header && entry->start < end) {
2129                 if (rv == KERN_SUCCESS) {
2130                         if (user_wire)
2131                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
2132                 } else if (entry->wired_count == -1) {
2133                         /*
2134                          * Wiring failed on this entry.  Thus, unwiring is
2135                          * unnecessary.
2136                          */
2137                         entry->wired_count = 0;
2138                 } else {
2139                         if (!user_wire ||
2140                             (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
2141                                 entry->wired_count--;
2142                         if (entry->wired_count == 0) {
2143                                 /*
2144                                  * Retain the map lock.
2145                                  */
2146                                 vm_fault_unwire(map, entry->start, entry->end,
2147                                     entry->object.vm_object != NULL &&
2148                                     entry->object.vm_object->type == OBJT_DEVICE);
2149                         }
2150                 }
2151                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
2152                         ("vm_map_wire: in-transition flag missing"));
2153                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2154                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2155                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2156                         need_wakeup = TRUE;
2157                 }
2158                 vm_map_simplify_entry(map, entry);
2159                 entry = entry->next;
2160         }
2161         vm_map_unlock(map);
2162         if (need_wakeup)
2163                 vm_map_wakeup(map);
2164         return (rv);
2165 }
2166
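/*
 * Usage sketch (added commentary, not part of the original source):
 * mlock(2) is the user-wire counterpart of the munlock(2) sketch above,
 * roughly:
 *
 *      error = vm_map_wire(&td->td_proc->p_vmspace->vm_map,
 *          trunc_page(addr), round_page(addr + len),
 *          VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *      return (error == KERN_SUCCESS ? 0 : ENOMEM);
 */
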
2167 /*
2168  * vm_map_sync
2169  *
2170  * Push any dirty cached pages in the address range to their pager.
2171  * If syncio is TRUE, dirty pages are written synchronously.
2172  * If invalidate is TRUE, any cached pages are freed as well.
2173  *
2174  * If the size of the region from start to end is zero, we are
2175  * supposed to flush all modified pages within the region containing
2176  * start.  Unfortunately, a region can be split or coalesced with
2177  * neighboring regions, making it difficult to determine what the
2178  * original region was.  Therefore, we approximate this requirement by
2179  * flushing the current region containing start.
2180  *
2181  * Returns an error if any part of the specified range is not mapped.
2182  */
2183 int
2184 vm_map_sync(
2185         vm_map_t map,
2186         vm_offset_t start,
2187         vm_offset_t end,
2188         boolean_t syncio,
2189         boolean_t invalidate)
2190 {
2191         vm_map_entry_t current;
2192         vm_map_entry_t entry;
2193         vm_size_t size;
2194         vm_object_t object;
2195         vm_ooffset_t offset;
2196
2197         vm_map_lock_read(map);
2198         VM_MAP_RANGE_CHECK(map, start, end);
2199         if (!vm_map_lookup_entry(map, start, &entry)) {
2200                 vm_map_unlock_read(map);
2201                 return (KERN_INVALID_ADDRESS);
2202         } else if (start == end) {
2203                 start = entry->start;
2204                 end = entry->end;
2205         }
2206         /*
2207          * Make a first pass to check for user-wired memory and holes.
2208          */
2209         for (current = entry; current != &map->header && current->start < end;
2210             current = current->next) {
2211                 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2212                         vm_map_unlock_read(map);
2213                         return (KERN_INVALID_ARGUMENT);
2214                 }
2215                 if (end > current->end &&
2216                     (current->next == &map->header ||
2217                         current->end != current->next->start)) {
2218                         vm_map_unlock_read(map);
2219                         return (KERN_INVALID_ADDRESS);
2220                 }
2221         }
2222
2223         if (invalidate) {
2224                 VM_LOCK_GIANT();
2225                 pmap_remove(map->pmap, start, end);
2226                 VM_UNLOCK_GIANT();
2227         }
2228         /*
2229          * Make a second pass, cleaning/uncaching pages from the indicated
2230          * objects as we go.
2231          */
2232         for (current = entry; current != &map->header && current->start < end;
2233             current = current->next) {
2234                 offset = current->offset + (start - current->start);
2235                 size = (end <= current->end ? end : current->end) - start;
2236                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2237                         vm_map_t smap;
2238                         vm_map_entry_t tentry;
2239                         vm_size_t tsize;
2240
2241                         smap = current->object.sub_map;
2242                         vm_map_lock_read(smap);
2243                         (void) vm_map_lookup_entry(smap, offset, &tentry);
2244                         tsize = tentry->end - offset;
2245                         if (tsize < size)
2246                                 size = tsize;
2247                         object = tentry->object.vm_object;
2248                         offset = tentry->offset + (offset - tentry->start);
2249                         vm_map_unlock_read(smap);
2250                 } else {
2251                         object = current->object.vm_object;
2252                 }
2253                 vm_object_sync(object, offset, size, syncio, invalidate);
2254                 start += size;
2255         }
2256
2257         vm_map_unlock_read(map);
2258         return (KERN_SUCCESS);
2259 }
2260
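/*
 * Usage sketch (added commentary, not part of the original source):
 * msync(2) maps its flags onto the two booleans, roughly:
 *
 *      syncio = (flags & MS_ASYNC) == 0;
 *      invalidate = (flags & MS_INVALIDATE) != 0;
 *      rv = vm_map_sync(map, addr, addr + size, syncio, invalidate);
 *
 * with KERN_INVALID_ADDRESS typically becoming EINVAL and
 * KERN_INVALID_ARGUMENT (invalidating user-wired pages) becoming EBUSY.
 */
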
2261 /*
2262  *      vm_map_entry_unwire:    [ internal use only ]
2263  *
2264  *      Make the region specified by this entry pageable.
2265  *
2266  *      The map in question should be locked.
2267  *      [This is the reason for this routine's existence.]
2268  */
2269 static void
2270 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2271 {
2272         vm_fault_unwire(map, entry->start, entry->end,
2273             entry->object.vm_object != NULL &&
2274             entry->object.vm_object->type == OBJT_DEVICE);
2275         entry->wired_count = 0;
2276 }
2277
2278 /*
2279  *      vm_map_entry_delete:    [ internal use only ]
2280  *
2281  *      Deallocate the given entry from the target map.
2282  */
2283 static void
2284 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2285 {
2286         vm_object_t object;
2287         vm_pindex_t offidxstart, offidxend, count;
2288
2289         vm_map_entry_unlink(map, entry);
2290         map->size -= entry->end - entry->start;
2291
2292         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2293             (object = entry->object.vm_object) != NULL) {
2294                 count = OFF_TO_IDX(entry->end - entry->start);
2295                 offidxstart = OFF_TO_IDX(entry->offset);
2296                 offidxend = offidxstart + count;
2297                 VM_OBJECT_LOCK(object);
2298                 if (object->ref_count != 1 &&
2299                     ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2300                      object == kernel_object || object == kmem_object) &&
2301                     (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2302                         vm_object_collapse(object);
2303                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2304                         if (object->type == OBJT_SWAP)
2305                                 swap_pager_freespace(object, offidxstart, count);
2306                         if (offidxend >= object->size &&
2307                             offidxstart < object->size)
2308                                 object->size = offidxstart;
2309                 }
2310                 VM_OBJECT_UNLOCK(object);
2311                 vm_object_deallocate(object);
2312         }
2313
2314         vm_map_entry_dispose(map, entry);
2315 }
2316
2317 /*
2318  *      vm_map_delete:  [ internal use only ]
2319  *
2320  *      Deallocates the given address range from the target
2321  *      map.
2322  */
2323 int
2324 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2325 {
2326         vm_map_entry_t entry;
2327         vm_map_entry_t first_entry;
2328
2329         /*
2330          * Find the start of the region, and clip it
2331          */
2332         if (!vm_map_lookup_entry(map, start, &first_entry))
2333                 entry = first_entry->next;
2334         else {
2335                 entry = first_entry;
2336                 vm_map_clip_start(map, entry, start);
2337         }
2338
2339         /*
2340          * Step through all entries in this region
2341          */
2342         while ((entry != &map->header) && (entry->start < end)) {
2343                 vm_map_entry_t next;
2344
2345                 /*
2346                  * Wait for wiring or unwiring of an entry to complete.
2347                  * Also wait for any system wirings to disappear on
2348                  * user maps.
2349                  */
2350                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
2351                     (vm_map_pmap(map) != kernel_pmap &&
2352                     vm_map_entry_system_wired_count(entry) != 0)) {
2353                         unsigned int last_timestamp;
2354                         vm_offset_t saved_start;
2355                         vm_map_entry_t tmp_entry;
2356
2357                         saved_start = entry->start;
2358                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2359                         last_timestamp = map->timestamp;
2360                         (void) vm_map_unlock_and_wait(map, FALSE);
2361                         vm_map_lock(map);
2362                         if (last_timestamp + 1 != map->timestamp) {
2363                                 /*
2364                                  * Look again for the entry because the map was
2365                                  * modified while it was unlocked.
2366                                  * Specifically, the entry may have been
2367                                  * clipped, merged, or deleted.
2368                                  */
2369                                 if (!vm_map_lookup_entry(map, saved_start,
2370                                                          &tmp_entry))
2371                                         entry = tmp_entry->next;
2372                                 else {
2373                                         entry = tmp_entry;
2374                                         vm_map_clip_start(map, entry,
2375                                                           saved_start);
2376                                 }
2377                         }
2378                         continue;
2379                 }
2380                 vm_map_clip_end(map, entry, end);
2381
2382                 next = entry->next;
2383
2384                 /*
2385                  * Unwire before removing addresses from the pmap; otherwise,
2386                  * unwiring will put the entries back in the pmap.
2387                  */
2388                 if (entry->wired_count != 0) {
2389                         vm_map_entry_unwire(map, entry);
2390                 }
2391
2392                 if (!map->system_map)
2393                         VM_LOCK_GIANT();
2394                 pmap_remove(map->pmap, entry->start, entry->end);
2395                 if (!map->system_map)
2396                         VM_UNLOCK_GIANT();
2397
2398                 /*
2399                  * Delete the entry (which may delete the object) only after
2400                  * removing all pmap entries pointing to its pages.
2401                  * (Otherwise, its page frames may be reallocated, and any
2402                  * modify bits will be set in the wrong object!)
2403                  */
2404                 vm_map_entry_delete(map, entry);
2405                 entry = next;
2406         }
2407         return (KERN_SUCCESS);
2408 }
2409
2410 /*
2411  *      vm_map_remove:
2412  *
2413  *      Remove the given address range from the target map.
2414  *      This is the exported form of vm_map_delete.
2415  */
2416 int
2417 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2418 {
2419         int result;
2420
2421         vm_map_lock(map);
2422         VM_MAP_RANGE_CHECK(map, start, end);
2423         result = vm_map_delete(map, start, end);
2424         vm_map_unlock(map);
2425         return (result);
2426 }
2427
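/*
 * Usage sketch (added commentary, not part of the original source):
 * kernel allocators tear down submap ranges this way; kmem_free(), for
 * example, is roughly:
 *
 *      (void) vm_map_remove(map, trunc_page(addr),
 *          round_page(addr + size));
 */
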
2428 /*
2429  *      vm_map_check_protection:
2430  *
2431  *      Assert that the target map allows the specified privilege on the
2432  *      entire address region given.  The entire region must be allocated.
2433  *
2434  *      WARNING!  This code does not and should not check whether the
2435  *      contents of the region are accessible.  For example, a smaller file
2436  *      might be mapped into a larger address space.
2437  *
2438  *      NOTE!  This code is also called by munmap().
2439  *
2440  *      The map must be locked.  A read lock is sufficient.
2441  */
2442 boolean_t
2443 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2444                         vm_prot_t protection)
2445 {
2446         vm_map_entry_t entry;
2447         vm_map_entry_t tmp_entry;
2448
2449         if (!vm_map_lookup_entry(map, start, &tmp_entry))
2450                 return (FALSE);
2451         entry = tmp_entry;
2452
2453         while (start < end) {
2454                 if (entry == &map->header)
2455                         return (FALSE);
2456                 /*
2457                  * No holes allowed!
2458                  */
2459                 if (start < entry->start)
2460                         return (FALSE);
2461                 /*
2462                  * Check protection associated with entry.
2463                  */
2464                 if ((entry->protection & protection) != protection)
2465                         return (FALSE);
2466                 /* go to next entry */
2467                 start = entry->end;
2468                 entry = entry->next;
2469         }
2470         return (TRUE);
2471 }
2472
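/*
 * Usage sketch (added commentary, not part of the original source): a
 * caller holds at least a read lock across the check, e.g.:
 *
 *      vm_map_lock_read(map);
 *      ok = vm_map_check_protection(map, trunc_page(addr),
 *          round_page(addr + len), VM_PROT_READ | VM_PROT_WRITE);
 *      vm_map_unlock_read(map);
 */
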
2473 /*
2474  *      vm_map_copy_entry:
2475  *
2476  *      Copies the contents of the source entry to the destination
2477  *      entry.  The entries *must* be aligned properly.
2478  */
2479 static void
2480 vm_map_copy_entry(
2481         vm_map_t src_map,
2482         vm_map_t dst_map,
2483         vm_map_entry_t src_entry,
2484         vm_map_entry_t dst_entry)
2485 {
2486         vm_object_t src_object;
2487
2488         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2489                 return;
2490
2491         if (src_entry->wired_count == 0) {
2492
2493                 /*
2494                  * If the source entry is marked needs_copy, it is already
2495                  * write-protected.
2496                  */
2497                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2498                         pmap_protect(src_map->pmap,
2499                             src_entry->start,
2500                             src_entry->end,
2501                             src_entry->protection & ~VM_PROT_WRITE);
2502                 }
2503
2504                 /*
2505                  * Make a copy of the object.
2506                  */
2507                 if ((src_object = src_entry->object.vm_object) != NULL) {
2508                         VM_OBJECT_LOCK(src_object);
2509                         if ((src_object->handle == NULL) &&
2510                                 (src_object->type == OBJT_DEFAULT ||
2511                                  src_object->type == OBJT_SWAP)) {
2512                                 vm_object_collapse(src_object);
2513                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2514                                         vm_object_split(src_entry);
2515                                         src_object = src_entry->object.vm_object;
2516                                 }
2517                         }
2518                         vm_object_reference_locked(src_object);
2519                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2520                         VM_OBJECT_UNLOCK(src_object);
2521                         dst_entry->object.vm_object = src_object;
2522                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2523                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2524                         dst_entry->offset = src_entry->offset;
2525                 } else {
2526                         dst_entry->object.vm_object = NULL;
2527                         dst_entry->offset = 0;
2528                 }
2529
2530                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2531                     dst_entry->end - dst_entry->start, src_entry->start);
2532         } else {
2533                 /*
2534                  * Of course, wired down pages can't be set copy-on-write.
2535                  * Cause wired pages to be copied into the new map by
2536                  * simulating faults (the new pages are pageable)
2537                  */
2538                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2539         }
2540 }
2541
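/*
 * Added note (not part of the original source): for an unwired entry
 * the copy above leaves both entries aliasing a single object, roughly:
 *
 *      src_entry: object O, eflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY
 *      dst_entry: object O, eflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY
 *
 * with the source range write-protected in its pmap.  The first write
 * fault on either side then creates a shadow object for that side and
 * clears its MAP_ENTRY_NEEDS_COPY.
 */
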
2542 /*
2543  * vmspace_map_entry_forked:
2544  * Update the newly-forked vmspace each time a map entry is inherited
2545  * or copied.  The values for vm_dsize and vm_tsize are approximate
2546  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
2547  */
2548 static void
2549 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
2550     vm_map_entry_t entry)
2551 {
2552         vm_size_t entrysize;
2553         vm_offset_t newend;
2554
2555         entrysize = entry->end - entry->start;
2556         vm2->vm_map.size += entrysize;
2557         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
2558                 vm2->vm_ssize += btoc(entrysize);
2559         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
2560             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
2561                 newend = MIN(entry->end,
2562                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
2563                 vm2->vm_dsize += btoc(newend - entry->start);
2564         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
2565             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
2566                 newend = MIN(entry->end,
2567                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
2568                 vm2->vm_tsize += btoc(newend - entry->start);
2569         }
2570 }
2571
2572 /*
2573  * vmspace_fork:
2574  * Create a new process vmspace structure and vm_map
2575  * based on those of an existing process.  The new map
2576  * is based on the old map, according to the inheritance
2577  * values on the regions in that map.
2578  *
2579  * XXX It might be worth coalescing the entries added to the new vmspace.
2580  *
2581  * The source map must not be locked.
2582  */
2583 struct vmspace *
2584 vmspace_fork(struct vmspace *vm1)
2585 {
2586         struct vmspace *vm2;
2587         vm_map_t old_map = &vm1->vm_map;
2588         vm_map_t new_map;
2589         vm_map_entry_t old_entry;
2590         vm_map_entry_t new_entry;
2591         vm_object_t object;
2592
2593         vm_map_lock(old_map);
2594
2595         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2596         vm2->vm_taddr = vm1->vm_taddr;
2597         vm2->vm_daddr = vm1->vm_daddr;
2598         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
2599         new_map = &vm2->vm_map; /* XXX */
2600         new_map->timestamp = 1;
2601
2602         /* Do not inherit the MAP_WIREFUTURE property. */
2603         if ((new_map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE)
2604                 new_map->flags &= ~MAP_WIREFUTURE;
2605
2606         old_entry = old_map->header.next;
2607
2608         while (old_entry != &old_map->header) {
2609                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2610                         panic("vm_map_fork: encountered a submap");
2611
2612                 switch (old_entry->inheritance) {
2613                 case VM_INHERIT_NONE:
2614                         break;
2615
2616                 case VM_INHERIT_SHARE:
2617                         /*
2618                          * Clone the entry, creating the shared object if necessary.
2619                          */
2620                         object = old_entry->object.vm_object;
2621                         if (object == NULL) {
2622                                 object = vm_object_allocate(OBJT_DEFAULT,
2623                                         atop(old_entry->end - old_entry->start));
2624                                 old_entry->object.vm_object = object;
2625                                 old_entry->offset = 0;
2626                         }
2627
2628                         /*
2629                          * Add the reference before calling vm_object_shadow
2630          * to ensure that a shadow object is created.
2631                          */
2632                         vm_object_reference(object);
2633                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2634                                 vm_object_shadow(&old_entry->object.vm_object,
2635                                         &old_entry->offset,
2636                                         atop(old_entry->end - old_entry->start));
2637                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2638                                 /* Transfer the second reference too. */
2639                                 vm_object_reference(
2640                                     old_entry->object.vm_object);
2641                                 vm_object_deallocate(object);
2642                                 object = old_entry->object.vm_object;
2643                         }
2644                         VM_OBJECT_LOCK(object);
2645                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
2646                         VM_OBJECT_UNLOCK(object);
2647
2648                         /*
2649                          * Clone the entry, referencing the shared object.
2650                          */
2651                         new_entry = vm_map_entry_create(new_map);
2652                         *new_entry = *old_entry;
2653                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2654                         new_entry->wired_count = 0;
2655
2656                         /*
2657                          * Insert the entry into the new map -- we know we're
2658                          * inserting at the end of the new map.
2659                          */
2660                         vm_map_entry_link(new_map, new_map->header.prev,
2661                             new_entry);
2662                         vmspace_map_entry_forked(vm1, vm2, new_entry);
2663
2664                         /*
2665                          * Update the physical map
2666                          */
2667                         pmap_copy(new_map->pmap, old_map->pmap,
2668                             new_entry->start,
2669                             (old_entry->end - old_entry->start),
2670                             old_entry->start);
2671                         break;
2672
2673                 case VM_INHERIT_COPY:
2674                         /*
2675                          * Clone the entry and link into the map.
2676                          */
2677                         new_entry = vm_map_entry_create(new_map);
2678                         *new_entry = *old_entry;
2679                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2680                         new_entry->wired_count = 0;
2681                         new_entry->object.vm_object = NULL;
2682                         vm_map_entry_link(new_map, new_map->header.prev,
2683                             new_entry);
2684                         vmspace_map_entry_forked(vm1, vm2, new_entry);
2685                         vm_map_copy_entry(old_map, new_map, old_entry,
2686                             new_entry);
2687                         break;
2688                 }
2689                 old_entry = old_entry->next;
2690         }
2691
2692         vm_map_unlock(old_map);
2693
2694         return (vm2);
2695 }
2696
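/*
 * Usage sketch (added commentary, not part of the original source): the
 * fork path creates the child address space from the parent's, roughly:
 *
 *      p2->p_vmspace = vmspace_fork(p1->p_vmspace);
 */
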
2697 int
2698 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2699     vm_prot_t prot, vm_prot_t max, int cow)
2700 {
2701         vm_map_entry_t new_entry, prev_entry;
2702         vm_offset_t bot, top;
2703         vm_size_t init_ssize;
2704         int orient, rv;
2705         rlim_t vmemlim;
2706
2707         /*
2708          * The stack orientation is piggybacked with the cow argument.
2709          * Extract it into orient and mask the cow argument so that we
2710          * don't pass it around further.
2711          * NOTE: We explicitly allow bi-directional stacks.
2712          */
2713         orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
2714         cow &= ~orient;
2715         KASSERT(orient != 0, ("No stack grow direction"));
2716
2717         if (addrbos < vm_map_min(map) ||
2718             addrbos > vm_map_max(map) ||
2719             addrbos + max_ssize < addrbos)
2720                 return (KERN_NO_SPACE);
2721
2722         init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz;
2723
2724         PROC_LOCK(curthread->td_proc);
2725         vmemlim = lim_cur(curthread->td_proc, RLIMIT_VMEM);
2726         PROC_UNLOCK(curthread->td_proc);
2727
2728         vm_map_lock(map);
2729
2730         /* If addr is already mapped, no go */
2731         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2732                 vm_map_unlock(map);
2733                 return (KERN_NO_SPACE);
2734         }
2735
2736         /* If we would blow our VMEM resource limit, no go */
2737         if (map->size + init_ssize > vmemlim) {
2738                 vm_map_unlock(map);
2739                 return (KERN_NO_SPACE);
2740         }
2741
2742         /*
2743          * If we can't accommodate max_ssize in the current mapping, no go.
2744          * However, we need to be aware that subsequent user mappings might
2745          * map into the space we have reserved for stack, and currently this
2746          * space is not protected.
2747          *
2748          * Hopefully we will at least detect this condition when we try to
2749          * grow the stack.
2750          */
2751         if ((prev_entry->next != &map->header) &&
2752             (prev_entry->next->start < addrbos + max_ssize)) {
2753                 vm_map_unlock(map);
2754                 return (KERN_NO_SPACE);
2755         }
2756
2757         /*
2758          * We initially map a stack of only init_ssize.  We will grow as
2759          * needed later.  Depending on the orientation of the stack (i.e.
2760          * the grow direction) we either map at the top of the range, the
2761          * bottom of the range or in the middle.
2762          *
2763          * Note: we would normally expect prot and max to be VM_PROT_ALL,
2764          * and cow to be 0.  Possibly we should eliminate these as input
2765          * parameters, and just pass these values here in the insert call.
2766          */
2767         if (orient == MAP_STACK_GROWS_DOWN)
2768                 bot = addrbos + max_ssize - init_ssize;
2769         else if (orient == MAP_STACK_GROWS_UP)
2770                 bot = addrbos;
2771         else
2772                 bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
2773         top = bot + init_ssize;
2774         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
2775
2776         /* Now set the avail_ssize amount. */
2777         if (rv == KERN_SUCCESS) {
2778                 if (prev_entry != &map->header)
2779                         vm_map_clip_end(map, prev_entry, bot);
2780                 new_entry = prev_entry->next;
2781                 if (new_entry->end != top || new_entry->start != bot)
2782                         panic("Bad entry start/end for new stack entry");
2783
2784                 new_entry->avail_ssize = max_ssize - init_ssize;
2785                 if (orient & MAP_STACK_GROWS_DOWN)
2786                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
2787                 if (orient & MAP_STACK_GROWS_UP)
2788                         new_entry->eflags |= MAP_ENTRY_GROWS_UP;
2789         }
2790
2791         vm_map_unlock(map);
2792         return (rv);
2793 }
2794
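/*
 * Usage sketch (added commentary, not part of the original source):
 * exec reserves the main-thread stack this way, roughly:
 *
 *      rv = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
 *          VM_PROT_ALL, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
 */
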
2795 /*
2796  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2797  * desired address is already mapped or if we successfully grow the
2798  * stack; also if addr is outside the stack range (strange, but this
2799  * preserves compatibility with the grow function in vm_machdep.c).
2800  */
2801 int
2802 vm_map_growstack(struct proc *p, vm_offset_t addr)
2803 {
2804         vm_map_entry_t next_entry, prev_entry;
2805         vm_map_entry_t new_entry, stack_entry;
2806         struct vmspace *vm = p->p_vmspace;
2807         vm_map_t map = &vm->vm_map;
2808         vm_offset_t end;
2809         size_t grow_amount, max_grow;
2810         rlim_t stacklim, vmemlim;
2811         int is_procstack, rv;
2812
2813 Retry:
2814         PROC_LOCK(p);
2815         stacklim = lim_cur(p, RLIMIT_STACK);
2816         vmemlim = lim_cur(p, RLIMIT_VMEM);
2817         PROC_UNLOCK(p);
2818
2819         vm_map_lock_read(map);
2820
2821         /* If addr is already in the entry range, no need to grow. */
2822         if (vm_map_lookup_entry(map, addr, &prev_entry)) {
2823                 vm_map_unlock_read(map);
2824                 return (KERN_SUCCESS);
2825         }
2826
2827         next_entry = prev_entry->next;
2828         if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
2829                 /*
2830                  * This entry does not grow upwards. Since the address lies
2831                  * beyond this entry, the next entry (if one exists) has to
2832                  * be a downward growable entry. The entry list header is
2833                  * never a growable entry, so it suffices to check the flags.
2834                  */
2835                 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
2836                         vm_map_unlock_read(map);
2837                         return (KERN_SUCCESS);
2838                 }
2839                 stack_entry = next_entry;
2840         } else {
2841                 /*
2842                  * This entry grows upward.  If the next entry does not at
2843                  * least grow downwards, this is the entry we need to grow;
2844                  * otherwise we have two possible choices and we have to
2845                  * select one.
2846                  */
2847                 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
2848                         /*
2849                          * Both entries can grow, so grow the one closest
2850                          * to the address to minimize the amount of growth.
2851                          */
2852                         if (addr - prev_entry->end <= next_entry->start - addr)
2853                                 stack_entry = prev_entry;
2854                         else
2855                                 stack_entry = next_entry;
2856                 } else
2857                         stack_entry = prev_entry;
2858         }
2859
2860         if (stack_entry == next_entry) {
2861                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
2862                 KASSERT(addr < stack_entry->start, ("foo"));
2863                 end = (prev_entry != &map->header) ? prev_entry->end :
2864                     stack_entry->start - stack_entry->avail_ssize;
2865                 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
2866                 max_grow = stack_entry->start - end;
2867         } else {
2868                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
2869                 KASSERT(addr >= stack_entry->end, ("foo"));
2870                 end = (next_entry != &map->header) ? next_entry->start :
2871                     stack_entry->end + stack_entry->avail_ssize;
2872                 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
2873                 max_grow = end - stack_entry->end;
2874         }
2875
2876         if (grow_amount > stack_entry->avail_ssize) {
2877                 vm_map_unlock_read(map);
2878                 return (KERN_NO_SPACE);
2879         }
2880
2881         /*
2882          * If there is no longer enough space between the entries, fail and
2883          * adjust the available space.  Note: this should only happen if the
2884          * user has mapped into the stack area after the stack was created,
2885          * and is probably an error.
2886          *
2887          * This also effectively destroys any guard page the user might have
2888          * intended by limiting the stack size.
2889          */
2890         if (grow_amount > max_grow) {
2891                 if (vm_map_lock_upgrade(map))
2892                         goto Retry;
2893
2894                 stack_entry->avail_ssize = max_grow;
2895
2896                 vm_map_unlock(map);
2897                 return (KERN_NO_SPACE);
2898         }
2899
2900         is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
2901
2902         /*
2903          * If this is the main process stack, see if we're over the stack
2904          * limit.
2905          */
2906         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
2907                 vm_map_unlock_read(map);
2908                 return (KERN_NO_SPACE);
2909         }
2910
2911         /* Round the grow amount up to a multiple of sgrowsiz. */
2912         grow_amount = roundup(grow_amount, sgrowsiz);
2913         if (grow_amount > stack_entry->avail_ssize)
2914                 grow_amount = stack_entry->avail_ssize;
2915         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
2916                 grow_amount = stacklim - ctob(vm->vm_ssize);
2917         }
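        /*
         * Worked example (hypothetical numbers): a fault one page below
         * the stack bottom yields a grow_amount of PAGE_SIZE, which the
         * roundup above turns into sgrowsiz (e.g. 128KB) before it is
         * clamped by avail_ssize and, for the main stack, RLIMIT_STACK.
         */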
2918
2919         /* If we would blow our VMEM resource limit, no go */
2920         if (map->size + grow_amount > vmemlim) {
2921                 vm_map_unlock_read(map);
2922                 return (KERN_NO_SPACE);
2923         }
2924
2925         if (vm_map_lock_upgrade(map))
2926                 goto Retry;
2927
2928         if (stack_entry == next_entry) {
2929                 /*
2930                  * Growing downward.
2931                  */
2932                 /* Get the preliminary new entry start value */
2933                 addr = stack_entry->start - grow_amount;
2934
2935                 /*
2936                  * If this puts us into the previous entry, cut back our
2937                  * growth to the available space. Also, see the note above.
2938                  */
2939                 if (addr < end) {
2940                         stack_entry->avail_ssize = max_grow;
2941                         addr = end;
2942                 }
2943
2944                 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
2945                     p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
2946
2947                 /* Adjust the available stack space by the amount we grew. */
2948                 if (rv == KERN_SUCCESS) {
2949                         if (prev_entry != &map->header)
2950                                 vm_map_clip_end(map, prev_entry, addr);
2951                         new_entry = prev_entry->next;
2952                         KASSERT(new_entry == stack_entry->prev, ("foo"));
2953                         KASSERT(new_entry->end == stack_entry->start, ("foo"));
2954                         KASSERT(new_entry->start == addr, ("foo"));
2955                         grow_amount = new_entry->end - new_entry->start;
2956                         new_entry->avail_ssize = stack_entry->avail_ssize -
2957                             grow_amount;
2958                         stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
2959                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
2960                 }
2961         } else {
2962                 /*
2963                  * Growing upward.
2964                  */
2965                 addr = stack_entry->end + grow_amount;
2966
2967                 /*
2968                  * If this puts us into the next entry, cut back our growth
2969                  * to the available space. Also, see the note above.
2970                  */
2971                 if (addr > end) {
2972                         stack_entry->avail_ssize = end - stack_entry->end;
2973                         addr = end;
2974                 }
2975
2976                 grow_amount = addr - stack_entry->end;
2977
2978                 /* Grow the underlying object if applicable. */
2979                 if (stack_entry->object.vm_object == NULL ||
2980                     vm_object_coalesce(stack_entry->object.vm_object,
2981                     stack_entry->offset,
2982                     (vm_size_t)(stack_entry->end - stack_entry->start),
2983                     (vm_size_t)grow_amount)) {
2984                         map->size += (addr - stack_entry->end);
2985                         /* Update the current entry. */
2986                         stack_entry->end = addr;
2987                         stack_entry->avail_ssize -= grow_amount;
2988                         vm_map_entry_resize_free(map, stack_entry);
2989                         rv = KERN_SUCCESS;
2990
2991                         if (next_entry != &map->header)
2992                                 vm_map_clip_start(map, next_entry, addr);
2993                 } else
2994                         rv = KERN_FAILURE;
2995         }
2996
2997         if (rv == KERN_SUCCESS && is_procstack)
2998                 vm->vm_ssize += btoc(grow_amount);
2999
3000         vm_map_unlock(map);
3001
3002         /*
3003          * Heed the MAP_WIREFUTURE flag if it was set for this process.
3004          */
3005         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3006                 vm_map_wire(map,
3007                     (stack_entry == next_entry) ? addr : addr - grow_amount,
3008                     (stack_entry == next_entry) ? stack_entry->start : addr,
3009                     (p->p_flag & P_SYSTEM)
3010                     ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3011                     : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3012         }
3013
3014         return (rv);
3015 }
3016
3017 /*
3018  * Unshare the specified VM space for exec.  If other processes are
3019  * mapped to it, then create a new one.  The new vmspace is empty.
3020  */
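/*
 * Hedged usage sketch: the exec path replaces the old vmspace with an
 * empty one sized from the process sysentvec (field names assumed):
 *
 *      vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
 */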
3021 void
3022 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3023 {
3024         struct vmspace *oldvmspace = p->p_vmspace;
3025         struct vmspace *newvmspace;
3026
3027         newvmspace = vmspace_alloc(minuser, maxuser);
3028         newvmspace->vm_swrss = oldvmspace->vm_swrss;
3029         /*
3030          * This code is written like this for prototype purposes.  The
3031          * goal is to avoid running down the vmspace here, but to let the
3032          * other processes that are still using the vmspace finally run
3033          * it down.  Even though there is little or no chance of blocking
3034          * here, it is a good idea to keep this form for future mods.
3035          */
3036         PROC_VMSPACE_LOCK(p);
3037         p->p_vmspace = newvmspace;
3038         PROC_VMSPACE_UNLOCK(p);
3039         if (p == curthread->td_proc)            /* XXXKSE ? */
3040                 pmap_activate(curthread);
3041         vmspace_free(oldvmspace);
3042 }
3043
3044 /*
3045  * Unshare the specified VM space for forcing COW.  This
3046  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3047  */
3048 void
3049 vmspace_unshare(struct proc *p)
3050 {
3051         struct vmspace *oldvmspace = p->p_vmspace;
3052         struct vmspace *newvmspace;
3053
3054         if (oldvmspace->vm_refcnt == 1)
3055                 return;
3056         newvmspace = vmspace_fork(oldvmspace);
3057         PROC_VMSPACE_LOCK(p);
3058         p->p_vmspace = newvmspace;
3059         PROC_VMSPACE_UNLOCK(p);
3060         if (p == curthread->td_proc)            /* XXXKSE ? */
3061                 pmap_activate(curthread);
3062         vmspace_free(oldvmspace);
3063 }
3064
3065 /*
3066  *      vm_map_lookup:
3067  *
3068  *      Finds the VM object, offset, and
3069  *      protection for a given virtual address in the
3070  *      specified map, assuming a page fault of the
3071  *      type specified.
3072  *
3073  *      Leaves the map in question locked for read; return
3074  *      values are guaranteed until a vm_map_lookup_done
3075  *      call is performed.  Note that the map argument
3076  *      is in/out; the returned map must be used in
3077  *      the call to vm_map_lookup_done.
3078  *
3079  *      A handle (out_entry) is returned for use in
3080  *      vm_map_lookup_done, to make that fast.
3081  *
3082  *      If a lookup is requested with "write protection"
3083  *      specified, the map may be changed to perform virtual
3084  *      copying operations, although the data referenced will
3085  *      remain the same.
3086  */
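/*
 * A minimal caller sketch (illustrative only, modeled on the fault
 * handling path; "vaddr" and "fault_type" are assumed inputs):
 *
 *      result = vm_map_lookup(&map, vaddr, fault_type, &entry,
 *          &object, &pindex, &prot, &wired);
 *      if (result != KERN_SUCCESS)
 *              return (result);
 *      ... resolve the page in "object" at index "pindex" ...
 *      vm_map_lookup_done(map, entry);
 */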
3087 int
3088 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
3089               vm_offset_t vaddr,
3090               vm_prot_t fault_typea,
3091               vm_map_entry_t *out_entry,        /* OUT */
3092               vm_object_t *object,              /* OUT */
3093               vm_pindex_t *pindex,              /* OUT */
3094               vm_prot_t *out_prot,              /* OUT */
3095               boolean_t *wired)                 /* OUT */
3096 {
3097         vm_map_entry_t entry;
3098         vm_map_t map = *var_map;
3099         vm_prot_t prot;
3100         vm_prot_t fault_type = fault_typea;
3101
3102 RetryLookup:;
3103         /*
3104          * Lookup the faulting address.
3105          */
3106
3107         vm_map_lock_read(map);
3108 #define RETURN(why) \
3109                 { \
3110                 vm_map_unlock_read(map); \
3111                 return (why); \
3112                 }
3113
3114         /*
3115          * If the map has an interesting hint, try it before calling the
3116          * full-blown lookup routine.
3117          */
3118         entry = map->root;
3119         *out_entry = entry;
3120         if (entry == NULL ||
3121             (vaddr < entry->start) || (vaddr >= entry->end)) {
3122                 /*
3123                  * Entry was either not a valid hint, or the vaddr was not
3124                  * contained in the entry, so do a full lookup.
3125                  */
3126                 if (!vm_map_lookup_entry(map, vaddr, out_entry))
3127                         RETURN(KERN_INVALID_ADDRESS);
3128
3129                 entry = *out_entry;
3130         }
3131
3132         /*
3133          * Handle submaps.
3134          */
3135         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3136                 vm_map_t old_map = map;
3137
3138                 *var_map = map = entry->object.sub_map;
3139                 vm_map_unlock_read(old_map);
3140                 goto RetryLookup;
3141         }
3142
3143         /*
3144          * Check whether this task is allowed to have this page.
3145          * Note the special case for MAP_ENTRY_COW
3146          * pages with an override.  This is to implement a forced
3147          * COW for debuggers.
3148          */
3149         if (fault_type & VM_PROT_OVERRIDE_WRITE)
3150                 prot = entry->max_protection;
3151         else
3152                 prot = entry->protection;
3153         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
3154         if ((fault_type & prot) != fault_type) {
3155                 RETURN(KERN_PROTECTION_FAILURE);
3156         }
3157         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3158             (entry->eflags & MAP_ENTRY_COW) &&
3159             (fault_type & VM_PROT_WRITE) &&
3160             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
3161                 RETURN(KERN_PROTECTION_FAILURE);
3162         }
3163
3164         /*
3165          * If this page is not pageable, we have to get it for all possible
3166          * accesses.
3167          */
3168         *wired = (entry->wired_count != 0);
3169         if (*wired)
3170                 prot = fault_type = entry->protection;
3171
3172         /*
3173          * If the entry was copy-on-write, we either shadow it or demote access.
3174          */
3175         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3176                 /*
3177                  * If we want to write the page, we may as well handle that
3178                  * now since we've got the map locked.
3179                  *
3180                  * If we don't need to write the page, we just demote the
3181                  * permissions allowed.
3182                  */
3183                 if (fault_type & VM_PROT_WRITE) {
3184                         /*
3185                          * Make a new object, and place it in the object
3186                          * chain.  Note that no new references have appeared
3187                          * -- one just moved from the map to the new
3188                          * object.
3189                          */
3190                         if (vm_map_lock_upgrade(map))
3191                                 goto RetryLookup;
3192
3193                         vm_object_shadow(
3194                             &entry->object.vm_object,
3195                             &entry->offset,
3196                             atop(entry->end - entry->start));
3197                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3198
3199                         vm_map_lock_downgrade(map);
3200                 } else {
3201                         /*
3202                          * We're attempting to read a copy-on-write page --
3203                          * don't allow writes.
3204                          */
3205                         prot &= ~VM_PROT_WRITE;
3206                 }
3207         }
3208
3209         /*
3210          * Create an object if necessary.
3211          */
3212         if (entry->object.vm_object == NULL &&
3213             !map->system_map) {
3214                 if (vm_map_lock_upgrade(map))
3215                         goto RetryLookup;
3216                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
3217                     atop(entry->end - entry->start));
3218                 entry->offset = 0;
3219                 vm_map_lock_downgrade(map);
3220         }
3221
3222         /*
3223          * Return the object/offset from this entry.  If the entry was
3224          * copy-on-write or empty, it has been fixed up.
3225          */
3226         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3227         *object = entry->object.vm_object;
3228
3229         *out_prot = prot;
3230         return (KERN_SUCCESS);
3231
3232 #undef  RETURN
3233 }
3234
3235 /*
3236  *      vm_map_lookup_locked:
3237  *
3238  *      Lookup the faulting address.  A version of vm_map_lookup that returns 
3239  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
3240  */
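/*
 * Unlike vm_map_lookup(), this variant expects the caller to hold the
 * map lock already and never sleeps to upgrade it.  A hedged sketch:
 *
 *      vm_map_lock_read(map);
 *      if (vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &entry,
 *          &object, &pindex, &prot, &wired) != KERN_SUCCESS)
 *              ... fall back to the blocking vm_map_lookup() path ...
 */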
3241 int
3242 vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
3243                      vm_offset_t vaddr,
3244                      vm_prot_t fault_typea,
3245                      vm_map_entry_t *out_entry, /* OUT */
3246                      vm_object_t *object,       /* OUT */
3247                      vm_pindex_t *pindex,       /* OUT */
3248                      vm_prot_t *out_prot,       /* OUT */
3249                      boolean_t *wired)          /* OUT */
3250 {
3251         vm_map_entry_t entry;
3252         vm_map_t map = *var_map;
3253         vm_prot_t prot;
3254         vm_prot_t fault_type = fault_typea;
3255
3256         /*
3257          * If the map has an interesting hint, try it before calling the
3258          * full-blown lookup routine.
3259          */
3260         entry = map->root;
3261         *out_entry = entry;
3262         if (entry == NULL ||
3263             (vaddr < entry->start) || (vaddr >= entry->end)) {
3264                 /*
3265                  * Entry was either not a valid hint, or the vaddr was not
3266                  * contained in the entry, so do a full lookup.
3267                  */
3268                 if (!vm_map_lookup_entry(map, vaddr, out_entry))
3269                         return (KERN_INVALID_ADDRESS);
3270
3271                 entry = *out_entry;
3272         }
3273
3274         /*
3275          * Fail if the entry refers to a submap.
3276          */
3277         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3278                 return (KERN_FAILURE);
3279
3280         /*
3281          * Check whether this task is allowed to have this page.
3282          * Note the special case for MAP_ENTRY_COW
3283          * pages with an override.  This is to implement a forced
3284          * COW for debuggers.
3285          */
3286         if (fault_type & VM_PROT_OVERRIDE_WRITE)
3287                 prot = entry->max_protection;
3288         else
3289                 prot = entry->protection;
3290         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
3291         if ((fault_type & prot) != fault_type)
3292                 return (KERN_PROTECTION_FAILURE);
3293         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
3294             (entry->eflags & MAP_ENTRY_COW) &&
3295             (fault_type & VM_PROT_WRITE) &&
3296             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0)
3297                 return (KERN_PROTECTION_FAILURE);
3298
3299         /*
3300          * If this page is not pageable, we have to get it for all possible
3301          * accesses.
3302          */
3303         *wired = (entry->wired_count != 0);
3304         if (*wired)
3305                 prot = fault_type = entry->protection;
3306
3307         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3308                 /*
3309                  * Fail if the entry was copy-on-write for a write fault.
3310                  */
3311                 if (fault_type & VM_PROT_WRITE)
3312                         return (KERN_FAILURE);
3313                 /*
3314                  * We're attempting to read a copy-on-write page --
3315                  * don't allow writes.
3316                  */
3317                 prot &= ~VM_PROT_WRITE;
3318         }
3319
3320         /*
3321          * Fail if an object would need to be created.
3322          */
3323         if (entry->object.vm_object == NULL && !map->system_map)
3324                 return (KERN_FAILURE);
3325
3326         /*
3327          * Return the object/offset from this entry.  If the entry was
3328          * copy-on-write or empty, it has been fixed up.
3329          */
3330         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
3331         *object = entry->object.vm_object;
3332
3333         *out_prot = prot;
3334         return (KERN_SUCCESS);
3335 }
3336
3337 /*
3338  *      vm_map_lookup_done:
3339  *
3340  *      Releases locks acquired by a vm_map_lookup
3341  *      (according to the handle returned by that lookup).
3342  */
3343 void
3344 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
3345 {
3346         /*
3347          * Unlock the main-level map
3348          */
3349         vm_map_unlock_read(map);
3350 }
3351
3352 #include "opt_ddb.h"
3353 #ifdef DDB
3354 #include <sys/kernel.h>
3355
3356 #include <ddb/ddb.h>
3357
3358 /*
3359  *      vm_map_print:   [ debug ]
3360  */
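/*
 * Usage from the ddb prompt (the map address below is hypothetical):
 *
 *      db> show map 0xc1234560
 *
 * Supplying an explicit address also selects the full listing.
 */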
3361 DB_SHOW_COMMAND(map, vm_map_print)
3362 {
3363         static int nlines;
3364         /* XXX convert args. */
3365         vm_map_t map = (vm_map_t)addr;
3366         boolean_t full = have_addr;
3367
3368         vm_map_entry_t entry;
3369
3370         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3371             (void *)map,
3372             (void *)map->pmap, map->nentries, map->timestamp);
3373         nlines++;
3374
3375         if (!full && db_indent)
3376                 return;
3377
3378         db_indent += 2;
3379         for (entry = map->header.next; entry != &map->header;
3380             entry = entry->next) {
3381                 db_iprintf("map entry %p: start=%p, end=%p\n",
3382                     (void *)entry, (void *)entry->start, (void *)entry->end);
3383                 nlines++;
3384                 {
3385                         static char *inheritance_name[4] =
3386                         {"share", "copy", "none", "donate_copy"};
3387
3388                         db_iprintf(" prot=%x/%x/%s",
3389                             entry->protection,
3390                             entry->max_protection,
3391                             inheritance_name[(int)(unsigned char)entry->inheritance]);
3392                         if (entry->wired_count != 0)
3393                                 db_printf(", wired");
3394                 }
3395                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3396                         db_printf(", share=%p, offset=0x%jx\n",
3397                             (void *)entry->object.sub_map,
3398                             (uintmax_t)entry->offset);
3399                         nlines++;
3400                         if ((entry->prev == &map->header) ||
3401                             (entry->prev->object.sub_map !=
3402                                 entry->object.sub_map)) {
3403                                 db_indent += 2;
3404                                 vm_map_print((db_expr_t)(intptr_t)
3405                                              entry->object.sub_map,
3406                                              full, 0, (char *)0);
3407                                 db_indent -= 2;
3408                         }
3409                 } else {
3410                         db_printf(", object=%p, offset=0x%jx",
3411                             (void *)entry->object.vm_object,
3412                             (uintmax_t)entry->offset);
3413                         if (entry->eflags & MAP_ENTRY_COW)
3414                                 db_printf(", copy (%s)",
3415                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3416                         db_printf("\n");
3417                         nlines++;
3418
3419                         if ((entry->prev == &map->header) ||
3420                             (entry->prev->object.vm_object !=
3421                                 entry->object.vm_object)) {
3422                                 db_indent += 2;
3423                                 vm_object_print((db_expr_t)(intptr_t)
3424                                                 entry->object.vm_object,
3425                                                 full, 0, (char *)0);
3426                                 nlines += 4;
3427                                 db_indent -= 2;
3428                         }
3429                 }
3430         }
3431         db_indent -= 2;
3432         if (db_indent == 0)
3433                 nlines = 0;
3434 }
3435
3436
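/*
 * "show procvm" prints the vmspace, map and pmap of the process at the
 * given address, or of curproc when no address is supplied, e.g.:
 *
 *      db> show procvm
 */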
3437 DB_SHOW_COMMAND(procvm, procvm)
3438 {
3439         struct proc *p;
3440
3441         if (have_addr) {
3442                 p = (struct proc *) addr;
3443         } else {
3444                 p = curproc;
3445         }
3446
3447         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3448             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3449             (void *)vmspace_pmap(p->p_vmspace));
3450
3451         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3452 }
3453
3454 #endif /* DDB */