/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *      Virtual memory mapping module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/sysent.h>
#include <sys/stdint.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *      Virtual memory maps provide for the mapping, protection,
 *      and sharing of virtual memory objects.  In addition,
 *      this module provides for an efficient virtual copy of
 *      memory from one map to another.
 *
 *      Synchronization is required prior to most operations.
 *
 *      Maps consist of an ordered doubly-linked list of simple
 *      entries; a single hint is used to speed up lookups.
 *
 *      Since portions of maps are specified by start/end addresses,
 *      which may not align with existing map entries, all
 *      routines merely "clip" entries to these start/end values.
 *      [That is, an entry is split into two, bordering at a
 *      start or end value.]  Note that these clippings may not
 *      always be necessary (as the two resulting entries are then
 *      not changed); however, the clipping is done for convenience.
 *
 *      As mentioned above, virtual copy operations are performed
 *      by copying VM object references from one map to
 *      another, and then marking both regions as copy-on-write.
 */
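
/*
 * Illustrative example of clipping (addresses are hypothetical): deleting
 * the middle of a mapping first clips the covering entry so that the
 * affected range is bounded by whole entries:
 *
 *      before: [0x1000 ............................ 0x4000)
 *      vm_map_clip_start(map, entry, 0x2000);
 *      vm_map_clip_end(map, entry, 0x3000);
 *      after:  [0x1000,0x2000) [0x2000,0x3000) [0x3000,0x4000)
 *
 * The middle entry can then be modified or unlinked without disturbing
 * its neighbors; vm_map_simplify_entry() later undoes unneeded splits.
 */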

/*
 *      vm_map_startup:
 *
 *      Initialize the vm_map module.  Must be called before
 *      any other vm_map routines.
 *
 *      Map and entry structures are allocated from the general
 *      purpose memory pool with some exceptions:
 *
 *      - The kernel map and kmem submap are allocated statically.
 *      - Kernel map entries are allocated out of a static pool.
 *
 *      These restrictions are necessary since malloc() uses the
 *      maps and requires map entries.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static struct vm_object kmapentobj;
static void vmspace_zinit(void *mem, int size);
static void vmspace_zfini(void *mem, int size);
static void vm_map_zinit(void *mem, int size);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);

#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif

void
vm_map_startup(void)
{
        mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
        mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
            vm_map_zdtor,
#else
            NULL,
#endif
            vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        uma_prealloc(mapzone, MAX_KMAP);
        kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
        uma_prealloc(kmapentzone, MAX_KMAPENT);
        mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        uma_prealloc(mapentzone, MAX_MAPENT);
}

static void
vmspace_zfini(void *mem, int size)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}

static void
vmspace_zinit(void *mem, int size)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
}

static void
vm_map_zfini(void *mem, int size)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        mtx_destroy(&map->system_mtx);
        lockdestroy(&map->lock);
}

static void
vm_map_zinit(void *mem, int size)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        map->nentries = 0;
        map->size = 0;
        map->infork = 0;
        mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF);
        lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
        struct vmspace *vm;

        vm = (struct vmspace *)mem;

        vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
        vm_map_t map;

        map = (vm_map_t)mem;
        KASSERT(map->nentries == 0,
            ("map %p nentries == %d on free.",
            map, map->nentries));
        KASSERT(map->size == 0,
            ("map %p size == %lu on free.",
            map, (unsigned long)map->size));
        KASSERT(map->infork == 0,
            ("map %p infork == %d on free.",
            map, map->infork));
}
#endif  /* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 * The remaining fields must be initialized by the caller.
 */
struct vmspace *
vmspace_alloc(min, max)
        vm_offset_t min, max;
{
        struct vmspace *vm;

        GIANT_REQUIRED;
        vm = uma_zalloc(vmspace_zone, M_WAITOK);
        CTR1(KTR_VM, "vmspace_alloc: %p", vm);
        _vm_map_init(&vm->vm_map, min, max);
        pmap_pinit(vmspace_pmap(vm));
        vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
        vm->vm_refcnt = 1;
        vm->vm_shm = NULL;
        vm->vm_exitingcnt = 0;
        return (vm);
}
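
/*
 * Hypothetical caller sketch (the bounds and surrounding code are
 * illustrative, not taken from this file): a fresh user address space
 * is obtained and installed roughly as
 *
 *      struct vmspace *vm;
 *
 *      vm = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *      p->p_vmspace = vm;
 *
 * after which the caller fills in the remaining vmspace fields itself.
 */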

void
vm_init2(void)
{
        uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
            (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8);
        vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
            vmspace_zdtor,
#else
            NULL,
#endif
            vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
        pmap_init2();
}

static __inline void
vmspace_dofree(struct vmspace *vm)
{
        CTR1(KTR_VM, "vmspace_free: %p", vm);

        /*
         * Make sure any SysV shm is freed, it might not have been in
         * exit1().
         */
        shmexit(vm);

        /*
         * Lock the map, to wait out all other references to it.
         * Delete all of the mappings and pages they hold, then call
         * the pmap module to reclaim anything left.
         */
        vm_map_lock(&vm->vm_map);
        (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
            vm->vm_map.max_offset);
        vm_map_unlock(&vm->vm_map);

        pmap_release(vmspace_pmap(vm));
        uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{
        GIANT_REQUIRED;

        if (vm->vm_refcnt == 0)
                panic("vmspace_free: attempt to free already freed vmspace");

        if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
                vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
        struct vmspace *vm;

        GIANT_REQUIRED;
        vm = p->p_vmspace;
        p->p_vmspace = NULL;

        /*
         * cleanup by parent process wait()ing on exiting child.  vm_refcnt
         * may not be 0 (e.g. fork() and child exits without exec()ing).
         * exitingcnt may increment above 0 and drop back down to zero
         * several times while vm_refcnt is held non-zero.  vm_refcnt
         * may also increment above 0 and drop back down to zero several
         * times while vm_exitingcnt is held non-zero.
         *
         * The last wait on the exiting child's vmspace will clean up
         * the remainder of the vmspace.
         */
        if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
                vmspace_dofree(vm);
}

/*
 * vmspace_swap_count() - count the approximate swap usage in pages for a
 *                        vmspace.
 *
 *      Swap usage is determined by taking the proportional swap used by
 *      VM objects backing the VM map.  To make up for fractional losses,
 *      if the VM object has any swap use at all the associated map entries
 *      count for at least 1 swap page.
 */
int
vmspace_swap_count(struct vmspace *vmspace)
{
        vm_map_t map = &vmspace->vm_map;
        vm_map_entry_t cur;
        int count = 0;

        vm_map_lock_read(map);
        for (cur = map->header.next; cur != &map->header; cur = cur->next) {
                vm_object_t object;

                if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
                    (object = cur->object.vm_object) != NULL &&
                    object->type == OBJT_SWAP
                ) {
                        int n = (cur->end - cur->start) / PAGE_SIZE;

                        if (object->un_pager.swp.swp_bcount) {
                                count += object->un_pager.swp.swp_bcount *
                                    SWAP_META_PAGES * n / object->size + 1;
                        }
                }
        }
        vm_map_unlock_read(map);
        return (count);
}
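
/*
 * Worked example of the estimate above, with illustrative numbers and
 * SWAP_META_PAGES assumed to be 16: an entry spanning n = 4 pages of an
 * 8-page OBJT_SWAP object with swp_bcount = 1 contributes
 * 1 * 16 * 4 / 8 + 1 = 9 pages to the count.  The trailing "+ 1" is what
 * guarantees at least one page for any object with any swap use at all.
 */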

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{
        int error;

        if (map->system_map)
                _mtx_lock_flags(&map->system_mtx, 0, file, line);
        else {
                error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
                KASSERT(error == 0, ("%s: failed to get lock", __func__));
        }
        map->timestamp++;
}

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                _mtx_unlock_flags(&map->system_mtx, 0, file, line);
        else
                lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{
        int error;

        if (map->system_map)
                _mtx_lock_flags(&map->system_mtx, 0, file, line);
        else {
                error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
                KASSERT(error == 0, ("%s: failed to get lock", __func__));
        }
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

        if (map->system_map)
                _mtx_unlock_flags(&map->system_mtx, 0, file, line);
        else
                lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
}

int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
        int error;

        error = map->system_map ?
            !_mtx_trylock(&map->system_mtx, 0, file, line) :
            lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
        if (error == 0)
                map->timestamp++;
        return (error == 0);
}

int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{

        if (map->system_map) {
#ifdef INVARIANTS
                _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
        } else
                KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
                    ("%s: lock not held", __func__));
        map->timestamp++;
        return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

        if (map->system_map) {
#ifdef INVARIANTS
                _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
#endif
        } else
                KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
                    ("%s: lock not held", __func__));
}

/*
 *      vm_map_unlock_and_wait:
 */
int
vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait)
{

        mtx_lock(&map_sleep_mtx);
        vm_map_unlock(map);
        return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0));
}

/*
 *      vm_map_wakeup:
 */
void
vm_map_wakeup(vm_map_t map)
{

        /*
         * Acquire and release map_sleep_mtx to prevent a wakeup()
         * from being performed (and lost) between the vm_map_unlock()
         * and the msleep() in vm_map_unlock_and_wait().
         */
        mtx_lock(&map_sleep_mtx);
        mtx_unlock(&map_sleep_mtx);
        wakeup(&map->root);
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
        return pmap_resident_count(vmspace_pmap(vmspace));
}

/*
 *      vm_map_create:
 *
 *      Creates and returns a new empty VM map with
 *      the given physical map structure, and having
 *      the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
        vm_map_t result;

        result = uma_zalloc(mapzone, M_WAITOK);
        CTR1(KTR_VM, "vm_map_create: %p", result);
        _vm_map_init(result, min, max);
        result->pmap = pmap;
        return (result);
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 * The pmap is set elsewhere.
 */
static void
_vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{

        map->header.next = map->header.prev = &map->header;
        map->needs_wakeup = FALSE;
        map->system_map = 0;
        map->min_offset = min;
        map->max_offset = max;
        map->first_free = &map->header;
        map->root = NULL;
        map->timestamp = 0;
}

void
vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{
        _vm_map_init(map, min, max);
        mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF);
        lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}

/*
 *      vm_map_entry_dispose:   [ internal use only ]
 *
 *      Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
        uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *      vm_map_entry_create:    [ internal use only ]
 *
 *      Allocates a VM map entry for insertion.
 *      No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
        vm_map_entry_t new_entry;

        if (map->system_map)
                new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
        else
                new_entry = uma_zalloc(mapentzone, M_WAITOK);
        if (new_entry == NULL)
                panic("vm_map_entry_create: kernel resources exhausted");
        return (new_entry);
}

/*
 *      vm_map_entry_set_behavior:
 *
 *      Set the expected access behavior, either normal, random, or
 *      sequential.
 */
static __inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
        entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
            (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *      vm_map_entry_splay:
 *
 *      Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *      the vm_map_entry containing the given address.  If, however, that
 *      address is not found in the vm_map, returns a vm_map_entry that is
 *      adjacent to the address, coming before or after it.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root)
{
        struct vm_map_entry dummy;
        vm_map_entry_t lefttreemax, righttreemin, y;

        if (root == NULL)
                return (root);
        lefttreemax = righttreemin = &dummy;
        for (;; root = y) {
                if (address < root->start) {
                        if ((y = root->left) == NULL)
                                break;
                        if (address < y->start) {
                                /* Rotate right. */
                                root->left = y->right;
                                y->right = root;
                                root = y;
                                if ((y = root->left) == NULL)
                                        break;
                        }
                        /* Link into the new root's right tree. */
                        righttreemin->left = root;
                        righttreemin = root;
                } else if (address >= root->end) {
                        if ((y = root->right) == NULL)
                                break;
                        if (address >= y->end) {
                                /* Rotate left. */
                                root->right = y->left;
                                y->left = root;
                                root = y;
                                if ((y = root->right) == NULL)
                                        break;
                        }
                        /* Link into the new root's left tree. */
                        lefttreemax->right = root;
                        lefttreemax = root;
                } else
                        break;
        }
        /* Assemble the new root. */
        lefttreemax->right = root->left;
        righttreemin->left = root->right;
        root->left = dummy.right;
        root->right = dummy.left;
        return (root);
}
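
/*
 * Illustrative rotation (one case of the algorithm above): splaying an
 * address that lies inside entry "b", starting from root "a" with right
 * child "b", brings "b" to the root and hangs "a" off its left:
 *
 *      a                      b
 *       \          ==>       /
 *        b                  a
 *
 * Repeated lookups of nearby addresses thus stay near the root, which is
 * what makes the tree self-adjusting.
 */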

/*
 *      vm_map_entry_{un,}link:
 *
 *      Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
                  vm_map_entry_t after_where,
                  vm_map_entry_t entry)
{

        CTR4(KTR_VM,
            "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
            map->nentries, entry, after_where);
        map->nentries++;
        entry->prev = after_where;
        entry->next = after_where->next;
        entry->next->prev = entry;
        after_where->next = entry;

        if (after_where != &map->header) {
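                /*
                 * Note: the splay's return value is deliberately unused
                 * below.  Because after_where->start lies inside
                 * after_where itself, the splay leaves after_where at
                 * the root, with its left and right pointers already
                 * describing the rearranged tree.
                 */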
                if (after_where != map->root)
                        vm_map_entry_splay(after_where->start, map->root);
                entry->right = after_where->right;
                entry->left = after_where;
                after_where->right = NULL;
        } else {
                entry->right = map->root;
                entry->left = NULL;
        }
        map->root = entry;
}

static void
vm_map_entry_unlink(vm_map_t map,
                    vm_map_entry_t entry)
{
        vm_map_entry_t next, prev, root;

        if (entry != map->root)
                vm_map_entry_splay(entry->start, map->root);
        if (entry->left == NULL)
                root = entry->right;
        else {
                root = vm_map_entry_splay(entry->start, entry->left);
                root->right = entry->right;
        }
        map->root = root;

        prev = entry->prev;
        next = entry->next;
        next->prev = prev;
        prev->next = next;
        map->nentries--;
        CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
            map->nentries, entry);
}

/*
 *      vm_map_lookup_entry:    [ internal use only ]
 *
 *      Finds the map entry containing (or
 *      immediately preceding) the specified address
 *      in the given map; the entry is returned
 *      in the "entry" parameter.  The boolean
 *      result indicates whether the address is
 *      actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
        vm_map_t map,
        vm_offset_t address,
        vm_map_entry_t *entry)  /* OUT */
{
        vm_map_entry_t cur;

        cur = vm_map_entry_splay(address, map->root);
        if (cur == NULL)
                *entry = &map->header;
        else {
                map->root = cur;

                if (address >= cur->start) {
                        *entry = cur;
                        if (cur->end > address)
                                return (TRUE);
                } else
                        *entry = cur->prev;
        }
        return (FALSE);
}
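
/*
 * Typical lookup idiom (illustrative):
 *
 *      if (vm_map_lookup_entry(map, addr, &entry))
 *              -> addr is contained in "entry"
 *      else
 *              -> addr falls in the gap after "entry" ("entry" is
 *                 &map->header when addr precedes every mapping)
 */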

/*
 *      vm_map_insert:
 *
 *      Inserts the given whole VM object into the target
 *      map at the specified address range.  The object's
 *      size should match that of the address range.
 *
 *      Requires that the map be locked, and leaves it so.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
              vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
              int cow)
{
        vm_map_entry_t new_entry;
        vm_map_entry_t prev_entry;
        vm_map_entry_t temp_entry;
        vm_eflags_t protoeflags;

        /*
         * Check that the start and end points are not bogus.
         */
        if ((start < map->min_offset) || (end > map->max_offset) ||
            (start >= end))
                return (KERN_INVALID_ADDRESS);

        /*
         * Find the entry prior to the proposed starting address; if it's part
         * of an existing entry, this range is bogus.
         */
        if (vm_map_lookup_entry(map, start, &temp_entry))
                return (KERN_NO_SPACE);

        prev_entry = temp_entry;

        /*
         * Assert that the next entry doesn't overlap the end point.
         */
        if ((prev_entry->next != &map->header) &&
            (prev_entry->next->start < end))
                return (KERN_NO_SPACE);

        protoeflags = 0;

        if (cow & MAP_COPY_ON_WRITE)
                protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

        if (cow & MAP_NOFAULT) {
                protoeflags |= MAP_ENTRY_NOFAULT;

                KASSERT(object == NULL,
                        ("vm_map_insert: paradoxical MAP_NOFAULT request"));
        }
        if (cow & MAP_DISABLE_SYNCER)
                protoeflags |= MAP_ENTRY_NOSYNC;
        if (cow & MAP_DISABLE_COREDUMP)
                protoeflags |= MAP_ENTRY_NOCOREDUMP;

        if (object) {
                /*
                 * When object is non-NULL, it could be shared with another
                 * process.  We have to set or clear OBJ_ONEMAPPING
                 * appropriately.
                 */
                vm_object_lock(object);
                if ((object->ref_count > 1) || (object->shadow_count != 0)) {
                        vm_object_clear_flag(object, OBJ_ONEMAPPING);
                }
                vm_object_unlock(object);
        }
        else if ((prev_entry != &map->header) &&
                 (prev_entry->eflags == protoeflags) &&
                 (prev_entry->end == start) &&
                 (prev_entry->wired_count == 0) &&
                 ((prev_entry->object.vm_object == NULL) ||
                  vm_object_coalesce(prev_entry->object.vm_object,
                                     OFF_TO_IDX(prev_entry->offset),
                                     (vm_size_t)(prev_entry->end - prev_entry->start),
                                     (vm_size_t)(end - prev_entry->end)))) {
                /*
                 * We were able to extend the object.  Determine if we
                 * can extend the previous map entry to include the
                 * new range as well.
                 */
                if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
                    (prev_entry->protection == prot) &&
                    (prev_entry->max_protection == max)) {
                        map->size += (end - prev_entry->end);
                        prev_entry->end = end;
                        vm_map_simplify_entry(map, prev_entry);
                        return (KERN_SUCCESS);
                }

                /*
                 * If we can extend the object but cannot extend the
                 * map entry, we have to create a new map entry.  We
                 * must bump the ref count on the extended object to
                 * account for it.  object may be NULL.
                 */
                object = prev_entry->object.vm_object;
                offset = prev_entry->offset +
                        (prev_entry->end - prev_entry->start);
                vm_object_reference(object);
        }

        /*
         * NOTE: if conditionals fail, object can be NULL here.  This occurs
         * in things like the buffer map where we manage kva but do not manage
         * backing objects.
         */

        /*
         * Create a new entry
         */
        new_entry = vm_map_entry_create(map);
        new_entry->start = start;
        new_entry->end = end;

        new_entry->eflags = protoeflags;
        new_entry->object.vm_object = object;
        new_entry->offset = offset;
        new_entry->avail_ssize = 0;

        new_entry->inheritance = VM_INHERIT_DEFAULT;
        new_entry->protection = prot;
        new_entry->max_protection = max;
        new_entry->wired_count = 0;

        /*
         * Insert the new entry into the list
         */
        vm_map_entry_link(map, prev_entry, new_entry);
        map->size += new_entry->end - new_entry->start;

        /*
         * Update the free space hint
         */
        if ((map->first_free == prev_entry) &&
            (prev_entry->end >= new_entry->start)) {
                map->first_free = new_entry;
        }

#if 0
        /*
         * Temporarily removed to avoid MAP_STACK panic, due to
         * MAP_STACK being a huge hack.  Will be added back in
         * when MAP_STACK (and the user stack mapping) is fixed.
         */
        /*
         * It may be possible to simplify the entry
         */
        vm_map_simplify_entry(map, new_entry);
#endif

        if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
                mtx_lock(&Giant);
                pmap_object_init_pt(map->pmap, start,
                                    object, OFF_TO_IDX(offset), end - start,
                                    cow & MAP_PREFAULT_PARTIAL);
                mtx_unlock(&Giant);
        }

        return (KERN_SUCCESS);
}

/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
 */
int
vm_map_findspace(
        vm_map_t map,
        vm_offset_t start,
        vm_size_t length,
        vm_offset_t *addr)
{
        vm_map_entry_t entry, next;
        vm_offset_t end;

        if (start < map->min_offset)
                start = map->min_offset;
        if (start > map->max_offset)
                return (1);

        /*
         * Look for the first possible address; if there's already something
         * at this address, we have to start after it.
         */
        if (start == map->min_offset) {
                if ((entry = map->first_free) != &map->header)
                        start = entry->end;
        } else {
                vm_map_entry_t tmp;

                if (vm_map_lookup_entry(map, start, &tmp))
                        start = tmp->end;
                entry = tmp;
        }

        /*
         * Look through the rest of the map, trying to fit a new region in the
         * gap between existing regions, or after the very last region.
         */
        for (;; start = (entry = next)->end) {
                /*
                 * Find the end of the proposed new region.  Be sure we didn't
                 * go beyond the end of the map, or wrap around the address;
                 * if so, we lose.  Otherwise, if this is the last entry, or
                 * if the proposed new region fits before the next entry, we
                 * win.
                 */
                end = start + length;
                if (end > map->max_offset || end < start)
                        return (1);
                next = entry->next;
                if (next == &map->header || next->start >= end)
                        break;
        }
        *addr = start;
        if (map == kernel_map) {
                vm_offset_t ksize;
                if ((ksize = round_page(start + length)) > kernel_vm_end) {
                        mtx_lock(&Giant);
                        pmap_growkernel(ksize);
                        mtx_unlock(&Giant);
                }
        }
        return (0);
}
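
/*
 * Illustrative walk (hypothetical layout): with entries at [0x1000,0x3000)
 * and [0x5000,0x6000), a search for 0x2000 bytes starting at 0x1000 skips
 * the first entry and returns 0x3000, the first gap large enough for the
 * request.  A request for 0x3000 bytes would instead land at 0x6000,
 * after the last entry.
 */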

/*
 *      vm_map_find finds an unallocated region in the target address
 *      map with the given length.  The search is defined to be
 *      first-fit from the specified address; the region found is
 *      returned in the same parameter.
 *
 *      If object is non-NULL, ref count must be bumped by caller
 *      prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
            vm_offset_t *addr,  /* IN/OUT */
            vm_size_t length, boolean_t find_space, vm_prot_t prot,
            vm_prot_t max, int cow)
{
        vm_offset_t start;
        int result, s = 0;

        start = *addr;

        if (map == kmem_map)
                s = splvm();

        vm_map_lock(map);
        if (find_space) {
                if (vm_map_findspace(map, start, length, addr)) {
                        vm_map_unlock(map);
                        if (map == kmem_map)
                                splx(s);
                        return (KERN_NO_SPACE);
                }
                start = *addr;
        }
        result = vm_map_insert(map, object, offset,
                start, start + length, prot, max, cow);
        vm_map_unlock(map);

        if (map == kmem_map)
                splx(s);

        return (result);
}
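
/*
 * Sketch of a typical caller (values are illustrative): reserving a page
 * of anonymous space with the first-fit search enabled:
 *
 *      vm_offset_t addr = vm_map_min(map);
 *      int rv;
 *
 *      rv = vm_map_find(map, NULL, 0, &addr, PAGE_SIZE, TRUE,
 *          VM_PROT_ALL, VM_PROT_ALL, 0);
 *      if (rv != KERN_SUCCESS)
 *              return (ENOMEM);
 *
 * With find_space FALSE the region is placed exactly at *addr instead,
 * failing with KERN_NO_SPACE if something is already mapped there.
 */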

/*
 *      vm_map_simplify_entry:
 *
 *      Simplify the given map entry by merging with either neighbor.  This
 *      routine also has the ability to merge with both neighbors.
 *
 *      The map must be locked.
 *
 *      This routine guarantees that the passed entry remains valid (though
 *      possibly extended).  When merging, this routine may delete one or
 *      both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
        vm_map_entry_t next, prev;
        vm_size_t prevsize, esize;

        if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
                return;

        prev = entry->prev;
        if (prev != &map->header) {
                prevsize = prev->end - prev->start;
                if ( (prev->end == entry->start) &&
                     (prev->object.vm_object == entry->object.vm_object) &&
                     (!prev->object.vm_object ||
                        (prev->offset + prevsize == entry->offset)) &&
                     (prev->eflags == entry->eflags) &&
                     (prev->protection == entry->protection) &&
                     (prev->max_protection == entry->max_protection) &&
                     (prev->inheritance == entry->inheritance) &&
                     (prev->wired_count == entry->wired_count)) {
                        if (map->first_free == prev)
                                map->first_free = entry;
                        vm_map_entry_unlink(map, prev);
                        entry->start = prev->start;
                        entry->offset = prev->offset;
                        if (prev->object.vm_object)
                                vm_object_deallocate(prev->object.vm_object);
                        vm_map_entry_dispose(map, prev);
                }
        }

        next = entry->next;
        if (next != &map->header) {
                esize = entry->end - entry->start;
                if ((entry->end == next->start) &&
                    (next->object.vm_object == entry->object.vm_object) &&
                     (!entry->object.vm_object ||
                        (entry->offset + esize == next->offset)) &&
                    (next->eflags == entry->eflags) &&
                    (next->protection == entry->protection) &&
                    (next->max_protection == entry->max_protection) &&
                    (next->inheritance == entry->inheritance) &&
                    (next->wired_count == entry->wired_count)) {
                        if (map->first_free == next)
                                map->first_free = entry;
                        vm_map_entry_unlink(map, next);
                        entry->end = next->end;
                        if (next->object.vm_object)
                                vm_object_deallocate(next->object.vm_object);
                        vm_map_entry_dispose(map, next);
                }
        }
}
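
/*
 * Merge example (illustrative): two entries [A,B) and [B,C) backed by the
 * same object at consecutive offsets, with matching eflags, protection,
 * inheritance and wired_count, collapse into a single [A,C) entry; the
 * redundant entry is unlinked and disposed, and its extra object
 * reference dropped.
 */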
/*
 *      vm_map_clip_start:      [ internal use only ]
 *
 *      Asserts that the given entry begins at or after
 *      the specified address; if necessary,
 *      it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
        if (startaddr > entry->start) \
                _vm_map_clip_start(map, entry, startaddr); \
}

/*
 *      This routine is called only when it is known that
 *      the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
        vm_map_entry_t new_entry;

        /*
         * Split off the front portion -- note that we must insert the new
         * entry BEFORE this one, so that this entry has the specified
         * starting address.
         */
        vm_map_simplify_entry(map, entry);

        /*
         * If there is no object backing this entry, we might as well create
         * one now.  If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map.  This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */
        if (entry->object.vm_object == NULL && !map->system_map) {
                vm_object_t object;
                object = vm_object_allocate(OBJT_DEFAULT,
                                atop(entry->end - entry->start));
                entry->object.vm_object = object;
                entry->offset = 0;
        }

        new_entry = vm_map_entry_create(map);
        *new_entry = *entry;

        new_entry->end = start;
        entry->offset += (start - entry->start);
        entry->start = start;

        vm_map_entry_link(map, entry->prev, new_entry);

        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                vm_object_reference(new_entry->object.vm_object);
        }
}

/*
 *      vm_map_clip_end:        [ internal use only ]
 *
 *      Asserts that the given entry ends at or before
 *      the specified address; if necessary,
 *      it splits the entry into two.
 */
#define vm_map_clip_end(map, entry, endaddr) \
{ \
        if ((endaddr) < (entry->end)) \
                _vm_map_clip_end((map), (entry), (endaddr)); \
}

/*
 *      This routine is called only when it is known that
 *      the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
        vm_map_entry_t new_entry;

        /*
         * If there is no object backing this entry, we might as well create
         * one now.  If we defer it, an object can get created after the map
         * is clipped, and individual objects will be created for the split-up
         * map.  This is a bit of a hack, but is also about the best place to
         * put this improvement.
         */
        if (entry->object.vm_object == NULL && !map->system_map) {
                vm_object_t object;
                object = vm_object_allocate(OBJT_DEFAULT,
                                atop(entry->end - entry->start));
                entry->object.vm_object = object;
                entry->offset = 0;
        }

        /*
         * Create a new entry and insert it AFTER the specified entry
         */
        new_entry = vm_map_entry_create(map);
        *new_entry = *entry;

        new_entry->start = entry->end = end;
        new_entry->offset += (end - entry->start);

        vm_map_entry_link(map, entry, new_entry);

        if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                vm_object_reference(new_entry->object.vm_object);
        }
}

/*
 *      VM_MAP_RANGE_CHECK:     [ internal use only ]
 *
 *      Asserts that the starting and ending region
 *      addresses fall within the valid range of the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)             \
                {                                       \
                if (start < vm_map_min(map))            \
                        start = vm_map_min(map);        \
                if (end > vm_map_max(map))              \
                        end = vm_map_max(map);          \
                if (start > end)                        \
                        start = end;                    \
                }

/*
 *      vm_map_submap:          [ kernel use only ]
 *
 *      Mark the given range as handled by a subordinate map.
 *
 *      This range must have been created with vm_map_find,
 *      and no other operations may have been performed on this
 *      range prior to calling vm_map_submap.
 *
 *      Only a limited number of operations can be performed
 *      within this range after calling vm_map_submap:
 *              vm_fault
 *      [Don't try vm_map_copy!]
 *
 *      To remove a submapping, one must first remove the
 *      range from the superior map, and then destroy the
 *      submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(
        vm_map_t map,
        vm_offset_t start,
        vm_offset_t end,
        vm_map_t submap)
{
        vm_map_entry_t entry;
        int result = KERN_INVALID_ARGUMENT;

        vm_map_lock(map);

        VM_MAP_RANGE_CHECK(map, start, end);

        if (vm_map_lookup_entry(map, start, &entry)) {
                vm_map_clip_start(map, entry, start);
        } else
                entry = entry->next;

        vm_map_clip_end(map, entry, end);

        if ((entry->start == start) && (entry->end == end) &&
            ((entry->eflags & MAP_ENTRY_COW) == 0) &&
            (entry->object.vm_object == NULL)) {
                entry->object.sub_map = submap;
                entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
                result = KERN_SUCCESS;
        }
        vm_map_unlock(map);

        return (result);
}
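
/*
 * Illustrative use (kernel only): submaps carve a portion of kernel VA out
 * of kernel_map so it can be managed under its own lock, e.g. roughly
 *
 *      submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr, size);
 *
 * where kmem_suballoc() performs the vm_map_find()/vm_map_submap()
 * pairing described above.
 */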

/*
 *      vm_map_protect:
 *
 *      Sets the protection of the specified address
 *      region in the target map.  If "set_max" is
 *      specified, the maximum protection is to be set;
 *      otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
               vm_prot_t new_prot, boolean_t set_max)
{
        vm_map_entry_t current;
        vm_map_entry_t entry;

        vm_map_lock(map);

        VM_MAP_RANGE_CHECK(map, start, end);

        if (vm_map_lookup_entry(map, start, &entry)) {
                vm_map_clip_start(map, entry, start);
        } else {
                entry = entry->next;
        }

        /*
         * Make a first pass to check for protection violations.
         */
        current = entry;
        while ((current != &map->header) && (current->start < end)) {
                if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
                        vm_map_unlock(map);
                        return (KERN_INVALID_ARGUMENT);
                }
                if ((new_prot & current->max_protection) != new_prot) {
                        vm_map_unlock(map);
                        return (KERN_PROTECTION_FAILURE);
                }
                current = current->next;
        }

        /*
         * Go back and fix up protections. [Note that clipping is not
         * necessary the second time.]
         */
        current = entry;
        while ((current != &map->header) && (current->start < end)) {
                vm_prot_t old_prot;

                vm_map_clip_end(map, current, end);

                old_prot = current->protection;
                if (set_max)
                        current->protection =
                            (current->max_protection = new_prot) &
                            old_prot;
                else
                        current->protection = new_prot;

                /*
                 * Update physical map if necessary. Worry about copy-on-write
                 * here -- CHECK THIS XXX
                 */
                if (current->protection != old_prot) {
                        mtx_lock(&Giant);
                        vm_page_lock_queues();
#define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
                                                        VM_PROT_ALL)
                        pmap_protect(map->pmap, current->start,
                            current->end,
                            current->protection & MASK(current));
#undef  MASK
                        vm_page_unlock_queues();
                        mtx_unlock(&Giant);
                }
                vm_map_simplify_entry(map, current);
                current = current->next;
        }
        vm_map_unlock(map);
        return (KERN_SUCCESS);
}
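
/*
 * Example (illustrative): the mprotect(2) path makes a range read-only with
 *
 *      vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * Note that MASK() above keeps VM_PROT_WRITE out of the pmap for
 * copy-on-write entries, so the first write still faults and copies.
 */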

/*
 *      vm_map_madvise:
 *
 *      This routine traverses a process's map handling the madvise
 *      system call.  Advisories are classified as either those affecting
 *      the vm_map_entry structure, or those affecting the underlying
 *      objects.
 */
int
vm_map_madvise(
        vm_map_t map,
        vm_offset_t start,
        vm_offset_t end,
        int behav)
{
        vm_map_entry_t current, entry;
        int modify_map = 0;

        /*
         * Some madvise calls directly modify the vm_map_entry, in which case
         * we need to use an exclusive lock on the map and we need to perform
         * various clipping operations.  Otherwise we only need a read-lock
         * on the map.
         */
        switch(behav) {
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
        case MADV_NOSYNC:
        case MADV_AUTOSYNC:
        case MADV_NOCORE:
        case MADV_CORE:
                modify_map = 1;
                vm_map_lock(map);
                break;
        case MADV_WILLNEED:
        case MADV_DONTNEED:
        case MADV_FREE:
                vm_map_lock_read(map);
                break;
        default:
                return (KERN_INVALID_ARGUMENT);
        }

        /*
         * Locate starting entry and clip if necessary.
         */
        VM_MAP_RANGE_CHECK(map, start, end);

        if (vm_map_lookup_entry(map, start, &entry)) {
                if (modify_map)
                        vm_map_clip_start(map, entry, start);
        } else {
                entry = entry->next;
        }

        if (modify_map) {
                /*
                 * madvise behaviors that are implemented in the vm_map_entry.
                 *
                 * We clip the vm_map_entry so that behavioral changes are
                 * limited to the specified address range.
                 */
                for (current = entry;
                     (current != &map->header) && (current->start < end);
                     current = current->next
                ) {
                        if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
                                continue;

                        vm_map_clip_end(map, current, end);

                        switch (behav) {
                        case MADV_NORMAL:
                                vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
                                break;
                        case MADV_SEQUENTIAL:
                                vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
                                break;
                        case MADV_RANDOM:
                                vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
                                break;
                        case MADV_NOSYNC:
                                current->eflags |= MAP_ENTRY_NOSYNC;
                                break;
                        case MADV_AUTOSYNC:
                                current->eflags &= ~MAP_ENTRY_NOSYNC;
                                break;
                        case MADV_NOCORE:
                                current->eflags |= MAP_ENTRY_NOCOREDUMP;
                                break;
                        case MADV_CORE:
                                current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
                                break;
                        default:
                                break;
                        }
                        vm_map_simplify_entry(map, current);
                }
                vm_map_unlock(map);
        } else {
                vm_pindex_t pindex;
                int count;

                /*
                 * madvise behaviors that are implemented in the underlying
                 * vm_object.
                 *
                 * Since we don't clip the vm_map_entry, we have to clip
                 * the vm_object pindex and count.
                 */
                for (current = entry;
                     (current != &map->header) && (current->start < end);
                     current = current->next
                ) {
                        vm_offset_t useStart;

                        if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
                                continue;

                        pindex = OFF_TO_IDX(current->offset);
                        count = atop(current->end - current->start);
                        useStart = current->start;

                        if (current->start < start) {
                                pindex += atop(start - current->start);
                                count -= atop(start - current->start);
                                useStart = start;
                        }
                        if (current->end > end)
                                count -= atop(current->end - end);

                        if (count <= 0)
                                continue;

                        vm_object_madvise(current->object.vm_object,
                                          pindex, count, behav);
                        if (behav == MADV_WILLNEED) {
                                mtx_lock(&Giant);
                                pmap_object_init_pt(
                                    map->pmap,
                                    useStart,
                                    current->object.vm_object,
                                    pindex,
                                    (count << PAGE_SHIFT),
                                    MAP_PREFAULT_MADVISE
                                );
                                mtx_unlock(&Giant);
                        }
                }
                vm_map_unlock_read(map);
        }
        return (0);
}
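
/*
 * Example (illustrative): madvise(2) with MADV_NOSYNC takes the
 * exclusive-lock branch above and sets MAP_ENTRY_NOSYNC on the clipped
 * entries, while MADV_FREE takes the read-lock branch and is forwarded
 * to vm_object_madvise() on the clipped pindex range of the backing
 * objects.
 */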
1499
1500
1501 /*
1502  *      vm_map_inherit:
1503  *
1504  *      Sets the inheritance of the specified address
1505  *      range in the target map.  Inheritance
1506  *      affects how the map will be shared with
1507  *      child maps at the time of vm_map_fork.
1508  */
1509 int
1510 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
1511                vm_inherit_t new_inheritance)
1512 {
1513         vm_map_entry_t entry;
1514         vm_map_entry_t temp_entry;
1515
1516         switch (new_inheritance) {
1517         case VM_INHERIT_NONE:
1518         case VM_INHERIT_COPY:
1519         case VM_INHERIT_SHARE:
1520                 break;
1521         default:
1522                 return (KERN_INVALID_ARGUMENT);
1523         }
1524         vm_map_lock(map);
1525         VM_MAP_RANGE_CHECK(map, start, end);
1526         if (vm_map_lookup_entry(map, start, &temp_entry)) {
1527                 entry = temp_entry;
1528                 vm_map_clip_start(map, entry, start);
1529         } else
1530                 entry = temp_entry->next;
1531         while ((entry != &map->header) && (entry->start < end)) {
1532                 vm_map_clip_end(map, entry, end);
1533                 entry->inheritance = new_inheritance;
1534                 vm_map_simplify_entry(map, entry);
1535                 entry = entry->next;
1536         }
1537         vm_map_unlock(map);
1538         return (KERN_SUCCESS);
1539 }
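     /*
      * Minimal usage sketch (modeled on the minherit(2) path; "addr" and
      * "size" are assumed to be page-aligned):
      *
      *	if (vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
      *	    VM_INHERIT_SHARE) != KERN_SUCCESS)
      *		return (EINVAL);
      */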
1540
1541 /*
1542  *      vm_map_unwire:
1543  *
1544  *      Implements both kernel and user unwiring.
1545  */
1546 int
1547 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
1548         boolean_t user_unwire)
1549 {
1550         vm_map_entry_t entry, first_entry, tmp_entry;
1551         vm_offset_t saved_start;
1552         unsigned int last_timestamp;
1553         int rv;
1554         boolean_t need_wakeup, result;
1555
1556         vm_map_lock(map);
1557         VM_MAP_RANGE_CHECK(map, start, end);
1558         if (!vm_map_lookup_entry(map, start, &first_entry)) {
1559                 vm_map_unlock(map);
1560                 return (KERN_INVALID_ADDRESS);
1561         }
1562         last_timestamp = map->timestamp;
1563         entry = first_entry;
1564         while (entry != &map->header && entry->start < end) {
1565                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1566                         /*
1567                          * We have not yet clipped the entry.
1568                          */
1569                         saved_start = (start >= entry->start) ? start :
1570                             entry->start;
1571                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1572                         if (vm_map_unlock_and_wait(map, user_unwire)) {
1573                                 /*
1574                                  * Allow interruption of user unwiring?
1575                                  */
1576                         }
1577                         vm_map_lock(map);
1578                         if (last_timestamp+1 != map->timestamp) {
1579                                 /*
1580                                  * Look again for the entry because the map was
1581                                  * modified while it was unlocked.
1582                                  * Specifically, the entry may have been
1583                                  * clipped, merged, or deleted.
1584                                  */
1585                                 if (!vm_map_lookup_entry(map, saved_start,
1586                                     &tmp_entry)) {
1587                                         if (saved_start == start) {
1588                                                 /*
1589                                                  * first_entry has been deleted.
1590                                                  */
1591                                                 vm_map_unlock(map);
1592                                                 return (KERN_INVALID_ADDRESS);
1593                                         }
1594                                         end = saved_start;
1595                                         rv = KERN_INVALID_ADDRESS;
1596                                         goto done;
1597                                 }
1598                                 if (entry == first_entry)
1599                                         first_entry = tmp_entry;
1600                                 else
1601                                         first_entry = NULL;
1602                                 entry = tmp_entry;
1603                         }
1604                         last_timestamp = map->timestamp;
1605                         continue;
1606                 }
1607                 vm_map_clip_start(map, entry, start);
1608                 vm_map_clip_end(map, entry, end);
1609                 /*
1610                  * Mark the entry in case the map lock is released.  (See
1611                  * above.)
1612                  */
1613                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1614                 /*
1615                  * Check the map for holes in the specified region.
1616                  */
1617                 if (entry->end < end && (entry->next == &map->header ||
1618                     entry->next->start > entry->end)) {
1619                         end = entry->end;
1620                         rv = KERN_INVALID_ADDRESS;
1621                         goto done;
1622                 }
1623                 /*
1624                  * Require that the entry is wired.
1625                  */
1626                 if (entry->wired_count == 0 || (user_unwire &&
1627                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)) {
1628                         end = entry->end;
1629                         rv = KERN_INVALID_ARGUMENT;
1630                         goto done;
1631                 }
1632                 entry = entry->next;
1633         }
1634         rv = KERN_SUCCESS;
1635 done:
1636         need_wakeup = FALSE;
1637         if (first_entry == NULL) {
1638                 result = vm_map_lookup_entry(map, start, &first_entry);
1639                 KASSERT(result, ("vm_map_unwire: lookup failed"));
1640         }
1641         entry = first_entry;
1642         while (entry != &map->header && entry->start < end) {
1643                 if (rv == KERN_SUCCESS) {
1644                         if (user_unwire)
1645                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
1646                         entry->wired_count--;
1647                         if (entry->wired_count == 0) {
1648                                 /*
1649                                  * Retain the map lock.
1650                                  */
1651                                 vm_fault_unwire(map, entry->start, entry->end);
1652                         }
1653                 }
1654                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1655                         ("vm_map_unwire: in-transition flag missing"));
1656                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1657                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1658                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1659                         need_wakeup = TRUE;
1660                 }
1661                 vm_map_simplify_entry(map, entry);
1662                 entry = entry->next;
1663         }
1664         vm_map_unlock(map);
1665         if (need_wakeup)
1666                 vm_map_wakeup(map);
1667         return (rv);
1668 }
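     /*
      * Minimal usage sketch (munlock(2)-style caller assumed): user
      * unwiring passes TRUE so that only MAP_ENTRY_USER_WIRED entries
      * qualify; kernel callers pass FALSE.
      *
      *	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map,
      *	    addr, addr + size, TRUE);
      *	return (error == KERN_SUCCESS ? 0 : ENOMEM);
      */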
1669
1670 /*
1671  *      vm_map_wire:
1672  *
1673  *      Implements both kernel and user wiring.
1674  */
1675 int
1676 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
1677         boolean_t user_wire)
1678 {
1679         vm_map_entry_t entry, first_entry, tmp_entry;
1680         vm_offset_t saved_end, saved_start;
1681         unsigned int last_timestamp;
1682         int rv;
1683         boolean_t need_wakeup, result;
1684
1685         vm_map_lock(map);
1686         VM_MAP_RANGE_CHECK(map, start, end);
1687         if (!vm_map_lookup_entry(map, start, &first_entry)) {
1688                 vm_map_unlock(map);
1689                 return (KERN_INVALID_ADDRESS);
1690         }
1691         last_timestamp = map->timestamp;
1692         entry = first_entry;
1693         while (entry != &map->header && entry->start < end) {
1694                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
1695                         /*
1696                          * We have not yet clipped the entry.
1697                          */
1698                         saved_start = (start >= entry->start) ? start :
1699                             entry->start;
1700                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
1701                         if (vm_map_unlock_and_wait(map, user_wire)) {
1702                                 /*
1703                                  * Allow interruption of user wiring?
1704                                  */
1705                         }
1706                         vm_map_lock(map);
1707                         if (last_timestamp + 1 != map->timestamp) {
1708                                 /*
1709                                  * Look again for the entry because the map was
1710                                  * modified while it was unlocked.
1711                                  * Specifically, the entry may have been
1712                                  * clipped, merged, or deleted.
1713                                  */
1714                                 if (!vm_map_lookup_entry(map, saved_start,
1715                                     &tmp_entry)) {
1716                                         if (saved_start == start) {
1717                                                 /*
1718                                                  * first_entry has been deleted.
1719                                                  */
1720                                                 vm_map_unlock(map);
1721                                                 return (KERN_INVALID_ADDRESS);
1722                                         }
1723                                         end = saved_start;
1724                                         rv = KERN_INVALID_ADDRESS;
1725                                         goto done;
1726                                 }
1727                                 if (entry == first_entry)
1728                                         first_entry = tmp_entry;
1729                                 else
1730                                         first_entry = NULL;
1731                                 entry = tmp_entry;
1732                         }
1733                         last_timestamp = map->timestamp;
1734                         continue;
1735                 }
1736                 vm_map_clip_start(map, entry, start);
1737                 vm_map_clip_end(map, entry, end);
1738                 /*
1739                  * Mark the entry in case the map lock is released.  (See
1740                  * above.)
1741                  */
1742                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
1743                 /*
1744                  * Wire the entry if it is not already wired.
1745                  */
1746                 if (entry->wired_count == 0) {
1747                         entry->wired_count++;
1748                         saved_start = entry->start;
1749                         saved_end = entry->end;
1750                         /*
1751                          * Release the map lock, relying on the in-transition
1752                          * mark.
1753                          */
1754                         vm_map_unlock(map);
1755                         rv = vm_fault_wire(map, saved_start, saved_end,
1756                             user_wire);
1757                         vm_map_lock(map);
1758                         if (last_timestamp + 1 != map->timestamp) {
1759                                 /*
1760                                  * Look again for the entry because the map was
1761                                  * modified while it was unlocked.  The entry
1762                                  * may have been clipped, but NOT merged or
1763                                  * deleted.
1764                                  */
1765                                 result = vm_map_lookup_entry(map, saved_start,
1766                                     &tmp_entry);
1767                                 KASSERT(result, ("vm_map_wire: lookup failed"));
1768                                 if (entry == first_entry)
1769                                         first_entry = tmp_entry;
1770                                 else
1771                                         first_entry = NULL;
1772                                 entry = tmp_entry;
1773                                 while (entry->end < saved_end) {
1774                                         if (rv != KERN_SUCCESS) {
1775                                                 KASSERT(entry->wired_count == 1,
1776                                                     ("vm_map_wire: bad count"));
1777                                                 entry->wired_count = -1;
1778                                         }
1779                                         entry = entry->next;
1780                                 }
1781                         }
1782                         last_timestamp = map->timestamp;
1783                         if (rv != KERN_SUCCESS) {
1784                                 KASSERT(entry->wired_count == 1,
1785                                     ("vm_map_wire: bad count"));
1786                                 /*
1787                                  * Assign an out-of-range value to represent
1788                                  * the failure to wire this entry.
1789                                  */
1790                                 entry->wired_count = -1;
1791                                 end = entry->end;
1792                                 goto done;
1793                         }
1794                 } else if (!user_wire ||
1795                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
1796                         entry->wired_count++;
1797                 }
1798                 /*
1799                  * Check the map for holes in the specified region.
1800                  */
1801                 if (entry->end < end && (entry->next == &map->header ||
1802                     entry->next->start > entry->end)) {
1803                         end = entry->end;
1804                         rv = KERN_INVALID_ADDRESS;
1805                         goto done;
1806                 }
1807                 entry = entry->next;
1808         }
1809         rv = KERN_SUCCESS;
1810 done:
1811         need_wakeup = FALSE;
1812         if (first_entry == NULL) {
1813                 result = vm_map_lookup_entry(map, start, &first_entry);
1814                 KASSERT(result, ("vm_map_wire: lookup failed"));
1815         }
1816         entry = first_entry;
1817         while (entry != &map->header && entry->start < end) {
1818                 if (rv == KERN_SUCCESS) {
1819                         if (user_wire)
1820                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
1821                 } else if (entry->wired_count == -1) {
1822                         /*
1823                          * Wiring failed on this entry.  Thus, unwiring is
1824                          * unnecessary.
1825                          */
1826                         entry->wired_count = 0;
1827                 } else {
1828                         if (!user_wire ||
1829                             (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
1830                                 entry->wired_count--;
1831                         if (entry->wired_count == 0) {
1832                                 /*
1833                                  * Retain the map lock.
1834                                  */
1835                                 vm_fault_unwire(map, entry->start, entry->end);
1836                         }
1837                 }
1838                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
1839                         ("vm_map_wire: in-transition flag missing"));
1840                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
1841                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
1842                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
1843                         need_wakeup = TRUE;
1844                 }
1845                 vm_map_simplify_entry(map, entry);
1846                 entry = entry->next;
1847         }
1848         vm_map_unlock(map);
1849         if (need_wakeup)
1850                 vm_map_wakeup(map);
1851         return (rv);
1852 }
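     /*
      * Minimal usage sketch (mlock(2)-style caller assumed): user wiring
      * passes TRUE, which also sets MAP_ENTRY_USER_WIRED on success;
      * kernel wiring passes FALSE.
      *
      *	error = vm_map_wire(&td->td_proc->p_vmspace->vm_map,
      *	    addr, addr + size, TRUE);
      *	return (error == KERN_SUCCESS ? 0 : ENOMEM);
      */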
1853
1854 /*
1855  * vm_map_clean
1856  *
1857  * Push any dirty cached pages in the address range to their pager.
1858  * If syncio is TRUE, dirty pages are written synchronously.
1859  * If invalidate is TRUE, any cached pages are freed as well.
1860  *
1861  * Returns an error if any part of the specified range is not mapped.
1862  */
1863 int
1864 vm_map_clean(
1865         vm_map_t map,
1866         vm_offset_t start,
1867         vm_offset_t end,
1868         boolean_t syncio,
1869         boolean_t invalidate)
1870 {
1871         vm_map_entry_t current;
1872         vm_map_entry_t entry;
1873         vm_size_t size;
1874         vm_object_t object;
1875         vm_ooffset_t offset;
1876
1877         GIANT_REQUIRED;
1878
1879         vm_map_lock_read(map);
1880         VM_MAP_RANGE_CHECK(map, start, end);
1881         if (!vm_map_lookup_entry(map, start, &entry)) {
1882                 vm_map_unlock_read(map);
1883                 return (KERN_INVALID_ADDRESS);
1884         }
1885         /*
1886          * Make a first pass to check for holes.
1887          */
1888         for (current = entry; current->start < end; current = current->next) {
1889                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1890                         vm_map_unlock_read(map);
1891                         return (KERN_INVALID_ARGUMENT);
1892                 }
1893                 if (end > current->end &&
1894                     (current->next == &map->header ||
1895                         current->end != current->next->start)) {
1896                         vm_map_unlock_read(map);
1897                         return (KERN_INVALID_ADDRESS);
1898                 }
1899         }
1900
1901         if (invalidate) {
1902                 vm_page_lock_queues();
1903                 pmap_remove(map->pmap, start, end);
1904                 vm_page_unlock_queues();
1905         }
1906         /*
1907          * Make a second pass, cleaning/uncaching pages from the indicated
1908          * objects as we go.
1909          */
1910         for (current = entry; current->start < end; current = current->next) {
1911                 offset = current->offset + (start - current->start);
1912                 size = (end <= current->end ? end : current->end) - start;
1913                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1914                         vm_map_t smap;
1915                         vm_map_entry_t tentry;
1916                         vm_size_t tsize;
1917
1918                         smap = current->object.sub_map;
1919                         vm_map_lock_read(smap);
1920                         (void) vm_map_lookup_entry(smap, offset, &tentry);
1921                         tsize = tentry->end - offset;
1922                         if (tsize < size)
1923                                 size = tsize;
1924                         object = tentry->object.vm_object;
1925                         offset = tentry->offset + (offset - tentry->start);
1926                         vm_map_unlock_read(smap);
1927                 } else {
1928                         object = current->object.vm_object;
1929                 }
1930                 /*
1931                  * Note that there is absolutely no sense in writing out
1932                  * anonymous objects, so we track down the vnode object
1933                  * to write out.
1934                  * We invalidate (remove) all pages from the address space
1935                  * anyway, for semantic correctness.
1936                  *
1937                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
1938                  * may start out with a NULL object.
1939                  */
1940                 while (object && object->backing_object) {
1941                         offset += object->backing_object_offset;
1942                         object = object->backing_object;
1943                         if (object->size < OFF_TO_IDX(offset + size))
1944                                 size = IDX_TO_OFF(object->size) - offset;
1945                 }
1946                 if (object && (object->type == OBJT_VNODE) && 
1947                     (current->protection & VM_PROT_WRITE)) {
1948                         /*
1949                          * Flush pages if writing is allowed, invalidate them
1950                          * if invalidation requested.  Pages undergoing I/O
1951                          * will be ignored by vm_object_page_remove().
1952                          *
1953                          * We cannot lock the vnode and then wait for paging
1954                          * to complete without deadlocking against vm_fault.
1955                          * Instead we simply call vm_object_page_remove() and
1956                          * allow it to block internally on a page-by-page 
1957                          * basis when it encounters pages undergoing async 
1958                          * I/O.
1959                          */
1960                         int flags;
1961
1962                         vm_object_reference(object);
1963                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
1964                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1965                         flags |= invalidate ? OBJPC_INVAL : 0;
1966                         vm_object_page_clean(object,
1967                             OFF_TO_IDX(offset),
1968                             OFF_TO_IDX(offset + size + PAGE_MASK),
1969                             flags);
1970                         VOP_UNLOCK(object->handle, 0, curthread);
1971                         vm_object_deallocate(object);
1972                 }
1973                 if (object && invalidate &&
1974                     ((object->type == OBJT_VNODE) ||
1975                      (object->type == OBJT_DEVICE))) {
1976                         vm_object_reference(object);
1977                         vm_object_lock(object);
1978                         vm_object_page_remove(object,
1979                             OFF_TO_IDX(offset),
1980                             OFF_TO_IDX(offset + size + PAGE_MASK),
1981                             FALSE);
1982                         vm_object_unlock(object);
1983                         vm_object_deallocate(object);
1984                 }
1985                 start += size;
1986         }
1987
1988         vm_map_unlock_read(map);
1989         return (KERN_SUCCESS);
1990 }
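     /*
      * Minimal usage sketch (msync(2)-style caller assumed; the exact
      * flag-to-argument mapping is illustrative): MS_SYNC selects
      * synchronous writes and MS_INVALIDATE frees cached pages.
      *
      *	rv = vm_map_clean(map, addr, addr + size,
      *	    (flags & MS_SYNC) != 0, (flags & MS_INVALIDATE) != 0);
      *	if (rv != KERN_SUCCESS)
      *		return (EINVAL);
      */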
1991
1992 /*
1993  *      vm_map_entry_unwire:    [ internal use only ]
1994  *
1995  *      Make the region specified by this entry pageable.
1996  *
1997  *      The map in question should be locked.
1998  *      [This is the reason for this routine's existence.]
1999  */
2000 static void
2001 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2002 {
2003         vm_fault_unwire(map, entry->start, entry->end);
2004         entry->wired_count = 0;
2005 }
2006
2007 /*
2008  *      vm_map_entry_delete:    [ internal use only ]
2009  *
2010  *      Deallocate the given entry from the target map.
2011  */
2012 static void
2013 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2014 {
2015         vm_map_entry_unlink(map, entry);
2016         map->size -= entry->end - entry->start;
2017
2018         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2019                 vm_object_deallocate(entry->object.vm_object);
2020         }
2021
2022         vm_map_entry_dispose(map, entry);
2023 }
2024
2025 /*
2026  *      vm_map_delete:  [ internal use only ]
2027  *
2028  *      Deallocates the given address range from the target
2029  *      map.
2030  */
2031 int
2032 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2033 {
2034         vm_object_t object;
2035         vm_map_entry_t entry;
2036         vm_map_entry_t first_entry;
2037
2038         /*
2039          * Find the start of the region, and clip it
2040          */
2041         if (!vm_map_lookup_entry(map, start, &first_entry))
2042                 entry = first_entry->next;
2043         else {
2044                 entry = first_entry;
2045                 vm_map_clip_start(map, entry, start);
2046         }
2047
2048         /*
2049          * Save the free space hint
2050          */
2051         if (entry == &map->header) {
2052                 map->first_free = &map->header;
2053         } else if (map->first_free->start >= start) {
2054                 map->first_free = entry->prev;
2055         }
2056
2057         /*
2058          * Step through all entries in this region
2059          */
2060         while ((entry != &map->header) && (entry->start < end)) {
2061                 vm_map_entry_t next;
2062                 vm_offset_t s, e;
2063                 vm_pindex_t offidxstart, offidxend, count;
2064
2065                 /*
2066                  * Wait for wiring or unwiring of an entry to complete.
2067                  */
2068                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
2069                         unsigned int last_timestamp;
2070                         vm_offset_t saved_start;
2071                         vm_map_entry_t tmp_entry;
2072
2073                         saved_start = entry->start;
2074                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2075                         last_timestamp = map->timestamp;
2076                         (void) vm_map_unlock_and_wait(map, FALSE);
2077                         vm_map_lock(map);
2078                         if (last_timestamp + 1 != map->timestamp) {
2079                                 /*
2080                                  * Look again for the entry because the map was
2081                                  * modified while it was unlocked.
2082                                  * Specifically, the entry may have been
2083                                  * clipped, merged, or deleted.
2084                                  */
2085                                 if (!vm_map_lookup_entry(map, saved_start,
2086                                                          &tmp_entry))
2087                                         entry = tmp_entry->next;
2088                                 else {
2089                                         entry = tmp_entry;
2090                                         vm_map_clip_start(map, entry,
2091                                                           saved_start);
2092                                 }
2093                         }
2094                         continue;
2095                 }
2096                 vm_map_clip_end(map, entry, end);
2097
2098                 s = entry->start;
2099                 e = entry->end;
2100                 next = entry->next;
2101
2102                 offidxstart = OFF_TO_IDX(entry->offset);
2103                 count = OFF_TO_IDX(e - s);
2104                 object = entry->object.vm_object;
2105
2106                 /*
2107                  * Unwire before removing addresses from the pmap; otherwise,
2108                  * unwiring will put the entries back in the pmap.
2109                  */
2110                 if (entry->wired_count != 0) {
2111                         vm_map_entry_unwire(map, entry);
2112                 }
2113
2114                 offidxend = offidxstart + count;
2115
2116                 if ((object == kernel_object) || (object == kmem_object)) {
2117                         vm_object_lock(object);
2118                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2119                         vm_object_unlock(object);
2120                 } else {
2121                         vm_object_lock(object);
2122                         vm_page_lock_queues();
2123                         pmap_remove(map->pmap, s, e);
2124                         vm_page_unlock_queues();
2125                         if (object != NULL &&
2126                             object->ref_count != 1 &&
2127                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
2128                             (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2129                                 vm_object_collapse(object);
2130                                 vm_object_page_remove(object, offidxstart, offidxend, FALSE);
2131                                 if (object->type == OBJT_SWAP) {
2132                                         swap_pager_freespace(object, offidxstart, count);
2133                                 }
2134                                 if (offidxend >= object->size &&
2135                                     offidxstart < object->size) {
2136                                         object->size = offidxstart;
2137                                 }
2138                         }
2139                         vm_object_unlock(object);
2140                 }
2141
2142                 /*
2143                  * Delete the entry (which may delete the object) only after
2144                  * removing all pmap entries pointing to its pages.
2145                  * (Otherwise, its page frames may be reallocated, and any
2146                  * modify bits will be set in the wrong object!)
2147                  */
2148                 vm_map_entry_delete(map, entry);
2149                 entry = next;
2150         }
2151         return (KERN_SUCCESS);
2152 }
2153
2154 /*
2155  *      vm_map_remove:
2156  *
2157  *      Remove the given address range from the target map.
2158  *      This is the exported form of vm_map_delete.
2159  */
2160 int
2161 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
2162 {
2163         int result, s = 0;
2164
2165         if (map == kmem_map)
2166                 s = splvm();
2167
2168         vm_map_lock(map);
2169         VM_MAP_RANGE_CHECK(map, start, end);
2170         result = vm_map_delete(map, start, end);
2171         vm_map_unlock(map);
2172
2173         if (map == kmem_map)
2174                 splx(s);
2175
2176         return (result);
2177 }
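     /*
      * Minimal usage sketch (munmap(2)-style caller assumed); the range
      * is page-aligned and clamped by VM_MAP_RANGE_CHECK internally:
      *
      *	(void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr + size);
      */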
2178
2179 /*
2180  *      vm_map_check_protection:
2181  *
2182  *      Assert that the target map allows the specified
2183  *      privilege on the entire address region given.
2184  *      The entire region must be allocated.
2185  */
2186 boolean_t
2187 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
2188                         vm_prot_t protection)
2189 {
2190         vm_map_entry_t entry;
2191         vm_map_entry_t tmp_entry;
2192
2193         vm_map_lock_read(map);
2194         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
2195                 vm_map_unlock_read(map);
2196                 return (FALSE);
2197         }
2198         entry = tmp_entry;
2199
2200         while (start < end) {
2201                 if (entry == &map->header) {
2202                         vm_map_unlock_read(map);
2203                         return (FALSE);
2204                 }
2205                 /*
2206                  * No holes allowed!
2207                  */
2208                 if (start < entry->start) {
2209                         vm_map_unlock_read(map);
2210                         return (FALSE);
2211                 }
2212                 /*
2213                  * Check protection associated with entry.
2214                  */
2215                 if ((entry->protection & protection) != protection) {
2216                         vm_map_unlock_read(map);
2217                         return (FALSE);
2218                 }
2219                 /* go to next entry */
2220                 start = entry->end;
2221                 entry = entry->next;
2222         }
2223         vm_map_unlock_read(map);
2224         return (TRUE);
2225 }
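     /*
      * Minimal usage sketch (caller assumed for illustration): verify
      * that an entire user range is readable before operating on it.
      *
      *	if (!vm_map_check_protection(&p->p_vmspace->vm_map,
      *	    start, end, VM_PROT_READ))
      *		return (EFAULT);
      */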
2226
2227 /*
2228  *      vm_map_copy_entry:
2229  *
2230  *      Copies the contents of the source entry to the destination
2231  *      entry.  The entries *must* be aligned properly.
2232  */
2233 static void
2234 vm_map_copy_entry(
2235         vm_map_t src_map,
2236         vm_map_t dst_map,
2237         vm_map_entry_t src_entry, 
2238         vm_map_entry_t dst_entry)
2239 {
2240         vm_object_t src_object;
2241
2242         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2243                 return;
2244
2245         if (src_entry->wired_count == 0) {
2246
2247                 /*
2248                  * If the source entry is marked needs_copy, it is already
2249                  * write-protected.
2250                  */
2251                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2252                         vm_page_lock_queues();
2253                         pmap_protect(src_map->pmap,
2254                             src_entry->start,
2255                             src_entry->end,
2256                             src_entry->protection & ~VM_PROT_WRITE);
2257                         vm_page_unlock_queues();
2258                 }
2259
2260                 /*
2261                  * Make a copy of the object.
2262                  */
2263                 if ((src_object = src_entry->object.vm_object) != NULL) {
2264
2265                         if ((src_object->handle == NULL) &&
2266                                 (src_object->type == OBJT_DEFAULT ||
2267                                  src_object->type == OBJT_SWAP)) {
2268                                 vm_object_collapse(src_object);
2269                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
2270                                         vm_object_split(src_entry);
2271                                         src_object = src_entry->object.vm_object;
2272                                 }
2273                         }
2274
2275                         vm_object_reference(src_object);
2276                         vm_object_lock(src_object);
2277                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
2278                         vm_object_unlock(src_object);
2279                         dst_entry->object.vm_object = src_object;
2280                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2281                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
2282                         dst_entry->offset = src_entry->offset;
2283                 } else {
2284                         dst_entry->object.vm_object = NULL;
2285                         dst_entry->offset = 0;
2286                 }
2287
2288                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
2289                     dst_entry->end - dst_entry->start, src_entry->start);
2290         } else {
2291                 /*
2292                  * Of course, wired down pages can't be set copy-on-write.
2293                  * Cause wired pages to be copied into the new map by
2294                  * simulating faults (the new pages are pageable)
2295                  */
2296                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
2297         }
2298 }
2299
2300 /*
2301  * vmspace_fork:
2302  * Create a new process vmspace structure and vm_map
2303  * based on those of an existing process.  The new map
2304  * is based on the old map, according to the inheritance
2305  * values on the regions in that map.
2306  *
2307  * The source map must not be locked.
2308  */
2309 struct vmspace *
2310 vmspace_fork(struct vmspace *vm1)
2311 {
2312         struct vmspace *vm2;
2313         vm_map_t old_map = &vm1->vm_map;
2314         vm_map_t new_map;
2315         vm_map_entry_t old_entry;
2316         vm_map_entry_t new_entry;
2317         vm_object_t object;
2318
2319         GIANT_REQUIRED;
2320
2321         vm_map_lock(old_map);
2322         old_map->infork = 1;
2323
2324         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2325         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2326             (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy);
2327         new_map = &vm2->vm_map; /* XXX */
2328         new_map->timestamp = 1;
2329
2330         old_entry = old_map->header.next;
2331
2332         while (old_entry != &old_map->header) {
2333                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2334                         panic("vm_map_fork: encountered a submap");
2335
2336                 switch (old_entry->inheritance) {
2337                 case VM_INHERIT_NONE:
2338                         break;
2339
2340                 case VM_INHERIT_SHARE:
2341                         /*
2342                          * Clone the entry, creating the shared object if necessary.
2343                          */
2344                         object = old_entry->object.vm_object;
2345                         if (object == NULL) {
2346                                 object = vm_object_allocate(OBJT_DEFAULT,
2347                                         atop(old_entry->end - old_entry->start));
2348                                 old_entry->object.vm_object = object;
2349                                 old_entry->offset = (vm_ooffset_t) 0;
2350                         }
2351
2352                         /*
2353                          * Add the reference before calling vm_object_shadow
2354                          * to ensure that a shadow object is created.
2355                          */
2356                         vm_object_reference(object);
2357                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2358                                 vm_object_shadow(&old_entry->object.vm_object,
2359                                         &old_entry->offset,
2360                                         atop(old_entry->end - old_entry->start));
2361                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2362                                 /* Transfer the second reference too. */
2363                                 vm_object_reference(
2364                                     old_entry->object.vm_object);
2365                                 vm_object_deallocate(object);
2366                                 object = old_entry->object.vm_object;
2367                         }
2368                         vm_object_lock(object);
2369                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
2370                         vm_object_unlock(object);
2371
2372                         /*
2373                          * Clone the entry, referencing the shared object.
2374                          */
2375                         new_entry = vm_map_entry_create(new_map);
2376                         *new_entry = *old_entry;
2377                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2378                         new_entry->wired_count = 0;
2379
2380                         /*
2381                          * Insert the entry into the new map -- we know we're
2382                          * inserting at the end of the new map.
2383                          */
2384                         vm_map_entry_link(new_map, new_map->header.prev,
2385                             new_entry);
2386
2387                         /*
2388                          * Update the physical map
2389                          */
2390                         pmap_copy(new_map->pmap, old_map->pmap,
2391                             new_entry->start,
2392                             (old_entry->end - old_entry->start),
2393                             old_entry->start);
2394                         break;
2395
2396                 case VM_INHERIT_COPY:
2397                         /*
2398                          * Clone the entry and link into the map.
2399                          */
2400                         new_entry = vm_map_entry_create(new_map);
2401                         *new_entry = *old_entry;
2402                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2403                         new_entry->wired_count = 0;
2404                         new_entry->object.vm_object = NULL;
2405                         vm_map_entry_link(new_map, new_map->header.prev,
2406                             new_entry);
2407                         vm_map_copy_entry(old_map, new_map, old_entry,
2408                             new_entry);
2409                         break;
2410                 }
2411                 old_entry = old_entry->next;
2412         }
2413
2414         new_map->size = old_map->size;
2415         old_map->infork = 0;
2416         vm_map_unlock(old_map);
2417
2418         return (vm2);
2419 }
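     /*
      * Minimal usage sketch (fork1()-style caller assumed): the child
      * inherits a copy of the parent's map according to the entries'
      * inheritance values.
      *
      *	p2->p_vmspace = vmspace_fork(p1->p_vmspace);
      */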
2420
2421 int
2422 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
2423               vm_prot_t prot, vm_prot_t max, int cow)
2424 {
2425         vm_map_entry_t prev_entry;
2426         vm_map_entry_t new_stack_entry;
2427         vm_size_t      init_ssize;
2428         int            rv;
2429
2430         if (addrbos < vm_map_min(map))
2431                 return (KERN_NO_SPACE);
2432
2433         if (max_ssize < sgrowsiz)
2434                 init_ssize = max_ssize;
2435         else
2436                 init_ssize = sgrowsiz;
2437
2438         vm_map_lock(map);
2439
2440         /* If addr is already mapped, no go */
2441         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
2442                 vm_map_unlock(map);
2443                 return (KERN_NO_SPACE);
2444         }
2445
2446         /* If we would blow our VMEM resource limit, no go */
2447         if (map->size + init_ssize >
2448             curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2449                 vm_map_unlock(map);
2450                 return (KERN_NO_SPACE);
2451         }
2452
2453         /* If we can't accommodate max_ssize in the current mapping,
2454          * no go.  However, we need to be aware that subsequent user
2455          * mappings might map into the space we have reserved for
2456          * the stack, and currently this space is not protected.
2457          *
2458          * Hopefully we will at least detect this condition
2459          * when we try to grow the stack.
2460          */
2461         if ((prev_entry->next != &map->header) &&
2462             (prev_entry->next->start < addrbos + max_ssize)) {
2463                 vm_map_unlock(map);
2464                 return (KERN_NO_SPACE);
2465         }
2466
2467         /* We initially map a stack of only init_ssize.  We will
2468          * grow as needed later.  Since this is to be a grow-
2469          * down stack, we map at the top of the range.
2470          *
2471          * Note: we would normally expect prot and max to be
2472          * VM_PROT_ALL, and cow to be 0.  Possibly we should
2473          * eliminate these as input parameters, and just
2474          * pass these values here in the insert call.
2475          */
2476         rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
2477                            addrbos + max_ssize, prot, max, cow);
2478
2479         /* Now set the avail_ssize amount */
2480         if (rv == KERN_SUCCESS){
2481                 if (prev_entry != &map->header)
2482                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
2483                 new_stack_entry = prev_entry->next;
2484                 if (new_stack_entry->end   != addrbos + max_ssize ||
2485                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
2486                         panic("Bad entry start/end for new stack entry");
2487                 else 
2488                         new_stack_entry->avail_ssize = max_ssize - init_ssize;
2489         }
2490
2491         vm_map_unlock(map);
2492         return (rv);
2493 }
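     /*
      * Minimal usage sketch (exec-time caller assumed; "stack_addr" is
      * an assumed name and maxssiz is the system stack-size limit):
      * reserve max_ssize but map only the initial portion at the top.
      *
      *	rv = vm_map_stack(&vmspace->vm_map, stack_addr, maxssiz,
      *	    VM_PROT_ALL, VM_PROT_ALL, 0);
      */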
2494
2495 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
2496  * desired address is already mapped, or if we successfully grow
2497  * the stack.  Also returns KERN_SUCCESS if addr is outside the
2498  * stack range (this is strange, but preserves compatibility with
2499  * the grow function in vm_machdep.c).
2500  */
2501 int
2502 vm_map_growstack (struct proc *p, vm_offset_t addr)
2503 {
2504         vm_map_entry_t prev_entry;
2505         vm_map_entry_t stack_entry;
2506         vm_map_entry_t new_stack_entry;
2507         struct vmspace *vm = p->p_vmspace;
2508         vm_map_t map = &vm->vm_map;
2509         vm_offset_t    end;
2510         int      grow_amount;
2511         int      rv;
2512         int      is_procstack;
2513
2514         GIANT_REQUIRED;
2515         
2516 Retry:
2517         vm_map_lock_read(map);
2518
2519         /* If addr is already in the entry range, no need to grow. */
2520         if (vm_map_lookup_entry(map, addr, &prev_entry)) {
2521                 vm_map_unlock_read(map);
2522                 return (KERN_SUCCESS);
2523         }
2524
2525         if ((stack_entry = prev_entry->next) == &map->header) {
2526                 vm_map_unlock_read(map);
2527                 return (KERN_SUCCESS);
2528         } 
2529         if (prev_entry == &map->header) 
2530                 end = stack_entry->start - stack_entry->avail_ssize;
2531         else
2532                 end = prev_entry->end;
2533
2534         /* This next test mimics the old grow function in vm_machdep.c.
2535          * It really doesn't quite make sense, but we do it anyway
2536          * for compatibility.
2537          *
2538          * If this is not a growable stack, return success.  This signals
2539          * the caller to proceed as it normally would with ordinary VM.
2540          */
2541         if (stack_entry->avail_ssize < 1 ||
2542             addr >= stack_entry->start ||
2543             addr <  stack_entry->start - stack_entry->avail_ssize) {
2544                 vm_map_unlock_read(map);
2545                 return (KERN_SUCCESS);
2546         } 
2547         
2548         /* Find the minimum grow amount */
2549         grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
2550         if (grow_amount > stack_entry->avail_ssize) {
2551                 vm_map_unlock_read(map);
2552                 return (KERN_NO_SPACE);
2553         }
2554
2555         /* If there is no longer enough space between the entries,
2556          * fail, but first adjust the available space.  Note: this
2557          * should only happen if the user has mapped into the
2558          * stack area after the stack was created, and is
2559          * probably an error.
2560          *
2561          * This also effectively destroys any guard page the user
2562          * might have intended by limiting the stack size.
2563          */
2564         if (grow_amount > stack_entry->start - end) {
2565                 if (vm_map_lock_upgrade(map))
2566                         goto Retry;
2567
2568                 stack_entry->avail_ssize = stack_entry->start - end;
2569
2570                 vm_map_unlock(map);
2571                 return (KERN_NO_SPACE);
2572         }
2573
2574         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
2575
2576         /* If this is the main process stack, see if we're over the 
2577          * stack limit.
2578          */
2579         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2580                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2581                 vm_map_unlock_read(map);
2582                 return (KERN_NO_SPACE);
2583         }
2584
2585         /* Round up the grow amount to a multiple of sgrowsiz. */
2586         grow_amount = roundup(grow_amount, sgrowsiz);
2587         if (grow_amount > stack_entry->avail_ssize) {
2588                 grow_amount = stack_entry->avail_ssize;
2589         }
2590         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
2591                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
2592                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
2593                               ctob(vm->vm_ssize);
2594         }
2595
2596         /* If we would blow our VMEM resource limit, no go */
2597         if (map->size + grow_amount >
2598             curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
2599                 vm_map_unlock_read(map);
2600                 return (KERN_NO_SPACE);
2601         }
2602
2603         if (vm_map_lock_upgrade(map))
2604                 goto Retry;
2605
2606         /* Get the preliminary new entry start value */
2607         addr = stack_entry->start - grow_amount;
2608
2609         /* If this puts us into the previous entry, cut back our growth
2610          * to the available space.  Also, see the note above.
2611          */
2612         if (addr < end) {
2613                 stack_entry->avail_ssize = stack_entry->start - end;
2614                 addr = end;
2615         }
2616
2617         rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
2618             p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
2619
2620         /* Adjust the available stack space by the amount we grew. */
2621         if (rv == KERN_SUCCESS) {
2622                 if (prev_entry != &map->header)
2623                         vm_map_clip_end(map, prev_entry, addr);
2624                 new_stack_entry = prev_entry->next;
2625                 if (new_stack_entry->end   != stack_entry->start  ||
2626                     new_stack_entry->start != addr)
2627                         panic("Bad stack grow start/end in new stack entry");
2628                 else {
2629                         new_stack_entry->avail_ssize = stack_entry->avail_ssize -
2630                                                         (new_stack_entry->end -
2631                                                          new_stack_entry->start);
2632                         if (is_procstack)
2633                                 vm->vm_ssize += btoc(new_stack_entry->end -
2634                                                      new_stack_entry->start);
2635                 }
2636         }
2637
2638         vm_map_unlock(map);
2639         return (rv);
2640 }
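     /*
      * Minimal usage sketch (page-fault path assumed): try to grow the
      * stack toward a faulting address before calling vm_fault; note
      * that KERN_SUCCESS only means "proceed with normal fault handling".
      *
      *	if (vm_map_growstack(p, va) != KERN_SUCCESS)
      *		return (SIGSEGV);
      */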
2641
2642 /*
2643  * Unshare the specified VM space for exec.  If other processes are
2644  * mapped to it, then create a new one.  The new vmspace starts out empty.
2645  */
2646 void
2647 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
2648 {
2649         struct vmspace *oldvmspace = p->p_vmspace;
2650         struct vmspace *newvmspace;
2651
2652         GIANT_REQUIRED;
2653         newvmspace = vmspace_alloc(minuser, maxuser);
2654         bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2655             (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2656         /*
2657          * This code is written like this for prototype purposes.  The
2658          * goal is to avoid running down the vmspace here, but to let the
2659          * other processes that are still using the vmspace finally
2660          * run it down.  Even though there is little or no chance of blocking
2661          * here, it is a good idea to keep this form for future mods.
2662          */
2663         p->p_vmspace = newvmspace;
2664         pmap_pinit2(vmspace_pmap(newvmspace));
2665         vmspace_free(oldvmspace);
2666         if (p == curthread->td_proc)            /* XXXKSE ? */
2667                 pmap_activate(curthread);
2668 }
2669
2670 /*
2671  * Unshare the specified VM space for forcing COW.  This
2672  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2673  */
2674 void
2675 vmspace_unshare(struct proc *p)
2676 {
2677         struct vmspace *oldvmspace = p->p_vmspace;
2678         struct vmspace *newvmspace;
2679
2680         GIANT_REQUIRED;
2681         if (oldvmspace->vm_refcnt == 1)
2682                 return;
2683         newvmspace = vmspace_fork(oldvmspace);
2684         p->p_vmspace = newvmspace;
2685         pmap_pinit2(vmspace_pmap(newvmspace));
2686         vmspace_free(oldvmspace);
2687         if (p == curthread->td_proc)            /* XXXKSE ? */
2688                 pmap_activate(curthread);
2689 }
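     /*
      * Minimal usage sketch (rfork(2)-style caller assumed): force a
      * private copy-on-write address space when neither RFMEM nor
      * RFPROC is requested.
      *
      *	if ((flags & (RFMEM | RFPROC)) == 0)
      *		vmspace_unshare(p);
      */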
2690
2691 /*
2692  *      vm_map_lookup:
2693  *
2694  *      Finds the VM object, offset, and
2695  *      protection for a given virtual address in the
2696  *      specified map, assuming a page fault of the
2697  *      type specified.
2698  *
2699  *      Leaves the map in question locked for read; return
2700  *      values are guaranteed until a vm_map_lookup_done
2701  *      call is performed.  Note that the map argument
2702  *      is in/out; the returned map must be used in
2703  *      the call to vm_map_lookup_done.
2704  *
2705  *      A handle (out_entry) is returned for use in
2706  *      vm_map_lookup_done, to make that fast.
2707  *
2708  *      If a lookup is requested with "write protection"
2709  *      specified, the map may be changed to perform virtual
2710  *      copying operations, although the data referenced will
2711  *      remain the same.
2712  */
2713 int
2714 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
2715               vm_offset_t vaddr,
2716               vm_prot_t fault_typea,
2717               vm_map_entry_t *out_entry,        /* OUT */
2718               vm_object_t *object,              /* OUT */
2719               vm_pindex_t *pindex,              /* OUT */
2720               vm_prot_t *out_prot,              /* OUT */
2721               boolean_t *wired)                 /* OUT */
2722 {
2723         vm_map_entry_t entry;
2724         vm_map_t map = *var_map;
2725         vm_prot_t prot;
2726         vm_prot_t fault_type = fault_typea;
2727
2728 RetryLookup:;
2729         /*
2730          * Lookup the faulting address.
2731          */
2732
2733         vm_map_lock_read(map);
2734 #define RETURN(why) \
2735                 { \
2736                 vm_map_unlock_read(map); \
2737                 return (why); \
2738                 }
2739
2740         /*
2741          * If the map has an interesting hint, try it before calling the
2742          * full-blown lookup routine.
2743          */
2744         entry = map->root;
2745         *out_entry = entry;
2746         if (entry == NULL ||
2747             (vaddr < entry->start) || (vaddr >= entry->end)) {
2748                 /*
2749                  * Entry was either not a valid hint, or the vaddr was not
2750                  * contained in the entry, so do a full lookup.
2751                  */
2752                 if (!vm_map_lookup_entry(map, vaddr, out_entry))
2753                         RETURN(KERN_INVALID_ADDRESS);
2754
2755                 entry = *out_entry;
2756         }
2757         
2758         /*
2759          * Handle submaps.
2760          */
2761         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2762                 vm_map_t old_map = map;
2763
2764                 *var_map = map = entry->object.sub_map;
2765                 vm_map_unlock_read(old_map);
2766                 goto RetryLookup;
2767         }
2768
2769         /*
2770          * Check whether this task is allowed to have this page.
2771          * Note the special case for MAP_ENTRY_COW
2772          * pages with an override.  This is to implement a forced
2773          * COW for debuggers.
2774          */
2775         if (fault_type & VM_PROT_OVERRIDE_WRITE)
2776                 prot = entry->max_protection;
2777         else
2778                 prot = entry->protection;
2779         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2780         if ((fault_type & prot) != fault_type) {
2781                 RETURN(KERN_PROTECTION_FAILURE);
2782         }
2783         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
2784             (entry->eflags & MAP_ENTRY_COW) &&
2785             (fault_type & VM_PROT_WRITE) &&
2786             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
2787                 RETURN(KERN_PROTECTION_FAILURE);
2788         }
2789
2790         /*
2791          * If this page is not pageable, we have to get it for all possible
2792          * accesses.
2793          */
2794         *wired = (entry->wired_count != 0);
2795         if (*wired)
2796                 prot = fault_type = entry->protection;
2797
2798         /*
2799          * If the entry was copy-on-write, we either copy now or deny write access.
2800          */
2801         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2802                 /*
2803                  * If we want to write the page, we may as well handle that
2804                  * now since we've got the map locked.
2805                  *
2806                  * If we don't need to write the page, we just demote the
2807                  * permissions allowed.
2808                  */
2809                 if (fault_type & VM_PROT_WRITE) {
2810                         /*
2811                          * Make a new object, and place it in the object
2812                          * chain.  Note that no new references have appeared
2813                          * -- one just moved from the map to the new
2814                          * object.
2815                          */
2816                         if (vm_map_lock_upgrade(map))
2817                                 goto RetryLookup;
2818
2819                         vm_object_shadow(
2820                             &entry->object.vm_object,
2821                             &entry->offset,
2822                             atop(entry->end - entry->start));
2823                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2824
2825                         vm_map_lock_downgrade(map);
2826                 } else {
2827                         /*
2828                          * We're attempting to read a copy-on-write page --
2829                          * don't allow writes.
2830                          */
2831                         prot &= ~VM_PROT_WRITE;
2832                 }
2833         }
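        /*
         * Sketch of the resulting layout after vm_object_shadow() (the
         * arrow is the backing_object link; the diagram is illustrative):
         *
         *      entry->object.vm_object        new, initially empty shadow
         *                |
         *                v
         *      original vm_object             supplies unmodified pages
         *
         * Pages are copied into the shadow object lazily, on the first
         * write fault against each page.
         */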
2834
2835         /*
2836          * Create an object if necessary.
2837          */
2838         if (entry->object.vm_object == NULL &&
2839             !map->system_map) {
2840                 if (vm_map_lock_upgrade(map)) 
2841                         goto RetryLookup;
2842                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2843                     atop(entry->end - entry->start));
2844                 entry->offset = 0;
2845                 vm_map_lock_downgrade(map);
2846         }
2847
2848         /*
2849          * Return the object/offset from this entry.  If the entry was
2850          * copy-on-write or empty, it has been fixed up.
2851          */
2852         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
2853         *object = entry->object.vm_object;
2854
2855         /*
2856          * Return the protection actually granted to the caller.
2857          */
2858         *out_prot = prot;
2859         return (KERN_SUCCESS);
2860
2861 #undef  RETURN
2862 }
2863
2864 /*
2865  *      vm_map_lookup_done:
2866  *
2867  *      Releases locks acquired by a vm_map_lookup
2868  *      (according to the handle returned by that lookup).
2869  */
2870 void
2871 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
2872 {
2873         /*
2874          * Unlock the main-level map
2875          */
2876         vm_map_unlock_read(map);
2877 }
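/*
 * Typical caller pattern, shown as an illustrative sketch only
 * (vm_fault() is the canonical consumer; "map" and "va" stand in for
 * the caller's map pointer and faulting address):
 *
 *      vm_map_entry_t entry;
 *      vm_object_t object;
 *      vm_pindex_t pindex;
 *      vm_prot_t prot;
 *      boolean_t wired;
 *
 *      if (vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
 *          &pindex, &prot, &wired) != KERN_SUCCESS)
 *              return (KERN_INVALID_ADDRESS);
 *      ... operate on object/pindex under the read lock ...
 *      vm_map_lookup_done(map, entry);
 *
 * Note that vm_map_lookup() may replace "map" with a submap, so the
 * possibly-updated pointer is what must be passed to vm_map_lookup_done().
 */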
2878
2879 #ifdef ENABLE_VFS_IOOPT
2880 /*
2881  * Experimental support for zero-copy I/O
2882  *
2883  * Implement uiomove with VM operations.  This (together with collateral
2884  * changes) supports every combination of source object modification and
2885  * COW-type operation.
2886  */
2887 int
2888 vm_uiomove(
2889         vm_map_t mapa,
2890         vm_object_t srcobject,
2891         off_t cp,
2892         int cnta,
2893         vm_offset_t uaddra,
2894         int *npages)
2895 {
2896         vm_map_t map;
2897         vm_object_t first_object, oldobject, object;
2898         vm_map_entry_t entry;
2899         vm_prot_t prot;
2900         boolean_t wired;
2901         int tcnt, rv;
2902         vm_offset_t uaddr, start, end, tend;
2903         vm_pindex_t first_pindex, oindex;
2904         vm_size_t osize;
2905         off_t ooffset;
2906         int cnt;
2907
2908         GIANT_REQUIRED;
2909
2910         if (npages)
2911                 *npages = 0;
2912
2913         cnt = cnta;
2914         uaddr = uaddra;
2915
2916         while (cnt > 0) {
2917                 map = mapa;
2918
2919                 if ((vm_map_lookup(&map, uaddr,
2920                         VM_PROT_READ, &entry, &first_object,
2921                         &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
2922                         return (EFAULT);
2923                 }
2924
2925                 vm_map_clip_start(map, entry, uaddr);
2926
2927                 tcnt = cnt;
2928                 tend = uaddr + tcnt;
2929                 if (tend > entry->end) {
2930                         tcnt = entry->end - uaddr;
2931                         tend = entry->end;
2932                 }
2933
2934                 vm_map_clip_end(map, entry, tend);
2935
2936                 start = entry->start;
2937                 end = entry->end;
2938
2939                 osize = atop(tcnt);
2940
2941                 oindex = OFF_TO_IDX(cp);
2942                 if (npages) {
2943                         vm_size_t idx;
2944                         for (idx = 0; idx < osize; idx++) {
2945                                 vm_page_t m;
2946                                 if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
2947                                         vm_map_lookup_done(map, entry);
2948                                         return (0);
2949                                 }
2950                                 /*
2951                                  * Disallow PG_BUSY or invalid pages; pages with
2952                                  * a nonzero m->busy count are OK if entirely valid.
2953                                  */
2954                                 if ((m->flags & PG_BUSY) ||
2955                                         ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
2956                                         vm_map_lookup_done(map, entry);
2957                                         return (0);
2958                                 }
2959                         }
2960                 }
2961
2962                 /*
2963                  * If we are changing an existing map entry, just redirect
2964                  * the object and change the mappings.
2965                  */
2966                 if ((first_object->type == OBJT_VNODE) &&
2967                         ((oldobject = entry->object.vm_object) == first_object)) {
2968
2969                         if ((entry->offset != cp) || (oldobject != srcobject)) {
2970                                 /*
2971                                  * Remove the old window into the file.
2972                                  */
2973                                 vm_page_lock_queues();
2974                                 pmap_remove(map->pmap, uaddr, tend);
2975                                 vm_page_unlock_queues();
2976
2977                                 /*
2978                                  * Force copy-on-write for mmapped regions.
2979                                  */
2980                                 vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
2981
2982                                 /*
2983                                  * Point the entry at the proper object.
2984                                  */
2985                                 if (oldobject != srcobject) {
2986
2987                                         /*
2988                                          * Set the object optimization hint flag.
2989                                          */
2990                                         vm_object_set_flag(srcobject, OBJ_OPT);
2991                                         vm_object_reference(srcobject);
2992                                         entry->object.vm_object = srcobject;
2993
2994                                         if (oldobject) {
2995                                                 vm_object_deallocate(oldobject);
2996                                         }
2997                                 }
2998
2999                                 entry->offset = cp;
3000                                 map->timestamp++;
3001                         } else {
3002                                 vm_page_lock_queues();
3003                                 pmap_remove(map->pmap, uaddr, tend);
3004                                 vm_page_unlock_queues();
3005                         }
3006
3007                 } else if ((first_object->ref_count == 1) &&
3008                         (first_object->size == osize) &&
3009                         ((first_object->type == OBJT_DEFAULT) ||
3010                                 (first_object->type == OBJT_SWAP)) ) {
3011
3012                         oldobject = first_object->backing_object;
3013
3014                         if ((first_object->backing_object_offset != cp) ||
3015                                 (oldobject != srcobject)) {
3016                                 /*
3017                                  * Remove the old window into the file.
3018                                  */
3019                                 vm_page_lock_queues();
3020                                 pmap_remove(map->pmap, uaddr, tend);
3021                                 vm_page_unlock_queues();
3022
3023                                 /*
3024                                  * Remove unneeded old pages
3025                                  */
3026                                 vm_object_lock(first_object);
3027                                 vm_object_page_remove(first_object, 0, 0, 0);
3028                                 vm_object_unlock(first_object);
3029
3030                                 /*
3031                                  * Invalidate swap space
3032                                  */
3033                                 if (first_object->type == OBJT_SWAP) {
3034                                         swap_pager_freespace(first_object,
3035                                                 0,
3036                                                 first_object->size);
3037                                 }
3038
3039                                 /*
3040                                  * Force copy on write for mmaped regions
3041                                  */
3042                                 vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
3043
3044                                 /*
3045                                  * Point the object appropriately
3046                                  */
3047                                 if (oldobject != srcobject) {
3048                                         /*
3049                                          * Set the object optimization hint flag
3050                                          */
3051                                         vm_object_set_flag(srcobject, OBJ_OPT);
3052                                         vm_object_reference(srcobject);
3053
3054                                         if (oldobject) {
3055                                                 TAILQ_REMOVE(&oldobject->shadow_head,
3056                                                         first_object, shadow_list);
3057                                                 oldobject->shadow_count--;
3058                                                 /* XXX bump generation? */
3059                                                 vm_object_deallocate(oldobject);
3060                                         }
3061
3062                                         TAILQ_INSERT_TAIL(&srcobject->shadow_head,
3063                                                 first_object, shadow_list);
3064                                         srcobject->shadow_count++;
3065                                         /* XXX bump generation? */
3066
3067                                         first_object->backing_object = srcobject;
3068                                 }
3069                                 first_object->backing_object_offset = cp;
3070                                 map->timestamp++;
3071                         } else {
3072                                 vm_page_lock_queues();
3073                                 pmap_remove(map->pmap, uaddr, tend);
3074                                 vm_page_unlock_queues();
3075                         }
3076                 /*
3077                  * Otherwise, we have to do a logical mmap.
3078                  */
3079                 } else {
3080
3081                         vm_object_set_flag(srcobject, OBJ_OPT);
3082                         vm_object_reference(srcobject);
3083
3084                         vm_page_lock_queues();
3085                         pmap_remove(map->pmap, uaddr, tend);
3086                         vm_page_unlock_queues();
3087
3088                         vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
3089                         vm_map_lock_upgrade(map);
3090
3091                         if (entry == &map->header) {
3092                                 map->first_free = &map->header;
3093                         } else if (map->first_free->start >= start) {
3094                                 map->first_free = entry->prev;
3095                         }
3096
3097                         vm_map_entry_delete(map, entry);
3098
3099                         object = srcobject;
3100                         ooffset = cp;
3101
3102                         rv = vm_map_insert(map, object, ooffset, start, tend,
3103                                 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
3104
3105                         if (rv != KERN_SUCCESS)
3106                                 panic("vm_uiomove: could not insert new entry: %d", rv);
3107                 }
3108
3109                 /*
3110                  * Map the window directly if it is already in memory.
3111                  */
3112                 pmap_object_init_pt(map->pmap, uaddr,
3113                         srcobject, oindex, tcnt, 0);
3114
3115                 map->timestamp++;
3116                 vm_map_unlock(map);
3117
3118                 cnt -= tcnt;
3119                 uaddr += tcnt;
3120                 cp += tcnt;
3121                 if (npages)
3122                         *npages += osize;
3123         }
3124         return (0);
3125 }
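/*
 * Illustrative sketch only: a hypothetical zero-copy read path built on
 * ENABLE_VFS_IOOPT might hand a vnode's VM object straight to the user
 * map instead of copying the data (the caller must hold Giant, per the
 * GIANT_REQUIRED assertion above):
 *
 *      int npages;
 *      error = vm_uiomove(&curproc->p_vmspace->vm_map, object, offset,
 *          cnt, (vm_offset_t)uio->uio_iov->iov_base, &npages);
 *
 * On success the user pages are shared copy-on-write with the source
 * object, so a later write by the user triggers a private copy rather
 * than modifying the file cache.
 */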
3126 #endif
3127
3128 #include "opt_ddb.h"
3129 #ifdef DDB
3130 #include <sys/kernel.h>
3131
3132 #include <ddb/ddb.h>
3133
3134 /*
3135  *      vm_map_print:   [ debug ]
3136  */
3137 DB_SHOW_COMMAND(map, vm_map_print)
3138 {
3139         static int nlines;
3140         /* XXX convert args. */
3141         vm_map_t map = (vm_map_t)addr;
3142         boolean_t full = have_addr;
3143
3144         vm_map_entry_t entry;
3145
3146         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
3147             (void *)map,
3148             (void *)map->pmap, map->nentries, map->timestamp);
3149         nlines++;
3150
3151         if (!full && db_indent)
3152                 return;
3153
3154         db_indent += 2;
3155         for (entry = map->header.next; entry != &map->header;
3156             entry = entry->next) {
3157                 db_iprintf("map entry %p: start=%p, end=%p\n",
3158                     (void *)entry, (void *)entry->start, (void *)entry->end);
3159                 nlines++;
3160                 {
3161                         static const char *inheritance_name[4] =
3162                             {"share", "copy", "none", "donate_copy"};
3163
3164                         db_iprintf(" prot=%x/%x/%s",
3165                             entry->protection,
3166                             entry->max_protection,
3167                             inheritance_name[(int)(unsigned char)entry->inheritance]);
3168                         if (entry->wired_count != 0)
3169                                 db_printf(", wired");
3170                 }
3171                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3172                         db_printf(", share=%p, offset=0x%jx\n",
3173                             (void *)entry->object.sub_map,
3174                             (uintmax_t)entry->offset);
3175                         nlines++;
3176                         if ((entry->prev == &map->header) ||
3177                             (entry->prev->object.sub_map !=
3178                                 entry->object.sub_map)) {
3179                                 db_indent += 2;
3180                                 vm_map_print((db_expr_t)(intptr_t)
3181                                              entry->object.sub_map,
3182                                              full, 0, (char *)0);
3183                                 db_indent -= 2;
3184                         }
3185                 } else {
3186                         db_printf(", object=%p, offset=0x%jx",
3187                             (void *)entry->object.vm_object,
3188                             (uintmax_t)entry->offset);
3189                         if (entry->eflags & MAP_ENTRY_COW)
3190                                 db_printf(", copy (%s)",
3191                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3192                         db_printf("\n");
3193                         nlines++;
3194
3195                         if ((entry->prev == &map->header) ||
3196                             (entry->prev->object.vm_object !=
3197                                 entry->object.vm_object)) {
3198                                 db_indent += 2;
3199                                 vm_object_print((db_expr_t)(intptr_t)
3200                                                 entry->object.vm_object,
3201                                                 full, 0, (char *)0);
3202                                 nlines += 4;
3203                                 db_indent -= 2;
3204                         }
3205                 }
3206         }
3207         db_indent -= 2;
3208         if (db_indent == 0)
3209                 nlines = 0;
3210 }
3211
3212
3213 DB_SHOW_COMMAND(procvm, procvm)
3214 {
3215         struct proc *p;
3216
3217         if (have_addr) {
3218                 p = (struct proc *) addr;
3219         } else {
3220                 p = curproc;
3221         }
3222
3223         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
3224             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
3225             (void *)vmspace_pmap(p->p_vmspace));
3226
3227         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
3228 }
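
/*
 * From the DDB prompt these are invoked as, for example (the address
 * below is a placeholder):
 *
 *      db> show map 0xc1234560
 *      db> show procvm
 *
 * Supplying an address to "show map" sets the "full" flag, which allows
 * the nested submap and object dumps to print; "show procvm" falls back
 * to curproc when no address is given.
 */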
3229
3230 #endif /* DDB */