/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_object.c   8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>           /* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#define EASY_SCAN_FACTOR       8

#define MSYNC_FLUSH_HARDSEQ     0x01
#define MSYNC_FLUSH_SOFTSEQ     0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags, CTLFLAG_RW, &msync_flush_flags, 0,
    "Enable sequential iteration optimization");

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static void     vm_object_qcollapse(vm_object_t object);
static int      vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);
static void     vm_object_vndeallocate(vm_object_t object);

/*
 *      Virtual memory objects maintain the actual data
 *      associated with allocated virtual memory.  A given
 *      page of memory exists within exactly one object.
 *
 *      An object is only deallocated when all "references"
 *      are given up.  Only one "reference" to a given
 *      region of an object should be writeable.
 *
 *      Associated with each object is a list of all resident
 *      memory pages belonging to that object; this list is
 *      maintained by the "vm_page" module, and locked by the object's
 *      lock.
 *
 *      Each object also records a "pager" routine which is
 *      used to retrieve (and store) pages to the proper backing
 *      storage.  In addition, objects may be backed by other
 *      objects from which they were virtual-copied.
 *
 *      The only items within the object structure which are
 *      modified after time of creation are:
 *              reference count         locked by object's lock
 *              pager routine           locked by object's lock
 *
 */

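/*
 * Illustrative sketch only (not part of the original file): the basic
 * object lifecycle using the public entry points defined below.  The
 * function name and the 16-page size are hypothetical.  The block is
 * disabled with #if 0, matching how this file parks non-live code.
 */
#if 0
static void
example_object_lifecycle(void)
{
        vm_object_t obj;

        /* Allocate an anonymous object covering 16 pages (ref_count 1). */
        obj = vm_object_allocate(OBJT_DEFAULT, 16);

        /* Each additional consumer takes its own reference. */
        vm_object_reference(obj);

        /* Drop the extra reference; the object survives. */
        vm_object_deallocate(obj);

        /* Drop the allocation reference; this may terminate the object. */
        vm_object_deallocate(obj);
}
#endif
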
struct object_q vm_object_list;
struct mtx vm_object_list_mtx;  /* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
        vm_object_t object;

        object = (vm_object_t)mem;
        KASSERT(TAILQ_EMPTY(&object->memq),
            ("object %p has resident pages",
            object));
#if VM_NRESERVLEVEL > 0
        KASSERT(LIST_EMPTY(&object->rvq),
            ("object %p has reservations",
            object));
#endif
        KASSERT(object->cache == NULL,
            ("object %p has cached pages",
            object));
        KASSERT(object->paging_in_progress == 0,
            ("object %p paging_in_progress = %d",
            object, object->paging_in_progress));
        KASSERT(object->resident_page_count == 0,
            ("object %p resident_page_count = %d",
            object, object->resident_page_count));
        KASSERT(object->shadow_count == 0,
            ("object %p shadow_count = %d",
            object, object->shadow_count));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
        vm_object_t object;

        object = (vm_object_t)mem;
        bzero(&object->mtx, sizeof(object->mtx));
        VM_OBJECT_LOCK_INIT(object, "standard object");

        /* These are true for any object that has been freed */
        object->paging_in_progress = 0;
        object->resident_page_count = 0;
        object->shadow_count = 0;
        return (0);
}

void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

        TAILQ_INIT(&object->memq);
        LIST_INIT(&object->shadow_head);

        object->root = NULL;
        object->type = type;
        object->size = size;
        object->generation = 1;
        object->ref_count = 1;
        object->memattr = VM_MEMATTR_DEFAULT;
        object->flags = 0;
        object->uip = NULL;
        object->charge = 0;
        if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
                object->flags = OBJ_ONEMAPPING;
        object->pg_color = 0;
        object->handle = NULL;
        object->backing_object = NULL;
        object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
        LIST_INIT(&object->rvq);
#endif
        object->cache = NULL;

        mtx_lock(&vm_object_list_mtx);
        TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
        mtx_unlock(&vm_object_list_mtx);
}

/*
 *      vm_object_init:
 *
 *      Initialize the VM objects module.
 */
void
vm_object_init(void)
{
        TAILQ_INIT(&vm_object_list);
        mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

        VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
        _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
            kernel_object);
#if VM_NRESERVLEVEL > 0
        kernel_object->flags |= OBJ_COLORED;
        kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

        VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
        _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
            kmem_object);
#if VM_NRESERVLEVEL > 0
        kmem_object->flags |= OBJ_COLORED;
        kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

        /*
         * The lock portion of struct vm_object must be type stable due
         * to vm_pageout_fallback_object_lock locking a vm object
         * without holding any references to it.
         */
        obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
            vm_object_zdtor,
#else
            NULL,
#endif
            vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        object->flags &= ~bits;
}

/*
 *      Sets the default memory attribute for the specified object.  Pages
 *      that are allocated to this object are by default assigned this memory
 *      attribute.
 *
 *      Presently, this function must be called before any pages are allocated
 *      to the object.  In the future, this requirement may be relaxed for
 *      "default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        switch (object->type) {
        case OBJT_DEFAULT:
        case OBJT_DEVICE:
        case OBJT_PHYS:
        case OBJT_SG:
        case OBJT_SWAP:
        case OBJT_VNODE:
                if (!TAILQ_EMPTY(&object->memq))
                        return (KERN_FAILURE);
                break;
        case OBJT_DEAD:
                return (KERN_INVALID_ARGUMENT);
        }
        object->memattr = memattr;
        return (KERN_SUCCESS);
}

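/*
 * Illustrative sketch only: the call protocol for vm_object_set_memattr()
 * above.  The caller must hold the object lock and must make the call
 * before any pages are resident.  The helper name is hypothetical, and
 * VM_MEMATTR_UNCACHEABLE is a machine-dependent attribute that may not
 * exist on every platform.
 */
#if 0
static int
example_set_uncacheable(vm_object_t obj)
{
        int rv;

        VM_OBJECT_LOCK(obj);
        rv = vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
        VM_OBJECT_UNLOCK(obj);
        return (rv);            /* KERN_SUCCESS on success */
}
#endif
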
void
vm_object_pip_add(vm_object_t object, short i)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        object->paging_in_progress--;
        if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
                vm_object_clear_flag(object, OBJ_PIPWNT);
                wakeup(object);
        }
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        if (i)
                object->paging_in_progress -= i;
        if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
                vm_object_clear_flag(object, OBJ_PIPWNT);
                wakeup(object);
        }
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        while (object->paging_in_progress) {
                object->flags |= OBJ_PIPWNT;
                msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
        }
}

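/*
 * Illustrative sketch only: the paging-in-progress counter maintained
 * by the functions above acts as a bracket around pager I/O.  A
 * producer raises it before dropping the object lock for I/O;
 * vm_object_pip_wait() lets vm_object_terminate() drain it before the
 * object is destroyed.  The helper name is hypothetical.
 */
#if 0
static void
example_pip_bracket(vm_object_t obj)
{
        VM_OBJECT_LOCK(obj);
        vm_object_pip_add(obj, 1);
        VM_OBJECT_UNLOCK(obj);

        /* ... perform pager I/O against obj with its lock dropped ... */

        VM_OBJECT_LOCK(obj);
        /* Decrements the counter and wakes any OBJ_PIPWNT waiter. */
        vm_object_pip_wakeup(obj);
        VM_OBJECT_UNLOCK(obj);
}
#endif
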
/*
 *      vm_object_allocate:
 *
 *      Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
        vm_object_t object;

        object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
        _vm_object_allocate(type, size, object);
        return (object);
}


/*
 *      vm_object_reference:
 *
 *      Gets another reference to the given object.  Note: OBJ_DEAD
 *      objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
        if (object == NULL)
                return;
        VM_OBJECT_LOCK(object);
        vm_object_reference_locked(object);
        VM_OBJECT_UNLOCK(object);
}

/*
 *      vm_object_reference_locked:
 *
 *      Gets another reference to the given object.
 *
 *      The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
        struct vnode *vp;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        object->ref_count++;
        if (object->type == OBJT_VNODE) {
                vp = object->handle;
                vref(vp);
        }
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
        struct vnode *vp = (struct vnode *) object->handle;

        VFS_ASSERT_GIANT(vp->v_mount);
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT(object->type == OBJT_VNODE,
            ("vm_object_vndeallocate: not a vnode object"));
        KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
        if (object->ref_count == 0) {
                vprint("vm_object_vndeallocate", vp);
                panic("vm_object_vndeallocate: bad object reference count");
        }
#endif

        object->ref_count--;
        if (object->ref_count == 0) {
                mp_fixme("Unlocked vflag access.");
                vp->v_vflag &= ~VV_TEXT;
        }
        VM_OBJECT_UNLOCK(object);
        /*
         * vrele may need a vop lock
         */
        vrele(vp);
}

/*
 *      vm_object_deallocate:
 *
 *      Release a reference to the specified object,
 *      gained either through a vm_object_allocate
 *      or a vm_object_reference call.  When all references
 *      are gone, storage associated with this object
 *      may be relinquished.
 *
 *      No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
        vm_object_t temp;

        while (object != NULL) {
                int vfslocked;

                vfslocked = 0;
        restart:
                VM_OBJECT_LOCK(object);
                if (object->type == OBJT_VNODE) {
                        struct vnode *vp = (struct vnode *) object->handle;

                        /*
                         * Conditionally acquire Giant for a vnode-backed
                         * object.  We have to be careful since the type of
                         * a vnode object can change while the object is
                         * unlocked.
                         */
                        if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
                                vfslocked = 1;
                                if (!mtx_trylock(&Giant)) {
                                        VM_OBJECT_UNLOCK(object);
                                        mtx_lock(&Giant);
                                        goto restart;
                                }
                        }
                        vm_object_vndeallocate(object);
                        VFS_UNLOCK_GIANT(vfslocked);
                        return;
                } else
                        /*
                         * This is to handle the case that the object
                         * changed type while we dropped its lock to
                         * obtain Giant.
                         */
                        VFS_UNLOCK_GIANT(vfslocked);

                KASSERT(object->ref_count != 0,
                        ("vm_object_deallocate: object deallocated too many times: %d", object->type));

                /*
                 * If the reference count goes to 0 we start calling
                 * vm_object_terminate() on the object chain.
                 * A ref count of 1 may be a special case depending on the
                 * shadow count being 0 or 1.
                 */
                object->ref_count--;
                if (object->ref_count > 1) {
                        VM_OBJECT_UNLOCK(object);
                        return;
                } else if (object->ref_count == 1) {
                        if (object->shadow_count == 0 &&
                            object->handle == NULL &&
                            (object->type == OBJT_DEFAULT ||
                             object->type == OBJT_SWAP)) {
                                vm_object_set_flag(object, OBJ_ONEMAPPING);
                        } else if ((object->shadow_count == 1) &&
                            (object->handle == NULL) &&
                            (object->type == OBJT_DEFAULT ||
                             object->type == OBJT_SWAP)) {
                                vm_object_t robject;

                                robject = LIST_FIRST(&object->shadow_head);
                                KASSERT(robject != NULL,
                                    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
                                         object->ref_count,
                                         object->shadow_count));
                                if (!VM_OBJECT_TRYLOCK(robject)) {
                                        /*
                                         * Avoid a potential deadlock.
                                         */
                                        object->ref_count++;
                                        VM_OBJECT_UNLOCK(object);
                                        /*
                                         * More likely than not the thread
                                         * holding robject's lock has lower
                                         * priority than the current thread.
                                         * Let the lower priority thread run.
                                         */
                                        pause("vmo_de", 1);
                                        continue;
                                }
                                /*
                                 * Collapse object into its shadow unless its
                                 * shadow is dead.  In that case, object will
                                 * be deallocated by the thread that is
                                 * deallocating its shadow.
                                 */
                                if ((robject->flags & OBJ_DEAD) == 0 &&
                                    (robject->handle == NULL) &&
                                    (robject->type == OBJT_DEFAULT ||
                                     robject->type == OBJT_SWAP)) {

                                        robject->ref_count++;
retry:
                                        if (robject->paging_in_progress) {
                                                VM_OBJECT_UNLOCK(object);
                                                vm_object_pip_wait(robject,
                                                    "objde1");
                                                temp = robject->backing_object;
                                                if (object == temp) {
                                                        VM_OBJECT_LOCK(object);
                                                        goto retry;
                                                }
                                        } else if (object->paging_in_progress) {
                                                VM_OBJECT_UNLOCK(robject);
                                                object->flags |= OBJ_PIPWNT;
                                                msleep(object,
                                                    VM_OBJECT_MTX(object),
                                                    PDROP | PVM, "objde2", 0);
                                                VM_OBJECT_LOCK(robject);
                                                temp = robject->backing_object;
                                                if (object == temp) {
                                                        VM_OBJECT_LOCK(object);
                                                        goto retry;
                                                }
                                        } else
                                                VM_OBJECT_UNLOCK(object);

                                        if (robject->ref_count == 1) {
                                                robject->ref_count--;
                                                object = robject;
                                                goto doterm;
                                        }
                                        object = robject;
                                        vm_object_collapse(object);
                                        VM_OBJECT_UNLOCK(object);
                                        continue;
                                }
                                VM_OBJECT_UNLOCK(robject);
                        }
                        VM_OBJECT_UNLOCK(object);
                        return;
                }
doterm:
                temp = object->backing_object;
                if (temp != NULL) {
                        VM_OBJECT_LOCK(temp);
                        LIST_REMOVE(object, shadow_list);
                        temp->shadow_count--;
                        temp->generation++;
                        VM_OBJECT_UNLOCK(temp);
                        object->backing_object = NULL;
                }
                /*
                 * Don't double-terminate, we could be in a termination
                 * recursion due to the terminate having to sync data
                 * to disk.
                 */
                if ((object->flags & OBJ_DEAD) == 0)
                        vm_object_terminate(object);
                else
                        VM_OBJECT_UNLOCK(object);
                object = temp;
        }
}

/*
 *      vm_object_destroy removes the object from the global object list
 *      and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

        /*
         * Remove the object from the global object list.
         */
        mtx_lock(&vm_object_list_mtx);
        TAILQ_REMOVE(&vm_object_list, object, object_list);
        mtx_unlock(&vm_object_list_mtx);

        /*
         * Release the allocation charge.
         */
        if (object->uip != NULL) {
                KASSERT(object->type == OBJT_DEFAULT ||
                    object->type == OBJT_SWAP,
                    ("vm_object_terminate: non-swap obj %p has uip",
                     object));
                swap_release_by_uid(object->charge, object->uip);
                object->charge = 0;
                uifree(object->uip);
                object->uip = NULL;
        }

        /*
         * Free the space for the object.
         */
        uma_zfree(obj_zone, object);
}

/*
 *      vm_object_terminate actually destroys the specified object, freeing
 *      up all previously used resources.
 *
 *      The object must be locked.
 *      This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
        vm_page_t p;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

        /*
         * Make sure no one uses us.
         */
        vm_object_set_flag(object, OBJ_DEAD);

        /*
         * wait for the pageout daemon to be done with the object
         */
        vm_object_pip_wait(object, "objtrm");

        KASSERT(!object->paging_in_progress,
                ("vm_object_terminate: pageout in progress"));

        /*
         * Clean and free the pages, as appropriate. All references to the
         * object are gone, so we don't need to lock it.
         */
        if (object->type == OBJT_VNODE) {
                struct vnode *vp = (struct vnode *)object->handle;

                /*
                 * Clean pages and flush buffers.
                 */
                vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
                VM_OBJECT_UNLOCK(object);

                vinvalbuf(vp, V_SAVE, 0, 0);

                VM_OBJECT_LOCK(object);
        }

        KASSERT(object->ref_count == 0,
                ("vm_object_terminate: object with references, ref_count=%d",
                object->ref_count));

        /*
         * Now free any remaining pages. For internal objects, this also
         * removes them from paging queues. Don't free wired pages, just
         * remove them from the object.
         */
        vm_page_lock_queues();
        while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
                KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
                        ("vm_object_terminate: freeing busy page %p "
                        "p->busy = %d, p->oflags %x\n", p, p->busy, p->oflags));
                if (p->wire_count == 0) {
                        vm_page_free(p);
                        cnt.v_pfree++;
                } else {
                        vm_page_remove(p);
                }
        }
        vm_page_unlock_queues();

#if VM_NRESERVLEVEL > 0
        if (__predict_false(!LIST_EMPTY(&object->rvq)))
                vm_reserv_break_all(object);
#endif
        if (__predict_false(object->cache != NULL))
                vm_page_cache_free(object, 0, 0);

        /*
         * Let the pager know object is dead.
         */
        vm_pager_deallocate(object);
        VM_OBJECT_UNLOCK(object);

        vm_object_destroy(object);
}

/*
 *      vm_object_page_clean
 *
 *      Clean all dirty pages in the specified range of object.  Leaves page
 *      on whatever queue it is currently on.   If NOSYNC is set then do not
 *      write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *      leaving the object dirty.
 *
 *      When stuffing pages asynchronously, allow clustering.  XXX we need a
 *      synchronous clustering mode implementation.
 *
 *      Odd semantics: if start == end, we clean everything.
 *
 *      The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
        vm_page_t p, np;
        vm_pindex_t tstart, tend;
        vm_pindex_t pi;
        int clearobjflags;
        int pagerflags;
        int curgeneration;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        if (object->type != OBJT_VNODE ||
                (object->flags & OBJ_MIGHTBEDIRTY) == 0)
                return;

        pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
        pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

        vm_object_set_flag(object, OBJ_CLEANING);

        tstart = start;
        if (end == 0) {
                tend = object->size;
        } else {
                tend = end;
        }

        vm_page_lock_queues();
        /*
         * If the caller is smart and only msync()s a range he knows is
         * dirty, we may be able to avoid an object scan.  This results in
         * a phenomenal improvement in performance.  We cannot do this
         * as a matter of course because the object may be huge - e.g.
         * the size might be in the gigabytes or terabytes.
         */
        if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
                vm_pindex_t tscan;
                int scanlimit;
                int scanreset;

                scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
                if (scanreset < 16)
                        scanreset = 16;
                pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

                scanlimit = scanreset;
                tscan = tstart;
                while (tscan < tend) {
                        curgeneration = object->generation;
                        p = vm_page_lookup(object, tscan);
                        if (p == NULL || p->valid == 0) {
                                if (--scanlimit == 0)
                                        break;
                                ++tscan;
                                continue;
                        }
                        vm_page_test_dirty(p);
                        if (p->dirty == 0) {
                                if (--scanlimit == 0)
                                        break;
                                ++tscan;
                                continue;
                        }
                        /*
                         * If we have been asked to skip nosync pages and
                         * this is a nosync page, we can't continue.
                         */
                        if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
                                if (--scanlimit == 0)
                                        break;
                                ++tscan;
                                continue;
                        }
                        scanlimit = scanreset;

                        /*
                         * This returns 0 if it was unable to busy the first
                         * page (i.e. had to sleep).
                         */
                        tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
                }

                /*
                 * If everything was dirty and we flushed it successfully,
                 * and the requested range is not the entire object, we
                 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
                 * return immediately.
                 */
                if (tscan >= tend && (tstart || tend < object->size)) {
                        vm_page_unlock_queues();
                        vm_object_clear_flag(object, OBJ_CLEANING);
                        return;
                }
                pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
        }

        /*
         * Generally set CLEANCHK interlock and make the page read-only so
         * we can then clear the object flags.
         *
         * However, if this is a nosync mmap then the object is likely to
         * stay dirty so do not mess with the page and do not clear the
         * object flags.
         */
        clearobjflags = 1;
        TAILQ_FOREACH(p, &object->memq, listq) {
                p->oflags |= VPO_CLEANCHK;
                if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
                        clearobjflags = 0;
                else
                        pmap_remove_write(p);
        }

        if (clearobjflags && (tstart == 0) && (tend == object->size)) {
                struct vnode *vp;

                vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
                if (object->type == OBJT_VNODE &&
                    (vp = (struct vnode *)object->handle) != NULL) {
                        VI_LOCK(vp);
                        if (vp->v_iflag & VI_OBJDIRTY)
                                vp->v_iflag &= ~VI_OBJDIRTY;
                        VI_UNLOCK(vp);
                }
        }

rescan:
        curgeneration = object->generation;

        for (p = TAILQ_FIRST(&object->memq); p; p = np) {
                int n;

                np = TAILQ_NEXT(p, listq);

again:
                pi = p->pindex;
                if ((p->oflags & VPO_CLEANCHK) == 0 ||
                        (pi < tstart) || (pi >= tend) ||
                    p->valid == 0) {
                        p->oflags &= ~VPO_CLEANCHK;
                        continue;
                }

                vm_page_test_dirty(p);
                if (p->dirty == 0) {
                        p->oflags &= ~VPO_CLEANCHK;
                        continue;
                }

                /*
                 * If we have been asked to skip nosync pages and this is a
                 * nosync page, skip it.  Note that the object flags were
                 * not cleared in this case so we do not have to set them.
                 */
                if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
                        p->oflags &= ~VPO_CLEANCHK;
                        continue;
                }

                n = vm_object_page_collect_flush(object, p,
                        curgeneration, pagerflags);
                if (n == 0)
                        goto rescan;

                if (object->generation != curgeneration)
                        goto rescan;

                /*
                 * Try to optimize the next page.  If we can't we pick up
                 * our (random) scan where we left off.
                 */
                if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
                        if ((p = vm_page_lookup(object, pi + n)) != NULL)
                                goto again;
                }
        }
        vm_page_unlock_queues();
#if 0
        VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif

        vm_object_clear_flag(object, OBJ_CLEANING);
        return;
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
{
        int runlen;
        int maxf;
        int chkb;
        int maxb;
        int i;
        vm_pindex_t pi;
        vm_page_t maf[vm_pageout_page_count];
        vm_page_t mab[vm_pageout_page_count];
        vm_page_t ma[vm_pageout_page_count];

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        pi = p->pindex;
        while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
                vm_page_lock_queues();
                if (object->generation != curgeneration) {
                        return(0);
                }
        }
        maxf = 0;
        for(i = 1; i < vm_pageout_page_count; i++) {
                vm_page_t tp;

                if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
                        if ((tp->oflags & VPO_BUSY) ||
                                ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
                                 (tp->oflags & VPO_CLEANCHK) == 0) ||
                                (tp->busy != 0))
                                break;
                        vm_page_test_dirty(tp);
                        if (tp->dirty == 0) {
                                tp->oflags &= ~VPO_CLEANCHK;
                                break;
                        }
                        maf[ i - 1 ] = tp;
                        maxf++;
                        continue;
                }
                break;
        }

        maxb = 0;
        chkb = vm_pageout_page_count -  maxf;
        if (chkb) {
                for(i = 1; i < chkb;i++) {
                        vm_page_t tp;

                        if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
                                if ((tp->oflags & VPO_BUSY) ||
                                        ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
                                         (tp->oflags & VPO_CLEANCHK) == 0) ||
                                        (tp->busy != 0))
                                        break;
                                vm_page_test_dirty(tp);
                                if (tp->dirty == 0) {
                                        tp->oflags &= ~VPO_CLEANCHK;
                                        break;
                                }
                                mab[ i - 1 ] = tp;
                                maxb++;
                                continue;
                        }
                        break;
                }
        }

        for(i = 0; i < maxb; i++) {
                int index = (maxb - i) - 1;
                ma[index] = mab[i];
                ma[index]->oflags &= ~VPO_CLEANCHK;
        }
        p->oflags &= ~VPO_CLEANCHK;
        ma[maxb] = p;
        for(i = 0; i < maxf; i++) {
                int index = (maxb + i) + 1;
                ma[index] = maf[i];
                ma[index]->oflags &= ~VPO_CLEANCHK;
        }
        runlen = maxb + maxf + 1;

        vm_pageout_flush(ma, runlen, pagerflags);
        for (i = 0; i < runlen; i++) {
                if (ma[i]->dirty) {
                        pmap_remove_write(ma[i]);
                        ma[i]->oflags |= VPO_CLEANCHK;

                        /*
                         * maxf will end up being the actual number of pages
                         * we wrote out contiguously, non-inclusive of the
                         * first page.  We do not count look-behind pages.
                         */
                        if (i >= maxb + 1 && (maxf > i - maxb - 1))
                                maxf = i - maxb - 1;
                }
        }
        return(maxf + 1);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
        vm_object_t backing_object;
        struct vnode *vp;
        struct mount *mp;
        int flags;

        if (object == NULL)
                return;
        VM_OBJECT_LOCK(object);
        while ((backing_object = object->backing_object) != NULL) {
                VM_OBJECT_LOCK(backing_object);
                offset += object->backing_object_offset;
                VM_OBJECT_UNLOCK(object);
                object = backing_object;
                if (object->size < OFF_TO_IDX(offset + size))
                        size = IDX_TO_OFF(object->size) - offset;
        }
        /*
         * Flush pages if writing is allowed, invalidate them
         * if invalidation requested.  Pages undergoing I/O
         * will be ignored by vm_object_page_remove().
         *
         * We cannot lock the vnode and then wait for paging
         * to complete without deadlocking against vm_fault.
         * Instead we simply call vm_object_page_remove() and
         * allow it to block internally on a page-by-page
         * basis when it encounters pages undergoing async
         * I/O.
         */
        if (object->type == OBJT_VNODE &&
            (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
                int vfslocked;
                vp = object->handle;
                VM_OBJECT_UNLOCK(object);
                (void) vn_start_write(vp, &mp, V_WAIT);
                vfslocked = VFS_LOCK_GIANT(vp->v_mount);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
                flags |= invalidate ? OBJPC_INVAL : 0;
                VM_OBJECT_LOCK(object);
                vm_object_page_clean(object,
                    OFF_TO_IDX(offset),
                    OFF_TO_IDX(offset + size + PAGE_MASK),
                    flags);
                VM_OBJECT_UNLOCK(object);
                VOP_UNLOCK(vp, 0);
                VFS_UNLOCK_GIANT(vfslocked);
                vn_finished_write(mp);
                VM_OBJECT_LOCK(object);
        }
        if ((object->type == OBJT_VNODE ||
             object->type == OBJT_DEVICE) && invalidate) {
                boolean_t purge;
                purge = old_msync || (object->type == OBJT_DEVICE);
                vm_object_page_remove(object,
                    OFF_TO_IDX(offset),
                    OFF_TO_IDX(offset + size + PAGE_MASK),
                    purge ? FALSE : TRUE);
        }
        VM_OBJECT_UNLOCK(object);
}

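/*
 * Illustrative sketch only: roughly how the msync(2) path would drive
 * vm_object_sync() above for a single map entry.  The helper name is
 * hypothetical and the real caller (vm_map_sync()) does more
 * bookkeeping than shown here.
 */
#if 0
static void
example_msync_entry(vm_map_entry_t entry, boolean_t syncio,
    boolean_t invalidate)
{
        vm_object_sync(entry->object.vm_object, entry->offset,
            entry->end - entry->start, syncio, invalidate);
}
#endif
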
/*
 *      vm_object_madvise:
 *
 *      Implements the madvise function at the object/page level.
 *
 *      MADV_WILLNEED   (any object)
 *
 *          Activate the specified pages if they are resident.
 *
 *      MADV_DONTNEED   (any object)
 *
 *          Deactivate the specified pages if they are resident.
 *
 *      MADV_FREE       (OBJT_DEFAULT/OBJT_SWAP objects,
 *                       OBJ_ONEMAPPING only)
 *
 *          Deactivate and clean the specified pages if they are
 *          resident.  This permits the process to reuse the pages
 *          without faulting or the kernel to reclaim the pages
 *          without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
        vm_pindex_t end, tpindex;
        vm_object_t backing_object, tobject;
        vm_page_t m;

        if (object == NULL)
                return;
        VM_OBJECT_LOCK(object);
        end = pindex + count;
        /*
         * Locate and adjust resident pages
         */
        for (; pindex < end; pindex += 1) {
relookup:
                tobject = object;
                tpindex = pindex;
shadowlookup:
                /*
                 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
                 * and those pages must be OBJ_ONEMAPPING.
                 */
                if (advise == MADV_FREE) {
                        if ((tobject->type != OBJT_DEFAULT &&
                             tobject->type != OBJT_SWAP) ||
                            (tobject->flags & OBJ_ONEMAPPING) == 0) {
                                goto unlock_tobject;
                        }
                }
                m = vm_page_lookup(tobject, tpindex);
                if (m == NULL && advise == MADV_WILLNEED) {
                        /*
                         * If the page is cached, reactivate it.
                         */
                        m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
                            VM_ALLOC_NOBUSY);
                }
                if (m == NULL) {
                        /*
                         * There may be swap even if there is no backing page
                         */
                        if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
                                swap_pager_freespace(tobject, tpindex, 1);
                        /*
                         * next object
                         */
                        backing_object = tobject->backing_object;
                        if (backing_object == NULL)
                                goto unlock_tobject;
                        VM_OBJECT_LOCK(backing_object);
                        tpindex += OFF_TO_IDX(tobject->backing_object_offset);
                        if (tobject != object)
                                VM_OBJECT_UNLOCK(tobject);
                        tobject = backing_object;
                        goto shadowlookup;
                }
                /*
                 * If the page is busy or not in a normal active state,
                 * we skip it.  If the page is not managed there are no
                 * page queues to mess with.  Things can break if we mess
                 * with pages in any of the below states.
                 */
                vm_page_lock_queues();
                if (m->hold_count ||
                    m->wire_count ||
                    (m->flags & PG_UNMANAGED) ||
                    m->valid != VM_PAGE_BITS_ALL) {
                        vm_page_unlock_queues();
                        goto unlock_tobject;
                }
                if ((m->oflags & VPO_BUSY) || m->busy) {
                        vm_page_flag_set(m, PG_REFERENCED);
                        vm_page_unlock_queues();
                        if (object != tobject)
                                VM_OBJECT_UNLOCK(object);
                        m->oflags |= VPO_WANTED;
                        msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo", 0);
                        VM_OBJECT_LOCK(object);
                        goto relookup;
                }
                if (advise == MADV_WILLNEED) {
                        vm_page_activate(m);
                } else if (advise == MADV_DONTNEED) {
                        vm_page_dontneed(m);
                } else if (advise == MADV_FREE) {
                        /*
                         * Mark the page clean.  This will allow the page
                         * to be freed up by the system.  However, such pages
                         * are often reused quickly by malloc()/free()
                         * so we do not do anything that would cause
                         * a page fault if we can help it.
                         *
                         * Specifically, we do not try to actually free
                         * the page now nor do we try to put it in the
                         * cache (which would cause a page fault on reuse).
                         *
                         * But we do make the page as freeable as we
                         * can without actually taking the step of unmapping
                         * it.
                         */
                        pmap_clear_modify(m);
                        m->dirty = 0;
                        m->act_count = 0;
                        vm_page_dontneed(m);
                }
                vm_page_unlock_queues();
                if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
                        swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
                if (tobject != object)
                        VM_OBJECT_UNLOCK(tobject);
        }
        VM_OBJECT_UNLOCK(object);
}

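/*
 * Illustrative sketch only: converting an madvise(2)-style byte range
 * into the page-index range that vm_object_madvise() above expects.
 * The helper name is hypothetical; the in-tree caller is
 * vm_map_madvise(), which walks map entries instead.
 */
#if 0
static void
example_madvise_range(vm_object_t obj, vm_ooffset_t offset, vm_size_t len,
    int advise)
{
        vm_pindex_t pstart, pend;

        pstart = OFF_TO_IDX(offset);
        pend = OFF_TO_IDX(offset + len + PAGE_MASK);
        vm_object_madvise(obj, pstart, (int)(pend - pstart), advise);
}
#endif
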
/*
 *      vm_object_shadow:
 *
 *      Create a new object which is backed by the
 *      specified existing object range.  The source
 *      object reference is deallocated.
 *
 *      The new object and offset into that object
 *      are returned in the source parameters.
 */
void
vm_object_shadow(
        vm_object_t *object,    /* IN/OUT */
        vm_ooffset_t *offset,   /* IN/OUT */
        vm_size_t length)
{
        vm_object_t source;
        vm_object_t result;

        source = *object;

        /*
         * Don't create the new object if the old object isn't shared.
         */
        if (source != NULL) {
                VM_OBJECT_LOCK(source);
                if (source->ref_count == 1 &&
                    source->handle == NULL &&
                    (source->type == OBJT_DEFAULT ||
                     source->type == OBJT_SWAP)) {
                        VM_OBJECT_UNLOCK(source);
                        return;
                }
                VM_OBJECT_UNLOCK(source);
        }

        /*
         * Allocate a new object with the given length.
         */
        result = vm_object_allocate(OBJT_DEFAULT, length);

        /*
         * The new object shadows the source object, adding a reference to it.
         * Our caller changes his reference to point to the new object,
         * removing a reference to the source object.  Net result: no change
         * of reference count.
         *
         * Try to optimize the result object's page color when shadowing
         * in order to maintain page coloring consistency in the combined
         * shadowed object.
         */
        result->backing_object = source;
        /*
         * Store the offset into the source object, and fix up the offset into
         * the new object.
         */
        result->backing_object_offset = *offset;
        if (source != NULL) {
                VM_OBJECT_LOCK(source);
                LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
                source->shadow_count++;
                source->generation++;
#if VM_NRESERVLEVEL > 0
                result->flags |= source->flags & OBJ_COLORED;
                result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
                    ((1 << (VM_NFREEORDER - 1)) - 1);
#endif
                VM_OBJECT_UNLOCK(source);
        }


        /*
         * Return the new things
         */
        *offset = 0;
        *object = result;
}

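/*
 * Illustrative sketch only: the typical copy-on-write use of
 * vm_object_shadow() above, as done for a map entry.  The helper name
 * is hypothetical.  The entry's existing reference is, in effect,
 * handed to the new object's backing pointer, so the source's net
 * reference count is unchanged, and the entry's offset is rebased to 0.
 */
#if 0
static void
example_cow_entry(vm_map_entry_t entry)
{
        vm_object_shadow(&entry->object.vm_object, &entry->offset,
            atop(entry->end - entry->start));
}
#endif
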
1340 /*
1341  *      vm_object_split:
1342  *
1343  * Split the pages in a map entry into a new object.  This affords
1344  * easier removal of unused pages, and keeps object inheritance from
1345  * being a negative impact on memory usage.
1346  */
1347 void
1348 vm_object_split(vm_map_entry_t entry)
1349 {
1350         vm_page_t m, m_next;
1351         vm_object_t orig_object, new_object, source;
1352         vm_pindex_t idx, offidxstart;
1353         vm_size_t size;
1354
1355         orig_object = entry->object.vm_object;
1356         if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
1357                 return;
1358         if (orig_object->ref_count <= 1)
1359                 return;
1360         VM_OBJECT_UNLOCK(orig_object);
1361
1362         offidxstart = OFF_TO_IDX(entry->offset);
1363         size = atop(entry->end - entry->start);
1364
1365         /*
1366          * If swap_pager_copy() is later called, it will convert new_object
1367          * into a swap object.
1368          */
1369         new_object = vm_object_allocate(OBJT_DEFAULT, size);
1370
1371         /*
1372          * At this point, the new object is still private, so the order in
1373          * which the original and new objects are locked does not matter.
1374          */
1375         VM_OBJECT_LOCK(new_object);
1376         VM_OBJECT_LOCK(orig_object);
1377         source = orig_object->backing_object;
1378         if (source != NULL) {
1379                 VM_OBJECT_LOCK(source);
1380                 if ((source->flags & OBJ_DEAD) != 0) {
1381                         VM_OBJECT_UNLOCK(source);
1382                         VM_OBJECT_UNLOCK(orig_object);
1383                         VM_OBJECT_UNLOCK(new_object);
1384                         vm_object_deallocate(new_object);
1385                         VM_OBJECT_LOCK(orig_object);
1386                         return;
1387                 }
1388                 LIST_INSERT_HEAD(&source->shadow_head,
1389                                   new_object, shadow_list);
1390                 source->shadow_count++;
1391                 source->generation++;
1392                 vm_object_reference_locked(source);     /* for new_object */
1393                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
1394                 VM_OBJECT_UNLOCK(source);
1395                 new_object->backing_object_offset = 
1396                         orig_object->backing_object_offset + entry->offset;
1397                 new_object->backing_object = source;
1398         }
1399         if (orig_object->uip != NULL) {
1400                 new_object->uip = orig_object->uip;
1401                 uihold(orig_object->uip);
1402                 new_object->charge = ptoa(size);
1403                 KASSERT(orig_object->charge >= ptoa(size),
1404                     ("orig_object->charge < 0"));
1405                 orig_object->charge -= ptoa(size);
1406         }
1407 retry:
1408         if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) {
1409                 if (m->pindex < offidxstart) {
1410                         m = vm_page_splay(offidxstart, orig_object->root);
1411                         if ((orig_object->root = m)->pindex < offidxstart)
1412                                 m = TAILQ_NEXT(m, listq);
1413                 }
1414         }
1415         vm_page_lock_queues();
1416         for (; m != NULL && (idx = m->pindex - offidxstart) < size;
1417             m = m_next) {
1418                 m_next = TAILQ_NEXT(m, listq);
1419
1420                 /*
1421                  * We must wait for pending I/O to complete before we can
1422                  * rename the page.
1423                  *
1424          * We do not have to remove the page's mappings (VM_PROT_NONE)
1425          * because they should not be changed by this operation.
1426                  */
1427                 if ((m->oflags & VPO_BUSY) || m->busy) {
1428                         vm_page_flag_set(m, PG_REFERENCED);
1429                         vm_page_unlock_queues();
1430                         VM_OBJECT_UNLOCK(new_object);
1431                         m->oflags |= VPO_WANTED;
1432                         msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0);
1433                         VM_OBJECT_LOCK(new_object);
1434                         goto retry;
1435                 }
1436                 vm_page_rename(m, new_object, idx);
1437                 /* The rename automatically dirties the page and handles the cache. */
1438                 vm_page_busy(m);
1439         }
1440         vm_page_unlock_queues();
1441         if (orig_object->type == OBJT_SWAP) {
1442                 /*
1443                  * swap_pager_copy() can sleep, in which case the orig_object's
1444                  * and new_object's locks are released and reacquired. 
1445                  */
1446                 swap_pager_copy(orig_object, new_object, offidxstart, 0);
1447
1448                 /*
1449                  * Transfer any cached pages from orig_object to new_object.
1450                  */
1451                 if (__predict_false(orig_object->cache != NULL))
1452                         vm_page_cache_transfer(orig_object, offidxstart,
1453                             new_object);
1454         }
1455         VM_OBJECT_UNLOCK(orig_object);
1456         TAILQ_FOREACH(m, &new_object->memq, listq)
1457                 vm_page_wakeup(m);
1458         VM_OBJECT_UNLOCK(new_object);
1459         entry->object.vm_object = new_object;
1460         entry->offset = 0LL;
1461         vm_object_deallocate(orig_object);
1462         VM_OBJECT_LOCK(new_object);
1463 }
1464
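/*
 * Editor's note, a worked example rather than original source: for a map
 * entry with entry->offset == 2 * PAGE_SIZE that covers three pages,
 * vm_object_split() above computes offidxstart = 2 and size = 3, renames
 * the original object's pages at pindex 2..4 into new_object at indices
 * 0..2, and rewrites the entry to reference new_object at offset 0.
 */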
1465 #define OBSC_TEST_ALL_SHADOWED  0x0001
1466 #define OBSC_COLLAPSE_NOWAIT    0x0002
1467 #define OBSC_COLLAPSE_WAIT      0x0004
1468
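/*
 * Editor's summary, not in the original source: the "op" argument to
 * vm_object_backing_scan() below is a mask of the flags above.
 *
 *   OBSC_TEST_ALL_SHADOWED  Return 1 only if every page of the backing
 *                           object that falls within the parent's range is
 *                           shadowed by a valid resident page or a
 *                           pager-backed page in the parent.
 *   OBSC_COLLAPSE_NOWAIT    Migrate pages into the parent, skipping busy
 *                           or invalid pages instead of sleeping.
 *   OBSC_COLLAPSE_WAIT      Mark the backing object OBJ_DEAD and migrate
 *                           every page, sleeping on busy pages and
 *                           restarting the scan afterwards.
 */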
1469 static int
1470 vm_object_backing_scan(vm_object_t object, int op)
1471 {
1472         int r = 1;
1473         vm_page_t p;
1474         vm_object_t backing_object;
1475         vm_pindex_t backing_offset_index;
1476
1477         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1478         VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
1479
1480         backing_object = object->backing_object;
1481         backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1482
1483         /*
1484          * Initial conditions
1485          */
1486         if (op & OBSC_TEST_ALL_SHADOWED) {
1487                 /*
1488                  * We do not want to have to test for the existence of cache
1489                  * or swap pages in the backing object.  XXX but with the
1490                  * new swapper this would be pretty easy to do.
1491                  *
1492                  * XXX what about anonymous MAP_SHARED memory that hasn't
1493                  * been ZFOD faulted yet?  If we do not test for this, the
1494                  * shadow test may succeed! XXX
1495                  */
1496                 if (backing_object->type != OBJT_DEFAULT) {
1497                         return (0);
1498                 }
1499         }
1500         if (op & OBSC_COLLAPSE_WAIT) {
1501                 vm_object_set_flag(backing_object, OBJ_DEAD);
1502         }
1503
1504         /*
1505          * Our scan
1506          */
1507         p = TAILQ_FIRST(&backing_object->memq);
1508         while (p) {
1509                 vm_page_t next = TAILQ_NEXT(p, listq);
1510                 vm_pindex_t new_pindex = p->pindex - backing_offset_index;
1511
1512                 if (op & OBSC_TEST_ALL_SHADOWED) {
1513                         vm_page_t pp;
1514
1515                         /*
1516                          * Ignore pages outside the parent object's range
1517                          * and outside the parent object's mapping of the 
1518                          * backing object.
1519                          *
1520                          * note that we do not busy the backing object's
1521                          * page.
1522                          */
1523                         if (
1524                             p->pindex < backing_offset_index ||
1525                             new_pindex >= object->size
1526                         ) {
1527                                 p = next;
1528                                 continue;
1529                         }
1530
1531                         /*
1532                          * See if the parent has the page or if the parent's
1533                          * object pager has the page.  If the parent has the
1534                          * page but the page is not valid, the parent's
1535                          * object pager must have the page.
1536                          *
1537                          * If this fails, the parent does not completely shadow
1538                          * the object and we might as well give up now.
1539                          */
1540
1541                         pp = vm_page_lookup(object, new_pindex);
1542                         if (
1543                             (pp == NULL || pp->valid == 0) &&
1544                             !vm_pager_has_page(object, new_pindex, NULL, NULL)
1545                         ) {
1546                                 r = 0;
1547                                 break;
1548                         }
1549                 }
1550
1551                 /*
1552                  * Check for busy page
1553                  */
1554                 if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
1555                         vm_page_t pp;
1556
1557                         if (op & OBSC_COLLAPSE_NOWAIT) {
1558                                 if ((p->oflags & VPO_BUSY) ||
1559                                     !p->valid || 
1560                                     p->busy) {
1561                                         p = next;
1562                                         continue;
1563                                 }
1564                         } else if (op & OBSC_COLLAPSE_WAIT) {
1565                                 if ((p->oflags & VPO_BUSY) || p->busy) {
1566                                         vm_page_lock_queues();
1567                                         vm_page_flag_set(p, PG_REFERENCED);
1568                                         vm_page_unlock_queues();
1569                                         VM_OBJECT_UNLOCK(object);
1570                                         p->oflags |= VPO_WANTED;
1571                                         msleep(p, VM_OBJECT_MTX(backing_object),
1572                                             PDROP | PVM, "vmocol", 0);
1573                                         VM_OBJECT_LOCK(object);
1574                                         VM_OBJECT_LOCK(backing_object);
1575                                         /*
1576                                          * If we slept, anything could have
1577                                          * happened.  Since the object is
1578                                          * marked dead, the backing offset
1579                                          * should not have changed so we
1580                                          * just restart our scan.
1581                                          */
1582                                         p = TAILQ_FIRST(&backing_object->memq);
1583                                         continue;
1584                                 }
1585                         }
1586
1587                         KASSERT(
1588                             p->object == backing_object,
1589                             ("vm_object_backing_scan: object mismatch")
1590                         );
1591
1592                         /*
1593                          * Destroy any associated swap
1594                          */
1595                         if (backing_object->type == OBJT_SWAP) {
1596                                 swap_pager_freespace(
1597                                     backing_object, 
1598                                     p->pindex,
1599                                     1
1600                                 );
1601                         }
1602
1603                         if (
1604                             p->pindex < backing_offset_index ||
1605                             new_pindex >= object->size
1606                         ) {
1607                                 /*
1608                                  * The page is outside the parent object's range,
1609                                  * so we can simply destroy it.
1610                                  */
1611                                 vm_page_lock_queues();
1612                                 KASSERT(!pmap_page_is_mapped(p),
1613                                     ("freeing mapped page %p", p));
1614                                 if (p->wire_count == 0)
1615                                         vm_page_free(p);
1616                                 else
1617                                         vm_page_remove(p);
1618                                 vm_page_unlock_queues();
1619                                 p = next;
1620                                 continue;
1621                         }
1622
1623                         pp = vm_page_lookup(object, new_pindex);
1624                         if (
1625                             pp != NULL ||
1626                             vm_pager_has_page(object, new_pindex, NULL, NULL)
1627                         ) {
1628                                 /*
1629                                  * The page already exists in the parent, or swap
1630                                  * exists for this location in the parent.  Destroy
1631                                  * the original page from the backing object.
1632                                  *
1633                                  * Leave the parent's page alone.
1634                                  */
1635                                 vm_page_lock_queues();
1636                                 KASSERT(!pmap_page_is_mapped(p),
1637                                     ("freeing mapped page %p", p));
1638                                 if (p->wire_count == 0)
1639                                         vm_page_free(p);
1640                                 else
1641                                         vm_page_remove(p);
1642                                 vm_page_unlock_queues();
1643                                 p = next;
1644                                 continue;
1645                         }
1646
1647 #if VM_NRESERVLEVEL > 0
1648                         /*
1649                          * Rename the reservation.
1650                          */
1651                         vm_reserv_rename(p, object, backing_object,
1652                             backing_offset_index);
1653 #endif
1654
1655                         /*
1656                          * The page does not exist in the parent, so rename
1657                          * the page from the backing object to the main object.
1658                          *
1659                          * If the page was mapped to a process, it can remain 
1660                          * mapped through the rename.
1661                          */
1662                         vm_page_lock_queues();
1663                         vm_page_rename(p, object, new_pindex);
1664                         vm_page_unlock_queues();
1665                         /* page automatically made dirty by rename */
1666                 }
1667                 p = next;
1668         }
1669         return (r);
1670 }
1671
1672
1673 /*
1674  * This version of collapse allows the operation to occur earlier and
1675  * while paging_in_progress is true for an object.  This is not a complete
1676  * operation, but it should plug nearly all of the remaining leaks.
1677  */
1678 static void
1679 vm_object_qcollapse(vm_object_t object)
1680 {
1681         vm_object_t backing_object = object->backing_object;
1682
1683         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1684         VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
1685
1686         if (backing_object->ref_count != 1)
1687                 return;
1688
1689         vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
1690 }
1691
1692 /*
1693  *      vm_object_collapse:
1694  *
1695  *      Collapse an object with the object backing it.
1696  *      Pages in the backing object are moved into the
1697  *      parent, and the backing object is deallocated.
1698  */
1699 void
1700 vm_object_collapse(vm_object_t object)
1701 {
1702         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1703         
1704         while (TRUE) {
1705                 vm_object_t backing_object;
1706
1707                 /*
1708                  * Verify that the conditions are right for collapse:
1709                  *
1710                  * The object exists and the backing object exists.
1711                  */
1712                 if ((backing_object = object->backing_object) == NULL)
1713                         break;
1714
1715                 /*
1716                  * We check the backing object first because it is the
1717                  * most likely to be non-collapsible.
1718                  */
1719                 VM_OBJECT_LOCK(backing_object);
1720                 if (backing_object->handle != NULL ||
1721                     (backing_object->type != OBJT_DEFAULT &&
1722                      backing_object->type != OBJT_SWAP) ||
1723                     (backing_object->flags & OBJ_DEAD) ||
1724                     object->handle != NULL ||
1725                     (object->type != OBJT_DEFAULT &&
1726                      object->type != OBJT_SWAP) ||
1727                     (object->flags & OBJ_DEAD)) {
1728                         VM_OBJECT_UNLOCK(backing_object);
1729                         break;
1730                 }
1731
1732                 if (
1733                     object->paging_in_progress != 0 ||
1734                     backing_object->paging_in_progress != 0
1735                 ) {
1736                         vm_object_qcollapse(object);
1737                         VM_OBJECT_UNLOCK(backing_object);
1738                         break;
1739                 }
1740                 /*
1741                  * We know that we can either collapse the backing object (if
1742                  * the parent is the only reference to it) or (perhaps) have
1743                  * the parent bypass the object if the parent happens to shadow
1744                  * all the resident pages in the entire backing object.
1745                  *
1746                  * This ignores pager-backed pages, such as swap pages.
1747                  * vm_object_backing_scan fails the shadowing test in this
1748                  * case.
1749                  */
1750                 if (backing_object->ref_count == 1) {
1751                         /*
1752                          * If there is exactly one reference to the backing
1753                          * object, we can collapse it into the parent.  
1754                          */
1755                         vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1756
1757 #if VM_NRESERVLEVEL > 0
1758                         /*
1759                          * Break any reservations from backing_object.
1760                          */
1761                         if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
1762                                 vm_reserv_break_all(backing_object);
1763 #endif
1764
1765                         /*
1766                          * Move the pager from backing_object to object.
1767                          */
1768                         if (backing_object->type == OBJT_SWAP) {
1769                                 /*
1770                                  * swap_pager_copy() can sleep, in which case
1771                                  * the backing_object's and object's locks are
1772                                  * released and reacquired.
1773                                  */
1774                                 swap_pager_copy(
1775                                     backing_object,
1776                                     object,
1777                                     OFF_TO_IDX(object->backing_object_offset), TRUE);
1778
1779                                 /*
1780                                  * Free any cached pages from backing_object.
1781                                  */
1782                                 if (__predict_false(backing_object->cache != NULL))
1783                                         vm_page_cache_free(backing_object, 0, 0);
1784                         }
1785                         /*
1786                          * Object now shadows whatever backing_object did.
1787                          * Note that the reference to 
1788                          * backing_object->backing_object moves from within 
1789                          * backing_object to within object.
1790                          */
1791                         LIST_REMOVE(object, shadow_list);
1792                         backing_object->shadow_count--;
1793                         backing_object->generation++;
1794                         if (backing_object->backing_object) {
1795                                 VM_OBJECT_LOCK(backing_object->backing_object);
1796                                 LIST_REMOVE(backing_object, shadow_list);
1797                                 LIST_INSERT_HEAD(
1798                                     &backing_object->backing_object->shadow_head,
1799                                     object, shadow_list);
1800                                 /*
1801                                  * The shadow_count has not changed.
1802                                  */
1803                                 backing_object->backing_object->generation++;
1804                                 VM_OBJECT_UNLOCK(backing_object->backing_object);
1805                         }
1806                         object->backing_object = backing_object->backing_object;
1807                         object->backing_object_offset +=
1808                             backing_object->backing_object_offset;
1809
1810                         /*
1811                          * Discard backing_object.
1812                          *
1813                          * Since the backing object has no pages, no pager left,
1814                          * and no object references within it, all that is
1815                          * necessary is to dispose of it.
1816                          */
1817                         KASSERT(backing_object->ref_count == 1, (
1818 "backing_object %p was somehow re-referenced during collapse!",
1819                             backing_object));
1820                         VM_OBJECT_UNLOCK(backing_object);
1821                         vm_object_destroy(backing_object);
1822
1823                         object_collapses++;
1824                 } else {
1825                         vm_object_t new_backing_object;
1826
1827                         /*
1828                          * If we do not entirely shadow the backing object,
1829                          * there is nothing we can do so we give up.
1830                          */
1831                         if (object->resident_page_count != object->size &&
1832                             vm_object_backing_scan(object,
1833                             OBSC_TEST_ALL_SHADOWED) == 0) {
1834                                 VM_OBJECT_UNLOCK(backing_object);
1835                                 break;
1836                         }
1837
1838                         /*
1839                          * Make the parent shadow the next object in the
1840                          * chain.  Deallocating backing_object will not remove
1841                          * it, since its reference count is at least 2.
1842                          */
1843                         LIST_REMOVE(object, shadow_list);
1844                         backing_object->shadow_count--;
1845                         backing_object->generation++;
1846
1847                         new_backing_object = backing_object->backing_object;
1848                         if ((object->backing_object = new_backing_object) != NULL) {
1849                                 VM_OBJECT_LOCK(new_backing_object);
1850                                 LIST_INSERT_HEAD(
1851                                     &new_backing_object->shadow_head,
1852                                     object,
1853                                     shadow_list
1854                                 );
1855                                 new_backing_object->shadow_count++;
1856                                 new_backing_object->generation++;
1857                                 vm_object_reference_locked(new_backing_object);
1858                                 VM_OBJECT_UNLOCK(new_backing_object);
1859                                 object->backing_object_offset +=
1860                                         backing_object->backing_object_offset;
1861                         }
1862
1863                         /*
1864                          * Drop the reference count on backing_object. Since
1865                          * its ref_count was at least 2, it will not vanish.
1866                          */
1867                         backing_object->ref_count--;
1868                         VM_OBJECT_UNLOCK(backing_object);
1869                         object_bypasses++;
1870                 }
1871
1872                 /*
1873                  * Try again with this object's new backing object.
1874                  */
1875         }
1876 }
1877
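/*
 * Editor's illustrative sketch, not from the original source: a successful
 * collapse with ref_count == 1 turns the shadow chain
 *
 *	object -> backing_object -> backing_object->backing_object
 *
 * into
 *
 *	object -> backing_object->backing_object
 *
 * after migrating the backing object's pages and pager contents into
 * object.  The bypass case re-links object in the same way but leaves
 * backing_object alive with a decremented reference count.
 */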
1878 /*
1879  *      vm_object_page_remove:
1880  *
1881  *      For the given object, either frees or invalidates each of the
1882  *      specified pages.  In general, a page is freed.  However, if a
1883  *      page is wired for any reason other than the existence of a
1884  *      managed, wired mapping, then it may be invalidated but not
1885  *      removed from the object.  Pages are specified by the given
1886  *      range ["start", "end") and Boolean "clean_only".  As a
1887  *      special case, if "end" is zero, then the range extends from
1888  *      "start" to the end of the object.  If "clean_only" is TRUE,
1889  *      then only the non-dirty pages within the specified range are
1890  *      affected.
1891  *
1892  *      In general, this operation should only be performed on objects
1893  *      that contain managed pages.  There are two exceptions.  First,
1894  *      it may be performed on the kernel and kmem objects.  Second,
1895  *      it may be used by msync(..., MS_INVALIDATE) to invalidate
1896  *      device-backed pages.
1897  *
1898  *      The object must be locked.
1899  */
1900 void
1901 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1902     boolean_t clean_only)
1903 {
1904         vm_page_t p, next;
1905         int wirings;
1906
1907         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1908         if (object->resident_page_count == 0)
1909                 goto skipmemq;
1910
1911         /*
1912          * Since physically-backed objects do not use managed pages, we can't
1913          * remove pages from the object (we must instead remove the page
1914          * references, and then destroy the object).
1915          */
1916         KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
1917             object == kmem_object,
1918             ("attempt to remove pages from a physical object"));
1919
1920         vm_object_pip_add(object, 1);
1921 again:
1922         if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
1923                 if (p->pindex < start) {
1924                         p = vm_page_splay(start, object->root);
1925                         if ((object->root = p)->pindex < start)
1926                                 p = TAILQ_NEXT(p, listq);
1927                 }
1928         }
1929         vm_page_lock_queues();
1930         /*
1931          * Assert: the variable p is either (1) the page with the
1932          * least pindex greater than or equal to the parameter "start",
1933          * or (2) NULL.
1934          */
1935         for (;
1936              p != NULL && (p->pindex < end || end == 0);
1937              p = next) {
1938                 next = TAILQ_NEXT(p, listq);
1939
1940                 /*
1941                  * If the page is wired for any reason besides the
1942                  * existence of managed, wired mappings, then it cannot
1943                  * be freed.  For example, fictitious pages, which
1944                  * represent device memory, are inherently wired and
1945                  * cannot be freed.  They can, however, be invalidated
1946                  * if "clean_only" is FALSE.
1947                  */
1948                 if ((wirings = p->wire_count) != 0 &&
1949                     (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
1950                         /* Fictitious pages do not have managed mappings. */
1951                         if ((p->flags & PG_FICTITIOUS) == 0)
1952                                 pmap_remove_all(p);
1953                         /* Account for removal of managed, wired mappings. */
1954                         p->wire_count -= wirings;
1955                         if (!clean_only) {
1956                                 p->valid = 0;
1957                                 vm_page_undirty(p);
1958                         }
1959                         continue;
1960                 }
1961                 if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
1962                         goto again;
1963                 KASSERT((p->flags & PG_FICTITIOUS) == 0,
1964                     ("vm_object_page_remove: page %p is fictitious", p));
1965                 if (clean_only && p->valid) {
1966                         pmap_remove_write(p);
1967                         if (p->dirty)
1968                                 continue;
1969                 }
1970                 pmap_remove_all(p);
1971                 /* Account for removal of managed, wired mappings. */
1972                 if (wirings != 0)
1973                         p->wire_count -= wirings;
1974                 vm_page_free(p);
1975         }
1976         vm_page_unlock_queues();
1977         vm_object_pip_wakeup(object);
1978 skipmemq:
1979         if (__predict_false(object->cache != NULL))
1980                 vm_page_cache_free(object, start, end);
1981 }
1982
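/*
 * Editor's illustrative sketch, not part of the original file: freeing
 * every page of an object via the documented special case end == 0, which
 * extends the range from "start" to the end of the object.  The helper
 * name is hypothetical.
 */
static __inline void
vm_object_page_remove_all_sketch(vm_object_t object)
{

	VM_OBJECT_LOCK(object);
	vm_object_page_remove(object, 0, 0, FALSE);
	VM_OBJECT_UNLOCK(object);
}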
1983 /*
1984  *      Populate the specified range of the object with valid pages.  Returns
1985  *      TRUE if the range is successfully populated and FALSE otherwise.
1986  *
1987  *      Note: This function should be optimized to pass a larger array of
1988  *      pages to vm_pager_get_pages() before it is applied to a non-
1989  *      OBJT_DEVICE object.
1990  *
1991  *      The object must be locked.
1992  */
1993 boolean_t
1994 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1995 {
1996         vm_page_t m, ma[1];
1997         vm_pindex_t pindex;
1998         int rv;
1999
2000         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2001         for (pindex = start; pindex < end; pindex++) {
2002                 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
2003                     VM_ALLOC_RETRY);
2004                 if (m->valid != VM_PAGE_BITS_ALL) {
2005                         ma[0] = m;
2006                         rv = vm_pager_get_pages(object, ma, 1, 0);
2007                         m = vm_page_lookup(object, pindex);
2008                         if (m == NULL)
2009                                 break;
2010                         if (rv != VM_PAGER_OK) {
2011                                 vm_page_lock_queues();
2012                                 vm_page_free(m);
2013                                 vm_page_unlock_queues();
2014                                 break;
2015                         }
2016                 }
2017                 /*
2018                  * Keep "m" busy because a subsequent iteration may unlock
2019                  * the object.
2020                  */
2021         }
2022         if (pindex > start) {
2023                 m = vm_page_lookup(object, start);
2024                 while (m != NULL && m->pindex < pindex) {
2025                         vm_page_wakeup(m);
2026                         m = TAILQ_NEXT(m, listq);
2027                 }
2028         }
2029         return (pindex == end);
2030 }
2031
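/*
 * Editor's illustrative sketch, not part of the original file: a wrapper
 * showing the locking protocol around vm_object_populate(); note that the
 * function may drop and reacquire the object lock internally while paging.
 * The helper name is hypothetical.
 */
static __inline boolean_t
vm_object_populate_sketch(vm_object_t object, vm_pindex_t start,
    vm_pindex_t end)
{
	boolean_t rv;

	VM_OBJECT_LOCK(object);
	rv = vm_object_populate(object, start, end);
	VM_OBJECT_UNLOCK(object);
	return (rv);
}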
2032 /*
2033  *      Routine:        vm_object_coalesce
2034  *      Function:       Coalesces two objects backing up adjoining
2035  *                      regions of memory into a single object.
2036  *
2037  *      returns TRUE if objects were combined.
2038  *
2039  *      NOTE:   Only works at the moment if the second object is NULL -
2040  *              if it's not, which object do we lock first?
2041  *
2042  *      Parameters:
2043  *              prev_object     First object to coalesce
2044  *              prev_offset     Offset into prev_object
2045  *              prev_size       Size of reference to prev_object
2046  *              next_size       Size of reference to the second object
2047  *              reserved        Indicator that extension region has
2048  *                              swap accounted for
2049  *
2050  *      Conditions:
2051  *      The object must *not* be locked.
2052  */
2053 boolean_t
2054 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
2055     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2056 {
2057         vm_pindex_t next_pindex;
2058
2059         if (prev_object == NULL)
2060                 return (TRUE);
2061         VM_OBJECT_LOCK(prev_object);
2062         if (prev_object->type != OBJT_DEFAULT &&
2063             prev_object->type != OBJT_SWAP) {
2064                 VM_OBJECT_UNLOCK(prev_object);
2065                 return (FALSE);
2066         }
2067
2068         /*
2069          * Try to collapse the object first
2070          */
2071         vm_object_collapse(prev_object);
2072
2073         /*
2074          * Can't coalesce if the object has more than one reference, is paged
2075          * out, shadows another object, or has a copy elsewhere (any of which
2076          * mean that the pages not mapped to prev_entry may be in use anyway).
2077          */
2078         if (prev_object->backing_object != NULL) {
2079                 VM_OBJECT_UNLOCK(prev_object);
2080                 return (FALSE);
2081         }
2082
2083         prev_size >>= PAGE_SHIFT;
2084         next_size >>= PAGE_SHIFT;
2085         next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2086
2087         if ((prev_object->ref_count > 1) &&
2088             (prev_object->size != next_pindex)) {
2089                 VM_OBJECT_UNLOCK(prev_object);
2090                 return (FALSE);
2091         }
2092
2093         /*
2094          * Account for the charge.
2095          */
2096         if (prev_object->uip != NULL) {
2097
2098                 /*
2099                  * If prev_object was charged, then this mapping,
2100                  * although not charged now, may become writable
2101                  * later.  A non-NULL uip in the object would prevent
2102                  * swap reservation while write access is being
2103                  * enabled, so reserve swap now.  A failed reservation
2104                  * causes allocation of a separate object for the map
2105                  * entry, and swap reservation for that entry is
2106                  * managed at the appropriate time.
2107                  */
2108                 if (!reserved && !swap_reserve_by_uid(ptoa(next_size),
2109                     prev_object->uip)) {
                             VM_OBJECT_UNLOCK(prev_object); /* Don't leak the lock. */
2110                         return (FALSE);
2111                 }
2112                 prev_object->charge += ptoa(next_size);
2113         }
2114
2115         /*
2116          * Remove any pages that may still be in the object from a previous
2117          * deallocation.
2118          */
2119         if (next_pindex < prev_object->size) {
2120                 vm_object_page_remove(prev_object,
2121                                       next_pindex,
2122                                       next_pindex + next_size, FALSE);
2123                 if (prev_object->type == OBJT_SWAP)
2124                         swap_pager_freespace(prev_object,
2125                                              next_pindex, next_size);
2126 #if 0
2127                 if (prev_object->uip != NULL) {
2128                         KASSERT(prev_object->charge >=
2129                             ptoa(prev_object->size - next_pindex),
2130                             ("object %p overcharged 1 %jx %jx", prev_object,
2131                                 (uintmax_t)next_pindex, (uintmax_t)next_size));
2132                         prev_object->charge -= ptoa(prev_object->size -
2133                             next_pindex);
2134                 }
2135 #endif
2136         }
2137
2138         /*
2139          * Extend the object if necessary.
2140          */
2141         if (next_pindex + next_size > prev_object->size)
2142                 prev_object->size = next_pindex + next_size;
2143
2144         VM_OBJECT_UNLOCK(prev_object);
2145         return (TRUE);
2146 }
2147
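/*
 * Editor's note, a worked example rather than original source: with
 * prev_offset == 0, prev_size == 4 pages, and next_size == 2 pages,
 * vm_object_coalesce() above computes next_pindex = 4; if prev_object
 * was exactly 4 pages, no stale pages need removal and the object is
 * simply grown to 6 pages so the adjoining region is served from it.
 */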
2148 void
2149 vm_object_set_writeable_dirty(vm_object_t object)
2150 {
2151         struct vnode *vp;
2152
2153         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2154         if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
2155                 return;
2156         vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
2157         if (object->type == OBJT_VNODE &&
2158             (vp = (struct vnode *)object->handle) != NULL) {
2159                 VI_LOCK(vp);
2160                 vp->v_iflag |= VI_OBJDIRTY;
2161                 VI_UNLOCK(vp);
2162         }
2163 }
2164
2165 #include "opt_ddb.h"
2166 #ifdef DDB
2167 #include <sys/kernel.h>
2168
2169 #include <sys/cons.h>
2170
2171 #include <ddb/ddb.h>
2172
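/*
 * Editor's summary, not in the original source: the DB_SHOW_COMMAND()
 * definitions below register the ddb(4) commands "show vmochk",
 * "show object <addr>", and "show vmopag".
 */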
2173 static int
2174 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2175 {
2176         vm_map_t tmpm;
2177         vm_map_entry_t tmpe;
2178         vm_object_t obj;
2179         int entcount;
2180
2181         if (map == NULL)
2182                 return (0);
2183
2184         if (entry == NULL) {
2185                 tmpe = map->header.next;
2186                 entcount = map->nentries;
2187                 while (entcount-- && (tmpe != &map->header)) {
2188                         if (_vm_object_in_map(map, object, tmpe)) {
2189                                 return (1);
2190                         }
2191                         tmpe = tmpe->next;
2192                 }
2193         } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2194                 tmpm = entry->object.sub_map;
2195                 tmpe = tmpm->header.next;
2196                 entcount = tmpm->nentries;
2197                 while (entcount-- && tmpe != &tmpm->header) {
2198                         if (_vm_object_in_map(tmpm, object, tmpe)) {
2199                                 return (1);
2200                         }
2201                         tmpe = tmpe->next;
2202                 }
2203         } else if ((obj = entry->object.vm_object) != NULL) {
2204                 for (; obj; obj = obj->backing_object)
2205                         if (obj == object) {
2206                                 return (1);
2207                         }
2208         }
2209         return (0);
2210 }
2211
2212 static int
2213 vm_object_in_map(vm_object_t object)
2214 {
2215         struct proc *p;
2216
2217         /* sx_slock(&allproc_lock); */
2218         FOREACH_PROC_IN_SYSTEM(p) {
2219                 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2220                         continue;
2221                 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, NULL)) {
2222                         /* sx_sunlock(&allproc_lock); */
2223                         return (1);
2224                 }
2225         }
2226         /* sx_sunlock(&allproc_lock); */
2227         if (_vm_object_in_map(kernel_map, object, NULL))
2228                 return (1);
2229         if (_vm_object_in_map(kmem_map, object, NULL))
2230                 return (1);
2231         if (_vm_object_in_map(pager_map, object, NULL))
2232                 return (1);
2233         if (_vm_object_in_map(buffer_map, object, NULL))
2234                 return (1);
2235         return (0);
2236 }
2237
2238 DB_SHOW_COMMAND(vmochk, vm_object_check)
2239 {
2240         vm_object_t object;
2241
2242         /*
2243          * Make sure that internal objects are in a map somewhere
2244          * and that none have zero ref counts.
2245          */
2246         TAILQ_FOREACH(object, &vm_object_list, object_list) {
2247                 if (object->handle == NULL &&
2248                     (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2249                         if (object->ref_count == 0) {
2250                                 db_printf("vmochk: internal obj has zero ref count, size: %ld\n",
2251                                         (long)object->size);
2252                         }
2253                         if (!vm_object_in_map(object)) {
2254                                 db_printf(
2255                         "vmochk: internal obj is not in a map: "
2256                         "ref: %d, size: %lu (0x%lx), backing_object: %p\n",
2257                                     object->ref_count, (u_long)object->size, 
2258                                     (u_long)object->size,
2259                                     (void *)object->backing_object);
2260                         }
2261                 }
2262         }
2263 }
2264
2265 /*
2266  *      vm_object_print:        [ debug ]
2267  */
2268 DB_SHOW_COMMAND(object, vm_object_print_static)
2269 {
2270         /* XXX convert args. */
2271         vm_object_t object = (vm_object_t)addr;
2272         boolean_t full = have_addr;
2273
2274         vm_page_t p;
2275
2276         /* XXX count is an (unused) arg.  Avoid shadowing it. */
2277 #define count   was_count
2278
2279         int count;
2280
2281         if (object == NULL)
2282                 return;
2283
2284         db_iprintf(
2285             "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x, uip=%d, charge=0x%jx\n",
2286             object, (int)object->type, (uintmax_t)object->size,
2287             object->resident_page_count, object->ref_count, object->flags,
2288             object->uip ? object->uip->ui_uid : -1, (uintmax_t)object->charge);
2289         db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2290             object->shadow_count, 
2291             object->backing_object ? object->backing_object->ref_count : 0,
2292             object->backing_object, (uintmax_t)object->backing_object_offset);
2293
2294         if (!full)
2295                 return;
2296
2297         db_indent += 2;
2298         count = 0;
2299         TAILQ_FOREACH(p, &object->memq, listq) {
2300                 if (count == 0)
2301                         db_iprintf("memory:=");
2302                 else if (count == 6) {
2303                         db_printf("\n");
2304                         db_iprintf(" ...");
2305                         count = 0;
2306                 } else
2307                         db_printf(",");
2308                 count++;
2309
2310                 db_printf("(off=0x%jx,page=0x%jx)",
2311                     (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2312         }
2313         if (count != 0)
2314                 db_printf("\n");
2315         db_indent -= 2;
2316 }
2317
2318 /* XXX. */
2319 #undef count
2320
2321 /* XXX need this non-static entry for calling from vm_map_print. */
2322 void
2323 vm_object_print(
2324         /* db_expr_t */ long addr,
2325         boolean_t have_addr,
2326         /* db_expr_t */ long count,
2327         char *modif)
2328 {
2329         vm_object_print_static(addr, have_addr, count, modif);
2330 }
2331
2332 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2333 {
2334         vm_object_t object;
2335         vm_pindex_t fidx;
2336         vm_paddr_t pa;
2337         vm_page_t m, prev_m;
2338         int rcount, nl, c;
2339
2340         nl = 0;
2341         TAILQ_FOREACH(object, &vm_object_list, object_list) {
2342                 db_printf("new object: %p\n", (void *)object);
2343                 if (nl > 18) {
2344                         c = cngetc();
2345                         if (c != ' ')
2346                                 return;
2347                         nl = 0;
2348                 }
2349                 nl++;
2350                 rcount = 0;
2351                 fidx = 0;
2352                 pa = -1;
2353                 TAILQ_FOREACH(m, &object->memq, listq) {
2354                         if (m->pindex > 128)
2355                                 break;
2356                         if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2357                             prev_m->pindex + 1 != m->pindex) {
2358                                 if (rcount) {
2359                                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2360                                                 (long)fidx, rcount, (long)pa);
2361                                         if (nl > 18) {
2362                                                 c = cngetc();
2363                                                 if (c != ' ')
2364                                                         return;
2365                                                 nl = 0;
2366                                         }
2367                                         nl++;
2368                                         rcount = 0;
2369                                 }
2370                         }                               
2371                         if (rcount &&
2372                                 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2373                                 ++rcount;
2374                                 continue;
2375                         }
2376                         if (rcount) {
2377                                 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2378                                         (long)fidx, rcount, (long)pa);
2379                                 if (nl > 18) {
2380                                         c = cngetc();
2381                                         if (c != ' ')
2382                                                 return;
2383                                         nl = 0;
2384                                 }
2385                                 nl++;
2386                         }
2387                         fidx = m->pindex;
2388                         pa = VM_PAGE_TO_PHYS(m);
2389                         rcount = 1;
2390                 }
2391                 if (rcount) {
2392                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2393                                 (long)fidx, rcount, (long)pa);
2394                         if (nl > 18) {
2395                                 c = cngetc();
2396                                 if (c != ' ')
2397                                         return;
2398                                 nl = 0;
2399                         }
2400                         nl++;
2401                 }
2402         }
2403 }
2404 #endif /* DDB */