1 /*-
2  * Copyright (c) 1991, 1993
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *      from: @(#)vm_object.c   8.5 (Berkeley) 3/22/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  */
60
61 /*
62  *      Virtual memory object module.
63  */
64
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD$");
67
68 #include "opt_vm.h"
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/lock.h>
73 #include <sys/mman.h>
74 #include <sys/mount.h>
75 #include <sys/kernel.h>
76 #include <sys/pctrie.h>
77 #include <sys/sysctl.h>
78 #include <sys/mutex.h>
79 #include <sys/proc.h>           /* for curproc, pageproc */
80 #include <sys/socket.h>
81 #include <sys/resourcevar.h>
82 #include <sys/rwlock.h>
83 #include <sys/user.h>
84 #include <sys/vnode.h>
85 #include <sys/vmmeter.h>
86 #include <sys/sx.h>
87
88 #include <vm/vm.h>
89 #include <vm/vm_param.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/vm_pager.h>
96 #include <vm/swap_pager.h>
97 #include <vm/vm_kern.h>
98 #include <vm/vm_extern.h>
99 #include <vm/vm_radix.h>
100 #include <vm/vm_reserv.h>
101 #include <vm/uma.h>
102
103 static int old_msync;
104 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
105     "Use old (insecure) msync behavior");
106
107 static int      vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
108                     int pagerflags, int flags, boolean_t *clearobjflags,
109                     boolean_t *eio);
110 static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
111                     boolean_t *clearobjflags);
112 static void     vm_object_qcollapse(vm_object_t object);
113 static void     vm_object_vndeallocate(vm_object_t object);
114
115 /*
116  *      Virtual memory objects maintain the actual data
117  *      associated with allocated virtual memory.  A given
118  *      page of memory exists within exactly one object.
119  *
120  *      An object is only deallocated when all "references"
121  *      are given up.  Only one "reference" to a given
122  *      region of an object should be writeable.
123  *
124  *      Associated with each object is a list of all resident
125  *      memory pages belonging to that object; this list is
126  *      maintained by the "vm_page" module, and locked by the object's
127  *      lock.
128  *
129  *      Each object also records a "pager" routine which is
130  *      used to retrieve (and store) pages to the proper backing
131  *      storage.  In addition, objects may be backed by other
132  *      objects from which they were virtual-copied.
133  *
134  *      The only items within the object structure which are
135  *      modified after time of creation are:
136  *              reference count         locked by object's lock
137  *              pager routine           locked by object's lock
138  *
139  */
140
141 struct object_q vm_object_list;
142 struct mtx vm_object_list_mtx;  /* lock for object list and count */
143
144 struct vm_object kernel_object_store;
145 struct vm_object kmem_object_store;
146
147 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
148     "VM object stats");
149
150 static long object_collapses;
151 SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
152     &object_collapses, 0, "VM object collapses");
153
154 static long object_bypasses;
155 SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
156     &object_bypasses, 0, "VM object bypasses");
157
158 static uma_zone_t obj_zone;
159
160 static int vm_object_zinit(void *mem, int size, int flags);
161
162 #ifdef INVARIANTS
163 static void vm_object_zdtor(void *mem, int size, void *arg);
164
165 static void
166 vm_object_zdtor(void *mem, int size, void *arg)
167 {
168         vm_object_t object;
169
170         object = (vm_object_t)mem;
171         KASSERT(object->ref_count == 0,
172             ("object %p ref_count = %d", object, object->ref_count));
173         KASSERT(TAILQ_EMPTY(&object->memq),
174             ("object %p has resident pages in its memq", object));
175         KASSERT(vm_radix_is_empty(&object->rtree),
176             ("object %p has resident pages in its trie", object));
177 #if VM_NRESERVLEVEL > 0
178         KASSERT(LIST_EMPTY(&object->rvq),
179             ("object %p has reservations",
180             object));
181 #endif
182         KASSERT(object->paging_in_progress == 0,
183             ("object %p paging_in_progress = %d",
184             object, object->paging_in_progress));
185         KASSERT(object->resident_page_count == 0,
186             ("object %p resident_page_count = %d",
187             object, object->resident_page_count));
188         KASSERT(object->shadow_count == 0,
189             ("object %p shadow_count = %d",
190             object, object->shadow_count));
191         KASSERT(object->type == OBJT_DEAD,
192             ("object %p has non-dead type %d",
193             object, object->type));
194 }
195 #endif
196
197 static int
198 vm_object_zinit(void *mem, int size, int flags)
199 {
200         vm_object_t object;
201
202         object = (vm_object_t)mem;
203         rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);
204
205         /* These are true for any object that has been freed */
206         object->type = OBJT_DEAD;
207         object->ref_count = 0;
208         vm_radix_init(&object->rtree);
209         object->paging_in_progress = 0;
210         object->resident_page_count = 0;
211         object->shadow_count = 0;
212         object->flags = OBJ_DEAD;
213
214         mtx_lock(&vm_object_list_mtx);
215         TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
216         mtx_unlock(&vm_object_list_mtx);
217         return (0);
218 }
219
220 static void
221 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
222 {
223
224         TAILQ_INIT(&object->memq);
225         LIST_INIT(&object->shadow_head);
226
227         object->type = type;
228         if (type == OBJT_SWAP)
229                 pctrie_init(&object->un_pager.swp.swp_blks);
230
231         /*
232          * Ensure that swap_pager_swapoff()'s iteration over the
233          * object_list sees an up-to-date type and pctrie head if
234          * it observes a non-dead object.
235          */
236         atomic_thread_fence_rel();
237
238         switch (type) {
239         case OBJT_DEAD:
240                 panic("_vm_object_allocate: can't create OBJT_DEAD");
241         case OBJT_DEFAULT:
242         case OBJT_SWAP:
243                 object->flags = OBJ_ONEMAPPING;
244                 break;
245         case OBJT_DEVICE:
246         case OBJT_SG:
247                 object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
248                 break;
249         case OBJT_MGTDEVICE:
250                 object->flags = OBJ_FICTITIOUS;
251                 break;
252         case OBJT_PHYS:
253                 object->flags = OBJ_UNMANAGED;
254                 break;
255         case OBJT_VNODE:
256                 object->flags = 0;
257                 break;
258         default:
259                 panic("_vm_object_allocate: type %d is undefined", type);
260         }
261         object->size = size;
262         object->generation = 1;
263         object->ref_count = 1;
264         object->memattr = VM_MEMATTR_DEFAULT;
265         object->cred = NULL;
266         object->charge = 0;
267         object->handle = NULL;
268         object->backing_object = NULL;
269         object->backing_object_offset = (vm_ooffset_t) 0;
270 #if VM_NRESERVLEVEL > 0
271         LIST_INIT(&object->rvq);
272 #endif
273         umtx_shm_object_init(object);
274 }
275
276 /*
277  *      vm_object_init:
278  *
279  *      Initialize the VM objects module.
280  */
281 void
282 vm_object_init(void)
283 {
284         TAILQ_INIT(&vm_object_list);
285         mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
286
287         rw_init(&kernel_object->lock, "kernel vm object");
288         _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
289             VM_MIN_KERNEL_ADDRESS), kernel_object);
290 #if VM_NRESERVLEVEL > 0
291         kernel_object->flags |= OBJ_COLORED;
292         kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
293 #endif
294
295         rw_init(&kmem_object->lock, "kmem vm object");
296         _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
297             VM_MIN_KERNEL_ADDRESS), kmem_object);
298 #if VM_NRESERVLEVEL > 0
299         kmem_object->flags |= OBJ_COLORED;
300         kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
301 #endif
302
303         /*
304          * The lock portion of struct vm_object must be type stable due
305          * to vm_pageout_fallback_object_lock locking a vm object
306          * without holding any references to it.
307          */
308         obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
309 #ifdef INVARIANTS
310             vm_object_zdtor,
311 #else
312             NULL,
313 #endif
314             vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
315
316         vm_radix_zinit();
317 }
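
/*
 * Illustrative sketch (not part of this file): because obj_zone is
 * created with UMA_ZONE_NOFREE, a vm_object's lock stays valid even
 * after the object has been freed back to the zone, which is what
 * lets vm_pageout_fallback_object_lock() lock an object it holds no
 * reference to.  A caller relying on this typically locks first and
 * revalidates afterwards ("object" here is hypothetical):
 *
 *      VM_OBJECT_WLOCK(object);
 *      if (object->type == OBJT_DEAD) {
 *              VM_OBJECT_WUNLOCK(object);
 *              return;         /* object was reclaimed; bail out */
 *      }
 */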
318
319 void
320 vm_object_clear_flag(vm_object_t object, u_short bits)
321 {
322
323         VM_OBJECT_ASSERT_WLOCKED(object);
324         object->flags &= ~bits;
325 }
326
327 /*
328  *      Sets the default memory attribute for the specified object.  Pages
329  *      that are allocated to this object are by default assigned this memory
330  *      attribute.
331  *
332  *      Presently, this function must be called before any pages are allocated
333  *      to the object.  In the future, this requirement may be relaxed for
334  *      "default" and "swap" objects.
335  */
336 int
337 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
338 {
339
340         VM_OBJECT_ASSERT_WLOCKED(object);
341         switch (object->type) {
342         case OBJT_DEFAULT:
343         case OBJT_DEVICE:
344         case OBJT_MGTDEVICE:
345         case OBJT_PHYS:
346         case OBJT_SG:
347         case OBJT_SWAP:
348         case OBJT_VNODE:
349                 if (!TAILQ_EMPTY(&object->memq))
350                         return (KERN_FAILURE);
351                 break;
352         case OBJT_DEAD:
353                 return (KERN_INVALID_ARGUMENT);
354         default:
355                 panic("vm_object_set_memattr: object %p is of undefined type",
356                     object);
357         }
358         object->memattr = memattr;
359         return (KERN_SUCCESS);
360 }
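
/*
 * Illustrative sketch (not part of this file): a device-backed
 * consumer might request write-combining mappings on a freshly
 * allocated object, before any pages exist, per the requirement
 * above.  The variable names are hypothetical, and
 * VM_MEMATTR_WRITE_COMBINING exists only on platforms that define it:
 *
 *      obj = vm_object_allocate(OBJT_DEVICE, atop(len));
 *      VM_OBJECT_WLOCK(obj);
 *      error = vm_object_set_memattr(obj, VM_MEMATTR_WRITE_COMBINING);
 *      VM_OBJECT_WUNLOCK(obj);
 *      if (error != KERN_SUCCESS)
 *              ... handle the failure ...
 */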
361
362 void
363 vm_object_pip_add(vm_object_t object, short i)
364 {
365
366         VM_OBJECT_ASSERT_WLOCKED(object);
367         object->paging_in_progress += i;
368 }
369
370 void
371 vm_object_pip_subtract(vm_object_t object, short i)
372 {
373
374         VM_OBJECT_ASSERT_WLOCKED(object);
375         object->paging_in_progress -= i;
376 }
377
378 void
379 vm_object_pip_wakeup(vm_object_t object)
380 {
381
382         VM_OBJECT_ASSERT_WLOCKED(object);
383         object->paging_in_progress--;
384         if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
385                 vm_object_clear_flag(object, OBJ_PIPWNT);
386                 wakeup(object);
387         }
388 }
389
390 void
391 vm_object_pip_wakeupn(vm_object_t object, short i)
392 {
393
394         VM_OBJECT_ASSERT_WLOCKED(object);
395         if (i)
396                 object->paging_in_progress -= i;
397         if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
398                 vm_object_clear_flag(object, OBJ_PIPWNT);
399                 wakeup(object);
400         }
401 }
402
403 void
404 vm_object_pip_wait(vm_object_t object, char *waitid)
405 {
406
407         VM_OBJECT_ASSERT_WLOCKED(object);
408         while (object->paging_in_progress) {
409                 object->flags |= OBJ_PIPWNT;
410                 VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
411         }
412 }
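
/*
 * Illustrative sketch (not part of this file): the pip ("paging in
 * progress") counter brackets page I/O so that vm_object_terminate()
 * and other pip waiters block until the I/O drains.  A pager-style
 * user would do roughly:
 *
 *      VM_OBJECT_WLOCK(object);
 *      vm_object_pip_add(object, 1);
 *      VM_OBJECT_WUNLOCK(object);
 *      ... start and complete the page I/O ...
 *      VM_OBJECT_WLOCK(object);
 *      vm_object_pip_wakeup(object);
 *      VM_OBJECT_WUNLOCK(object);
 */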
413
414 /*
415  *      vm_object_allocate:
416  *
417  *      Returns a new object with the given size.
418  */
419 vm_object_t
420 vm_object_allocate(objtype_t type, vm_pindex_t size)
421 {
422         vm_object_t object;
423
424         object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
425         _vm_object_allocate(type, size, object);
426         return (object);
427 }
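
/*
 * Illustrative sketch (not part of this file): a typical anonymous
 * object covering "len" bytes (both names hypothetical) is created
 * with:
 *
 *      vm_object_t obj;
 *
 *      obj = vm_object_allocate(OBJT_DEFAULT, atop(len));
 *
 * The new object carries a single reference, which is dropped with
 * vm_object_deallocate(obj) when the object is no longer needed.
 */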
428
429
430 /*
431  *      vm_object_reference:
432  *
433  *      Gets another reference to the given object.  Note: OBJ_DEAD
434  *      objects can be referenced during final cleaning.
435  */
436 void
437 vm_object_reference(vm_object_t object)
438 {
439         if (object == NULL)
440                 return;
441         VM_OBJECT_WLOCK(object);
442         vm_object_reference_locked(object);
443         VM_OBJECT_WUNLOCK(object);
444 }
445
446 /*
447  *      vm_object_reference_locked:
448  *
449  *      Gets another reference to the given object.
450  *
451  *      The object must be locked.
452  */
453 void
454 vm_object_reference_locked(vm_object_t object)
455 {
456         struct vnode *vp;
457
458         VM_OBJECT_ASSERT_WLOCKED(object);
459         object->ref_count++;
460         if (object->type == OBJT_VNODE) {
461                 vp = object->handle;
462                 vref(vp);
463         }
464 }
465
466 /*
467  * Handle deallocating an object of type OBJT_VNODE.
468  */
469 static void
470 vm_object_vndeallocate(vm_object_t object)
471 {
472         struct vnode *vp = (struct vnode *) object->handle;
473
474         VM_OBJECT_ASSERT_WLOCKED(object);
475         KASSERT(object->type == OBJT_VNODE,
476             ("vm_object_vndeallocate: not a vnode object"));
477         KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
478 #ifdef INVARIANTS
479         if (object->ref_count == 0) {
480                 vn_printf(vp, "vm_object_vndeallocate ");
481                 panic("vm_object_vndeallocate: bad object reference count");
482         }
483 #endif
484
485         if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
486                 umtx_shm_object_terminated(object);
487
488         /*
489          * The test of the vnode's VV_TEXT flag does not need any
490          * bypass to reach the right vnode, since vp is obtained
491          * directly from object->handle.
492          */
493         if (object->ref_count > 1 || (vp->v_vflag & VV_TEXT) == 0) {
494                 object->ref_count--;
495                 VM_OBJECT_WUNLOCK(object);
496                 /* vrele may need the vnode lock. */
497                 vrele(vp);
498         } else {
499                 vhold(vp);
500                 VM_OBJECT_WUNLOCK(object);
501                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
502                 vdrop(vp);
503                 VM_OBJECT_WLOCK(object);
504                 object->ref_count--;
505                 if (object->type == OBJT_DEAD) {
506                         VM_OBJECT_WUNLOCK(object);
507                         VOP_UNLOCK(vp, 0);
508                 } else {
509                         if (object->ref_count == 0)
510                                 VOP_UNSET_TEXT(vp);
511                         VM_OBJECT_WUNLOCK(object);
512                         vput(vp);
513                 }
514         }
515 }
516
517 /*
518  *      vm_object_deallocate:
519  *
520  *      Release a reference to the specified object,
521  *      gained either through a vm_object_allocate
522  *      or a vm_object_reference call.  When all references
523  *      are gone, storage associated with this object
524  *      may be relinquished.
525  *
526  *      No object may be locked.
527  */
528 void
529 vm_object_deallocate(vm_object_t object)
530 {
531         vm_object_t temp;
532         struct vnode *vp;
533
534         while (object != NULL) {
535                 VM_OBJECT_WLOCK(object);
536                 if (object->type == OBJT_VNODE) {
537                         vm_object_vndeallocate(object);
538                         return;
539                 }
540
541                 KASSERT(object->ref_count != 0,
542                         ("vm_object_deallocate: object deallocated too many times: type %d", object->type));
543
544                 /*
545                  * If the reference count goes to 0 we start calling
546                  * vm_object_terminate() on the object chain.
547                  * A ref count of 1 may be a special case depending on the
548                  * shadow count being 0 or 1.
549                  */
550                 object->ref_count--;
551                 if (object->ref_count > 1) {
552                         VM_OBJECT_WUNLOCK(object);
553                         return;
554                 } else if (object->ref_count == 1) {
555                         if (object->type == OBJT_SWAP &&
556                             (object->flags & OBJ_TMPFS) != 0) {
557                                 vp = object->un_pager.swp.swp_tmpfs;
558                                 vhold(vp);
559                                 VM_OBJECT_WUNLOCK(object);
560                                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
561                                 VM_OBJECT_WLOCK(object);
562                                 if (object->type == OBJT_DEAD ||
563                                     object->ref_count != 1) {
564                                         VM_OBJECT_WUNLOCK(object);
565                                         VOP_UNLOCK(vp, 0);
566                                         vdrop(vp);
567                                         return;
568                                 }
569                                 if ((object->flags & OBJ_TMPFS) != 0)
570                                         VOP_UNSET_TEXT(vp);
571                                 VOP_UNLOCK(vp, 0);
572                                 vdrop(vp);
573                         }
574                         if (object->shadow_count == 0 &&
575                             object->handle == NULL &&
576                             (object->type == OBJT_DEFAULT ||
577                             (object->type == OBJT_SWAP &&
578                             (object->flags & OBJ_TMPFS_NODE) == 0))) {
579                                 vm_object_set_flag(object, OBJ_ONEMAPPING);
580                         } else if ((object->shadow_count == 1) &&
581                             (object->handle == NULL) &&
582                             (object->type == OBJT_DEFAULT ||
583                              object->type == OBJT_SWAP)) {
584                                 vm_object_t robject;
585
586                                 robject = LIST_FIRST(&object->shadow_head);
587                                 KASSERT(robject != NULL,
588                                     ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
589                                          object->ref_count,
590                                          object->shadow_count));
591                                 KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0,
592                                     ("shadowed tmpfs v_object %p", object));
593                                 if (!VM_OBJECT_TRYWLOCK(robject)) {
594                                         /*
595                                          * Avoid a potential deadlock.
596                                          */
597                                         object->ref_count++;
598                                         VM_OBJECT_WUNLOCK(object);
599                                         /*
600                                          * More likely than not the thread
601                                          * holding robject's lock has lower
602                                          * priority than the current thread.
603                                          * Let the lower priority thread run.
604                                          */
605                                         pause("vmo_de", 1);
606                                         continue;
607                                 }
608                                 /*
609                                  * Collapse object into its shadow unless its
610                                  * shadow is dead.  In that case, object will
611                                  * be deallocated by the thread that is
612                                  * deallocating its shadow.
613                                  */
614                                 if ((robject->flags & OBJ_DEAD) == 0 &&
615                                     (robject->handle == NULL) &&
616                                     (robject->type == OBJT_DEFAULT ||
617                                      robject->type == OBJT_SWAP)) {
618
619                                         robject->ref_count++;
620 retry:
621                                         if (robject->paging_in_progress) {
622                                                 VM_OBJECT_WUNLOCK(object);
623                                                 vm_object_pip_wait(robject,
624                                                     "objde1");
625                                                 temp = robject->backing_object;
626                                                 if (object == temp) {
627                                                         VM_OBJECT_WLOCK(object);
628                                                         goto retry;
629                                                 }
630                                         } else if (object->paging_in_progress) {
631                                                 VM_OBJECT_WUNLOCK(robject);
632                                                 object->flags |= OBJ_PIPWNT;
633                                                 VM_OBJECT_SLEEP(object, object,
634                                                     PDROP | PVM, "objde2", 0);
635                                                 VM_OBJECT_WLOCK(robject);
636                                                 temp = robject->backing_object;
637                                                 if (object == temp) {
638                                                         VM_OBJECT_WLOCK(object);
639                                                         goto retry;
640                                                 }
641                                         } else
642                                                 VM_OBJECT_WUNLOCK(object);
643
644                                         if (robject->ref_count == 1) {
645                                                 robject->ref_count--;
646                                                 object = robject;
647                                                 goto doterm;
648                                         }
649                                         object = robject;
650                                         vm_object_collapse(object);
651                                         VM_OBJECT_WUNLOCK(object);
652                                         continue;
653                                 }
654                                 VM_OBJECT_WUNLOCK(robject);
655                         }
656                         VM_OBJECT_WUNLOCK(object);
657                         return;
658                 }
659 doterm:
660                 umtx_shm_object_terminated(object);
661                 temp = object->backing_object;
662                 if (temp != NULL) {
663                         KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
664                             ("shadowed tmpfs v_object 2 %p", object));
665                         VM_OBJECT_WLOCK(temp);
666                         LIST_REMOVE(object, shadow_list);
667                         temp->shadow_count--;
668                         VM_OBJECT_WUNLOCK(temp);
669                         object->backing_object = NULL;
670                 }
671                 /*
672                  * Don't double-terminate, we could be in a termination
673                  * recursion due to the terminate having to sync data
674                  * to disk.
675                  */
676                 if ((object->flags & OBJ_DEAD) == 0)
677                         vm_object_terminate(object);
678                 else
679                         VM_OBJECT_WUNLOCK(object);
680                 object = temp;
681         }
682 }
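
/*
 * Illustrative sketch (not part of this file): references taken with
 * vm_object_reference() are paired with vm_object_deallocate() calls;
 * the final release triggers vm_object_terminate().  Neither call is
 * made with the object lock held ("obj" is hypothetical):
 *
 *      vm_object_reference(obj);
 *      ... use obj ...
 *      vm_object_deallocate(obj);      /* may free obj */
 */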
683
684 /*
685  *      vm_object_destroy removes the object from the global object list
686  *      and frees the space for the object.
687  */
688 void
689 vm_object_destroy(vm_object_t object)
690 {
691
692         /*
693          * Release the allocation charge.
694          */
695         if (object->cred != NULL) {
696                 swap_release_by_cred(object->charge, object->cred);
697                 object->charge = 0;
698                 crfree(object->cred);
699                 object->cred = NULL;
700         }
701
702         /*
703          * Free the space for the object.
704          */
705         uma_zfree(obj_zone, object);
706 }
707
708 /*
709  *      vm_object_terminate_pages removes any remaining pageable pages
710  *      from the object and resets the object to an empty state.
711  */
712 static void
713 vm_object_terminate_pages(vm_object_t object)
714 {
715         vm_page_t p, p_next;
716         struct mtx *mtx, *mtx1;
717         struct vm_pagequeue *pq, *pq1;
718         int dequeued;
719
720         VM_OBJECT_ASSERT_WLOCKED(object);
721
722         mtx = NULL;
723         pq = NULL;
724
725         /*
726          * Free any remaining pageable pages.  This also removes them from the
727          * paging queues.  However, don't free wired pages, just remove them
728          * from the object.  Rather than incrementally removing each page from
729          * the object, the page and object are reset to an empty state.
730          */
731         TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
732                 vm_page_assert_unbusied(p);
733                 if ((object->flags & OBJ_UNMANAGED) == 0) {
734                         /*
735                          * vm_page_free_prep() only needs the page
736                          * lock for managed pages.
737                          */
738                         mtx1 = vm_page_lockptr(p);
739                         if (mtx1 != mtx) {
740                                 if (mtx != NULL)
741                                         mtx_unlock(mtx);
742                                 if (pq != NULL) {
743                                         vm_pagequeue_cnt_add(pq, dequeued);
744                                         vm_pagequeue_unlock(pq);
745                                         pq = NULL;
746                                 }
747                                 mtx = mtx1;
748                                 mtx_lock(mtx);
749                         }
750                 }
751                 p->object = NULL;
752                 if (p->wire_count != 0)
753                         goto unlist;
754                 VM_CNT_INC(v_pfree);
755                 p->flags &= ~PG_ZERO;
756                 if (p->queue != PQ_NONE) {
757                         KASSERT(p->queue < PQ_COUNT, ("vm_object_terminate: "
758                             "page %p is not queued", p));
759                         pq1 = vm_page_pagequeue(p);
760                         if (pq != pq1) {
761                                 if (pq != NULL) {
762                                         vm_pagequeue_cnt_add(pq, dequeued);
763                                         vm_pagequeue_unlock(pq);
764                                 }
765                                 pq = pq1;
766                                 vm_pagequeue_lock(pq);
767                                 dequeued = 0;
768                         }
769                         p->queue = PQ_NONE;
770                         TAILQ_REMOVE(&pq->pq_pl, p, plinks.q);
771                         dequeued--;
772                 }
773                 if (vm_page_free_prep(p, true))
774                         continue;
775 unlist:
776                 TAILQ_REMOVE(&object->memq, p, listq);
777         }
778         if (pq != NULL) {
779                 vm_pagequeue_cnt_add(pq, dequeued);
780                 vm_pagequeue_unlock(pq);
781         }
782         if (mtx != NULL)
783                 mtx_unlock(mtx);
784
785         vm_page_free_phys_pglist(&object->memq);
786
787         /*
788          * If the object contained any pages, then reset it to an empty state.
789          * None of the object's fields, including "resident_page_count", were
790          * modified by the preceding loop.
791          */
792         if (object->resident_page_count != 0) {
793                 vm_radix_reclaim_allnodes(&object->rtree);
794                 TAILQ_INIT(&object->memq);
795                 object->resident_page_count = 0;
796                 if (object->type == OBJT_VNODE)
797                         vdrop(object->handle);
798         }
799 }
800
801 /*
802  *      vm_object_terminate actually destroys the specified object, freeing
803  *      up all previously used resources.
804  *
805  *      The object must be locked.
806  *      This routine may block.
807  */
808 void
809 vm_object_terminate(vm_object_t object)
810 {
811
812         VM_OBJECT_ASSERT_WLOCKED(object);
813
814         /*
815          * Make sure no one uses us.
816          */
817         vm_object_set_flag(object, OBJ_DEAD);
818
819         /*
820          * wait for the pageout daemon to be done with the object
821          */
822         vm_object_pip_wait(object, "objtrm");
823
824         KASSERT(!object->paging_in_progress,
825                 ("vm_object_terminate: pageout in progress"));
826
827         /*
828          * Clean and free the pages, as appropriate. All references to the
829          * object are gone, so we don't need to lock it.
830          */
831         if (object->type == OBJT_VNODE) {
832                 struct vnode *vp = (struct vnode *)object->handle;
833
834                 /*
835                  * Clean pages and flush buffers.
836                  */
837                 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
838                 VM_OBJECT_WUNLOCK(object);
839
840                 vinvalbuf(vp, V_SAVE, 0, 0);
841
842                 BO_LOCK(&vp->v_bufobj);
843                 vp->v_bufobj.bo_flag |= BO_DEAD;
844                 BO_UNLOCK(&vp->v_bufobj);
845
846                 VM_OBJECT_WLOCK(object);
847         }
848
849         KASSERT(object->ref_count == 0, 
850                 ("vm_object_terminate: object with references, ref_count=%d",
851                 object->ref_count));
852
853         if ((object->flags & OBJ_PG_DTOR) == 0)
854                 vm_object_terminate_pages(object);
855
856 #if VM_NRESERVLEVEL > 0
857         if (__predict_false(!LIST_EMPTY(&object->rvq)))
858                 vm_reserv_break_all(object);
859 #endif
860
861         KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
862             object->type == OBJT_SWAP,
863             ("%s: non-swap obj %p has cred", __func__, object));
864
865         /*
866          * Let the pager know object is dead.
867          */
868         vm_pager_deallocate(object);
869         VM_OBJECT_WUNLOCK(object);
870
871         vm_object_destroy(object);
872 }
873
874 /*
875  * Make the page read-only so that we can clear the object flags.  However, if
876  * this is a nosync mmap then the object is likely to stay dirty so do not
877  * mess with the page and do not clear the object flags.  Returns TRUE if the
878  * page should be flushed, and FALSE otherwise.
879  */
880 static boolean_t
881 vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
882 {
883
884         /*
885          * If we have been asked to skip nosync pages and this is a
886          * nosync page, skip it.  Note that the object flags were not
887          * cleared in this case so we do not have to set them.
888          */
889         if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
890                 *clearobjflags = FALSE;
891                 return (FALSE);
892         } else {
893                 pmap_remove_write(p);
894                 return (p->dirty != 0);
895         }
896 }
897
898 /*
899  *      vm_object_page_clean
900  *
901  *      Clean all dirty pages in the specified range of the object.  Leaves
902  *      each page on whatever queue it is currently on.  If NOSYNC is set
903  *      then do not write out pages with VPO_NOSYNC set (originally set by
904  *      MAP_NOSYNC), leaving the object dirty.
905  *
906  *      When stuffing pages asynchronously, allow clustering.  XXX we need a
907  *      synchronous clustering mode implementation.
908  *
909  *      Odd semantics: if start == end, we clean everything.
910  *
911  *      The object must be locked.
912  *
913  *      Returns FALSE if some page from the range was not written, as
914  *      reported by the pager, and TRUE otherwise.
915  */
916 boolean_t
917 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
918     int flags)
919 {
920         vm_page_t np, p;
921         vm_pindex_t pi, tend, tstart;
922         int curgeneration, n, pagerflags;
923         boolean_t clearobjflags, eio, res;
924
925         VM_OBJECT_ASSERT_WLOCKED(object);
926
927         /*
928          * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE
929          * objects.  The check below prevents the function from
930          * operating on non-vnode objects.
931          */
932         if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
933             object->resident_page_count == 0)
934                 return (TRUE);
935
936         pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
937             VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
938         pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;
939
940         tstart = OFF_TO_IDX(start);
941         tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
942         clearobjflags = tstart == 0 && tend >= object->size;
943         res = TRUE;
944
945 rescan:
946         curgeneration = object->generation;
947
948         for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
949                 pi = p->pindex;
950                 if (pi >= tend)
951                         break;
952                 np = TAILQ_NEXT(p, listq);
953                 if (p->valid == 0)
954                         continue;
955                 if (vm_page_sleep_if_busy(p, "vpcwai")) {
956                         if (object->generation != curgeneration) {
957                                 if ((flags & OBJPC_SYNC) != 0)
958                                         goto rescan;
959                                 else
960                                         clearobjflags = FALSE;
961                         }
962                         np = vm_page_find_least(object, pi);
963                         continue;
964                 }
965                 if (!vm_object_page_remove_write(p, flags, &clearobjflags))
966                         continue;
967
968                 n = vm_object_page_collect_flush(object, p, pagerflags,
969                     flags, &clearobjflags, &eio);
970                 if (eio) {
971                         res = FALSE;
972                         clearobjflags = FALSE;
973                 }
974                 if (object->generation != curgeneration) {
975                         if ((flags & OBJPC_SYNC) != 0)
976                                 goto rescan;
977                         else
978                                 clearobjflags = FALSE;
979                 }
980
981                 /*
982                  * If VOP_PUTPAGES() did a truncated write, such that
983                  * even the first page of the run is not fully written,
984                  * vm_pageout_flush() returns 0 as the run length.
985                  * Since the condition that caused the truncated write
986                  * may be permanent, e.g. exhausted free space,
987                  * accepting n == 0 would cause an infinite loop.
988                  *
989                  * Forwarding the iterator leaves the unwritten page
990                  * behind, but there is not much we can do if the
991                  * filesystem refuses to write it.
992                  */
993                 if (n == 0) {
994                         n = 1;
995                         clearobjflags = FALSE;
996                 }
997                 np = vm_page_find_least(object, pi + n);
998         }
999 #if 0
1000         VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
1001 #endif
1002
1003         if (clearobjflags)
1004                 vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
1005         return (res);
1006 }
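
/*
 * Illustrative sketch (not part of this file): to synchronously write
 * back every dirty page of a vnode object, as vm_object_terminate()
 * does, the whole range is expressed as start == end == 0 and the
 * object lock is held across the call:
 *
 *      VM_OBJECT_WLOCK(object);
 *      (void)vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 *      VM_OBJECT_WUNLOCK(object);
 */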
1007
1008 static int
1009 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
1010     int flags, boolean_t *clearobjflags, boolean_t *eio)
1011 {
1012         vm_page_t ma[vm_pageout_page_count], p_first, tp;
1013         int count, i, mreq, runlen;
1014
1015         vm_page_lock_assert(p, MA_NOTOWNED);
1016         VM_OBJECT_ASSERT_WLOCKED(object);
1017
1018         count = 1;
1019         mreq = 0;
1020
1021         for (tp = p; count < vm_pageout_page_count; count++) {
1022                 tp = vm_page_next(tp);
1023                 if (tp == NULL || vm_page_busied(tp))
1024                         break;
1025                 if (!vm_object_page_remove_write(tp, flags, clearobjflags))
1026                         break;
1027         }
1028
1029         for (p_first = p; count < vm_pageout_page_count; count++) {
1030                 tp = vm_page_prev(p_first);
1031                 if (tp == NULL || vm_page_busied(tp))
1032                         break;
1033                 if (!vm_object_page_remove_write(tp, flags, clearobjflags))
1034                         break;
1035                 p_first = tp;
1036                 mreq++;
1037         }
1038
1039         for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
1040                 ma[i] = tp;
1041
1042         vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
1043         return (runlen);
1044 }
1045
1046 /*
1047  * Note that there is absolutely no sense in writing out
1048  * anonymous objects, so we track down the vnode object
1049  * to write out.
1050  * We invalidate (remove) all pages from the address space
1051  * for semantic correctness.
1052  *
1053  * If the backing object is a device object with unmanaged pages, then any
1054  * mappings to the specified range of pages must be removed before this
1055  * function is called.
1056  *
1057  * Note: certain anonymous maps, such as MAP_NOSYNC maps,
1058  * may start out with a NULL object.
1059  */
1060 boolean_t
1061 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
1062     boolean_t syncio, boolean_t invalidate)
1063 {
1064         vm_object_t backing_object;
1065         struct vnode *vp;
1066         struct mount *mp;
1067         int error, flags, fsync_after;
1068         boolean_t res;
1069
1070         if (object == NULL)
1071                 return (TRUE);
1072         res = TRUE;
1073         error = 0;
1074         VM_OBJECT_WLOCK(object);
1075         while ((backing_object = object->backing_object) != NULL) {
1076                 VM_OBJECT_WLOCK(backing_object);
1077                 offset += object->backing_object_offset;
1078                 VM_OBJECT_WUNLOCK(object);
1079                 object = backing_object;
1080                 if (object->size < OFF_TO_IDX(offset + size))
1081                         size = IDX_TO_OFF(object->size) - offset;
1082         }
1083         /*
1084          * Flush pages if writing is allowed, invalidate them
1085          * if invalidation requested.  Pages undergoing I/O
1086          * will be ignored by vm_object_page_remove().
1087          *
1088          * We cannot lock the vnode and then wait for paging
1089          * to complete without deadlocking against vm_fault.
1090          * Instead we simply call vm_object_page_remove() and
1091          * allow it to block internally on a page-by-page
1092          * basis when it encounters pages undergoing async
1093          * I/O.
1094          */
1095         if (object->type == OBJT_VNODE &&
1096             (object->flags & OBJ_MIGHTBEDIRTY) != 0 &&
1097             ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
1098                 VM_OBJECT_WUNLOCK(object);
1099                 (void) vn_start_write(vp, &mp, V_WAIT);
1100                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1101                 if (syncio && !invalidate && offset == 0 &&
1102                     atop(size) == object->size) {
1103                         /*
1104                          * If syncing the whole mapping of the file,
1105                          * it is faster to schedule all the writes in
1106                          * async mode, also allowing the clustering,
1107                          * and then wait for i/o to complete.
1108                          */
1109                         flags = 0;
1110                         fsync_after = TRUE;
1111                 } else {
1112                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1113                         flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
1114                         fsync_after = FALSE;
1115                 }
1116                 VM_OBJECT_WLOCK(object);
1117                 res = vm_object_page_clean(object, offset, offset + size,
1118                     flags);
1119                 VM_OBJECT_WUNLOCK(object);
1120                 if (fsync_after)
1121                         error = VOP_FSYNC(vp, MNT_WAIT, curthread);
1122                 VOP_UNLOCK(vp, 0);
1123                 vn_finished_write(mp);
1124                 if (error != 0)
1125                         res = FALSE;
1126                 VM_OBJECT_WLOCK(object);
1127         }
1128         if ((object->type == OBJT_VNODE ||
1129              object->type == OBJT_DEVICE) && invalidate) {
1130                 if (object->type == OBJT_DEVICE)
1131                         /*
1132                          * The option OBJPR_NOTMAPPED must be passed here
1133                          * because vm_object_page_remove() cannot remove
1134                          * unmanaged mappings.
1135                          */
1136                         flags = OBJPR_NOTMAPPED;
1137                 else if (old_msync)
1138                         flags = 0;
1139                 else
1140                         flags = OBJPR_CLEANONLY;
1141                 vm_object_page_remove(object, OFF_TO_IDX(offset),
1142                     OFF_TO_IDX(offset + size + PAGE_MASK), flags);
1143         }
1144         VM_OBJECT_WUNLOCK(object);
1145         return (res);
1146 }
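
/*
 * Illustrative sketch (not part of this file): the msync(2) path in
 * the map layer calls this routine per map entry, approximately as
 * below; the entry-derived offset and size are schematic:
 *
 *      res = vm_object_sync(entry->object.vm_object, entry->offset,
 *          (vm_size_t)(entry->end - entry->start), syncio, invalidate);
 */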
1147
1148 /*
1149  * Determine whether the given advice can be applied to the object.  Advice is
1150  * not applied to unmanaged pages since they never belong to page queues, and
1151  * since MADV_FREE is destructive, it can apply only to anonymous pages that
1152  * have been mapped at most once.
1153  */
1154 static bool
1155 vm_object_advice_applies(vm_object_t object, int advice)
1156 {
1157
1158         if ((object->flags & OBJ_UNMANAGED) != 0)
1159                 return (false);
1160         if (advice != MADV_FREE)
1161                 return (true);
1162         return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) &&
1163             (object->flags & OBJ_ONEMAPPING) != 0);
1164 }
1165
1166 static void
1167 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
1168     vm_size_t size)
1169 {
1170
1171         if (advice == MADV_FREE && object->type == OBJT_SWAP)
1172                 swap_pager_freespace(object, pindex, size);
1173 }
1174
1175 /*
1176  *      vm_object_madvise:
1177  *
1178  *      Implements the madvise function at the object/page level.
1179  *
1180  *      MADV_WILLNEED   (any object)
1181  *
1182  *          Activate the specified pages if they are resident.
1183  *
1184  *      MADV_DONTNEED   (any object)
1185  *
1186  *          Deactivate the specified pages if they are resident.
1187  *
1188  *      MADV_FREE       (OBJT_DEFAULT/OBJT_SWAP objects,
1189  *                       OBJ_ONEMAPPING only)
1190  *
1191  *          Deactivate and clean the specified pages if they are
1192  *          resident.  This permits the process to reuse the pages
1193  *          without faulting or the kernel to reclaim the pages
1194  *          without I/O.
1195  */
1196 void
1197 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
1198     int advice)
1199 {
1200         vm_pindex_t tpindex;
1201         vm_object_t backing_object, tobject;
1202         vm_page_t m, tm;
1203
1204         if (object == NULL)
1205                 return;
1206
1207 relookup:
1208         VM_OBJECT_WLOCK(object);
1209         if (!vm_object_advice_applies(object, advice)) {
1210                 VM_OBJECT_WUNLOCK(object);
1211                 return;
1212         }
1213         for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
1214                 tobject = object;
1215
1216                 /*
1217                  * If the next page isn't resident in the top-level object, we
1218                  * need to search the shadow chain.  When applying MADV_FREE, we
1219                  * take care to release any swap space used to store
1220                  * non-resident pages.
1221                  */
1222                 if (m == NULL || pindex < m->pindex) {
1223                         /*
1224                          * Optimize a common case: if the top-level object has
1225                          * no backing object, we can skip over the non-resident
1226                          * range in constant time.
1227                          */
1228                         if (object->backing_object == NULL) {
1229                                 tpindex = (m != NULL && m->pindex < end) ?
1230                                     m->pindex : end;
1231                                 vm_object_madvise_freespace(object, advice,
1232                                     pindex, tpindex - pindex);
1233                                 if ((pindex = tpindex) == end)
1234                                         break;
1235                                 goto next_page;
1236                         }
1237
1238                         tpindex = pindex;
1239                         do {
1240                                 vm_object_madvise_freespace(tobject, advice,
1241                                     tpindex, 1);
1242                                 /*
1243                                  * Prepare to search the next object in the
1244                                  * chain.
1245                                  */
1246                                 backing_object = tobject->backing_object;
1247                                 if (backing_object == NULL)
1248                                         goto next_pindex;
1249                                 VM_OBJECT_WLOCK(backing_object);
1250                                 tpindex +=
1251                                     OFF_TO_IDX(tobject->backing_object_offset);
1252                                 if (tobject != object)
1253                                         VM_OBJECT_WUNLOCK(tobject);
1254                                 tobject = backing_object;
1255                                 if (!vm_object_advice_applies(tobject, advice))
1256                                         goto next_pindex;
1257                         } while ((tm = vm_page_lookup(tobject, tpindex)) ==
1258                             NULL);
1259                 } else {
1260 next_page:
1261                         tm = m;
1262                         m = TAILQ_NEXT(m, listq);
1263                 }
1264
1265                 /*
1266                  * If the page is not in a normal state, skip it.
1267                  */
1268                 if (tm->valid != VM_PAGE_BITS_ALL)
1269                         goto next_pindex;
1270                 vm_page_lock(tm);
1271                 if (tm->hold_count != 0 || tm->wire_count != 0) {
1272                         vm_page_unlock(tm);
1273                         goto next_pindex;
1274                 }
1275                 KASSERT((tm->flags & PG_FICTITIOUS) == 0,
1276                     ("vm_object_madvise: page %p is fictitious", tm));
1277                 KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
1278                     ("vm_object_madvise: page %p is not managed", tm));
1279                 if (vm_page_busied(tm)) {
1280                         if (object != tobject)
1281                                 VM_OBJECT_WUNLOCK(tobject);
1282                         VM_OBJECT_WUNLOCK(object);
1283                         if (advice == MADV_WILLNEED) {
1284                                 /*
1285                                  * Reference the page before unlocking and
1286                                  * sleeping so that the page daemon is less
1287                                  * likely to reclaim it.
1288                                  */
1289                                 vm_page_aflag_set(tm, PGA_REFERENCED);
1290                         }
1291                         vm_page_busy_sleep(tm, "madvpo", false);
1292                         goto relookup;
1293                 }
1294                 vm_page_advise(tm, advice);
1295                 vm_page_unlock(tm);
1296                 vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
1297 next_pindex:
1298                 if (tobject != object)
1299                         VM_OBJECT_WUNLOCK(tobject);
1300         }
1301         VM_OBJECT_WUNLOCK(object);
1302 }
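
/*
 * Illustrative sketch (not part of this file): madvise(2) reaches this
 * function through the map layer with a pindex range derived from the
 * user's address range, e.g. for MADV_WILLNEED:
 *
 *      vm_object_madvise(object, pindex, pindex + atop(len),
 *          MADV_WILLNEED);
 *
 * The object is passed unlocked; the function acquires the lock
 * itself.
 */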
1303
1304 /*
1305  *      vm_object_shadow:
1306  *
1307  *      Create a new object which is backed by the
1308  *      specified existing object range.  The source
1309  *      object reference is deallocated.
1310  *
1311  *      The new object and offset into that object
1312  *      are returned in the source parameters.
1313  */
1314 void
1315 vm_object_shadow(
1316         vm_object_t *object,    /* IN/OUT */
1317         vm_ooffset_t *offset,   /* IN/OUT */
1318         vm_size_t length)
1319 {
1320         vm_object_t source;
1321         vm_object_t result;
1322
1323         source = *object;
1324
1325         /*
1326          * Don't create the new object if the old object isn't shared.
1327          */
1328         if (source != NULL) {
1329                 VM_OBJECT_WLOCK(source);
1330                 if (source->ref_count == 1 &&
1331                     source->handle == NULL &&
1332                     (source->type == OBJT_DEFAULT ||
1333                      source->type == OBJT_SWAP)) {
1334                         VM_OBJECT_WUNLOCK(source);
1335                         return;
1336                 }
1337                 VM_OBJECT_WUNLOCK(source);
1338         }
1339
1340         /*
1341          * Allocate a new object with the given length.
1342          */
1343         result = vm_object_allocate(OBJT_DEFAULT, atop(length));
1344
1345         /*
1346          * The new object shadows the source object, adding a reference to it.
1347          * Our caller changes his reference to point to the new object,
1348          * removing a reference to the source object.  Net result: no change
1349          * of reference count.
1350          *
1351          * Try to optimize the result object's page color when shadowing
1352          * in order to maintain page coloring consistency in the combined 
1353          * shadowed object.
1354          */
1355         result->backing_object = source;
1356         /*
1357          * Store the offset into the source object, and fix up the offset into
1358          * the new object.
1359          */
1360         result->backing_object_offset = *offset;
1361         if (source != NULL) {
1362                 VM_OBJECT_WLOCK(source);
1363                 LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1364                 source->shadow_count++;
1365 #if VM_NRESERVLEVEL > 0
1366                 result->flags |= source->flags & OBJ_COLORED;
1367                 result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
1368                     ((1 << (VM_NFREEORDER - 1)) - 1);
1369 #endif
1370                 VM_OBJECT_WUNLOCK(source);
1371         }
1372
1373
1374         /*
1375          * Return the new object and the offset into it.
1376          */
1377         *offset = 0;
1378         *object = result;
1379 }
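
/*
 * Illustrative sketch (not part of this file): copy-on-write setup in
 * the map layer hands this function the entry's current object and
 * offset, which it replaces with the new shadow object, roughly:
 *
 *      vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *          entry->end - entry->start);
 */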
1380
1381 /*
1382  *      vm_object_split:
1383  *
1384  * Split the pages in a map entry into a new object.  This affords
1385  * easier removal of unused pages, and keeps object inheritance from
1386  * being a negative impact on memory usage.
1387  */
1388 void
1389 vm_object_split(vm_map_entry_t entry)
1390 {
1391         vm_page_t m, m_next;
1392         vm_object_t orig_object, new_object, source;
1393         vm_pindex_t idx, offidxstart;
1394         vm_size_t size;
1395
1396         orig_object = entry->object.vm_object;
1397         if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
1398                 return;
1399         if (orig_object->ref_count <= 1)
1400                 return;
1401         VM_OBJECT_WUNLOCK(orig_object);
1402
1403         offidxstart = OFF_TO_IDX(entry->offset);
1404         size = atop(entry->end - entry->start);
1405
1406         /*
1407          * If swap_pager_copy() is later called, it will convert new_object
1408          * into a swap object.
1409          */
1410         new_object = vm_object_allocate(OBJT_DEFAULT, size);
1411
1412         /*
1413          * At this point, the new object is still private, so the order in
1414          * which the original and new objects are locked does not matter.
1415          */
1416         VM_OBJECT_WLOCK(new_object);
1417         VM_OBJECT_WLOCK(orig_object);
1418         source = orig_object->backing_object;
1419         if (source != NULL) {
1420                 VM_OBJECT_WLOCK(source);
1421                 if ((source->flags & OBJ_DEAD) != 0) {
1422                         VM_OBJECT_WUNLOCK(source);
1423                         VM_OBJECT_WUNLOCK(orig_object);
1424                         VM_OBJECT_WUNLOCK(new_object);
1425                         vm_object_deallocate(new_object);
1426                         VM_OBJECT_WLOCK(orig_object);
1427                         return;
1428                 }
1429                 LIST_INSERT_HEAD(&source->shadow_head,
1430                                   new_object, shadow_list);
1431                 source->shadow_count++;
1432                 vm_object_reference_locked(source);     /* for new_object */
1433                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
1434                 VM_OBJECT_WUNLOCK(source);
1435                 new_object->backing_object_offset = 
1436                         orig_object->backing_object_offset + entry->offset;
1437                 new_object->backing_object = source;
1438         }
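             /*
              * Transfer a proportional part of the swap accounting charge
              * (ptoa(size) bytes) from the original object to the new one,
              * under the same credential.
              */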
1439         if (orig_object->cred != NULL) {
1440                 new_object->cred = orig_object->cred;
1441                 crhold(orig_object->cred);
1442                 new_object->charge = ptoa(size);
1443                 KASSERT(orig_object->charge >= ptoa(size),
1444                     ("orig_object->charge < ptoa(size)"));
1445                 orig_object->charge -= ptoa(size);
1446         }
1447 retry:
1448         m = vm_page_find_least(orig_object, offidxstart);
1449         for (; m != NULL && (idx = m->pindex - offidxstart) < size;
1450             m = m_next) {
1451                 m_next = TAILQ_NEXT(m, listq);
1452
1453                 /*
1454                  * We must wait for pending I/O to complete before we can
1455                  * rename the page.
1456                  *
1457                  * We do not have to VM_PROT_NONE the page as mappings should
1458                  * not be changed by this operation.
1459                  */
1460                 if (vm_page_busied(m)) {
1461                         VM_OBJECT_WUNLOCK(new_object);
1462                         vm_page_lock(m);
1463                         VM_OBJECT_WUNLOCK(orig_object);
1464                         vm_page_busy_sleep(m, "spltwt", false);
1465                         VM_OBJECT_WLOCK(orig_object);
1466                         VM_OBJECT_WLOCK(new_object);
1467                         goto retry;
1468                 }
1469
1470                 /* vm_page_rename() will dirty the page. */
1471                 if (vm_page_rename(m, new_object, idx)) {
1472                         VM_OBJECT_WUNLOCK(new_object);
1473                         VM_OBJECT_WUNLOCK(orig_object);
1474                         VM_WAIT;
1475                         VM_OBJECT_WLOCK(orig_object);
1476                         VM_OBJECT_WLOCK(new_object);
1477                         goto retry;
1478                 }
1479 #if VM_NRESERVLEVEL > 0
1480                 /*
1481                  * If some of the reservation's allocated pages remain with
1482                  * the original object, then transferring the reservation to
1483                  * the new object is neither particularly beneficial nor
1484                  * particularly harmful as compared to leaving the reservation
1485                  * with the original object.  If, however, all of the
1486                  * reservation's allocated pages are transferred to the new
1487                  * object, then transferring the reservation is typically
1488                  * beneficial.  Determining which of these two cases applies
1489                  * would be more costly than unconditionally renaming the
1490                  * reservation.
1491                  */
1492                 vm_reserv_rename(m, new_object, orig_object, offidxstart);
1493 #endif
1494                 if (orig_object->type == OBJT_SWAP)
1495                         vm_page_xbusy(m);
1496         }
1497         if (orig_object->type == OBJT_SWAP) {
1498                 /*
1499                  * swap_pager_copy() can sleep, in which case the orig_object's
1500                  * and new_object's locks are released and reacquired. 
1501                  */
1502                 swap_pager_copy(orig_object, new_object, offidxstart, 0);
1503                 TAILQ_FOREACH(m, &new_object->memq, listq)
1504                         vm_page_xunbusy(m);
1505         }
1506         VM_OBJECT_WUNLOCK(orig_object);
1507         VM_OBJECT_WUNLOCK(new_object);
1508         entry->object.vm_object = new_object;
1509         entry->offset = 0LL;
1510         vm_object_deallocate(orig_object);
1511         VM_OBJECT_WLOCK(new_object);
1512 }
1513
1514 #define OBSC_COLLAPSE_NOWAIT    0x0002
1515 #define OBSC_COLLAPSE_WAIT      0x0004
1516
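     /*
      * Helper for the collapse scans: resolve a blocking condition.  With
      * OBSC_COLLAPSE_NOWAIT, this never sleeps and simply returns the
      * caller's next page.  Otherwise, both object locks are dropped and
      * the thread sleeps, either for free pages (p == NULL) or until the
      * busy page "p" is released.  Because the locks were dropped, the
      * backing object's page queue may have changed, so the scan restarts
      * from the head of the queue.
      */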
1517 static vm_page_t
1518 vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next,
1519     int op)
1520 {
1521         vm_object_t backing_object;
1522
1523         VM_OBJECT_ASSERT_WLOCKED(object);
1524         backing_object = object->backing_object;
1525         VM_OBJECT_ASSERT_WLOCKED(backing_object);
1526
1527         KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p));
1528         KASSERT(p == NULL || p->object == object || p->object == backing_object,
1529             ("invalid ownership %p %p %p", p, object, backing_object));
1530         if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
1531                 return (next);
1532         if (p != NULL)
1533                 vm_page_lock(p);
1534         VM_OBJECT_WUNLOCK(object);
1535         VM_OBJECT_WUNLOCK(backing_object);
1536         if (p == NULL)
1537                 VM_WAIT;
1538         else
1539                 vm_page_busy_sleep(p, "vmocol", false);
1540         VM_OBJECT_WLOCK(object);
1541         VM_OBJECT_WLOCK(backing_object);
1542         return (TAILQ_FIRST(&backing_object->memq));
1543 }
1544
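     /*
      * Determine whether the parent object completely shadows the range
      * of its backing object that it maps: for every backing-object page
      * (resident or present in its pager) that is visible through the
      * parent, the parent itself must have a valid page or a pager-backed
      * copy at the corresponding index.  The parent index for a backing
      * page at index pi is
      *
      *      new_pindex = pi - OFF_TO_IDX(object->backing_object_offset)
      *
      * so, for example, with a backing offset of four pages, backing page
      * 6 is checked against parent page 2.  Returns false as soon as an
      * unshadowed page is found.
      */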
1545 static bool
1546 vm_object_scan_all_shadowed(vm_object_t object)
1547 {
1548         vm_object_t backing_object;
1549         vm_page_t p, pp;
1550         vm_pindex_t backing_offset_index, new_pindex, pi, ps;
1551
1552         VM_OBJECT_ASSERT_WLOCKED(object);
1553         VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
1554
1555         backing_object = object->backing_object;
1556
1557         if (backing_object->type != OBJT_DEFAULT &&
1558             backing_object->type != OBJT_SWAP)
1559                 return (false);
1560
1561         pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1562         p = vm_page_find_least(backing_object, pi);
1563         ps = swap_pager_find_least(backing_object, pi);
1564
1565         /*
1566          * Only check pages inside the parent object's range and
1567          * inside the parent object's mapping of the backing object.
1568          */
1569         for (;; pi++) {
1570                 if (p != NULL && p->pindex < pi)
1571                         p = TAILQ_NEXT(p, listq);
1572                 if (ps < pi)
1573                         ps = swap_pager_find_least(backing_object, pi);
1574                 if (p == NULL && ps >= backing_object->size)
1575                         break;
1576                 else if (p == NULL)
1577                         pi = ps;
1578                 else
1579                         pi = MIN(p->pindex, ps);
1580
1581                 new_pindex = pi - backing_offset_index;
1582                 if (new_pindex >= object->size)
1583                         break;
1584
1585                 /*
1586                  * See if the parent has the page or if the parent's object
1587                  * pager has the page.  If the parent has the page but the page
1588                  * is not valid, the parent's object pager must have the page.
1589                  *
1590                  * If this fails, the parent does not completely shadow the
1591                  * object and we might as well give up now.
1592                  */
1593                 pp = vm_page_lookup(object, new_pindex);
1594                 if ((pp == NULL || pp->valid == 0) &&
1595                     !vm_pager_has_page(object, new_pindex, NULL, NULL))
1596                         return (false);
1597         }
1598         return (true);
1599 }
1600
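     /*
      * Move the backing object's pages into the parent: pages that fall
      * outside the parent's view of the backing object are freed; pages
      * that the parent already shadows (with a resident page or through
      * its pager) are freed as redundant; the remainder are renamed into
      * the parent at the translated index.  A busy page causes the scan
      * to wait (or, with OBSC_COLLAPSE_NOWAIT, to be skipped) and the
      * queue position to be re-resolved.
      */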
1601 static bool
1602 vm_object_collapse_scan(vm_object_t object, int op)
1603 {
1604         vm_object_t backing_object;
1605         vm_page_t next, p, pp;
1606         vm_pindex_t backing_offset_index, new_pindex;
1607
1608         VM_OBJECT_ASSERT_WLOCKED(object);
1609         VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
1610
1611         backing_object = object->backing_object;
1612         backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1613
1614         /*
1615          * Initial conditions: a waiting (full) collapse will destroy
              * the backing object, so mark it dead before the scan begins.
1616          */
1617         if ((op & OBSC_COLLAPSE_WAIT) != 0)
1618                 vm_object_set_flag(backing_object, OBJ_DEAD);
1619
1620         /*
1621          * Scan the backing object's resident pages, freeing each page
              * or moving it into the parent object as appropriate.
1622          */
1623         for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
1624                 next = TAILQ_NEXT(p, listq);
1625                 new_pindex = p->pindex - backing_offset_index;
1626
1627                 /*
1628                  * Check for busy page
1629                  */
1630                 if (vm_page_busied(p)) {
1631                         next = vm_object_collapse_scan_wait(object, p, next, op);
1632                         continue;
1633                 }
1634
1635                 KASSERT(p->object == backing_object,
1636                     ("vm_object_collapse_scan: object mismatch"));
1637
1638                 if (p->pindex < backing_offset_index ||
1639                     new_pindex >= object->size) {
1640                         if (backing_object->type == OBJT_SWAP)
1641                                 swap_pager_freespace(backing_object, p->pindex,
1642                                     1);
1643
1644                         /*
1645                          * Page is out of the parent object's range, we can
1646                          * simply destroy it.
1647                          */
1648                         vm_page_lock(p);
1649                         KASSERT(!pmap_page_is_mapped(p),
1650                             ("freeing mapped page %p", p));
1651                         if (p->wire_count == 0)
1652                                 vm_page_free(p);
1653                         else
1654                                 vm_page_remove(p);
1655                         vm_page_unlock(p);
1656                         continue;
1657                 }
1658
1659                 pp = vm_page_lookup(object, new_pindex);
1660                 if (pp != NULL && vm_page_busied(pp)) {
1661                         /*
1662                          * The page in the parent is busy and possibly not
1663                          * (yet) valid.  Until its state is finalized by the
1664                          * busy bit owner, we can't tell whether it shadows the
1665                          * original page.  Therefore, we must either skip it
1666                          * and the original (backing_object) page or wait for
1667                          * its state to be finalized.
1668                          *
1669                          * This is due to a race with vm_fault() where we must
1670                          * unbusy the original (backing_obj) page before we can
1671                          * (re)lock the parent.  Hence we can get here.
1672                          */
1673                         next = vm_object_collapse_scan_wait(object, pp, next,
1674                             op);
1675                         continue;
1676                 }
1677
1678                 KASSERT(pp == NULL || pp->valid != 0,
1679                     ("unbusy invalid page %p", pp));
1680
1681                 if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
1682                         NULL)) {
1683                         /*
1684                          * The page already exists in the parent OR swap exists
1685                          * for this location in the parent.  Leave the parent's
1686                          * page alone.  Destroy the original page from the
1687                          * backing object.
1688                          */
1689                         if (backing_object->type == OBJT_SWAP)
1690                                 swap_pager_freespace(backing_object, p->pindex,
1691                                     1);
1692                         vm_page_lock(p);
1693                         KASSERT(!pmap_page_is_mapped(p),
1694                             ("freeing mapped page %p", p));
1695                         if (p->wire_count == 0)
1696                                 vm_page_free(p);
1697                         else
1698                                 vm_page_remove(p);
1699                         vm_page_unlock(p);
1700                         continue;
1701                 }
1702
1703                 /*
1704                  * Page does not exist in parent, rename the page from the
1705                  * backing object to the main object.
1706                  *
1707                  * If the page was mapped to a process, it can remain mapped
1708                  * through the rename.  vm_page_rename() will dirty the page.
1709                  */
1710                 if (vm_page_rename(p, object, new_pindex)) {
1711                         next = vm_object_collapse_scan_wait(object, NULL, next,
1712                             op);
1713                         continue;
1714                 }
1715
1716                 /* Use the old pindex to free the right page. */
1717                 if (backing_object->type == OBJT_SWAP)
1718                         swap_pager_freespace(backing_object,
1719                             new_pindex + backing_offset_index, 1);
1720
1721 #if VM_NRESERVLEVEL > 0
1722                 /*
1723                  * Rename the reservation.
1724                  */
1725                 vm_reserv_rename(p, object, backing_object,
1726                     backing_offset_index);
1727 #endif
1728         }
1729         return (true);
1730 }
1731
1732
1733 /*
1734  * This version of collapse allows the operation to occur earlier,
1735  * while paging is still in progress on the object.  It is not a
1736  * complete operation, but it plugs the bulk of the remaining leaks.
1737  */
1738 static void
1739 vm_object_qcollapse(vm_object_t object)
1740 {
1741         vm_object_t backing_object = object->backing_object;
1742
1743         VM_OBJECT_ASSERT_WLOCKED(object);
1744         VM_OBJECT_ASSERT_WLOCKED(backing_object);
1745
1746         if (backing_object->ref_count != 1)
1747                 return;
1748
1749         vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT);
1750 }
1751
1752 /*
1753  *      vm_object_collapse:
1754  *
1755  *      Collapse an object with the object backing it.
1756  *      Pages in the backing object are moved into the
1757  *      parent, and the backing object is deallocated.
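      *
      *      Two outcomes are possible on each iteration: a full collapse,
      *      when this object holds the only reference to its backing
      *      object, or a bypass, when the parent already shadows every
      *      backing page and can simply reference the backing object's
      *      own backer instead.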
1758  */
1759 void
1760 vm_object_collapse(vm_object_t object)
1761 {
1762         vm_object_t backing_object, new_backing_object;
1763
1764         VM_OBJECT_ASSERT_WLOCKED(object);
1765
1766         while (TRUE) {
1767                 /*
1768                  * Verify that the conditions are right for collapse:
1769                  *
1770                  * The object exists and the backing object exists.
1771                  */
1772                 if ((backing_object = object->backing_object) == NULL)
1773                         break;
1774
1775                 /*
1776                  * We check the backing object first because it is the
1777                  * most likely of the two not to be collapsible.
1778                  */
1779                 VM_OBJECT_WLOCK(backing_object);
1780                 if (backing_object->handle != NULL ||
1781                     (backing_object->type != OBJT_DEFAULT &&
1782                      backing_object->type != OBJT_SWAP) ||
1783                     (backing_object->flags & OBJ_DEAD) ||
1784                     object->handle != NULL ||
1785                     (object->type != OBJT_DEFAULT &&
1786                      object->type != OBJT_SWAP) ||
1787                     (object->flags & OBJ_DEAD)) {
1788                         VM_OBJECT_WUNLOCK(backing_object);
1789                         break;
1790                 }
1791
1792                 if (object->paging_in_progress != 0 ||
1793                     backing_object->paging_in_progress != 0) {
1794                         vm_object_qcollapse(object);
1795                         VM_OBJECT_WUNLOCK(backing_object);
1796                         break;
1797                 }
1798
1799                 /*
1800                  * We know that we can either collapse the backing object (if
1801                  * the parent is the only reference to it) or (perhaps) have
1802                  * the parent bypass the object if the parent happens to shadow
1803                  * all the resident pages in the entire backing object.
1804                  *
1805                  * This is ignoring pager-backed pages such as swap pages.
1806                  * vm_object_collapse_scan fails the shadowing test in this
1807                  * case.
1808                  */
1809                 if (backing_object->ref_count == 1) {
1810                         vm_object_pip_add(object, 1);
1811                         vm_object_pip_add(backing_object, 1);
1812
1813                         /*
1814                          * If there is exactly one reference to the backing
1815                          * object, we can collapse it into the parent.
1816                          */
1817                         vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT);
1818
1819 #if VM_NRESERVLEVEL > 0
1820                         /*
1821                          * Break any reservations from backing_object.
1822                          */
1823                         if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
1824                                 vm_reserv_break_all(backing_object);
1825 #endif
1826
1827                         /*
1828                          * Move the pager from backing_object to object.
1829                          */
1830                         if (backing_object->type == OBJT_SWAP) {
1831                                 /*
1832                                  * swap_pager_copy() can sleep, in which case
1833                                  * the backing_object's and object's locks are
1834                                  * released and reacquired.
1835                                  * Since swap_pager_copy() is being asked to
1836                                  * destroy the source, it will change the
1837                                  * backing_object's type to OBJT_DEFAULT.
1838                                  */
1839                                 swap_pager_copy(
1840                                     backing_object,
1841                                     object,
1842                                     OFF_TO_IDX(object->backing_object_offset), TRUE);
1843                         }
1844                         /*
1845                          * Object now shadows whatever backing_object did.
1846                          * Note that the reference to 
1847                          * backing_object->backing_object moves from within 
1848                          * backing_object to within object.
1849                          */
1850                         LIST_REMOVE(object, shadow_list);
1851                         backing_object->shadow_count--;
1852                         if (backing_object->backing_object) {
1853                                 VM_OBJECT_WLOCK(backing_object->backing_object);
1854                                 LIST_REMOVE(backing_object, shadow_list);
1855                                 LIST_INSERT_HEAD(
1856                                     &backing_object->backing_object->shadow_head,
1857                                     object, shadow_list);
1858                                 /*
1859                                  * The shadow_count has not changed.
1860                                  */
1861                                 VM_OBJECT_WUNLOCK(backing_object->backing_object);
1862                         }
1863                         object->backing_object = backing_object->backing_object;
1864                         object->backing_object_offset +=
1865                             backing_object->backing_object_offset;
1866
1867                         /*
1868                          * Discard backing_object.
1869                          *
1870                          * Since the backing object has no pages, no pager left,
1871                          * and no object references within it, all that is
1872                          * necessary is to dispose of it.
1873                          */
1874                         KASSERT(backing_object->ref_count == 1, (
1875 "backing_object %p was somehow re-referenced during collapse!",
1876                             backing_object));
1877                         vm_object_pip_wakeup(backing_object);
1878                         backing_object->type = OBJT_DEAD;
1879                         backing_object->ref_count = 0;
1880                         VM_OBJECT_WUNLOCK(backing_object);
1881                         vm_object_destroy(backing_object);
1882
1883                         vm_object_pip_wakeup(object);
1884                         object_collapses++;
1885                 } else {
1886                         /*
1887                          * If we do not entirely shadow the backing object,
1888                          * there is nothing we can do so we give up.
1889                          */
1890                         if (object->resident_page_count != object->size &&
1891                             !vm_object_scan_all_shadowed(object)) {
1892                                 VM_OBJECT_WUNLOCK(backing_object);
1893                                 break;
1894                         }
1895
1896                         /*
1897                          * Make the parent shadow the next object in the
1898                          * chain.  Deallocating backing_object will not remove
1899                          * it, since its reference count is at least 2.
1900                          */
1901                         LIST_REMOVE(object, shadow_list);
1902                         backing_object->shadow_count--;
1903
1904                         new_backing_object = backing_object->backing_object;
1905                         if ((object->backing_object = new_backing_object) != NULL) {
1906                                 VM_OBJECT_WLOCK(new_backing_object);
1907                                 LIST_INSERT_HEAD(
1908                                     &new_backing_object->shadow_head,
1909                                     object,
1910                                     shadow_list
1911                                 );
1912                                 new_backing_object->shadow_count++;
1913                                 vm_object_reference_locked(new_backing_object);
1914                                 VM_OBJECT_WUNLOCK(new_backing_object);
1915                                 object->backing_object_offset +=
1916                                         backing_object->backing_object_offset;
1917                         }
1918
1919                         /*
1920                          * Drop the reference count on backing_object. Since
1921                          * its ref_count was at least 2, it will not vanish.
1922                          */
1923                         backing_object->ref_count--;
1924                         VM_OBJECT_WUNLOCK(backing_object);
1925                         object_bypasses++;
1926                 }
1927
1928                 /*
1929                  * Try again with this object's new backing object.
1930                  */
1931         }
1932 }
1933
1934 /*
1935  *      vm_object_page_remove:
1936  *
1937  *      For the given object, either frees or invalidates each of the
1938  *      specified pages.  In general, a page is freed.  However, if a page is
1939  *      wired for any reason other than the existence of a managed, wired
1940  *      mapping, then it may be invalidated but not removed from the object.
1941  *      Pages are specified by the given range ["start", "end") and the option
1942  *      OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
1943  *      extends from "start" to the end of the object.  If the option
1944  *      OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
1945  *      specified range are affected.  If the option OBJPR_NOTMAPPED is
1946  *      specified, then the pages within the specified range must have no
1947  *      mappings.  Otherwise, if this option is not specified, any mappings to
1948  *      the specified pages are removed before the pages are freed or
1949  *      invalidated.
1950  *
1951  *      In general, this operation should only be performed on objects that
1952  *      contain managed pages.  There are, however, two exceptions.  First, it
1953  *      is performed on the kernel and kmem objects by vm_map_entry_delete().
1954  *      Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
1955  *      backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
1956  *      not be specified and the option OBJPR_NOTMAPPED must be specified.
1957  *
1958  *      The object must be locked.
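      *
      *      For example (a sketch, not a specific in-tree caller),
      *      truncating an object to "size" pages frees everything from
      *      that index to the end of the object:
      *
      *              vm_object_page_remove(object, size, 0, 0);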
1959  */
1960 void
1961 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1962     int options)
1963 {
1964         vm_page_t p, next;
1965         struct mtx *mtx;
1966         struct pglist pgl;
1967
1968         VM_OBJECT_ASSERT_WLOCKED(object);
1969         KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
1970             (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
1971             ("vm_object_page_remove: illegal options for object %p", object));
1972         if (object->resident_page_count == 0)
1973                 return;
1974         vm_object_pip_add(object, 1);
1975         TAILQ_INIT(&pgl);
1976 again:
1977         p = vm_page_find_least(object, start);
1978         mtx = NULL;
1979
1980         /*
1981          * Here, the variable "p" is either (1) the page with the least pindex
1982          * greater than or equal to the parameter "start" or (2) NULL. 
1983          */
1984         for (; p != NULL && (p->pindex < end || end == 0); p = next) {
1985                 next = TAILQ_NEXT(p, listq);
1986
1987                 /*
1988                  * If the page is wired for any reason besides the existence
1989                  * of managed, wired mappings, then it cannot be freed.  For
1990                  * example, fictitious pages, which represent device memory,
1991                  * are inherently wired and cannot be freed.  They can,
1992                  * however, be invalidated if the option OBJPR_CLEANONLY is
1993                  * not specified.
1994                  */
1995                 vm_page_change_lock(p, &mtx);
1996                 if (vm_page_xbusied(p)) {
1997                         VM_OBJECT_WUNLOCK(object);
1998                         vm_page_busy_sleep(p, "vmopax", true);
1999                         VM_OBJECT_WLOCK(object);
2000                         goto again;
2001                 }
2002                 if (p->wire_count != 0) {
2003                         if ((options & OBJPR_NOTMAPPED) == 0 &&
2004                             object->ref_count != 0)
2005                                 pmap_remove_all(p);
2006                         if ((options & OBJPR_CLEANONLY) == 0) {
2007                                 p->valid = 0;
2008                                 vm_page_undirty(p);
2009                         }
2010                         continue;
2011                 }
2012                 if (vm_page_busied(p)) {
2013                         VM_OBJECT_WUNLOCK(object);
2014                         vm_page_busy_sleep(p, "vmopar", false);
2015                         VM_OBJECT_WLOCK(object);
2016                         goto again;
2017                 }
2018                 KASSERT((p->flags & PG_FICTITIOUS) == 0,
2019                     ("vm_object_page_remove: page %p is fictitious", p));
2020                 if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) {
2021                         if ((options & OBJPR_NOTMAPPED) == 0 &&
2022                             object->ref_count != 0)
2023                                 pmap_remove_write(p);
2024                         if (p->dirty != 0)
2025                                 continue;
2026                 }
2027                 if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0)
2028                         pmap_remove_all(p);
2029                 p->flags &= ~PG_ZERO;
2030                 if (vm_page_free_prep(p, false))
2031                         TAILQ_INSERT_TAIL(&pgl, p, listq);
2032         }
2033         if (mtx != NULL)
2034                 mtx_unlock(mtx);
2035         vm_page_free_phys_pglist(&pgl);
2036         vm_object_pip_wakeup(object);
2037 }
2038
2039 /*
2040  *      vm_object_page_noreuse:
2041  *
2042  *      For the given object, attempt to move the specified pages to
2043  *      the head of the inactive queue.  This bypasses regular LRU
2044  *      operation and allows the pages to be reused quickly under memory
2045  *      pressure.  If a page is wired for any reason, then it will not
2046  *      be queued.  Pages are specified by the range ["start", "end").
2047  *      As a special case, if "end" is zero, then the range extends from
2048  *      "start" to the end of the object.
2049  *
2050  *      This operation should only be performed on objects that
2051  *      contain non-fictitious, managed pages.
2052  *
2053  *      The object must be locked.
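      *
      *      (This is used, for example, to implement advice such as
      *      posix_fadvise(..., POSIX_FADV_DONTNEED).)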
2054  */
2055 void
2056 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2057 {
2058         struct mtx *mtx;
2059         vm_page_t p, next;
2060
2061         VM_OBJECT_ASSERT_LOCKED(object);
2062         KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
2063             ("vm_object_page_noreuse: illegal object %p", object));
2064         if (object->resident_page_count == 0)
2065                 return;
2066         p = vm_page_find_least(object, start);
2067
2068         /*
2069          * Here, the variable "p" is either (1) the page with the least pindex
2070          * greater than or equal to the parameter "start" or (2) NULL. 
2071          */
2072         mtx = NULL;
2073         for (; p != NULL && (p->pindex < end || end == 0); p = next) {
2074                 next = TAILQ_NEXT(p, listq);
2075                 vm_page_change_lock(p, &mtx);
2076                 vm_page_deactivate_noreuse(p);
2077         }
2078         if (mtx != NULL)
2079                 mtx_unlock(mtx);
2080 }
2081
2082 /*
2083  *      Populate the specified range of the object with valid pages.  Returns
2084  *      TRUE if the range is successfully populated and FALSE otherwise.
2085  *
2086  *      Note: This function should be optimized to pass a larger array of
2087  *      pages to vm_pager_get_pages() before it is applied to a non-
2088  *      OBJT_DEVICE object.
2089  *
2090  *      The object must be locked.
2091  */
2092 boolean_t
2093 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
2094 {
2095         vm_page_t m;
2096         vm_pindex_t pindex;
2097         int rv;
2098
2099         VM_OBJECT_ASSERT_WLOCKED(object);
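             /*
              * vm_page_grab() returns each page exclusive busied,
              * allocating it if it does not yet exist.  The pages are
              * left busied until the loop below unbusies them, because
              * the object lock may be dropped while a later page is
              * paged in.
              */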
2100         for (pindex = start; pindex < end; pindex++) {
2101                 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
2102                 if (m->valid != VM_PAGE_BITS_ALL) {
2103                         rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
2104                         if (rv != VM_PAGER_OK) {
2105                                 vm_page_lock(m);
2106                                 vm_page_free(m);
2107                                 vm_page_unlock(m);
2108                                 break;
2109                         }
2110                 }
2111                 /*
2112                  * Keep "m" busy because a subsequent iteration may unlock
2113                  * the object.
2114                  */
2115         }
2116         if (pindex > start) {
2117                 m = vm_page_lookup(object, start);
2118                 while (m != NULL && m->pindex < pindex) {
2119                         vm_page_xunbusy(m);
2120                         m = TAILQ_NEXT(m, listq);
2121                 }
2122         }
2123         return (pindex == end);
2124 }
2125
2126 /*
2127  *      Routine:        vm_object_coalesce
2128  *      Function:       Coalesces two objects backing up adjoining
2129  *                      regions of memory into a single object.
2130  *
2131  *      returns TRUE if objects were combined.
2132  *
2133  *      NOTE:   Only works at the moment if the second object is NULL -
2134  *              if it's not, which object do we lock first?
2135  *
2136  *      Parameters:
2137  *              prev_object     First object to coalesce
2138  *              prev_offset     Offset into prev_object
2139  *              prev_size       Size of reference to prev_object
2140  *              next_size       Size of reference to the second object
2141  *              reserved        Indicator that extension region has
2142  *                              swap accounted for
2143  *
2144  *      Conditions:
2145  *      The object must *not* be locked.
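      *
      *      Illustrative use (a sketch; in practice the map code supplies
      *      the arguments):
      *
      *              if (vm_object_coalesce(prev_object, prev_offset,
      *                  prev_size, next_size, FALSE)) {
      *                      ... reuse prev_object for the new range at
      *                      offset prev_offset + prev_size ...
      *              }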
2146  */
2147 boolean_t
2148 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
2149     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2150 {
2151         vm_pindex_t next_pindex;
2152
2153         if (prev_object == NULL)
2154                 return (TRUE);
2155         VM_OBJECT_WLOCK(prev_object);
2156         if ((prev_object->type != OBJT_DEFAULT &&
2157             prev_object->type != OBJT_SWAP) ||
2158             (prev_object->flags & OBJ_TMPFS_NODE) != 0) {
2159                 VM_OBJECT_WUNLOCK(prev_object);
2160                 return (FALSE);
2161         }
2162
2163         /*
2164          * Try to collapse the object first
2165          */
2166         vm_object_collapse(prev_object);
2167
2168         /*
2169          * Can't coalesce if the object has more than one reference, is
2170          * paged out, shadows another object, or has a copy elsewhere; any
2171          * of these means that pages not mapped to prev_entry may be in use.
2172          */
2173         if (prev_object->backing_object != NULL) {
2174                 VM_OBJECT_WUNLOCK(prev_object);
2175                 return (FALSE);
2176         }
2177
2178         prev_size >>= PAGE_SHIFT;
2179         next_size >>= PAGE_SHIFT;
2180         next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2181
2182         if ((prev_object->ref_count > 1) &&
2183             (prev_object->size != next_pindex)) {
2184                 VM_OBJECT_WUNLOCK(prev_object);
2185                 return (FALSE);
2186         }
2187
2188         /*
2189          * Account for the charge.
2190          */
2191         if (prev_object->cred != NULL) {
2192
2193                 /*
2194                  * If prev_object was charged, then this mapping,
2195                  * although not charged now, may become writable
2196                  * later. Non-NULL cred in the object would prevent
2197                  * swap reservation during enabling of the write
2198                  * access, so reserve swap now. Failed reservation
2199                  * cause allocation of the separate object for the map
2200                  * entry, and swap reservation for this entry is
2201                  * managed in appropriate time.
2202                  */
2203                 if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
2204                     prev_object->cred)) {
2205                         VM_OBJECT_WUNLOCK(prev_object);
2206                         return (FALSE);
2207                 }
2208                 prev_object->charge += ptoa(next_size);
2209         }
2210
2211         /*
2212          * Remove any pages that may still be in the object from a previous
2213          * deallocation.
2214          */
2215         if (next_pindex < prev_object->size) {
2216                 vm_object_page_remove(prev_object, next_pindex, next_pindex +
2217                     next_size, 0);
2218                 if (prev_object->type == OBJT_SWAP)
2219                         swap_pager_freespace(prev_object,
2220                                              next_pindex, next_size);
2221 #if 0
2222                 if (prev_object->cred != NULL) {
2223                         KASSERT(prev_object->charge >=
2224                             ptoa(prev_object->size - next_pindex),
2225                             ("object %p overcharged 1 %jx %jx", prev_object,
2226                                 (uintmax_t)next_pindex, (uintmax_t)next_size));
2227                         prev_object->charge -= ptoa(prev_object->size -
2228                             next_pindex);
2229                 }
2230 #endif
2231         }
2232
2233         /*
2234          * Extend the object if necessary.
2235          */
2236         if (next_pindex + next_size > prev_object->size)
2237                 prev_object->size = next_pindex + next_size;
2238
2239         VM_OBJECT_WUNLOCK(prev_object);
2240         return (TRUE);
2241 }
2242
2243 void
2244 vm_object_set_writeable_dirty(vm_object_t object)
2245 {
2246
2247         VM_OBJECT_ASSERT_WLOCKED(object);
2248         if (object->type != OBJT_VNODE) {
2249                 if ((object->flags & OBJ_TMPFS_NODE) != 0) {
2250                         KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs"));
2251                         vm_object_set_flag(object, OBJ_TMPFS_DIRTY);
2252                 }
2253                 return;
2254         }
2255         object->generation++;
2256         if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
2257                 return;
2258         vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
2259 }
2260
2261 /*
2262  *      vm_object_unwire:
2263  *
2264  *      For each page offset within the specified range of the given object,
2265  *      find the highest-level page in the shadow chain and unwire it.  A page
2266  *      must exist at every page offset, and the highest-level page must be
2267  *      wired.
2268  */
2269 void
2270 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
2271     uint8_t queue)
2272 {
2273         vm_object_t tobject;
2274         vm_page_t m, tm;
2275         vm_pindex_t end_pindex, pindex, tpindex;
2276         int depth, locked_depth;
2277
2278         KASSERT((offset & PAGE_MASK) == 0,
2279             ("vm_object_unwire: offset is not page aligned"));
2280         KASSERT((length & PAGE_MASK) == 0,
2281             ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
2282         /* The wired count of a fictitious page never changes. */
2283         if ((object->flags & OBJ_FICTITIOUS) != 0)
2284                 return;
2285         pindex = OFF_TO_IDX(offset);
2286         end_pindex = pindex + atop(length);
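             /*
              * Walk each shadow chain read-locked: locks are taken as the
              * lookups descend, all of them are held until the entire
              * range has been processed, and they are released in order
              * at the end.
              */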
2287         locked_depth = 1;
2288         VM_OBJECT_RLOCK(object);
2289         m = vm_page_find_least(object, pindex);
2290         while (pindex < end_pindex) {
2291                 if (m == NULL || pindex < m->pindex) {
2292                         /*
2293                          * The first object in the shadow chain doesn't
2294                          * contain a page at the current index.  Therefore,
2295                          * the page must exist in a backing object.
2296                          */
2297                         tobject = object;
2298                         tpindex = pindex;
2299                         depth = 0;
2300                         do {
2301                                 tpindex +=
2302                                     OFF_TO_IDX(tobject->backing_object_offset);
2303                                 tobject = tobject->backing_object;
2304                                 KASSERT(tobject != NULL,
2305                                     ("vm_object_unwire: missing page"));
2306                                 if ((tobject->flags & OBJ_FICTITIOUS) != 0)
2307                                         goto next_page;
2308                                 depth++;
2309                                 if (depth == locked_depth) {
2310                                         locked_depth++;
2311                                         VM_OBJECT_RLOCK(tobject);
2312                                 }
2313                         } while ((tm = vm_page_lookup(tobject, tpindex)) ==
2314                             NULL);
2315                 } else {
2316                         tm = m;
2317                         m = TAILQ_NEXT(m, listq);
2318                 }
2319                 vm_page_lock(tm);
2320                 vm_page_unwire(tm, queue);
2321                 vm_page_unlock(tm);
2322 next_page:
2323                 pindex++;
2324         }
2325         /* Release the accumulated object locks. */
2326         for (depth = 0; depth < locked_depth; depth++) {
2327                 tobject = object->backing_object;
2328                 VM_OBJECT_RUNLOCK(object);
2329                 object = tobject;
2330         }
2331 }
2332
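     /*
      * Return the vnode, if any, that backs the object: the handle of an
      * OBJT_VNODE object, or, for a tmpfs-owned swap object, the tmpfs
      * vnode stashed in the pager-specific data.
      */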
2333 struct vnode *
2334 vm_object_vnode(vm_object_t object)
2335 {
2336
2337         VM_OBJECT_ASSERT_LOCKED(object);
2338         if (object->type == OBJT_VNODE)
2339                 return (object->handle);
2340         if (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS) != 0)
2341                 return (object->un_pager.swp.swp_tmpfs);
2342         return (NULL);
2343 }
2344
2345 static int
2346 sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
2347 {
2348         struct kinfo_vmobject *kvo;
2349         char *fullpath, *freepath;
2350         struct vnode *vp;
2351         struct vattr va;
2352         vm_object_t obj;
2353         vm_page_t m;
2354         int count, error;
2355
2356         if (req->oldptr == NULL) {
2357                 /*
2358                  * If an old buffer has not been provided, generate an
2359                  * estimate of the space needed for a subsequent call.
2360                  */
2361                 mtx_lock(&vm_object_list_mtx);
2362                 count = 0;
2363                 TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2364                         if (obj->type == OBJT_DEAD)
2365                                 continue;
2366                         count++;
2367                 }
2368                 mtx_unlock(&vm_object_list_mtx);
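                     /*
                      * Pad the estimate by 10% to allow for objects that
                      * are created between this estimate and the caller's
                      * subsequent fetch.
                      */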
2369                 return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
2370                     count * 11 / 10));
2371         }
2372
2373         kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK);
2374         error = 0;
2375
2376         /*
2377          * VM objects are type stable and are never removed from the
2378          * list once added.  This allows us to safely read obj->object_list
2379          * after reacquiring the VM object lock.
2380          */
2381         mtx_lock(&vm_object_list_mtx);
2382         TAILQ_FOREACH(obj, &vm_object_list, object_list) {
2383                 if (obj->type == OBJT_DEAD)
2384                         continue;
2385                 VM_OBJECT_RLOCK(obj);
2386                 if (obj->type == OBJT_DEAD) {
2387                         VM_OBJECT_RUNLOCK(obj);
2388                         continue;
2389                 }
2390                 mtx_unlock(&vm_object_list_mtx);
2391                 kvo->kvo_size = ptoa(obj->size);
2392                 kvo->kvo_resident = obj->resident_page_count;
2393                 kvo->kvo_ref_count = obj->ref_count;
2394                 kvo->kvo_shadow_count = obj->shadow_count;
2395                 kvo->kvo_memattr = obj->memattr;
2396                 kvo->kvo_active = 0;
2397                 kvo->kvo_inactive = 0;
2398                 TAILQ_FOREACH(m, &obj->memq, listq) {
2399                         /*
2400                          * A page may belong to the object but be
2401                          * dequeued and set to PQ_NONE while the
2402                          * object lock is not held.  This makes the
2403                          * reads of m->queue below racy, and we do not
2404                          * count pages set to PQ_NONE.  However, this
2405                          * sysctl is only meant to give an
2406                          * approximation of the system anyway.
2407                          */
2408                         if (vm_page_active(m))
2409                                 kvo->kvo_active++;
2410                         else if (vm_page_inactive(m))
2411                                 kvo->kvo_inactive++;
2412                 }
2413
2414                 kvo->kvo_vn_fileid = 0;
2415                 kvo->kvo_vn_fsid = 0;
2416                 kvo->kvo_vn_fsid_freebsd11 = 0;
2417                 freepath = NULL;
2418                 fullpath = "";
2419                 vp = NULL;
2420                 switch (obj->type) {
2421                 case OBJT_DEFAULT:
2422                         kvo->kvo_type = KVME_TYPE_DEFAULT;
2423                         break;
2424                 case OBJT_VNODE:
2425                         kvo->kvo_type = KVME_TYPE_VNODE;
2426                         vp = obj->handle;
2427                         vref(vp);
2428                         break;
2429                 case OBJT_SWAP:
2430                         kvo->kvo_type = KVME_TYPE_SWAP;
2431                         break;
2432                 case OBJT_DEVICE:
2433                         kvo->kvo_type = KVME_TYPE_DEVICE;
2434                         break;
2435                 case OBJT_PHYS:
2436                         kvo->kvo_type = KVME_TYPE_PHYS;
2437                         break;
2438                 case OBJT_DEAD:
2439                         kvo->kvo_type = KVME_TYPE_DEAD;
2440                         break;
2441                 case OBJT_SG:
2442                         kvo->kvo_type = KVME_TYPE_SG;
2443                         break;
2444                 case OBJT_MGTDEVICE:
2445                         kvo->kvo_type = KVME_TYPE_MGTDEVICE;
2446                         break;
2447                 default:
2448                         kvo->kvo_type = KVME_TYPE_UNKNOWN;
2449                         break;
2450                 }
2451                 VM_OBJECT_RUNLOCK(obj);
2452                 if (vp != NULL) {
2453                         vn_fullpath(curthread, vp, &fullpath, &freepath);
2454                         vn_lock(vp, LK_SHARED | LK_RETRY);
2455                         if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
2456                                 kvo->kvo_vn_fileid = va.va_fileid;
2457                                 kvo->kvo_vn_fsid = va.va_fsid;
2458                                 kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
2459                                                                 /* truncated to 32 bits */
2460                         }
2461                         vput(vp);
2462                 }
2463
2464                 strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
2465                 if (freepath != NULL)
2466                         free(freepath, M_TEMP);
2467
2468                 /* Pack record size down */
2469                 kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
2470                     + strlen(kvo->kvo_path) + 1;
2471                 kvo->kvo_structsize = roundup(kvo->kvo_structsize,
2472                     sizeof(uint64_t));
2473                 error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
2474                 mtx_lock(&vm_object_list_mtx);
2475                 if (error)
2476                         break;
2477         }
2478         mtx_unlock(&vm_object_list_mtx);
2479         free(kvo, M_TEMP);
2480         return (error);
2481 }
2482 SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
2483     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
2484     "List of VM objects");
2485
2486 #include "opt_ddb.h"
2487 #ifdef DDB
2488 #include <sys/kernel.h>
2489
2490 #include <sys/cons.h>
2491
2492 #include <ddb/ddb.h>
2493
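     /*
      * DDB helper: recursively search a map (descending into submaps) for
      * an entry whose object shadow chain contains the given object.
      */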
2494 static int
2495 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2496 {
2497         vm_map_t tmpm;
2498         vm_map_entry_t tmpe;
2499         vm_object_t obj;
2500         int entcount;
2501
2502         if (map == NULL)
2503                 return 0;
2504
2505         if (entry == NULL) {
2506                 tmpe = map->header.next;
2507                 entcount = map->nentries;
2508                 while (entcount-- && (tmpe != &map->header)) {
2509                         if (_vm_object_in_map(map, object, tmpe)) {
2510                                 return 1;
2511                         }
2512                         tmpe = tmpe->next;
2513                 }
2514         } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2515                 tmpm = entry->object.sub_map;
2516                 tmpe = tmpm->header.next;
2517                 entcount = tmpm->nentries;
2518                 while (entcount-- && tmpe != &tmpm->header) {
2519                         if (_vm_object_in_map(tmpm, object, tmpe)) {
2520                                 return 1;
2521                         }
2522                         tmpe = tmpe->next;
2523                 }
2524         } else if ((obj = entry->object.vm_object) != NULL) {
2525                 for (; obj; obj = obj->backing_object)
2526                         if (obj == object) {
2527                                 return 1;
2528                         }
2529         }
2530         return 0;
2531 }
2532
2533 static int
2534 vm_object_in_map(vm_object_t object)
2535 {
2536         struct proc *p;
2537
2538         /* sx_slock(&allproc_lock); */
2539         FOREACH_PROC_IN_SYSTEM(p) {
2540                 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2541                         continue;
2542                 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
2543                         /* sx_sunlock(&allproc_lock); */
2544                         return 1;
2545                 }
2546         }
2547         /* sx_sunlock(&allproc_lock); */
2548         if (_vm_object_in_map(kernel_map, object, 0))
2549                 return 1;
2550         return 0;
2551 }
2552
2553 DB_SHOW_COMMAND(vmochk, vm_object_check)
2554 {
2555         vm_object_t object;
2556
2557         /*
2558          * Make sure that internal objects are in a map somewhere
2559          * and that none has a zero reference count.
2560          */
2561         TAILQ_FOREACH(object, &vm_object_list, object_list) {
2562                 if (object->handle == NULL &&
2563                     (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2564                         if (object->ref_count == 0) {
2565                                 db_printf("vmochk: internal obj has zero ref count, size: %ld\n",
2566                                         (long)object->size);
2567                         }
2568                         if (!vm_object_in_map(object)) {
2569                                 db_printf(
2570                         "vmochk: internal obj is not in a map: "
2571                         "ref: %d, size: %lu (0x%lx), backing_object: %p\n",
2572                                     object->ref_count, (u_long)object->size, 
2573                                     (u_long)object->size,
2574                                     (void *)object->backing_object);
2575                         }
2576                 }
2577         }
2578 }
2579
2580 /*
2581  *      vm_object_print:        [ debug ]
2582  */
2583 DB_SHOW_COMMAND(object, vm_object_print_static)
2584 {
2585         /* XXX convert args. */
2586         vm_object_t object = (vm_object_t)addr;
2587         boolean_t full = have_addr;
2588
2589         vm_page_t p;
2590
2591         /* XXX count is an (unused) arg.  Avoid shadowing it. */
2592 #define count   was_count
2593
2594         int count;
2595
2596         if (object == NULL)
2597                 return;
2598
2599         db_iprintf(
2600             "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
2601             object, (int)object->type, (uintmax_t)object->size,
2602             object->resident_page_count, object->ref_count, object->flags,
2603             object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2604         db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2605             object->shadow_count, 
2606             object->backing_object ? object->backing_object->ref_count : 0,
2607             object->backing_object, (uintmax_t)object->backing_object_offset);
2608
2609         if (!full)
2610                 return;
2611
2612         db_indent += 2;
2613         count = 0;
2614         TAILQ_FOREACH(p, &object->memq, listq) {
2615                 if (count == 0)
2616                         db_iprintf("memory:=");
2617                 else if (count == 6) {
2618                         db_printf("\n");
2619                         db_iprintf(" ...");
2620                         count = 0;
2621                 } else
2622                         db_printf(",");
2623                 count++;
2624
2625                 db_printf("(off=0x%jx,page=0x%jx)",
2626                     (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2627         }
2628         if (count != 0)
2629                 db_printf("\n");
2630         db_indent -= 2;
2631 }
2632
2633 /* XXX. */
2634 #undef count
2635
2636 /* XXX need this non-static entry for calling from vm_map_print. */
2637 void
2638 vm_object_print(
2639         /* db_expr_t */ long addr,
2640         boolean_t have_addr,
2641         /* db_expr_t */ long count,
2642         char *modif)
2643 {
2644         vm_object_print_static(addr, have_addr, count, modif);
2645 }
2646
2647 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2648 {
2649         vm_object_t object;
2650         vm_pindex_t fidx;
2651         vm_paddr_t pa;
2652         vm_page_t m, prev_m;
2653         int rcount, nl, c;
2654
2655         nl = 0;
2656         TAILQ_FOREACH(object, &vm_object_list, object_list) {
2657                 db_printf("new object: %p\n", (void *)object);
2658                 if (nl > 18) {
2659                         c = cngetc();
2660                         if (c != ' ')
2661                                 return;
2662                         nl = 0;
2663                 }
2664                 nl++;
2665                 rcount = 0;
2666                 fidx = 0;
2667                 pa = -1;
2668                 TAILQ_FOREACH(m, &object->memq, listq) {
2669                         if (m->pindex > 128)
2670                                 break;
2671                         if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2672                             prev_m->pindex + 1 != m->pindex) {
2673                                 if (rcount) {
2674                                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2675                                                 (long)fidx, rcount, (long)pa);
2676                                         if (nl > 18) {
2677                                                 c = cngetc();
2678                                                 if (c != ' ')
2679                                                         return;
2680                                                 nl = 0;
2681                                         }
2682                                         nl++;
2683                                         rcount = 0;
2684                                 }
2685                         }                               
2686                         if (rcount &&
2687                                 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2688                                 ++rcount;
2689                                 continue;
2690                         }
2691                         if (rcount) {
2692                                 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2693                                         (long)fidx, rcount, (long)pa);
2694                                 if (nl > 18) {
2695                                         c = cngetc();
2696                                         if (c != ' ')
2697                                                 return;
2698                                         nl = 0;
2699                                 }
2700                                 nl++;
2701                         }
2702                         fidx = m->pindex;
2703                         pa = VM_PAGE_TO_PHYS(m);
2704                         rcount = 1;
2705                 }
2706                 if (rcount) {
2707                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2708                                 (long)fidx, rcount, (long)pa);
2709                         if (nl > 18) {
2710                                 c = cngetc();
2711                                 if (c != ' ')
2712                                         return;
2713                                 nl = 0;
2714                         }
2715                         nl++;
2716                 }
2717         }
2718 }
2719 #endif /* DDB */