/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

#include <sys/mount.h>	/* XXX Temporary for VFS_LOCK_GIANT() */

#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

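/*
 * Byte offsets, relative to the faulting address, of the pages probed by
 * vm_fault_prefault(), ordered nearest-first: one page back, one page
 * forward, two back, two forward, and so on out to PFBAK/PFFOR pages.
 */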
static int prefault_pageorder[] = {
	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);

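/*
 * Together these bound the cluster that a single fault may page in: up to
 * VM_FAULT_READ_BEHIND pages before the faulting page and up to
 * VM_FAULT_READ_AHEAD pages after it, plus the faulting page itself
 * (16 pages total here).  VM_FAULT_READ sizes the marray scratch array
 * in vm_fault() below.
 */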
#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_page_t first_m;
	vm_object_t	first_object;
	vm_pindex_t first_pindex;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;
	struct vnode *vp;
};

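/*
 * Wake up anyone waiting on the current page, move it to the inactive
 * queue, and drop it from the fault state.
 */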
static inline void
release_page(struct faultstate *fs)
{
	vm_page_wakeup(fs->m);
	vm_page_lock_queues();
	vm_page_deactivate(fs->m);
	vm_page_unlock_queues();
	fs->m = NULL;
}

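/*
 * Drop the map lookup lock, if it is still held.
 */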
static inline void
unlock_map(struct faultstate *fs)
{
	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = FALSE;
	}
}

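/*
 * Tear down the remaining fault state: drop the paging-in-progress
 * references, free a transient first_m if one was allocated, release the
 * reference on first_object, unlock the map, and release the vnode, if any.
 */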
static void
unlock_and_deallocate(struct faultstate *fs)
{

	vm_object_pip_wakeup(fs->object);
	VM_OBJECT_UNLOCK(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_LOCK(fs->first_object);
		vm_page_lock_queues();
		vm_page_free(fs->first_m);
		vm_page_unlock_queues();
		vm_object_pip_wakeup(fs->first_object);
		VM_OBJECT_UNLOCK(fs->first_object);
		fs->first_m = NULL;
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	if (fs->vp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(fs->vp->v_mount);
		vput(fs->vp);
		fs->vp = NULL;
		VFS_UNLOCK_GIANT(vfslocked);
	}
}

/*
 * TRYPAGER - used by vm_fault to calculate whether the pager for the
 *	      current object *might* contain the page.
 *
 *	      Default objects are zero-fill; there is no real pager.
 */
#define TRYPAGER	(fs.object->type != OBJT_DEFAULT && \
			(((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
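/*
 * Illustrative use (a sketch, not code from this file): the
 * machine-dependent trap handler typically resolves a user-mode fault
 * with something like
 *
 *	rv = vm_fault(map, trunc_page(va), fault_type, VM_FAULT_NORMAL);
 *
 * where fault_type carries VM_PROT_READ and/or VM_PROT_WRITE derived from
 * the trap code, and a return other than KERN_SUCCESS is converted into a
 * signal such as SIGSEGV or SIGBUS.
 */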
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
	 int fault_flags)
{
	vm_prot_t prot;
	int is_first_object_locked, result;
	boolean_t growstack, wired;
	int map_generation;
	vm_object_t next_object;
	vm_page_t marray[VM_FAULT_READ];
	int hardfault;
	int faultcount;
	struct faultstate fs;

	hardfault = 0;
	growstack = TRUE;
	PCPU_INC(cnt.v_vm_faults);

RetryFault:;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry,
	    &fs.first_object, &fs.first_pindex, &prot, &wired);
	if (result != KERN_SUCCESS) {
		if (result != KERN_PROTECTION_FAILURE ||
		    (fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) {
			if (growstack && result == KERN_INVALID_ADDRESS &&
			    map != kernel_map && curproc != NULL) {
				result = vm_map_growstack(curproc, vaddr);
				if (result != KERN_SUCCESS)
					return (KERN_FAILURE);
				growstack = FALSE;
				goto RetryFault;
			}
			return (result);
		}

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW RO sections, because it is NOT desirable
		 * to COW .text.  We simply keep .text from ever being COW'ed
		 * and take the heat that one cannot debug wired .text sections.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
			VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
			&fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired);
		if (result != KERN_SUCCESS)
			return (result);

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
		 *
		 * XXX The following assignment modifies the map without
		 * holding a write lock on it.
		 */
		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
			fs.entry->max_protection &= ~VM_PROT_WRITE;
	}

	map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		    (u_long)vaddr);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 *
	 * XXX vnode_pager_lock() can block without releasing the map lock.
	 */
	if (fs.first_object->flags & OBJ_NEEDGIANT)
		mtx_lock(&Giant);
	VM_OBJECT_LOCK(fs.first_object);
	vm_object_reference_locked(fs.first_object);
	fs.vp = vnode_pager_lock(fs.first_object);
	KASSERT(fs.vp == NULL || !fs.map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));
	KASSERT((fs.first_object->flags & OBJ_NEEDGIANT) == 0 ||
	    !fs.map->system_map,
	    ("vm_fault: Object requiring giant mapped by system map"));
	if (fs.first_object->flags & OBJ_NEEDGIANT)
		mtx_unlock(&Giant);
	vm_object_pip_add(fs.first_object, 1);

	fs.lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;
	while (TRUE) {
		/*
		 * If the object is dead, we stop here
		 */
		if (fs.object->flags & OBJ_DEAD) {
			unlock_and_deallocate(&fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if page is resident
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			/*
			 * check for page-based copy on write.
			 * We check fs.object == fs.first_object so
			 * as to ensure the legacy COW mechanism is
			 * used when the page in question is part of
			 * a shadow object.  Otherwise, vm_page_cowfault()
			 * removes the page from the backing object,
			 * which is not what we want.
			 */
			vm_page_lock_queues();
			if ((fs.m->cow) &&
			    (fault_type & VM_PROT_WRITE) &&
			    (fs.object == fs.first_object)) {
				vm_page_cowfault(fs.m);
				vm_page_unlock_queues();
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is busy via either VPO_BUSY or
			 * vm_page_t->busy because the vm_pager may be using
			 * vm_page_t->busy for pageouts ( and even pageins if
			 * it is the vnode pager ), and we could end up trying
			 * to pagein and pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort than it is
			 * worth.  We cannot under any circumstances mess
			 * around with a vm_page_t->busy page except, perhaps,
			 * to pmap it.
			 */
			if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) {
				vm_page_unlock_queues();
				VM_OBJECT_UNLOCK(fs.object);
				if (fs.object != fs.first_object) {
					VM_OBJECT_LOCK(fs.first_object);
					vm_page_lock_queues();
					vm_page_free(fs.first_m);
					vm_page_unlock_queues();
					vm_object_pip_wakeup(fs.first_object);
					VM_OBJECT_UNLOCK(fs.first_object);
					fs.first_m = NULL;
				}
				unlock_map(&fs);
				if (fs.vp != NULL) {
					int vfslck;

					vfslck = VFS_LOCK_GIANT(fs.vp->v_mount);
					vput(fs.vp);
					fs.vp = NULL;
					VFS_UNLOCK_GIANT(vfslck);
				}
				VM_OBJECT_LOCK(fs.object);
				if (fs.m == vm_page_lookup(fs.object,
				    fs.pindex)) {
					vm_page_sleep_if_busy(fs.m, TRUE,
					    "vmpfw");
				}
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_UNLOCK(fs.object);
				PCPU_INC(cnt.v_intrans);
				vm_object_deallocate(fs.first_object);
				goto RetryFault;
			}
			vm_pageq_remove(fs.m);
			vm_page_unlock_queues();

			/*
			 * Mark page busy for other processes, and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break-out ( we
			 * found the page ).
			 */
			vm_page_busy(fs.m);
			if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
				fs.m->object != kernel_object && fs.m->object != kmem_object) {
				goto readrest;
			}

			break;
		}

		/*
		 * Page is not resident.  If this is the search termination
		 * point or the pager might contain the page, allocate a
		 * new page.
		 */
		if (TRYPAGER || fs.object == fs.first_object) {
			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 */
			fs.m = NULL;
			if (!vm_page_count_severe()) {
#if VM_NRESERVLEVEL > 0
				if ((fs.object->flags & OBJ_COLORED) == 0) {
					fs.object->flags |= OBJ_COLORED;
					fs.object->pg_color = atop(vaddr) -
					    fs.pindex;
				}
#endif
				fs.m = vm_page_alloc(fs.object, fs.pindex,
				    (fs.vp || fs.object->backing_object)? VM_ALLOC_NORMAL: VM_ALLOC_ZERO);
			}
			if (fs.m == NULL) {
				unlock_and_deallocate(&fs);
				VM_WAITPFAULT;
				goto RetryFault;
			} else if ((fs.m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				break;
		}

readrest:
		/*
		 * We have either found an existing page or allocated a new
		 * one.  In either case, the page may be invalid or only
		 * partially valid.
		 *
		 * Attempt to fault-in the page if there is a chance that the
		 * pager has it, and potentially fault in additional pages
		 * at the same time.
		 */
		if (TRYPAGER) {
			int rv;
			int reqpage = 0;
			int ahead, behind;
			u_char behavior = vm_map_entry_behavior(fs.entry);

			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
				ahead = 0;
				behind = 0;
			} else {
				behind = (vaddr - fs.entry->start) >> PAGE_SHIFT;
				if (behind > VM_FAULT_READ_BEHIND)
					behind = VM_FAULT_READ_BEHIND;

				ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1;
				if (ahead > VM_FAULT_READ_AHEAD)
					ahead = VM_FAULT_READ_AHEAD;
			}
			is_first_object_locked = FALSE;
			if ((behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
			     (behavior != MAP_ENTRY_BEHAV_RANDOM &&
			      fs.pindex >= fs.entry->lastr &&
			      fs.pindex < fs.entry->lastr + VM_FAULT_READ)) &&
			    (fs.first_object == fs.object ||
			     (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object))) &&
			    fs.first_object->type != OBJT_DEVICE &&
			    fs.first_object->type != OBJT_PHYS) {
				vm_pindex_t firstpindex, tmppindex;

				if (fs.first_pindex < 2 * VM_FAULT_READ)
					firstpindex = 0;
				else
					firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;

				vm_page_lock_queues();
				/*
				 * note: partially valid pages cannot be
				 * included in the lookahead - NFS piecemeal
				 * writes will barf on it badly.
				 */
				for (tmppindex = fs.first_pindex - 1;
					tmppindex >= firstpindex;
					--tmppindex) {
					vm_page_t mt;

					mt = vm_page_lookup(fs.first_object, tmppindex);
					if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
						break;
					if (mt->busy ||
					    (mt->oflags & VPO_BUSY) ||
						mt->hold_count ||
						mt->wire_count)
						continue;
					pmap_remove_all(mt);
					if (mt->dirty) {
						vm_page_deactivate(mt);
					} else {
						vm_page_cache(mt);
					}
				}
				vm_page_unlock_queues();
				ahead += behind;
				behind = 0;
			}
			if (is_first_object_locked)
				VM_OBJECT_UNLOCK(fs.first_object);
			/*
			 * Now we find out if any other pages should be paged
			 * in at this time.  This routine checks to see if the
			 * pages surrounding this fault reside in the same
			 * object as the page for this fault.  If they do,
			 * then they are faulted into the object as well.  The
			 * array "marray" returned contains an array of
			 * vm_page_t structs where one of them is the
			 * vm_page_t passed to the routine.  The reqpage
			 * return value is the index into the marray for the
			 * vm_page_t passed to the routine.
			 *
			 * fs.m plus the additional pages are VPO_BUSY'd.
			 *
			 * XXX vm_fault_additional_pages() can block
			 * without releasing the map lock.
			 */
			faultcount = vm_fault_additional_pages(
			    fs.m, behind, ahead, marray, &reqpage);

			/*
			 * update lastr imperfectly (we do not know how much
			 * getpages will actually read), but good enough.
			 *
			 * XXX The following assignment modifies the map
			 * without holding a write lock on it.
			 */
			fs.entry->lastr = fs.pindex + faultcount - behind;

			/*
			 * Call the pager to retrieve the data, if any, after
			 * releasing the lock on the map.  We hold a ref on
			 * fs.object and the pages are VPO_BUSY'd.
			 */
			unlock_map(&fs);

			rv = faultcount ?
			    vm_pager_get_pages(fs.object, marray, faultcount,
				reqpage) : VM_PAGER_FAIL;

			if (rv == VM_PAGER_OK) {
				/*
				 * Found the page. Leave it busy while we play
				 * with it.
				 */

				/*
				 * Relookup in case pager changed page. Pager
				 * is responsible for disposition of old page
				 * if moved.
				 */
				fs.m = vm_page_lookup(fs.object, fs.pindex);
				if (!fs.m) {
					unlock_and_deallocate(&fs);
					goto RetryFault;
				}

				hardfault++;
				break; /* break to PAGE HAS BEEN FOUND */
			}
			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */
			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);
			/*
			 * Data outside the range of the pager or an I/O error
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
				(rv == VM_PAGER_BAD)) {
				vm_page_lock_queues();
				vm_page_free(fs.m);
				vm_page_unlock_queues();
				fs.m = NULL;
				unlock_and_deallocate(&fs);
				return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
			}
			if (fs.object != fs.first_object) {
				vm_page_lock_queues();
				vm_page_free(fs.m);
				vm_page_unlock_queues();
				fs.m = NULL;
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}

		/*
		 * We get here if the object has default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs.object == fs.first_object)
			fs.first_m = fs.m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_UNLOCK(fs.object);

				fs.object = fs.first_object;
				fs.pindex = fs.first_pindex;
				fs.m = fs.first_m;
				VM_OBJECT_LOCK(fs.object);
			}
			fs.first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs.m->flags & PG_ZERO) == 0) {
				pmap_zero_page(fs.m);
			} else {
				PCPU_INC(cnt.v_ozfod);
			}
			PCPU_INC(cnt.v_zfod);
			fs.m->valid = VM_PAGE_BITS_ALL;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			KASSERT(fs.object != next_object,
			    ("object loop %p", next_object));
			VM_OBJECT_LOCK(next_object);
			vm_object_pip_add(next_object, 1);
			if (fs.object != fs.first_object)
				vm_object_pip_wakeup(fs.object);
			VM_OBJECT_UNLOCK(fs.object);
			fs.object = next_object;
		}
	}

	KASSERT((fs.m->oflags & VPO_BUSY) != 0,
	    ("vm_fault: not busy after main loop"));

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if (fault_type & VM_PROT_WRITE) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			is_first_object_locked = FALSE;
			if (
				/*
				 * Only one shadow object
				 */
				(fs.object->shadow_count == 1) &&
				/*
				 * No COW refs, except us
				 */
				(fs.object->ref_count == 1) &&
				/*
				 * No one else can look this object up
				 */
				(fs.object->handle == NULL) &&
				/*
				 * No other ways to look the object up
				 */
				((fs.object->type == OBJT_DEFAULT) ||
				 (fs.object->type == OBJT_SWAP)) &&
			    (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
				/*
				 * We don't chase down the shadow chain
				 */
			    fs.object == fs.first_object->backing_object) {
				vm_page_lock_queues();
				/*
				 * get rid of the unnecessary page
				 */
				vm_page_free(fs.first_m);
				/*
				 * grab the page and put it into the
				 * process's object.  The page is
				 * automatically made dirty.
				 */
				vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
				vm_page_unlock_queues();
				vm_page_busy(fs.m);
				fs.first_m = fs.m;
				fs.m = NULL;
				PCPU_INC(cnt.v_cow_optim);
			} else {
				/*
				 * Oh, well, let's copy it.
				 */
				pmap_copy_page(fs.m, fs.first_m);
				fs.first_m->valid = VM_PAGE_BITS_ALL;
			}
			if (fs.m) {
				/*
				 * We no longer need the old page or object.
				 */
				release_page(&fs);
			}
			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs.object);
			VM_OBJECT_UNLOCK(fs.object);
			/*
			 * Only use the new page below...
			 */
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;
			fs.m = fs.first_m;
			if (!is_first_object_locked)
				VM_OBJECT_LOCK(fs.object);
			PCPU_INC(cnt.v_cow_faults);
		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		vm_object_t retry_object;
		vm_pindex_t retry_pindex;
		vm_prot_t retry_prot;

		if (!vm_map_trylock_read(fs.map)) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			goto RetryFault;
		}
		fs.lookup_still_valid = TRUE;
		if (fs.map->timestamp != map_generation) {
			result = vm_map_lookup_locked(&fs.map, vaddr, fault_type,
			    &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);

			/*
			 * If we don't need the page any longer, put it on the inactive
			 * list (the easiest thing to do here).  If no one needs it,
			 * pageout will grab it eventually.
			 */
			if (result != KERN_SUCCESS) {
				release_page(&fs);
				unlock_and_deallocate(&fs);

				/*
				 * If retry of map lookup would have blocked then
				 * retry fault from start.
				 */
				if (result == KERN_FAILURE)
					goto RetryFault;
				return (result);
			}
			if ((retry_object != fs.first_object) ||
			    (retry_pindex != fs.first_pindex)) {
				release_page(&fs);
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Check whether the protection has changed or the object has
			 * been copied while we left the map unlocked. Changing from
			 * read to write permission is OK - we leave the page
			 * write-protected, and catch the write fault. Changing from
			 * write to read permission means that we can't mark the page
			 * write-enabled after all.
			 */
			prot &= retry_prot;
		}
	}
	if (prot & VM_PROT_WRITE) {
		vm_object_set_writeable_dirty(fs.object);

		/*
		 * If the fault is a write, we know that this page is being
		 * written NOW so dirty it explicitly to save on
		 * pmap_is_modified() calls later.
		 *
		 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping-ponging.
		 *
		 * Also tell the backing pager, if any, that it should remove
		 * any swap backing since the page is now dirty.
		 */
		if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
			if (fs.m->dirty == 0)
				fs.m->oflags |= VPO_NOSYNC;
		} else {
			fs.m->oflags &= ~VPO_NOSYNC;
		}
		if (fault_flags & VM_FAULT_DIRTY) {
			vm_page_dirty(fs.m);
			vm_pager_page_unswapped(fs.m);
		}
	}

	/*
	 * Page had better still be busy
	 */
	KASSERT(fs.m->oflags & VPO_BUSY,
		("vm_fault: page %p not busy!", fs.m));
	/*
	 * Sanity check: page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	if (fs.m->valid != VM_PAGE_BITS_ALL) {
		vm_page_zero_invalid(fs.m, TRUE);
		printf("Warning: page %p partially invalid on fault\n", fs.m);
	}
	VM_OBJECT_UNLOCK(fs.object);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
	}
	VM_OBJECT_LOCK(fs.object);
	vm_page_lock_queues();
	vm_page_flag_set(fs.m, PG_REFERENCED);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if (fault_flags & VM_FAULT_WIRE_MASK) {
		if (wired)
			vm_page_wire(fs.m);
		else
			vm_page_unwire(fs.m, 1);
	} else {
		vm_page_activate(fs.m);
	}
	vm_page_unlock_queues();
	vm_page_wakeup(fs.m);

	/*
	 * Unlock everything, and return
	 */
	unlock_and_deallocate(&fs);
	if (hardfault)
		curthread->td_ru.ru_majflt++;
	else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * page faults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
	int i;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	vm_object_t object;

	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	object = entry->object.vm_object;

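	/*
	 * Clamp the prefault window to the map entry.  Because starta is
	 * unsigned, a value greater than addra means the subtraction below
	 * wrapped around; start at 0 in that case.
	 */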
	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		starta = 0;
	}

	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t backing_object, lobject;

		addr = addra + prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		VM_OBJECT_LOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(lobject);
			lobject = backing_object;
		}
		/*
		 * give up when a page is not in memory
		 */
		if (m == NULL) {
			VM_OBJECT_UNLOCK(lobject);
			break;
		}
		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			(m->busy == 0) &&
		    (m->flags & PG_FICTITIOUS) == 0) {

			vm_page_lock_queues();
			pmap_enter_quick(pmap, addr, m, entry->protection);
			vm_page_unlock_queues();
		}
		VM_OBJECT_UNLOCK(lobject);
	}
}

/*
 *	vm_fault_quick:
 *
 *	Ensure that the requested virtual address, which may be in userland,
 *	is valid.  Fault-in the page if necessary.  Return -1 on failure.
 */
int
vm_fault_quick(caddr_t v, int prot)
{
	int r;

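	/*
	 * Touching the byte forces any needed fault: fubyte() triggers a
	 * read fault, and subyte(), storing back the value just read,
	 * triggers a write fault without altering the data.  Both return
	 * -1 on failure.
	 */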
	if (prot & VM_PROT_WRITE)
		r = subyte(v, fubyte(v));
	else
		r = fubyte(v);
	return (r);
}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t user_wire, boolean_t fictitious)
{
	vm_offset_t va;
	int rv;

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.  For user wiring, we only ask for read access on currently
	 * read-only sections.
	 */
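	/*
	 * If any page cannot be wired, unwind the pages already wired in
	 * [start, va) before returning the error, leaving the range either
	 * fully wired or not wired at all.
	 */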
	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va,
		    user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
		    user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va, fictitious);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}

/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
	vm_paddr_t pa;
	vm_offset_t va;
	pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa != 0) {
			pmap_change_wiring(pmap, va, FALSE);
			if (!fictitious) {
				vm_page_lock_queues();
				vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
				vm_page_unlock_queues();
			}
		}
	}
}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
{
	vm_object_t backing_object, dst_object, object;
	vm_object_t src_object;
	vm_ooffset_t dst_offset;
	vm_ooffset_t src_offset;
	vm_pindex_t pindex;
	vm_prot_t prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;

#ifdef	lint
	src_map++;
#endif	/* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 * Create the top-level object for the destination entry. (Doesn't
	 * actually shadow anything - we copy the pages directly.)
	 */
	dst_object = vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(dst_entry->end - dst_entry->start));
#if VM_NRESERVLEVEL > 0
	dst_object->flags |= OBJ_COLORED;
	dst_object->pg_color = atop(dst_entry->start);
#endif

	VM_OBJECT_LOCK(dst_object);
	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 * Loop through all of the pages in the entry's range, copying each
	 * one from the source object (it should be there) to the destination
	 * object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 * Allocate a page in the destination object
		 */
		do {
			dst_m = vm_page_alloc(dst_object,
				OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_UNLOCK(dst_object);
				VM_WAIT;
				VM_OBJECT_LOCK(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be in
		 * memory.)
		 */
		VM_OBJECT_LOCK(src_object);
		object = src_object;
		pindex = 0;
		while ((src_m = vm_page_lookup(object, pindex +
		    OFF_TO_IDX(dst_offset + src_offset))) == NULL &&
		    (src_entry->protection & VM_PROT_WRITE) == 0 &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");
		pmap_copy_page(src_m, dst_m);
		VM_OBJECT_UNLOCK(object);
		dst_m->valid = VM_PAGE_BITS_ALL;
		VM_OBJECT_UNLOCK(dst_object);

		/*
		 * Enter it in the pmap as a read and/or execute access.
		 */
		pmap_enter(dst_map->pmap, vaddr, prot & ~VM_PROT_WRITE, dst_m,
		    prot, FALSE);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_LOCK(dst_object);
		vm_page_lock_queues();
		vm_page_activate(dst_m);
		vm_page_unlock_queues();
		vm_page_wakeup(dst_m);
	}
	VM_OBJECT_UNLOCK(dst_object);
}

/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in.  This routine brackets the viable
 * pages for the pages to be paged in.
 *
 * Inputs:
 *	m, rbehind, rahead
 *
 * Outputs:
 *	marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *	number of pages in marray
 *
 * This routine can't block.
 */
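/*
 * Worked example (assuming the pager reports enough pages, no neighboring
 * resident page shortens the window, and every allocation below succeeds):
 * for a requested page at pindex 10 with rbehind = 2 and rahead = 3, marray
 * receives the pages at pindexes 8 through 13, the return value is 6, and
 * *reqpage is 2, the index of the requested page within marray.
 */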
static int
vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
    vm_page_t *marray, int *reqpage)
{
	int i, j;
	vm_object_t object;
	vm_pindex_t pindex, startpindex, endpindex, tpindex;
	vm_page_t rtm;
	int cbehind, cahead;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

	object = m->object;
	pindex = m->pindex;
	cbehind = cahead = 0;

	/*
	 * if the requested page is not available, then give up now
	 */
	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
		return (0);
	}

	if ((cbehind == 0) && (cahead == 0)) {
		*reqpage = 0;
		marray[0] = m;
		return (1);
	}

	if (rahead > cahead) {
		rahead = cahead;
	}

	if (rbehind > cbehind) {
		rbehind = cbehind;
	}

	/*
	 * scan backward for the read behind pages -- in memory
	 */
	if (pindex > 0) {
		if (rbehind > pindex) {
			rbehind = pindex;
			startpindex = 0;
		} else {
			startpindex = pindex - rbehind;
		}

		if ((rtm = TAILQ_PREV(m, pglist, listq)) != NULL &&
		    rtm->pindex >= startpindex)
			startpindex = rtm->pindex + 1;

		/* tpindex is unsigned; beware of numeric underflow. */
		for (i = 0, tpindex = pindex - 1; tpindex >= startpindex &&
		    tpindex < pindex; i++, tpindex--) {

			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
			    VM_ALLOC_IFNOTCACHED);
			if (rtm == NULL) {
				/*
				 * Shift the allocated pages to the
				 * beginning of the array.
				 */
				for (j = 0; j < i; j++) {
					marray[j] = marray[j + tpindex + 1 -
					    startpindex];
				}
				break;
			}

			marray[tpindex - startpindex] = rtm;
		}
	} else {
		startpindex = 0;
		i = 0;
	}

	marray[i] = m;
	/* page offset of the required page */
	*reqpage = i;

	tpindex = pindex + 1;
	i++;

	/*
	 * scan forward for the read ahead pages
	 */
	endpindex = tpindex + rahead;
	if ((rtm = TAILQ_NEXT(m, listq)) != NULL && rtm->pindex < endpindex)
		endpindex = rtm->pindex;
	if (endpindex > object->size)
		endpindex = object->size;

	for (; tpindex < endpindex; i++, tpindex++) {

		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
		    VM_ALLOC_IFNOTCACHED);
		if (rtm == NULL) {
			break;
		}

		marray[i] = rtm;
	}

	/* return number of pages */
	return (i);
}