/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

#include <sys/mount.h>  /* XXX Temporary for VFS_LOCK_GIANT() */

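/*
 * The number of pages considered for prefaulting behind (PFBAK) and ahead
 * (PFFOR) of the faulting address by vm_fault_prefault().  The
 * prefault_pageorder[] table below visits candidate pages in nearest-first
 * order, alternating between the page behind and the page ahead.
 */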
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int prefault_pageorder[] = {
        -1 * PAGE_SIZE, 1 * PAGE_SIZE,
        -2 * PAGE_SIZE, 2 * PAGE_SIZE,
        -3 * PAGE_SIZE, 3 * PAGE_SIZE,
        -4 * PAGE_SIZE, 4 * PAGE_SIZE
};

static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);

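/*
 * The size of the cluster handed to the pager on a hard fault: up to
 * VM_FAULT_READ_BEHIND pages before and VM_FAULT_READ_AHEAD pages after
 * the faulting page may be read in a single pager request.
 */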
#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

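/*
 * State carried across the stages of a single fault and across retries:
 * the current object/page being examined, the top-level ("first") object
 * and page, the map lookup results, and the vnode locked by
 * vnode_pager_lock(), if any.
 */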
struct faultstate {
        vm_page_t m;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_page_t first_m;
        vm_object_t     first_object;
        vm_pindex_t first_pindex;
        vm_map_t map;
        vm_map_entry_t entry;
        int lookup_still_valid;
        struct vnode *vp;
};

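/*
 * Release the current page: wake up anyone sleeping on it, move it to
 * the inactive queue, and forget it.
 */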
static inline void
release_page(struct faultstate *fs)
{
        vm_page_lock_queues();
        vm_page_wakeup(fs->m);
        vm_page_deactivate(fs->m);
        vm_page_unlock_queues();
        fs->m = NULL;
}

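/*
 * Drop the map lookup lock if it is still held.
 */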
static inline void
unlock_map(struct faultstate *fs)
{
        if (fs->lookup_still_valid) {
                vm_map_lookup_done(fs->map, fs->entry);
                fs->lookup_still_valid = FALSE;
        }
}

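/*
 * Unwind all fault state: release the current and top-level objects,
 * free any transient first_m page, drop the map lookup, release the
 * vnode, and drop Giant if the top-level object required it.
 */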
static void
unlock_and_deallocate(struct faultstate *fs)
{
        boolean_t firstobjneedgiant;

        vm_object_pip_wakeup(fs->object);
        VM_OBJECT_UNLOCK(fs->object);
        if (fs->object != fs->first_object) {
                VM_OBJECT_LOCK(fs->first_object);
                vm_page_lock_queues();
                vm_page_free(fs->first_m);
                vm_page_unlock_queues();
                vm_object_pip_wakeup(fs->first_object);
                VM_OBJECT_UNLOCK(fs->first_object);
                fs->first_m = NULL;
        }
        firstobjneedgiant = (fs->first_object->flags & OBJ_NEEDGIANT) != 0;
        vm_object_deallocate(fs->first_object);
        unlock_map(fs);
        if (fs->vp != NULL) {
                int vfslocked;

                vfslocked = VFS_LOCK_GIANT(fs->vp->v_mount);
                vput(fs->vp);
                fs->vp = NULL;
                VFS_UNLOCK_GIANT(vfslocked);
        }
        if (firstobjneedgiant)
                VM_UNLOCK_GIANT();
}

/*
 * TRYPAGER - used by vm_fault to calculate whether the pager for the
 *            current object *might* contain the page.
 *
 *            Default objects are zero-fill; there is no real pager.
 */
#define TRYPAGER        (fs.object->type != OBJT_DEFAULT && \
                        (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))

/*
 *      vm_fault:
 *
 *      Handle a page fault occurring at the given address,
 *      requiring the given permissions, in the map specified.
 *      If successful, the page is inserted into the
 *      associated physical map.
 *
 *      NOTE: the given address should be truncated to the
 *      proper page address.
 *
 *      KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *      a standard error specifying why the fault is fatal is returned.
 *
 *      The map in question must be referenced, and remains so.
 *      Caller may hold no locks.
 */
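
/*
 * Illustrative only (not part of this file): a machine-dependent trap
 * handler typically resolves a user-mode fault with a call such as
 *
 *      rv = vm_fault(&p->p_vmspace->vm_map, trunc_page(va), ftype,
 *          VM_FAULT_NORMAL);
 *
 * where ftype is VM_PROT_READ or VM_PROT_WRITE as decoded from the
 * trap frame.
 */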
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
         int fault_flags)
{
        vm_prot_t prot;
        int is_first_object_locked, result;
        boolean_t growstack, wired;
        int map_generation;
        vm_object_t next_object;
        vm_page_t marray[VM_FAULT_READ];
        int hardfault;
        int faultcount;
        struct faultstate fs;

        hardfault = 0;
        growstack = TRUE;
        atomic_add_int(&cnt.v_vm_faults, 1);

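/*
 * RetryFault is the restart point for the whole operation: whenever a
 * lock must be dropped in a way that invalidates the map lookup or the
 * object chain, all state is unwound and the fault starts over.
 */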
RetryFault:;

        /*
         * Find the backing store object and offset into it to begin the
         * search.
         */
        fs.map = map;
        result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry,
            &fs.first_object, &fs.first_pindex, &prot, &wired);
        if (result != KERN_SUCCESS) {
                if (result != KERN_PROTECTION_FAILURE ||
                    (fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) {
                        if (growstack && result == KERN_INVALID_ADDRESS &&
                            map != kernel_map && curproc != NULL) {
                                result = vm_map_growstack(curproc, vaddr);
                                if (result != KERN_SUCCESS)
                                        return (KERN_FAILURE);
                                growstack = FALSE;
                                goto RetryFault;
                        }
                        return (result);
                }

                /*
                 * If we are user-wiring a r/w segment, and it is COW, then
                 * we need to do the COW operation.  Note that we don't
                 * currently COW RO sections, because it is NOT desirable
                 * to COW .text.  We simply keep .text from ever being COW'ed
                 * and take the heat that one cannot debug wired .text sections.
                 */
                result = vm_map_lookup(&fs.map, vaddr,
                        VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
                        &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired);
                if (result != KERN_SUCCESS)
                        return (result);

                /*
                 * If we don't COW now, on a user wire, the user will never
                 * be able to write to the mapping.  If we don't make this
                 * restriction, the bookkeeping would be nearly impossible.
                 *
                 * XXX The following assignment modifies the map without
                 * holding a write lock on it.
                 */
                if ((fs.entry->protection & VM_PROT_WRITE) == 0)
                        fs.entry->max_protection &= ~VM_PROT_WRITE;
        }

        map_generation = fs.map->timestamp;

        if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
                panic("vm_fault: fault on nofault entry, addr: %lx",
                    (u_long)vaddr);
        }

        /*
         * Make a reference to this object to prevent its disposal while we
         * are messing with it.  Once we have the reference, the map is free
         * to be diddled.  Since objects reference their shadows (and copies),
         * they will stay around as well.
         *
         * Bump the paging-in-progress count to prevent size changes (e.g.
         * truncation operations) during I/O.  This must be done after
         * obtaining the vnode lock in order to avoid possible deadlocks.
         *
         * XXX vnode_pager_lock() can block without releasing the map lock.
         */
        if (fs.first_object->flags & OBJ_NEEDGIANT)
                mtx_lock(&Giant);
        VM_OBJECT_LOCK(fs.first_object);
        vm_object_reference_locked(fs.first_object);
        fs.vp = vnode_pager_lock(fs.first_object);
        KASSERT(fs.vp == NULL || !fs.map->system_map,
            ("vm_fault: vnode-backed object mapped by system map"));
        KASSERT((fs.first_object->flags & OBJ_NEEDGIANT) == 0 ||
            !fs.map->system_map,
            ("vm_fault: Object requiring giant mapped by system map"));
        if (fs.first_object->flags & OBJ_NEEDGIANT && debug_mpsafevm)
                mtx_unlock(&Giant);
        vm_object_pip_add(fs.first_object, 1);

        fs.lookup_still_valid = TRUE;

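        /*
         * If the entry is wired, enter the page with the full protection
         * allowed by the map entry: a wired mapping must never take a
         * fault to upgrade its access later.
         */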
        if (wired)
                fault_type = prot;

        fs.first_m = NULL;

        /*
         * Search for the page at object/offset.
         */
        fs.object = fs.first_object;
        fs.pindex = fs.first_pindex;
        while (TRUE) {
                /*
                 * If the object is dead, we stop here
                 */
                if (fs.object->flags & OBJ_DEAD) {
                        unlock_and_deallocate(&fs);
                        return (KERN_PROTECTION_FAILURE);
                }

                /*
                 * See if page is resident
                 */
                fs.m = vm_page_lookup(fs.object, fs.pindex);
                if (fs.m != NULL) {
                        int queue;

                        /*
                         * check for page-based copy on write.
                         * We check fs.object == fs.first_object so
                         * as to ensure the legacy COW mechanism is
                         * used when the page in question is part of
                         * a shadow object.  Otherwise, vm_page_cowfault()
                         * removes the page from the backing object,
                         * which is not what we want.
                         */
                        vm_page_lock_queues();
                        if ((fs.m->cow) &&
                            (fault_type & VM_PROT_WRITE) &&
                            (fs.object == fs.first_object)) {
                                vm_page_cowfault(fs.m);
                                vm_page_unlock_queues();
                                unlock_and_deallocate(&fs);
                                goto RetryFault;
                        }

                        /*
                         * Wait/Retry if the page is busy.  We have to do this
                         * if the page is busy via either PG_BUSY or
                         * vm_page_t->busy because the vm_pager may be using
                         * vm_page_t->busy for pageouts ( and even pageins if
                         * it is the vnode pager ), and we could end up trying
                         * to pagein and pageout the same page simultaneously.
                         *
                         * We can theoretically allow the busy case on a read
                         * fault if the page is marked valid, but since such
                         * pages are typically already pmap'd, putting that
                         * special case in might be more effort than it is
                         * worth.  We cannot under any circumstances mess
                         * around with a vm_page_t->busy page except, perhaps,
                         * to pmap it.
                         */
                        if ((fs.m->flags & PG_BUSY) || fs.m->busy) {
                                vm_page_unlock_queues();
                                VM_OBJECT_UNLOCK(fs.object);
                                if (fs.object != fs.first_object) {
                                        VM_OBJECT_LOCK(fs.first_object);
                                        vm_page_lock_queues();
                                        vm_page_free(fs.first_m);
                                        vm_page_unlock_queues();
                                        vm_object_pip_wakeup(fs.first_object);
                                        VM_OBJECT_UNLOCK(fs.first_object);
                                        fs.first_m = NULL;
                                }
                                unlock_map(&fs);
                                if (fs.vp != NULL) {
                                        int vfslocked;

                                        vfslocked = VFS_LOCK_GIANT(fs.vp->v_mount);
                                        vput(fs.vp);
                                        fs.vp = NULL;
                                        VFS_UNLOCK_GIANT(vfslocked);
                                }
                                VM_OBJECT_LOCK(fs.object);
                                if (fs.m == vm_page_lookup(fs.object,
                                    fs.pindex)) {
                                        vm_page_lock_queues();
                                        if (!vm_page_sleep_if_busy(fs.m, TRUE,
                                            "vmpfw"))
                                                vm_page_unlock_queues();
                                }
                                vm_object_pip_wakeup(fs.object);
                                VM_OBJECT_UNLOCK(fs.object);
                                atomic_add_int(&cnt.v_intrans, 1);
                                if (fs.first_object->flags & OBJ_NEEDGIANT)
                                        VM_UNLOCK_GIANT();
                                vm_object_deallocate(fs.first_object);
                                goto RetryFault;
                        }
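                        /*
                         * Take the page off its queue.  If it was on the
                         * cache queue and free memory is severely depleted,
                         * reactivate it and wait for the pagedaemon instead
                         * of consuming one of the last cached pages.
                         */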
                        queue = fs.m->queue;

                        vm_pageq_remove_nowakeup(fs.m);

                        if (VM_PAGE_RESOLVEQUEUE(fs.m, queue) == PQ_CACHE &&
                            vm_page_count_severe()) {
                                vm_page_activate(fs.m);
                                vm_page_unlock_queues();
                                unlock_and_deallocate(&fs);
                                VM_WAITPFAULT;
                                goto RetryFault;
                        }

                        /*
                         * Mark page busy for other processes, and the
                         * pagedaemon.  If it still isn't completely valid
                         * (readable), jump to readrest, else break-out ( we
                         * found the page ).
                         */
                        vm_page_busy(fs.m);
                        vm_page_unlock_queues();
                        if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
                                fs.m->object != kernel_object && fs.m->object != kmem_object) {
                                goto readrest;
                        }

                        break;
                }

                /*
                 * Page is not resident.  If this is the search termination
                 * or the pager might contain the page, allocate a new page.
                 */
                if (TRYPAGER || fs.object == fs.first_object) {
                        if (fs.pindex >= fs.object->size) {
                                unlock_and_deallocate(&fs);
                                return (KERN_PROTECTION_FAILURE);
                        }

                        /*
                         * Allocate a new page for this object/offset pair.
                         */
                        fs.m = NULL;
                        if (!vm_page_count_severe()) {
                                fs.m = vm_page_alloc(fs.object, fs.pindex,
                                    (fs.vp || fs.object->backing_object)? VM_ALLOC_NORMAL: VM_ALLOC_ZERO);
                        }
                        if (fs.m == NULL) {
                                unlock_and_deallocate(&fs);
                                VM_WAITPFAULT;
                                goto RetryFault;
                        }
                }

readrest:
                /*
                 * We have found a valid page or we have allocated a new page.
                 * The page may not be valid at all, or may be only partially
                 * valid.
                 *
                 * Attempt to fault-in the page if there is a chance that the
                 * pager has it, and potentially fault in additional pages
                 * at the same time.
                 */
                if (TRYPAGER) {
                        int rv;
                        int reqpage;
                        int ahead, behind;
                        u_char behavior = vm_map_entry_behavior(fs.entry);

                        if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
                                ahead = 0;
                                behind = 0;
                        } else {
                                behind = (vaddr - fs.entry->start) >> PAGE_SHIFT;
                                if (behind > VM_FAULT_READ_BEHIND)
                                        behind = VM_FAULT_READ_BEHIND;

                                ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1;
                                if (ahead > VM_FAULT_READ_AHEAD)
                                        ahead = VM_FAULT_READ_AHEAD;
                        }
                        is_first_object_locked = FALSE;
                        if ((behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
                             (behavior != MAP_ENTRY_BEHAV_RANDOM &&
                              fs.pindex >= fs.entry->lastr &&
                              fs.pindex < fs.entry->lastr + VM_FAULT_READ)) &&
                            (fs.first_object == fs.object ||
                             (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object))) &&
                            fs.first_object->type != OBJT_DEVICE) {
                                vm_pindex_t firstpindex, tmppindex;

                                if (fs.first_pindex < 2 * VM_FAULT_READ)
                                        firstpindex = 0;
                                else
                                        firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;

                                vm_page_lock_queues();
                                /*
                                 * note: partially valid pages cannot be
                                 * included in the lookahead - NFS piecemeal
                                 * writes will barf on it badly.
                                 */
                                for (tmppindex = fs.first_pindex - 1;
                                        tmppindex >= firstpindex;
                                        --tmppindex) {
                                        vm_page_t mt;

                                        mt = vm_page_lookup(fs.first_object, tmppindex);
                                        if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
                                                break;
                                        if (mt->busy ||
                                                (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) ||
                                                mt->hold_count ||
                                                mt->wire_count)
                                                continue;
                                        pmap_remove_all(mt);
                                        if (mt->dirty) {
                                                vm_page_deactivate(mt);
                                        } else {
                                                vm_page_cache(mt);
                                        }
                                }
                                vm_page_unlock_queues();
                                ahead += behind;
                                behind = 0;
                        }
                        if (is_first_object_locked)
                                VM_OBJECT_UNLOCK(fs.first_object);
                        /*
                         * Now we find out if any other pages should be paged
                         * in at this time.  This routine checks to see if the
                         * pages surrounding this fault reside in the same
                         * object as the page for this fault.  If they do,
                         * then they are also faulted into the object.  The
                         * returned "marray" array contains the vm_page_t
                         * structs, one of which is the vm_page_t passed to
                         * the routine.  The reqpage return value is the
                         * index into marray of the vm_page_t passed to the
                         * routine.
                         *
                         * fs.m plus the additional pages are PG_BUSY'd.
                         *
                         * XXX vm_fault_additional_pages() can block
                         * without releasing the map lock.
                         */
                        faultcount = vm_fault_additional_pages(
                            fs.m, behind, ahead, marray, &reqpage);

                        /*
                         * Update lastr imperfectly (we do not know how much
                         * getpages will actually read), but good enough.
                         *
                         * XXX The following assignment modifies the map
                         * without holding a write lock on it.
                         */
                        fs.entry->lastr = fs.pindex + faultcount - behind;

                        /*
                         * Call the pager to retrieve the data, if any, after
                         * releasing the lock on the map.  We hold a ref on
                         * fs.object and the pages are PG_BUSY'd.
                         */
                        unlock_map(&fs);

                        rv = faultcount ?
                            vm_pager_get_pages(fs.object, marray, faultcount,
                                reqpage) : VM_PAGER_FAIL;

                        if (rv == VM_PAGER_OK) {
                                /*
                                 * Found the page. Leave it busy while we play
                                 * with it.
                                 */

                                /*
                                 * Relookup in case pager changed page. Pager
                                 * is responsible for disposition of old page
                                 * if moved.
                                 */
                                fs.m = vm_page_lookup(fs.object, fs.pindex);
                                if (!fs.m) {
                                        unlock_and_deallocate(&fs);
                                        goto RetryFault;
                                }

                                hardfault++;
                                break; /* break to PAGE HAS BEEN FOUND */
                        }
                        /*
                         * Remove the bogus page (which does not exist at this
                         * object/offset); before doing so, we must get back
                         * our object lock to preserve our invariant.
                         *
                         * Also wake up any other process that may want to bring
                         * in this page.
                         *
                         * If this is the top-level object, we must leave the
                         * busy page to prevent another process from rushing
                         * past us, and inserting the page in that object at
                         * the same time that we are.
                         */
                        if (rv == VM_PAGER_ERROR)
                                printf("vm_fault: pager read error, pid %d (%s)\n",
                                    curproc->p_pid, curproc->p_comm);
                        /*
                         * Data outside the range of the pager or an I/O error
                         */
                        /*
                         * XXX - the check for kernel_map is a kludge to work
                         * around having the machine panic on a kernel space
                         * fault w/ I/O error.
                         */
                        if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
                                (rv == VM_PAGER_BAD)) {
                                vm_page_lock_queues();
                                vm_page_free(fs.m);
                                vm_page_unlock_queues();
                                fs.m = NULL;
                                unlock_and_deallocate(&fs);
                                return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
                        }
                        if (fs.object != fs.first_object) {
                                vm_page_lock_queues();
                                vm_page_free(fs.m);
                                vm_page_unlock_queues();
                                fs.m = NULL;
                                /*
                                 * XXX - we cannot just fall out at this
                                 * point, m has been freed and is invalid!
                                 */
                        }
                }

                /*
                 * We get here if the object has a default pager (or unwiring)
                 * or the pager doesn't have the page.
                 */
                if (fs.object == fs.first_object)
                        fs.first_m = fs.m;

                /*
                 * Move on to the next object.  Lock the next object before
                 * unlocking the current one.
                 */
                fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
                next_object = fs.object->backing_object;
                if (next_object == NULL) {
                        /*
                         * If there's no object left, fill the page in the top
                         * object with zeros.
                         */
                        if (fs.object != fs.first_object) {
                                vm_object_pip_wakeup(fs.object);
                                VM_OBJECT_UNLOCK(fs.object);

                                fs.object = fs.first_object;
                                fs.pindex = fs.first_pindex;
                                fs.m = fs.first_m;
                                VM_OBJECT_LOCK(fs.object);
                        }
                        fs.first_m = NULL;

                        /*
                         * Zero the page if necessary and mark it valid.
                         */
                        if ((fs.m->flags & PG_ZERO) == 0) {
                                pmap_zero_page(fs.m);
                        } else {
                                atomic_add_int(&cnt.v_ozfod, 1);
                        }
                        atomic_add_int(&cnt.v_zfod, 1);
                        fs.m->valid = VM_PAGE_BITS_ALL;
                        break;  /* break to PAGE HAS BEEN FOUND */
                } else {
                        KASSERT(fs.object != next_object,
                            ("object loop %p", next_object));
                        VM_OBJECT_LOCK(next_object);
                        vm_object_pip_add(next_object, 1);
                        if (fs.object != fs.first_object)
                                vm_object_pip_wakeup(fs.object);
                        VM_OBJECT_UNLOCK(fs.object);
                        fs.object = next_object;
                }
        }

        KASSERT((fs.m->flags & PG_BUSY) != 0,
            ("vm_fault: not busy after main loop"));

        /*
         * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
         * is held.]
         */

        /*
         * If the page is being written, but isn't already owned by the
         * top-level object, we have to copy it into a new page owned by the
         * top-level object.
         */
        if (fs.object != fs.first_object) {
                /*
                 * We only really need to copy if we want to write it.
                 */
                if (fault_type & VM_PROT_WRITE) {
                        /*
                         * This allows pages to be virtually copied from a
                         * backing_object into the first_object, where the
                         * backing object has no other refs to it, and cannot
                         * gain any more refs.  Instead of a bcopy, we just
                         * move the page from the backing object to the
                         * first object.  Note that we must mark the page
                         * dirty in the first object so that it will go out
                         * to swap when needed.
                         */
                        is_first_object_locked = FALSE;
                        if (
                                /*
                                 * Only one shadow object
                                 */
                                (fs.object->shadow_count == 1) &&
                                /*
                                 * No COW refs, except us
                                 */
                                (fs.object->ref_count == 1) &&
                                /*
                                 * No one else can look this object up
                                 */
                                (fs.object->handle == NULL) &&
                                /*
                                 * No other ways to look the object up
                                 */
                                ((fs.object->type == OBJT_DEFAULT) ||
                                 (fs.object->type == OBJT_SWAP)) &&
                            (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
                                /*
                                 * We don't chase down the shadow chain
                                 */
                            fs.object == fs.first_object->backing_object) {
                                vm_page_lock_queues();
                                /*
                                 * get rid of the unnecessary page
                                 */
                                vm_page_free(fs.first_m);
                                /*
                                 * grab the page and put it into the
                                 * process's object.  The page is
                                 * automatically made dirty.
                                 */
                                vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
                                vm_page_busy(fs.m);
                                vm_page_unlock_queues();
                                fs.first_m = fs.m;
                                fs.m = NULL;
                                atomic_add_int(&cnt.v_cow_optim, 1);
                        } else {
                                /*
                                 * Oh, well, let's copy it.
                                 */
                                pmap_copy_page(fs.m, fs.first_m);
                                fs.first_m->valid = VM_PAGE_BITS_ALL;
                        }
                        if (fs.m) {
                                /*
                                 * We no longer need the old page or object.
                                 */
                                release_page(&fs);
                        }
                        /*
                         * fs.object != fs.first_object due to above
                         * conditional
                         */
                        vm_object_pip_wakeup(fs.object);
                        VM_OBJECT_UNLOCK(fs.object);
                        /*
                         * Only use the new page below...
                         */
                        fs.object = fs.first_object;
                        fs.pindex = fs.first_pindex;
                        fs.m = fs.first_m;
                        if (!is_first_object_locked)
                                VM_OBJECT_LOCK(fs.object);
                        atomic_add_int(&cnt.v_cow_faults, 1);
                } else {
                        prot &= ~VM_PROT_WRITE;
                }
        }

        /*
         * We must verify that the maps have not changed since our last
         * lookup.
         */
        if (!fs.lookup_still_valid) {
                vm_object_t retry_object;
                vm_pindex_t retry_pindex;
                vm_prot_t retry_prot;

                if (!vm_map_trylock_read(fs.map)) {
                        release_page(&fs);
                        unlock_and_deallocate(&fs);
                        goto RetryFault;
                }
                fs.lookup_still_valid = TRUE;
                if (fs.map->timestamp != map_generation) {
                        result = vm_map_lookup_locked(&fs.map, vaddr, fault_type,
                            &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);

                        /*
                         * If we don't need the page any longer, put it on the inactive
                         * list (the easiest thing to do here).  If no one needs it,
                         * pageout will grab it eventually.
                         */
                        if (result != KERN_SUCCESS) {
                                release_page(&fs);
                                unlock_and_deallocate(&fs);

                                /*
                                 * If retry of map lookup would have blocked then
                                 * retry fault from start.
                                 */
                                if (result == KERN_FAILURE)
                                        goto RetryFault;
                                return (result);
                        }
                        if ((retry_object != fs.first_object) ||
                            (retry_pindex != fs.first_pindex)) {
                                release_page(&fs);
                                unlock_and_deallocate(&fs);
                                goto RetryFault;
                        }

                        /*
                         * Check whether the protection has changed or the object has
                         * been copied while we left the map unlocked. Changing from
                         * read to write permission is OK - we leave the page
                         * write-protected, and catch the write fault. Changing from
                         * write to read permission means that we can't mark the page
                         * write-enabled after all.
                         */
                        prot &= retry_prot;
                }
        }
        if (prot & VM_PROT_WRITE) {
                vm_page_lock_queues();
                vm_page_flag_set(fs.m, PG_WRITEABLE);
                vm_object_set_writeable_dirty(fs.m->object);

                /*
                 * If the fault is a write, we know that this page is being
                 * written NOW so dirty it explicitly to save on
                 * pmap_is_modified() calls later.
                 *
                 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
                 * if the page is already dirty to prevent data written with
                 * the expectation of being synced from not being synced.
                 * Likewise if this entry does not request NOSYNC then make
                 * sure the page isn't marked NOSYNC.  Applications sharing
                 * data should use the same flags to avoid ping-ponging.
                 *
                 * Also tell the backing pager, if any, that it should remove
                 * any swap backing since the page is now dirty.
                 */
                if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
                        if (fs.m->dirty == 0)
                                vm_page_flag_set(fs.m, PG_NOSYNC);
                } else {
                        vm_page_flag_clear(fs.m, PG_NOSYNC);
                }
                vm_page_unlock_queues();
                if (fault_flags & VM_FAULT_DIRTY) {
                        vm_page_dirty(fs.m);
                        vm_pager_page_unswapped(fs.m);
                }
        }

        /*
         * Page had better still be busy
         */
        KASSERT(fs.m->flags & PG_BUSY,
                ("vm_fault: page %p not busy!", fs.m));
        /*
         * Sanity check: page must be completely valid or it is not fit to
         * map into user space.  vm_pager_get_pages() ensures this.
         */
        if (fs.m->valid != VM_PAGE_BITS_ALL) {
                vm_page_zero_invalid(fs.m, TRUE);
                printf("Warning: page %p partially invalid on fault\n", fs.m);
        }
        VM_OBJECT_UNLOCK(fs.object);

        /*
         * Put this page into the physical map.  We had to do the unlock above
         * because pmap_enter() may sleep.  We don't put the page
         * back on the active queue until later so that the pageout daemon
         * won't find it (yet).
         */
        pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired);
        if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
                vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
        }
        VM_OBJECT_LOCK(fs.object);
        vm_page_lock_queues();
        vm_page_flag_set(fs.m, PG_REFERENCED);

        /*
         * If the page is not wired down, then put it where the pageout daemon
         * can find it.
         */
        if (fault_flags & VM_FAULT_WIRE_MASK) {
                if (wired)
                        vm_page_wire(fs.m);
                else
                        vm_page_unwire(fs.m, 1);
        } else {
                vm_page_activate(fs.m);
        }
        vm_page_wakeup(fs.m);
        vm_page_unlock_queues();

        /*
         * Unlock everything, and return
         */
        unlock_and_deallocate(&fs);
        PROC_LOCK(curproc);
        if ((curproc->p_sflag & PS_INMEM) && curproc->p_stats) {
                if (hardfault) {
                        curproc->p_stats->p_ru.ru_majflt++;
                } else {
                        curproc->p_stats->p_ru.ru_minflt++;
                }
        }
        PROC_UNLOCK(curproc);

        return (KERN_SUCCESS);
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
        int i;
        vm_offset_t addr, starta;
        vm_pindex_t pindex;
        vm_page_t m, mpte;
        vm_object_t object;

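        /*
         * Prefault is only an optimization for the currently running
         * process; entering pages into any other pmap is skipped.
         */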
        if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
                return;

        object = entry->object.vm_object;

        starta = addra - PFBAK * PAGE_SIZE;
        if (starta < entry->start) {
                starta = entry->start;
        } else if (starta > addra) {
                starta = 0;
        }

        mpte = NULL;
        for (i = 0; i < PAGEORDER_SIZE; i++) {
                vm_object_t backing_object, lobject;

                addr = addra + prefault_pageorder[i];
                if (addr > addra + (PFFOR * PAGE_SIZE))
                        addr = 0;

                if (addr < starta || addr >= entry->end)
                        continue;

                if (!pmap_is_prefaultable(pmap, addr))
                        continue;

                pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
                lobject = object;
                VM_OBJECT_LOCK(lobject);
                while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
                    lobject->type == OBJT_DEFAULT &&
                    (backing_object = lobject->backing_object) != NULL) {
                        if (lobject->backing_object_offset & PAGE_MASK)
                                break;
                        pindex += lobject->backing_object_offset >> PAGE_SHIFT;
                        VM_OBJECT_LOCK(backing_object);
                        VM_OBJECT_UNLOCK(lobject);
                        lobject = backing_object;
                }
                /*
                 * give up when a page is not in memory
                 */
                if (m == NULL) {
                        VM_OBJECT_UNLOCK(lobject);
                        break;
                }
                if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
                        (m->busy == 0) &&
                    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {

                        vm_page_lock_queues();
                        if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
                                vm_page_deactivate(m);
                        mpte = pmap_enter_quick(pmap, addr, m,
                            entry->protection, mpte);
                        vm_page_unlock_queues();
                }
                VM_OBJECT_UNLOCK(lobject);
        }
}

/*
 *      vm_fault_quick:
 *
 *      Ensure that the requested virtual address, which may be in userland,
 *      is valid.  Fault-in the page if necessary.  Return -1 on failure.
 */
int
vm_fault_quick(caddr_t v, int prot)
{
        int r;

        if (prot & VM_PROT_WRITE)
                r = subyte(v, fubyte(v));
        else
                r = fubyte(v);
        return (r);
}
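
/*
 * Illustrative use (not from this file): callers typically prefault a
 * user buffer before entering a code path that cannot tolerate taking
 * a fault, e.g.
 *
 *      if (vm_fault_quick((caddr_t)uaddr, VM_PROT_READ) < 0)
 *              return (EFAULT);
 */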

/*
 *      vm_fault_wire:
 *
 *      Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t user_wire, boolean_t fictitious)
{
        vm_offset_t va;
        int rv;

        /*
         * We simulate a fault to get the page and enter it in the physical
         * map.  For user wiring, we only ask for read access on currently
         * read-only sections.
         */
        for (va = start; va < end; va += PAGE_SIZE) {
                rv = vm_fault(map, va,
                    user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
                    user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);
                if (rv) {
                        if (va != start)
                                vm_fault_unwire(map, start, va, fictitious);
                        return (rv);
                }
        }
        return (KERN_SUCCESS);
}

/*
 *      vm_fault_unwire:
 *
 *      Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
        vm_paddr_t pa;
        vm_offset_t va;
        pmap_t pmap;

        pmap = vm_map_pmap(map);

        /*
         * Since the pages are wired down, we must be able to get their
         * mappings from the physical map system.
         */
        for (va = start; va < end; va += PAGE_SIZE) {
                pa = pmap_extract(pmap, va);
                if (pa != 0) {
                        pmap_change_wiring(pmap, va, FALSE);
                        if (!fictitious) {
                                vm_page_lock_queues();
                                vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
                                vm_page_unlock_queues();
                        }
                }
        }
}

/*
 *      Routine:
 *              vm_fault_copy_entry
 *      Function:
 *              Copy all of the pages from a wired-down map entry to another.
 *
 *      In/out conditions:
 *              The source and destination maps must be locked for write.
 *              The source map entry must be wired down (or be a sharing map
 *              entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry)
{
        vm_object_t backing_object, dst_object, object;
        vm_object_t src_object;
        vm_ooffset_t dst_offset;
        vm_ooffset_t src_offset;
        vm_pindex_t pindex;
        vm_prot_t prot;
        vm_offset_t vaddr;
        vm_page_t dst_m;
        vm_page_t src_m;

#ifdef  lint
        src_map++;
#endif  /* lint */

        src_object = src_entry->object.vm_object;
        src_offset = src_entry->offset;

        /*
         * Create the top-level object for the destination entry. (Doesn't
         * actually shadow anything - we copy the pages directly.)
         */
        dst_object = vm_object_allocate(OBJT_DEFAULT,
            OFF_TO_IDX(dst_entry->end - dst_entry->start));

        VM_OBJECT_LOCK(dst_object);
        dst_entry->object.vm_object = dst_object;
        dst_entry->offset = 0;

        prot = dst_entry->max_protection;

        /*
         * Loop through all of the pages in the entry's range, copying each
         * one from the source object (it should be there) to the destination
         * object.
         */
        for (vaddr = dst_entry->start, dst_offset = 0;
            vaddr < dst_entry->end;
            vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

                /*
                 * Allocate a page in the destination object
                 */
                do {
                        dst_m = vm_page_alloc(dst_object,
                                OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
                        if (dst_m == NULL) {
                                VM_OBJECT_UNLOCK(dst_object);
                                VM_WAIT;
                                VM_OBJECT_LOCK(dst_object);
                        }
                } while (dst_m == NULL);

                /*
                 * Find the page in the source object, and copy it in.
                 * (Because the source is wired down, the page will be in
                 * memory.)
                 */
                VM_OBJECT_LOCK(src_object);
                object = src_object;
                pindex = 0;
                while ((src_m = vm_page_lookup(object, pindex +
                    OFF_TO_IDX(dst_offset + src_offset))) == NULL &&
                    (src_entry->protection & VM_PROT_WRITE) == 0 &&
                    (backing_object = object->backing_object) != NULL) {
                        /*
                         * Allow fallback to backing objects if we are reading.
                         */
                        VM_OBJECT_LOCK(backing_object);
                        pindex += OFF_TO_IDX(object->backing_object_offset);
                        VM_OBJECT_UNLOCK(object);
                        object = backing_object;
                }
                if (src_m == NULL)
                        panic("vm_fault_copy_entry: page missing");
                pmap_copy_page(src_m, dst_m);
                VM_OBJECT_UNLOCK(object);
                dst_m->valid = VM_PAGE_BITS_ALL;
                VM_OBJECT_UNLOCK(dst_object);

                /*
                 * Enter it in the pmap...
                 */
                pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
                VM_OBJECT_LOCK(dst_object);
                vm_page_lock_queues();
                if ((prot & VM_PROT_WRITE) != 0)
                        vm_page_flag_set(dst_m, PG_WRITEABLE);

                /*
                 * Mark it no longer busy, and put it on the active list.
                 */
                vm_page_activate(dst_m);
                vm_page_wakeup(dst_m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(dst_object);
}

/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in.  It brackets the requested page with
 * whatever viable neighbors can be paged in along with it.
 *
 * Inputs:
 *      m, rbehind, rahead
 *
 * Outputs:
 *      marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *      number of pages in marray
 *
 * This routine can't block.
 */
static int
vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
    vm_page_t *marray, int *reqpage)
{
        int i, j;
        vm_object_t object;
        vm_pindex_t pindex, startpindex, endpindex, tpindex;
        vm_page_t rtm;
        int cbehind, cahead;

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

        object = m->object;
        pindex = m->pindex;

        /*
         * we don't fault-ahead for device pager
         */
        if (object->type == OBJT_DEVICE) {
                *reqpage = 0;
                marray[0] = m;
                return (1);
        }

        /*
         * if the requested page is not available, then give up now
         */
        if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
                return (0);
        }

        if ((cbehind == 0) && (cahead == 0)) {
                *reqpage = 0;
                marray[0] = m;
                return (1);
        }

        if (rahead > cahead) {
                rahead = cahead;
        }

        if (rbehind > cbehind) {
                rbehind = cbehind;
        }

        /*
         * try to do any readahead that we might have free pages for.
         */
        if ((rahead + rbehind) >
                ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
                pagedaemon_wakeup();
                marray[0] = m;
                *reqpage = 0;
                return (1);
        }

        /*
         * scan backward for the read behind pages -- in memory
         */
        if (pindex > 0) {
                if (rbehind > pindex) {
                        rbehind = pindex;
                        startpindex = 0;
                } else {
                        startpindex = pindex - rbehind;
                }

                if ((rtm = TAILQ_PREV(m, pglist, listq)) != NULL &&
                    rtm->pindex >= startpindex)
                        startpindex = rtm->pindex + 1;

                for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {

                        rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
                        if (rtm == NULL) {
                                vm_page_lock_queues();
                                for (j = 0; j < i; j++) {
                                        vm_page_free(marray[j]);
                                }
                                vm_page_unlock_queues();
                                marray[0] = m;
                                *reqpage = 0;
                                return (1);
                        }

                        marray[i] = rtm;
                }
        } else {
                startpindex = 0;
                i = 0;
        }

        marray[i] = m;
        /* page offset of the required page */
        *reqpage = i;

        tpindex = pindex + 1;
        i++;

        /*
         * scan forward for the read ahead pages
         */
        endpindex = tpindex + rahead;
        if ((rtm = TAILQ_NEXT(m, listq)) != NULL && rtm->pindex < endpindex)
                endpindex = rtm->pindex;
        if (endpindex > object->size)
                endpindex = object->size;

        for (; tpindex < endpindex; i++, tpindex++) {

                rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
                if (rtm == NULL) {
                        break;
                }

                marray[i] = rtm;
        }

        /* return the number of pages in marray */
        return (i);
}
1357 }