/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *      Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define PFBAK 4
#define PFFOR 4

#define VM_FAULT_READ_DEFAULT   (1 + VM_FAULT_READ_AHEAD_INIT)

#define VM_FAULT_DONTNEED_MIN   1048576

struct faultstate {
        /* Fault parameters. */
        vm_offset_t     vaddr;
        vm_page_t       *m_hold;
        vm_prot_t       fault_type;
        vm_prot_t       prot;
        int             fault_flags;
        boolean_t       wired;

        /* Control state. */
        struct timeval  oom_start_time;
        bool            oom_started;
        int             nera;

        /* Page reference for cow. */
        vm_page_t m_cow;

        /* Current object. */
        vm_object_t     object;
        vm_pindex_t     pindex;
        vm_page_t       m;

        /* Top-level map object. */
        vm_object_t     first_object;
        vm_pindex_t     first_pindex;
        vm_page_t       first_m;

        /* Map state. */
        vm_map_t        map;
        vm_map_entry_t  entry;
        int             map_generation;
        bool            lookup_still_valid;

        /* Vnode if locked. */
        struct vnode    *vp;
};

/*
 * Return codes for internal fault routines.
 */
enum fault_status {
        FAULT_SUCCESS = 1,      /* Return success to user. */
        FAULT_FAILURE,          /* Return failure to user. */
        FAULT_CONTINUE,         /* Continue faulting. */
        FAULT_RESTART,          /* Restart fault. */
        FAULT_OUT_OF_BOUNDS,    /* Invalid address for pager. */
        FAULT_HARD,             /* Performed I/O. */
        FAULT_SOFT,             /* Found valid page. */
        FAULT_PROTECTION_FAILURE, /* Invalid access. */
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
            int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
            int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

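/*
 * If *mp names a page, deactivate and unbusy it, then clear the
 * caller's pointer; see the comment below on why deactivation is used.
 */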
static inline void
fault_page_release(vm_page_t *mp)
{
        vm_page_t m;

        m = *mp;
        if (m != NULL) {
                /*
                 * We are likely to loop around again and attempt to busy
                 * this page.  Deactivating it leaves it available for
                 * pageout while optimizing fault restarts.
                 */
                vm_page_deactivate(m);
                vm_page_xunbusy(m);
                *mp = NULL;
        }
}

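/*
 * Free the page named by *mp, if any, unless it is wired, in which
 * case it is merely unbusied.  Clears the caller's pointer.
 */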
static inline void
fault_page_free(vm_page_t *mp)
{
        vm_page_t m;

        m = *mp;
        if (m != NULL) {
                VM_OBJECT_ASSERT_WLOCKED(m->object);
                if (!vm_page_wired(m))
                        vm_page_free(m);
                else
                        vm_page_xunbusy(m);
                *mp = NULL;
        }
}

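/*
 * Finish the map lookup, dropping the map lock, if it is still held.
 */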
static inline void
unlock_map(struct faultstate *fs)
{

        if (fs->lookup_still_valid) {
                vm_map_lookup_done(fs->map, fs->entry);
                fs->lookup_still_valid = false;
        }
}

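/*
 * Unlock and release the vnode saved in the fault state, if any.
 */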
static void
unlock_vp(struct faultstate *fs)
{

        if (fs->vp != NULL) {
                vput(fs->vp);
                fs->vp = NULL;
        }
}

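/*
 * Release every resource held by the fault state: the saved pages,
 * the paging-in-progress references, the reference on the top-level
 * object, and the map and vnode locks.  The current object must
 * already be unlocked by the caller.
 */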
static void
fault_deallocate(struct faultstate *fs)
{

        fault_page_release(&fs->m_cow);
        fault_page_release(&fs->m);
        vm_object_pip_wakeup(fs->object);
        if (fs->object != fs->first_object) {
                VM_OBJECT_WLOCK(fs->first_object);
                fault_page_free(&fs->first_m);
                VM_OBJECT_WUNLOCK(fs->first_object);
                vm_object_pip_wakeup(fs->first_object);
        }
        vm_object_deallocate(fs->first_object);
        unlock_map(fs);
        unlock_vp(fs);
}

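/*
 * Unlock the current object and then release all fault state resources.
 */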
static void
unlock_and_deallocate(struct faultstate *fs)
{

        VM_OBJECT_WUNLOCK(fs->object);
        fault_deallocate(fs);
}

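/*
 * Apply the dirtying policy for a faulted page: mark the object
 * writeable-dirty and, for a write fault or a VM_FAULT_DIRTY request,
 * dirty the page itself, honoring the map entry's NOSYNC setting.
 * Unmanaged pages are left untouched.
 */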
static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
        bool need_dirty;

        if (((fs->prot & VM_PROT_WRITE) == 0 &&
            (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
            (m->oflags & VPO_UNMANAGED) != 0)
                return;

        VM_PAGE_OBJECT_BUSY_ASSERT(m);

        need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
            (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
            (fs->fault_flags & VM_FAULT_DIRTY) != 0;

        vm_object_set_writeable_dirty(m->object);

        /*
         * If the fault is a write, we know that this page is being
         * written NOW so dirty it explicitly to save on
         * pmap_is_modified() calls later.
         *
         * Also, since the page is now dirty, we can possibly tell
         * the pager to release any swap backing the page.
         */
        if (need_dirty && vm_page_set_dirty(m) == 0) {
                /*
                 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
                 * if the page is already dirty to prevent data written with
                 * the expectation of being synced from not being synced.
                 * Likewise if this entry does not request NOSYNC then make
                 * sure the page isn't marked NOSYNC.  Applications sharing
                 * data should use the same flags to avoid ping ponging.
                 */
                if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
                        vm_page_aflag_set(m, PGA_NOSYNC);
                else
                        vm_page_aflag_clear(m, PGA_NOSYNC);
        }
}

/*
 * Attempt a fast-path resolution of a soft fault: map a page that is
 * already resident and valid in the top-level object, while holding
 * only a read lock on that object.  Unlocks fs.first_object and
 * fs.map on success.
 */
static enum fault_status
vm_fault_soft_fast(struct faultstate *fs)
{
        vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
        vm_page_t m_super;
        int flags;
#endif
        int psind;
        vm_offset_t vaddr;
        enum fault_status res;

        MPASS(fs->vp == NULL);

        res = FAULT_SUCCESS;
        vaddr = fs->vaddr;
        vm_object_busy(fs->first_object);
        m = vm_page_lookup(fs->first_object, fs->first_pindex);
        /* A busy page can be mapped for read|execute access. */
        if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
            vm_page_busied(m)) || !vm_page_all_valid(m)) {
                res = FAULT_FAILURE;
                goto out;
        }
        m_map = m;
        psind = 0;
#if VM_NRESERVLEVEL > 0
        if ((m->flags & PG_FICTITIOUS) == 0 &&
            (m_super = vm_reserv_to_superpage(m)) != NULL &&
            rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
            roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
            (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
            (pagesizes[m_super->psind] - 1)) && !fs->wired &&
            pmap_ps_enabled(fs->map->pmap)) {
                flags = PS_ALL_VALID;
                if ((fs->prot & VM_PROT_WRITE) != 0) {
                        /*
                         * Create a superpage mapping allowing write access
                         * only if none of the constituent pages are busy and
                         * all of them are already dirty (except possibly for
                         * the page that was faulted on).
                         */
                        flags |= PS_NONE_BUSY;
                        if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
                                flags |= PS_ALL_DIRTY;
                }
                if (vm_page_ps_test(m_super, flags, m)) {
                        m_map = m_super;
                        psind = m_super->psind;
                        vaddr = rounddown2(vaddr, pagesizes[psind]);
                        /* Preset the modified bit for dirty superpages. */
                        if ((flags & PS_ALL_DIRTY) != 0)
                                fs->fault_type |= VM_PROT_WRITE;
                }
        }
#endif
        if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
            PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) !=
            KERN_SUCCESS) {
                res = FAULT_FAILURE;
                goto out;
        }
        if (fs->m_hold != NULL) {
                (*fs->m_hold) = m;
                vm_page_wire(m);
        }
        if (psind == 0 && !fs->wired)
                vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
        VM_OBJECT_RUNLOCK(fs->first_object);
        vm_fault_dirty(fs, m);
        vm_map_lookup_done(fs->map, fs->entry);
        curthread->td_ru.ru_minflt++;

out:
        vm_object_unbusy(fs->first_object);
        return (res);
}

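/*
 * Reacquire the map's read lock, briefly dropping the object lock if
 * the trylock fails, and mark the map lookup as valid again.
 */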
static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
        MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

        if (!vm_map_trylock_read(fs->map)) {
                VM_OBJECT_WUNLOCK(fs->first_object);
                vm_map_lock_read(fs->map);
                VM_OBJECT_WLOCK(fs->first_object);
        }
        fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

        /*
         * Check each page to ensure that the pager is obeying the
         * interface: the page must be installed in the object, fully
         * valid, and exclusively busied.
         */
        MPASS(m != NULL);
        MPASS(vm_page_all_valid(m));
        MPASS(vm_page_xbusied(m));
}

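/*
 * Release the pages in the range [first, last] that the pager
 * populated but that will not be mapped by this fault.
 */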
static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
        vm_page_t m;
        vm_pindex_t pidx;

        VM_OBJECT_ASSERT_WLOCKED(object);
        MPASS(first <= last);
        for (pidx = first, m = vm_page_lookup(object, pidx);
            pidx <= last; pidx++, m = vm_page_next(m)) {
                vm_fault_populate_check_page(m);
                vm_page_deactivate(m);
                vm_page_xunbusy(m);
        }
}

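/*
 * Handle a fault against an OBJ_POPULATE object by calling the
 * pager's populate() method, which instantiates a run of pages
 * around the faulting index, and then entering the run into the
 * pmap.
 */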
static enum fault_status
vm_fault_populate(struct faultstate *fs)
{
        vm_offset_t vaddr;
        vm_page_t m;
        vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
        int bdry_idx, i, npages, psind, rv;
        enum fault_status res;

        MPASS(fs->object == fs->first_object);
        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
        MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
        MPASS(fs->first_object->backing_object == NULL);
        MPASS(fs->lookup_still_valid);

        pager_first = OFF_TO_IDX(fs->entry->offset);
        pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
        unlock_map(fs);
        unlock_vp(fs);

        res = FAULT_SUCCESS;

        /*
         * Call the pager (driver) populate() method.
         *
         * There is no guarantee that the method will be called again
         * if the current fault is for read, and a future fault is
         * for write.  Report the entry's maximum allowed protection
         * to the driver.
         */
        rv = vm_pager_populate(fs->first_object, fs->first_pindex,
            fs->fault_type, fs->entry->max_protection, &pager_first,
            &pager_last);

        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
        if (rv == VM_PAGER_BAD) {
                /*
                 * VM_PAGER_BAD is the backdoor for a pager to request
                 * normal fault handling.
                 */
                vm_fault_restore_map_lock(fs);
                if (fs->map->timestamp != fs->map_generation)
                        return (FAULT_RESTART);
                return (FAULT_CONTINUE);
        }
        if (rv != VM_PAGER_OK)
                return (FAULT_FAILURE); /* AKA SIGSEGV */

        /* Ensure that the driver is obeying the interface. */
        MPASS(pager_first <= pager_last);
        MPASS(fs->first_pindex <= pager_last);
        MPASS(fs->first_pindex >= pager_first);
        MPASS(pager_last < fs->first_object->size);

        vm_fault_restore_map_lock(fs);
        bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
            MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
        if (fs->map->timestamp != fs->map_generation) {
                if (bdry_idx == 0) {
                        vm_fault_populate_cleanup(fs->first_object, pager_first,
                            pager_last);
                } else {
                        m = vm_page_lookup(fs->first_object, pager_first);
                        if (m != fs->m)
                                vm_page_xunbusy(m);
                }
                return (FAULT_RESTART);
        }

        /*
         * The map is unchanged after our last unlock.  Process the fault.
         *
         * First, the special case of largepage mappings, where
         * populate only busies the first page in the superpage run.
         */
        if (bdry_idx != 0) {
                KASSERT(PMAP_HAS_LARGEPAGES,
                    ("missing pmap support for large pages"));
                m = vm_page_lookup(fs->first_object, pager_first);
                vm_fault_populate_check_page(m);
                VM_OBJECT_WUNLOCK(fs->first_object);
                vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
                    fs->entry->offset;
                /* assert alignment for entry */
                KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
    ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx",
                    (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
                    (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
                KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
                    ("unaligned superpage m %p %#jx", m,
                    (uintmax_t)VM_PAGE_TO_PHYS(m)));
                rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
                    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
                    PMAP_ENTER_LARGEPAGE, bdry_idx);
                VM_OBJECT_WLOCK(fs->first_object);
                vm_page_xunbusy(m);
                if (rv != KERN_SUCCESS) {
                        res = FAULT_FAILURE;
                        goto out;
                }
                if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
                        for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
                                vm_page_wire(m + i);
                }
                if (fs->m_hold != NULL) {
                        *fs->m_hold = m + (fs->first_pindex - pager_first);
                        vm_page_wire(*fs->m_hold);
                }
                goto out;
        }

        /*
         * The range [pager_first, pager_last] that is given to the
         * pager is only a hint.  The pager may populate any range
         * within the object that includes the requested page index.
         * In case the pager expanded the range, clip it to fit into
         * the map entry.
         */
        map_first = OFF_TO_IDX(fs->entry->offset);
        if (map_first > pager_first) {
                vm_fault_populate_cleanup(fs->first_object, pager_first,
                    map_first - 1);
                pager_first = map_first;
        }
        map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
        if (map_last < pager_last) {
                vm_fault_populate_cleanup(fs->first_object, map_last + 1,
                    pager_last);
                pager_last = map_last;
        }
        for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
            pidx <= pager_last;
            pidx += npages, m = vm_page_next(&m[npages - 1])) {
                vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;

                psind = m->psind;
                if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
                    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
                    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
                        psind = 0;

                npages = atop(pagesizes[psind]);
                for (i = 0; i < npages; i++) {
                        vm_fault_populate_check_page(&m[i]);
                        vm_fault_dirty(fs, &m[i]);
                }
                VM_OBJECT_WUNLOCK(fs->first_object);
                rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
                    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);

                /*
                 * pmap_enter() may fail for a superpage mapping if additional
                 * protection policies prevent the full mapping.
                 * For example, this will happen on amd64 if the entire
                 * address range does not share the same userspace protection
                 * key.  Revert to single-page mappings if this happens.
                 */
                MPASS(rv == KERN_SUCCESS ||
                    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
                if (__predict_false(psind > 0 &&
                    rv == KERN_PROTECTION_FAILURE)) {
                        MPASS(!fs->wired);
                        for (i = 0; i < npages; i++) {
                                rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
                                    &m[i], fs->prot, fs->fault_type, 0);
                                MPASS(rv == KERN_SUCCESS);
                        }
                }

                VM_OBJECT_WLOCK(fs->first_object);
                for (i = 0; i < npages; i++) {
                        if ((fs->fault_flags & VM_FAULT_WIRE) != 0 &&
                            m[i].pindex == fs->first_pindex)
                                vm_page_wire(&m[i]);
                        else
                                vm_page_activate(&m[i]);
                        if (fs->m_hold != NULL &&
                            m[i].pindex == fs->first_pindex) {
                                (*fs->m_hold) = &m[i];
                                vm_page_wire(&m[i]);
                        }
                        vm_page_xunbusy(&m[i]);
                }
        }
out:
        curthread->td_ru.ru_majflt++;
        return (res);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define UCODE_PAGEFLT   12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *      vm_fault_trap:
 *
 *      Handle a page fault occurring at the given address,
 *      requiring the given permissions, in the map specified.
 *      If successful, the page is inserted into the
 *      associated physical map.
 *
 *      NOTE: the given address should be truncated to the
 *      proper page address.
 *
 *      KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *      a standard error specifying why the fault is fatal is returned.
 *
 *      The map in question must be referenced, and remains so.
 *      Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
        int result;

        MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
        if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
                ktrfault(vaddr, fault_type);
#endif
        result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
            NULL);
        KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
            result == KERN_INVALID_ADDRESS ||
            result == KERN_RESOURCE_SHORTAGE ||
            result == KERN_PROTECTION_FAILURE ||
            result == KERN_OUT_OF_BOUNDS,
            ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
        if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
                ktrfaultend(result);
#endif
        if (result != KERN_SUCCESS && signo != NULL) {
                switch (result) {
                case KERN_FAILURE:
                case KERN_INVALID_ADDRESS:
                        *signo = SIGSEGV;
                        *ucode = SEGV_MAPERR;
                        break;
                case KERN_RESOURCE_SHORTAGE:
                        *signo = SIGBUS;
                        *ucode = BUS_OOMERR;
                        break;
                case KERN_OUT_OF_BOUNDS:
                        *signo = SIGBUS;
                        *ucode = BUS_OBJERR;
                        break;
                case KERN_PROTECTION_FAILURE:
                        if (prot_fault_translation == 0) {
                                /*
                                 * Autodetect.  This check also covers
                                 * the images without the ABI-tag ELF
                                 * note.
                                 */
                                if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
                                    curproc->p_osrel >= P_OSREL_SIGSEGV) {
                                        *signo = SIGSEGV;
                                        *ucode = SEGV_ACCERR;
                                } else {
                                        *signo = SIGBUS;
                                        *ucode = UCODE_PAGEFLT;
                                }
                        } else if (prot_fault_translation == 1) {
                                /* Always compat mode. */
                                *signo = SIGBUS;
                                *ucode = UCODE_PAGEFLT;
                        } else {
                                /* Always SIGSEGV mode. */
                                *signo = SIGSEGV;
                                *ucode = SEGV_ACCERR;
                        }
                        break;
                default:
                        KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
                            result));
                        break;
                }
        }
        return (result);
}

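/*
 * Lock the vnode backing the current object, if any.  If the lock
 * cannot be acquired without sleeping, drop the fault state first and
 * return FAULT_RESTART so that the fault is retried from scratch.
 */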
static enum fault_status
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
        struct vnode *vp;
        int error, locked;

        if (fs->object->type != OBJT_VNODE)
                return (FAULT_CONTINUE);
        vp = fs->object->handle;
        if (vp == fs->vp) {
                ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
                return (FAULT_CONTINUE);
        }

        /*
         * Perform an unlock in case the desired vnode changed while
         * the map was unlocked during a retry.
         */
        unlock_vp(fs);

        locked = VOP_ISLOCKED(vp);
        if (locked != LK_EXCLUSIVE)
                locked = LK_SHARED;

        /*
         * We must not sleep acquiring the vnode lock while we have
         * the page exclusive busied or the object's
         * paging-in-progress count incremented.  Otherwise, we could
         * deadlock.
         */
        error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
        if (error == 0) {
                fs->vp = vp;
                return (FAULT_CONTINUE);
        }

        vhold(vp);
        if (objlocked)
                unlock_and_deallocate(fs);
        else
                fault_deallocate(fs);
        error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
        vdrop(vp);
        fs->vp = vp;
        KASSERT(error == 0, ("vm_fault: vget failed %d", error));
        return (FAULT_RESTART);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead pages to pass to the pager.
 */
static int
vm_fault_readahead(struct faultstate *fs)
{
        int era, nera;
        u_char behavior;

        KASSERT(fs->lookup_still_valid, ("map unlocked"));
        era = fs->entry->read_ahead;
        behavior = vm_map_entry_behavior(fs->entry);
        if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
                nera = 0;
        } else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
                nera = VM_FAULT_READ_AHEAD_MAX;
                if (fs->vaddr == fs->entry->next_read)
                        vm_fault_dontneed(fs, fs->vaddr, nera);
        } else if (fs->vaddr == fs->entry->next_read) {
                /*
                 * This is a sequential fault.  Arithmetically
                 * increase the requested number of pages in
                 * the read-ahead window.  The requested
                 * number of pages is "# of sequential faults
                 * x (read ahead min + 1) + read ahead min"
                 */
                nera = VM_FAULT_READ_AHEAD_MIN;
                if (era > 0) {
                        nera += era + 1;
                        if (nera > VM_FAULT_READ_AHEAD_MAX)
                                nera = VM_FAULT_READ_AHEAD_MAX;
                }
                if (era == VM_FAULT_READ_AHEAD_MAX)
                        vm_fault_dontneed(fs, fs->vaddr, nera);
        } else {
                /*
                 * This is a non-sequential fault.
                 */
                nera = 0;
        }
        if (era != nera) {
                /*
                 * A read lock on the map suffices to update
                 * the read ahead count safely.
                 */
                fs->entry->read_ahead = nera;
        }

        return (nera);
}

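/*
 * Look up the faulting address in the map, resolving the top-level
 * object, pindex, and allowed protection, and handle map entries that
 * are in transition due to an in-progress wiring operation.
 */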
static int
vm_fault_lookup(struct faultstate *fs)
{
        int result;

        KASSERT(!fs->lookup_still_valid,
           ("vm_fault_lookup: Map already locked."));
        result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
            VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
            &fs->first_pindex, &fs->prot, &fs->wired);
        if (result != KERN_SUCCESS) {
                unlock_vp(fs);
                return (result);
        }

        fs->map_generation = fs->map->timestamp;

        if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
                panic("%s: fault on nofault entry, addr: %#lx",
                    __func__, (u_long)fs->vaddr);
        }

        if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
            fs->entry->wiring_thread != curthread) {
                vm_map_unlock_read(fs->map);
                vm_map_lock(fs->map);
                if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
                    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
                        unlock_vp(fs);
                        fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
                        vm_map_unlock_and_wait(fs->map, 0);
                } else
                        vm_map_unlock(fs->map);
                return (KERN_RESOURCE_SHORTAGE);
        }

        MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

        if (fs->wired)
                fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
        else
                KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
                    ("!fs->wired && VM_FAULT_WIRE"));
        fs->lookup_still_valid = true;

        return (KERN_SUCCESS);
}

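/*
 * Revalidate the map lookup after the map was unlocked.  Returns
 * KERN_RESTART if the fault must be retried because the lookup
 * changed in a way that cannot be reconciled.
 */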
static int
vm_fault_relookup(struct faultstate *fs)
{
        vm_object_t retry_object;
        vm_pindex_t retry_pindex;
        vm_prot_t retry_prot;
        int result;

        if (!vm_map_trylock_read(fs->map))
                return (KERN_RESTART);

        fs->lookup_still_valid = true;
        if (fs->map->timestamp == fs->map_generation)
                return (KERN_SUCCESS);

        result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
            &fs->entry, &retry_object, &retry_pindex, &retry_prot,
            &fs->wired);
        if (result != KERN_SUCCESS) {
                /*
                 * If retry of map lookup would have blocked then
                 * retry fault from start.
                 */
                if (result == KERN_FAILURE)
                        return (KERN_RESTART);
                return (result);
        }
        if (retry_object != fs->first_object ||
            retry_pindex != fs->first_pindex)
                return (KERN_RESTART);

        /*
         * Check whether the protection has changed or the object has
         * been copied while we left the map unlocked. Changing from
         * read to write permission is OK - we leave the page
         * write-protected, and catch the write fault. Changing from
         * write to read permission means that we can't mark the page
         * write-enabled after all.
         */
        fs->prot &= retry_prot;
        fs->fault_type &= retry_prot;
        if (fs->prot == 0)
                return (KERN_RESTART);

        /* Reassert because wired may have changed. */
        KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
            ("!wired && VM_FAULT_WIRE"));

        return (KERN_SUCCESS);
}

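/*
 * Resolve a copy-on-write fault: either move the page from the
 * backing object into the top-level object when no one else can
 * reference it, or copy its contents into the already-allocated
 * first_m.
 */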
static void
vm_fault_cow(struct faultstate *fs)
{
        bool is_first_object_locked;

        KASSERT(fs->object != fs->first_object,
            ("source and target COW objects are identical"));

        /*
         * This allows pages to be virtually copied from a backing_object
         * into the first_object, where the backing object has no other
         * refs to it, and cannot gain any more refs.  Instead of a bcopy,
         * we just move the page from the backing object to the first
         * object.  Note that we must mark the page dirty in the first
         * object so that it will go out to swap when needed.
         */
        is_first_object_locked = false;
        if (
            /*
             * Only one shadow object and no other refs.
             */
            fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
            /*
             * No other ways to look the object up.
             */
            fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
            /*
             * We don't chase down the shadow chain and we can acquire locks.
             */
            (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
            fs->object == fs->first_object->backing_object &&
            VM_OBJECT_TRYWLOCK(fs->object)) {
                /*
                 * Remove but keep xbusy for replace.  fs->m is moved into
                 * fs->first_object and left busy while fs->first_m is
                 * conditionally freed.
                 */
                vm_page_remove_xbusy(fs->m);
                vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
                    fs->first_m);
                vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
                /*
                 * Rename the reservation.
                 */
                vm_reserv_rename(fs->m, fs->first_object, fs->object,
                    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
                VM_OBJECT_WUNLOCK(fs->object);
                VM_OBJECT_WUNLOCK(fs->first_object);
                fs->first_m = fs->m;
                fs->m = NULL;
                VM_CNT_INC(v_cow_optim);
        } else {
                if (is_first_object_locked)
                        VM_OBJECT_WUNLOCK(fs->first_object);
                /*
                 * Oh, well, let's copy it.
                 */
                pmap_copy_page(fs->m, fs->first_m);
                vm_page_valid(fs->first_m);
                if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
                        vm_page_wire(fs->first_m);
                        vm_page_unwire(fs->m, PQ_INACTIVE);
                }
                /*
                 * Save the cow page to be released after
                 * pmap_enter is complete.
                 */
                fs->m_cow = fs->m;
                fs->m = NULL;

                /*
                 * Typically, the shadow object is either private to this
                 * address space (OBJ_ONEMAPPING) or its pages are read only.
                 * In the highly unusual case where the pages of a shadow object
                 * are read/write shared between this and other address spaces,
                 * we need to ensure that any pmap-level mappings to the
                 * original, copy-on-write page from the backing object are
                 * removed from those other address spaces.
                 *
                 * The flag check is racy, but this is tolerable: if
                 * OBJ_ONEMAPPING is cleared after the check, the busy state
                 * ensures that new mappings of m_cow can't be created.
                 * pmap_enter() will replace an existing mapping in the current
                 * address space.  If OBJ_ONEMAPPING is set after the check,
                 * removing mappings will at worst trigger some unnecessary page
                 * faults.
                 */
                vm_page_assert_xbusied(fs->m_cow);
                if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
                        pmap_remove_all(fs->m_cow);
        }

        vm_object_pip_wakeup(fs->object);

        /*
         * Only use the new page below...
         */
        fs->object = fs->first_object;
        fs->pindex = fs->first_pindex;
        fs->m = fs->first_m;
        VM_CNT_INC(v_cow_faults);
        curthread->td_cow++;
}

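/*
 * Advance the fault state to the next object in the shadow chain,
 * returning false if there is no backing object to continue with.
 */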
1034 static bool
1035 vm_fault_next(struct faultstate *fs)
1036 {
1037         vm_object_t next_object;
1038
1039         /*
1040          * The requested page does not exist at this object/
1041          * offset.  Remove the invalid page from the object,
1042          * waking up anyone waiting for it, and continue on to
1043          * the next object.  However, if this is the top-level
1044          * object, we must leave the busy page in place to
1045          * prevent another process from rushing past us, and
1046          * inserting the page in that object at the same time
1047          * that we are.
1048          */
1049         if (fs->object == fs->first_object) {
1050                 fs->first_m = fs->m;
1051                 fs->m = NULL;
1052         } else
1053                 fault_page_free(&fs->m);
1054
1055         /*
1056          * Move on to the next object.  Lock the next object before
1057          * unlocking the current one.
1058          */
1059         VM_OBJECT_ASSERT_WLOCKED(fs->object);
1060         next_object = fs->object->backing_object;
1061         if (next_object == NULL)
1062                 return (false);
1063         MPASS(fs->first_m != NULL);
1064         KASSERT(fs->object != next_object, ("object loop %p", next_object));
1065         VM_OBJECT_WLOCK(next_object);
1066         vm_object_pip_add(next_object, 1);
1067         if (fs->object != fs->first_object)
1068                 vm_object_pip_wakeup(fs->object);
1069         fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
1070         VM_OBJECT_WUNLOCK(fs->object);
1071         fs->object = next_object;
1072
1073         return (true);
1074 }
1075
1076 static void
1077 vm_fault_zerofill(struct faultstate *fs)
1078 {
1079
1080         /*
1081          * If there's no object left, fill the page in the top
1082          * object with zeros.
1083          */
1084         if (fs->object != fs->first_object) {
1085                 vm_object_pip_wakeup(fs->object);
1086                 fs->object = fs->first_object;
1087                 fs->pindex = fs->first_pindex;
1088         }
1089         MPASS(fs->first_m != NULL);
1090         MPASS(fs->m == NULL);
1091         fs->m = fs->first_m;
1092         fs->first_m = NULL;
1093
1094         /*
1095          * Zero the page if necessary and mark it valid.
1096          */
1097         if ((fs->m->flags & PG_ZERO) == 0) {
1098                 pmap_zero_page(fs->m);
1099         } else {
1100                 VM_CNT_INC(v_ozfod);
1101         }
1102         VM_CNT_INC(v_zfod);
1103         vm_page_valid(fs->m);
1104 }
1105
1106 /*
1107  * Initiate page fault after timeout.  Returns true if caller should
1108  * do vm_waitpfault() after the call.
1109  */
1110 static bool
1111 vm_fault_allocate_oom(struct faultstate *fs)
1112 {
1113         struct timeval now;
1114
1115         unlock_and_deallocate(fs);
1116         if (vm_pfault_oom_attempts < 0)
1117                 return (true);
1118         if (!fs->oom_started) {
1119                 fs->oom_started = true;
1120                 getmicrotime(&fs->oom_start_time);
1121                 return (true);
1122         }
1123
1124         getmicrotime(&now);
1125         timevalsub(&now, &fs->oom_start_time);
1126         if (now.tv_sec < vm_pfault_oom_attempts * vm_pfault_oom_wait)
1127                 return (true);
1128
1129         if (bootverbose)
1130                 printf(
1131             "proc %d (%s) failed to alloc page on fault, starting OOM\n",
1132                     curproc->p_pid, curproc->p_comm);
1133         vm_pageout_oom(VM_OOM_MEM_PF);
1134         fs->oom_started = false;
1135         return (false);
1136 }
1137
1138 /*
1139  * Allocate a page directly or via the object populate method.
1140  */
1141 static enum fault_status
1142 vm_fault_allocate(struct faultstate *fs)
1143 {
1144         struct domainset *dset;
1145         int alloc_req;
1146         enum fault_status res;
1147
1148         if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
1149                 res = vm_fault_lock_vnode(fs, true);
1150                 MPASS(res == FAULT_CONTINUE || res == FAULT_RESTART);
1151                 if (res == FAULT_RESTART)
1152                         return (res);
1153         }
1154
1155         if (fs->pindex >= fs->object->size) {
1156                 unlock_and_deallocate(fs);
1157                 return (FAULT_OUT_OF_BOUNDS);
1158         }
1159
1160         if (fs->object == fs->first_object &&
1161             (fs->first_object->flags & OBJ_POPULATE) != 0 &&
1162             fs->first_object->shadow_count == 0) {
1163                 res = vm_fault_populate(fs);
1164                 switch (res) {
1165                 case FAULT_SUCCESS:
1166                 case FAULT_FAILURE:
1167                 case FAULT_RESTART:
1168                         unlock_and_deallocate(fs);
1169                         return (res);
1170                 case FAULT_CONTINUE:
1171                         /*
1172                          * Pager's populate() method
1173                          * returned VM_PAGER_BAD.
1174                          */
1175                         break;
1176                 default:
1177                         panic("inconsistent return codes");
1178                 }
1179         }
1180
1181         /*
1182          * Allocate a new page for this object/offset pair.
1183          *
1184          * Unlocked read of the p_flag is harmless. At worst, the P_KILLED
1185          * might be not observed there, and allocation can fail, causing
1186          * restart and new reading of the p_flag.
1187          */
1188         dset = fs->object->domain.dr_policy;
1189         if (dset == NULL)
1190                 dset = curthread->td_domain.dr_policy;
1191         if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
1192 #if VM_NRESERVLEVEL > 0
1193                 vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
1194 #endif
1195                 alloc_req = P_KILLED(curproc) ?
1196                     VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
1197                 if (fs->object->type != OBJT_VNODE &&
1198                     fs->object->backing_object == NULL)
1199                         alloc_req |= VM_ALLOC_ZERO;
1200                 fs->m = vm_page_alloc(fs->object, fs->pindex, alloc_req);
1201         }
1202         if (fs->m == NULL) {
1203                 if (vm_fault_allocate_oom(fs))
1204                         vm_waitpfault(dset, vm_pfault_oom_wait * hz);
1205                 return (FAULT_RESTART);
1206         }
1207         fs->oom_started = false;
1208
1209         return (FAULT_CONTINUE);
1210 }
1211
1212 /*
1213  * Call the pager to retrieve the page if there is a chance
1214  * that the pager has it, and potentially retrieve additional
1215  * pages at the same time.
1216  */
1217 static enum fault_status
1218 vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
1219 {
1220         vm_offset_t e_end, e_start;
1221         int ahead, behind, cluster_offset, rv;
1222         enum fault_status status;
1223         u_char behavior;
1224
1225         /*
1226          * Prepare for unlocking the map.  Save the map
1227          * entry's start and end addresses, which are used to
1228          * optimize the size of the pager operation below.
1229          * Even if the map entry's addresses change after
1230          * unlocking the map, using the saved addresses is
1231          * safe.
1232          */
1233         e_start = fs->entry->start;
1234         e_end = fs->entry->end;
1235         behavior = vm_map_entry_behavior(fs->entry);
1236
1237         /*
1238          * If the pager for the current object might have
1239          * the page, then determine the number of additional
1240          * pages to read and potentially reprioritize
1241          * previously read pages for earlier reclamation.
1242          * These operations should only be performed once per
1243          * page fault.  Even if the current pager doesn't
1244          * have the page, the number of additional pages to
1245          * read will apply to subsequent objects in the
1246          * shadow chain.
1247          */
1248         if (fs->nera == -1 && !P_KILLED(curproc))
1249                 fs->nera = vm_fault_readahead(fs);
1250
1251         /*
1252          * Release the map lock before locking the vnode or
1253          * sleeping in the pager.  (If the current object has
1254          * a shadow, then an earlier iteration of this loop
1255          * may have already unlocked the map.)
1256          */
1257         unlock_map(fs);
1258
1259         status = vm_fault_lock_vnode(fs, false);
1260         MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART);
1261         if (status == FAULT_RESTART)
1262                 return (status);
1263         KASSERT(fs->vp == NULL || !fs->map->system_map,
1264             ("vm_fault: vnode-backed object mapped by system map"));
1265
1266         /*
1267          * Page in the requested page and hint the pager,
1268          * that it may bring up surrounding pages.
1269          */
1270         if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
1271             P_KILLED(curproc)) {
1272                 behind = 0;
1273                 ahead = 0;
1274         } else {
1275                 /* Is this a sequential fault? */
1276                 if (fs->nera > 0) {
1277                         behind = 0;
1278                         ahead = fs->nera;
1279                 } else {
1280                         /*
1281                          * Request a cluster of pages that is
1282                          * aligned to a VM_FAULT_READ_DEFAULT
1283                          * page offset boundary within the
1284                          * object.  Alignment to a page offset
1285                          * boundary is more likely to coincide
1286                          * with the underlying file system
1287                          * block than alignment to a virtual
1288                          * address boundary.
1289                          */
1290                         cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
1291                         behind = ulmin(cluster_offset,
1292                             atop(fs->vaddr - e_start));
1293                         ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
1294                 }
1295                 ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
1296         }
1297         *behindp = behind;
1298         *aheadp = ahead;
1299         rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
1300         if (rv == VM_PAGER_OK)
1301                 return (FAULT_HARD);
1302         if (rv == VM_PAGER_ERROR)
1303                 printf("vm_fault: pager read error, pid %d (%s)\n",
1304                     curproc->p_pid, curproc->p_comm);
1305         /*
1306          * If an I/O error occurred or the requested page was
1307          * outside the range of the pager, clean up and return
1308          * an error.
1309          */
1310         if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
1311                 VM_OBJECT_WLOCK(fs->object);
1312                 fault_page_free(&fs->m);
1313                 unlock_and_deallocate(fs);
1314                 return (FAULT_OUT_OF_BOUNDS);
1315         }
1316         KASSERT(rv == VM_PAGER_FAIL,
1317             ("%s: unepxected pager error %d", __func__, rv));
1318         return (FAULT_CONTINUE);
1319 }
1320
1321 /*
1322  * Wait/Retry if the page is busy.  We have to do this if the page is
1323  * either exclusive or shared busy because the vm_pager may be using
1324  * read busy for pageouts (and even pageins if it is the vnode pager),
1325  * and we could end up trying to pagein and pageout the same page
1326  * simultaneously.
1327  *
1328  * We can theoretically allow the busy case on a read fault if the page
1329  * is marked valid, but since such pages are typically already pmap'd,
1330  * putting that special case in might be more effort then it is worth.
1331  * We cannot under any circumstances mess around with a shared busied
1332  * page except, perhaps, to pmap it.
1333  */
1334 static void
1335 vm_fault_busy_sleep(struct faultstate *fs)
1336 {
1337         /*
1338          * Reference the page before unlocking and
1339          * sleeping so that the page daemon is less
1340          * likely to reclaim it.
1341          */
1342         vm_page_aflag_set(fs->m, PGA_REFERENCED);
1343         if (fs->object != fs->first_object) {
1344                 fault_page_release(&fs->first_m);
1345                 vm_object_pip_wakeup(fs->first_object);
1346         }
1347         vm_object_pip_wakeup(fs->object);
1348         unlock_map(fs);
1349         if (fs->m != vm_page_lookup(fs->object, fs->pindex) ||
1350             !vm_page_busy_sleep(fs->m, "vmpfw", 0))
1351                 VM_OBJECT_WUNLOCK(fs->object);
1352         VM_CNT_INC(v_intrans);
1353         vm_object_deallocate(fs->first_object);
1354 }
1355
1356 /*
1357  * Handle page lookup, populate, allocate, page-in for the current
1358  * object.
1359  *
1360  * The object is locked on entry and will remain locked with a return
1361  * code of FAULT_CONTINUE so that fault may follow the shadow chain.
1362  * Otherwise, the object will be unlocked upon return.
1363  */
1364 static enum fault_status
1365 vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp)
1366 {
1367         enum fault_status res;
1368         bool dead;
1369
1370         /*
1371          * If the object is marked for imminent termination, we retry
1372          * here, since the collapse pass has raced with us.  Otherwise,
1373          * if we see terminally dead object, return fail.
1374          */
1375         if ((fs->object->flags & OBJ_DEAD) != 0) {
1376                 dead = fs->object->type == OBJT_DEAD;
1377                 unlock_and_deallocate(fs);
1378                 if (dead)
1379                         return (FAULT_PROTECTION_FAILURE);
1380                 pause("vmf_de", 1);
1381                 return (FAULT_RESTART);
1382         }
1383
1384         /*
1385          * See if the page is resident.
1386          */
1387         fs->m = vm_page_lookup(fs->object, fs->pindex);
1388         if (fs->m != NULL) {
1389                 if (!vm_page_tryxbusy(fs->m)) {
1390                         vm_fault_busy_sleep(fs);
1391                         return (FAULT_RESTART);
1392                 }
1393
1394                 /*
1395                  * The page is marked busy for other processes and the
1396                  * pagedaemon.  If it is still completely valid we are
1397                  * done.
1398                  */
1399                 if (vm_page_all_valid(fs->m)) {
1400                         VM_OBJECT_WUNLOCK(fs->object);
1401                         return (FAULT_SOFT);
1402                 }
1403         }
1404         VM_OBJECT_ASSERT_WLOCKED(fs->object);
1405
1406         /*
1407          * Page is not resident.  If the pager might contain the page
1408          * or this is the beginning of the search, allocate a new
1409          * page.  (Default objects are zero-fill, so there is no real
1410          * pager for them.)
1411          */
1412         if (fs->m == NULL && (fs->object->type != OBJT_DEFAULT ||
1413             fs->object == fs->first_object)) {
1414                 res = vm_fault_allocate(fs);
1415                 if (res != FAULT_CONTINUE)
1416                         return (res);
1417         }
1418
1419         /*
1420  * Default objects have no pager, so no exclusive busy exists
1421          * to protect this page in the chain.  Skip to the next
1422          * object without dropping the lock to preserve atomicity of
1423          * shadow faults.
1424          */
1425         if (fs->object->type != OBJT_DEFAULT) {
1426                 /*
1427                  * At this point, we have either allocated a new page
1428                  * or found an existing page that is only partially
1429                  * valid.
1430                  *
1431                  * We hold a reference on the current object and the
1432                  * page is exclusive busied.  The exclusive busy
1433                  * prevents simultaneous faults and collapses while
1434                  * the object lock is dropped.
1435                  */
1436                 VM_OBJECT_WUNLOCK(fs->object);
1437                 res = vm_fault_getpages(fs, behindp, aheadp);
1438                 if (res == FAULT_CONTINUE)
1439                         VM_OBJECT_WLOCK(fs->object);
1440         } else {
1441                 res = FAULT_CONTINUE;
1442         }
1443         return (res);
1444 }
1445
1446 int
1447 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
1448     int fault_flags, vm_page_t *m_hold)
1449 {
1450         struct faultstate fs;
1451         int ahead, behind, faultcount, rv;
1452         enum fault_status res;
1453         bool hardfault;
1454
1455         VM_CNT_INC(v_vm_faults);
1456
1457         if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
1458                 return (KERN_PROTECTION_FAILURE);
1459
1460         fs.vp = NULL;
1461         fs.vaddr = vaddr;
1462         fs.m_hold = m_hold;
1463         fs.fault_flags = fault_flags;
1464         fs.map = map;
1465         fs.lookup_still_valid = false;
1466         fs.oom_started = false;
1467         fs.nera = -1;
1468         faultcount = 0;
1469         hardfault = false;
1470
1471 RetryFault:
1472         fs.fault_type = fault_type;
1473
1474         /*
1475          * Find the backing store object and offset into it to begin the
1476          * search.
1477          */
1478         rv = vm_fault_lookup(&fs);
1479         if (rv != KERN_SUCCESS) {
1480                 if (rv == KERN_RESOURCE_SHORTAGE)
1481                         goto RetryFault;
1482                 return (rv);
1483         }
1484
1485         /*
1486          * Try to avoid lock contention on the top-level object through
1487          * special-case handling of some types of page faults, specifically,
1488          * those that are mapping an existing page from the top-level object.
1489          * Under this condition, a read lock on the object suffices, allowing
1490          * multiple page faults of a similar type to run in parallel.
1491          */
1492         if (fs.vp == NULL /* avoid locked vnode leak */ &&
1493             (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
1494             (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
1495                 VM_OBJECT_RLOCK(fs.first_object);
1496                 res = vm_fault_soft_fast(&fs);
1497                 if (res == FAULT_SUCCESS)
1498                         return (KERN_SUCCESS);
1499                 if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
1500                         VM_OBJECT_RUNLOCK(fs.first_object);
1501                         VM_OBJECT_WLOCK(fs.first_object);
1502                 }
1503         } else {
1504                 VM_OBJECT_WLOCK(fs.first_object);
1505         }
1506
1507         /*
1508          * Make a reference to this object to prevent its disposal while we
1509          * are messing with it.  Once we have the reference, the map is free
1510          * to be diddled.  Since objects reference their shadows (and copies),
1511          * they will stay around as well.
1512          *
1513          * Bump the paging-in-progress count to prevent size changes (e.g. 
1514          * truncation operations) during I/O.
1515          */
1516         vm_object_reference_locked(fs.first_object);
1517         vm_object_pip_add(fs.first_object, 1);
1518
1519         fs.m_cow = fs.m = fs.first_m = NULL;
1520
1521         /*
1522          * Search for the page at object/offset.
1523          */
1524         fs.object = fs.first_object;
1525         fs.pindex = fs.first_pindex;
1526
1527         if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
1528                 res = vm_fault_allocate(&fs);
1529                 switch (res) {
1530                 case FAULT_RESTART:
1531                         goto RetryFault;
1532                 case FAULT_SUCCESS:
1533                         return (KERN_SUCCESS);
1534                 case FAULT_FAILURE:
1535                         return (KERN_FAILURE);
1536                 case FAULT_OUT_OF_BOUNDS:
1537                         return (KERN_OUT_OF_BOUNDS);
1538                 case FAULT_CONTINUE:
1539                         break;
1540                 default:
1541                         panic("vm_fault: Unhandled status %d", res);
1542                 }
1543         }
1544
1545         while (TRUE) {
1546                 KASSERT(fs.m == NULL,
1547                     ("page still set %p at loop start", fs.m));
1548
1549                 res = vm_fault_object(&fs, &behind, &ahead);
1550                 switch (res) {
1551                 case FAULT_SOFT:
1552                         goto found;
1553                 case FAULT_HARD:
1554                         faultcount = behind + 1 + ahead;
1555                         hardfault = true;
1556                         goto found;
1557                 case FAULT_RESTART:
1558                         goto RetryFault;
1559                 case FAULT_SUCCESS:
1560                         return (KERN_SUCCESS);
1561                 case FAULT_FAILURE:
1562                         return (KERN_FAILURE);
1563                 case FAULT_OUT_OF_BOUNDS:
1564                         return (KERN_OUT_OF_BOUNDS);
1565                 case FAULT_PROTECTION_FAILURE:
1566                         return (KERN_PROTECTION_FAILURE);
1567                 case FAULT_CONTINUE:
1568                         break;
1569                 default:
1570                         panic("vm_fault: Unhandled status %d", res);
1571                 }
1572
1573                 /*
1574                  * The page was not found in the current object.  Try to
1575                  * traverse into a backing object or zero fill if none is
1576                  * found.
1577                  */
1578                 if (vm_fault_next(&fs))
1579                         continue;
1580                 if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
1581                         if (fs.first_object == fs.object)
1582                                 fault_page_free(&fs.first_m);
1583                         unlock_and_deallocate(&fs);
1584                         return (KERN_OUT_OF_BOUNDS);
1585                 }
1586                 VM_OBJECT_WUNLOCK(fs.object);
1587                 vm_fault_zerofill(&fs);
1588                 /* Don't try to prefault neighboring pages. */
1589                 faultcount = 1;
1590                 break;
1591         }
1592
1593 found:
1594         /*
1595          * A valid page has been found and exclusively busied.  The
1596          * object lock must no longer be held.
1597          */
1598         vm_page_assert_xbusied(fs.m);
1599         VM_OBJECT_ASSERT_UNLOCKED(fs.object);
1600
1601         /*
1602          * If the page is being written, but isn't already owned by the
1603          * top-level object, we have to copy it into a new page owned by the
1604          * top-level object.
1605          */
1606         if (fs.object != fs.first_object) {
1607                 /*
1608                  * We only really need to copy if we want to write it.
1609                  */
1610                 if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
1611                         vm_fault_cow(&fs);
1612                         /*
1613                          * We only try to prefault read-only mappings to the
1614                          * neighboring pages when this copy-on-write fault is
1615                          * a hard fault.  In other cases, trying to prefault
1616                          * is typically wasted effort.
1617                          */
1618                         if (faultcount == 0)
1619                                 faultcount = 1;
1620
1621                 } else {
1622                         fs.prot &= ~VM_PROT_WRITE;
1623                 }
1624         }
1625
1626         /*
1627          * We must verify that the maps have not changed since our last
1628          * lookup.
1629          */
1630         if (!fs.lookup_still_valid) {
1631                 rv = vm_fault_relookup(&fs);
1632                 if (rv != KERN_SUCCESS) {
1633                         fault_deallocate(&fs);
1634                         if (rv == KERN_RESTART)
1635                                 goto RetryFault;
1636                         return (rv);
1637                 }
1638         }
1639         VM_OBJECT_ASSERT_UNLOCKED(fs.object);
1640
1641         /*
1642          * If the page was filled by a pager, save the virtual address that
1643          * should be faulted on next under a sequential access pattern to the
1644          * map entry.  A read lock on the map suffices to update this address
1645          * safely.
1646          */
1647         if (hardfault)
1648                 fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;
1649
1650         /*
1651          * Page must be completely valid or it is not fit to
1652          * map into user space.  vm_pager_get_pages() ensures this.
1653          */
1654         vm_page_assert_xbusied(fs.m);
1655         KASSERT(vm_page_all_valid(fs.m),
1656             ("vm_fault: page %p partially invalid", fs.m));
1657
1658         vm_fault_dirty(&fs, fs.m);
1659
1660         /*
1661          * Put this page into the physical map.  We had to do the unlock above
1662          * because pmap_enter() may sleep.  We don't put the page
1663          * back on the active queue until later so that the pageout daemon
1664          * won't find it (yet).
1665          */
1666         pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
1667             fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
1668         if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
1669             fs.wired == 0)
1670                 vm_fault_prefault(&fs, vaddr,
1671                     faultcount > 0 ? behind : PFBAK,
1672                     faultcount > 0 ? ahead : PFFOR, false);
1673
1674         /*
1675          * If the page is not wired down, then put it where the pageout daemon
1676          * can find it.
1677          */
1678         if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
1679                 vm_page_wire(fs.m);
1680         else
1681                 vm_page_activate(fs.m);
1682         if (fs.m_hold != NULL) {
1683                 (*fs.m_hold) = fs.m;
1684                 vm_page_wire(fs.m);
1685         }
1686         vm_page_xunbusy(fs.m);
1687         fs.m = NULL;
1688
1689         /*
1690          * Unlock everything, and return
1691          */
1692         fault_deallocate(&fs);
1693         if (hardfault) {
1694                 VM_CNT_INC(v_io_faults);
1695                 curthread->td_ru.ru_majflt++;
1696 #ifdef RACCT
1697                 if (racct_enable && fs.object->type == OBJT_VNODE) {
1698                         PROC_LOCK(curproc);
1699                         if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
1700                                 racct_add_force(curproc, RACCT_WRITEBPS,
1701                                     PAGE_SIZE + behind * PAGE_SIZE);
1702                                 racct_add_force(curproc, RACCT_WRITEIOPS, 1);
1703                         } else {
1704                                 racct_add_force(curproc, RACCT_READBPS,
1705                                     PAGE_SIZE + ahead * PAGE_SIZE);
1706                                 racct_add_force(curproc, RACCT_READIOPS, 1);
1707                         }
1708                         PROC_UNLOCK(curproc);
1709                 }
1710 #endif
1711         } else 
1712                 curthread->td_ru.ru_minflt++;
1713
1714         return (KERN_SUCCESS);
1715 }
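
#if 0
/*
 * An illustrative sketch only, assuming a hypothetical in-kernel caller;
 * none of the names below come from this file.  Passing a non-NULL
 * "m_hold" makes vm_fault() return the resident page wired, so the
 * caller is responsible for unwiring it when done.
 */
static int
example_fault_and_hold(vm_map_t map, vm_offset_t uva)
{
        vm_page_t m;
        int rv;

        rv = vm_fault(map, trunc_page(uva), VM_PROT_WRITE, VM_FAULT_NORMAL,
            &m);
        if (rv != KERN_SUCCESS)
                return (EFAULT);
        /* ... access the wired page, e.g. for I/O ... */
        vm_page_unwire(m, PQ_ACTIVE);
        return (0);
}
#endif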
1716
1717 /*
1718  * Speed up the reclamation of pages that precede the faulting pindex within
1719  * the first object of the shadow chain.  Essentially, perform the equivalent
1720  * of madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
1721  * the faulting pindex by the cluster size when the pages read by vm_fault()
1722  * cross a cluster-size boundary.  The cluster size is the greater of the
1723  * smallest superpage size and VM_FAULT_DONTNEED_MIN.
1724  *
1725  * When "fs->first_object" is a shadow object, the pages in the backing object
1726  * that precede the faulting pindex are deactivated by vm_fault().  So, this
1727  * function must only be concerned with pages in the first object.
1728  */
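/*
 * For example, on a machine with 2MB superpages the cluster size is 2MB, so
 * the work below happens only when the faulting page plus its read-ahead
 * reaches the end of the 2MB-aligned cluster containing "vaddr"; the pages
 * advised MADV_DONTNEED then belong to the cluster immediately preceding
 * that one, clipped to the start of the map entry.
 */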
1729 static void
1730 vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
1731 {
1732         vm_map_entry_t entry;
1733         vm_object_t first_object, object;
1734         vm_offset_t end, start;
1735         vm_page_t m, m_next;
1736         vm_pindex_t pend, pstart;
1737         vm_size_t size;
1738
1739         object = fs->object;
1740         VM_OBJECT_ASSERT_UNLOCKED(object);
1741         first_object = fs->first_object;
1742         /* Neither fictitious nor unmanaged pages can be reclaimed. */
1743         if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
1744                 VM_OBJECT_RLOCK(first_object);
1745                 size = VM_FAULT_DONTNEED_MIN;
1746                 if (MAXPAGESIZES > 1 && size < pagesizes[1])
1747                         size = pagesizes[1];
1748                 end = rounddown2(vaddr, size);
1749                 if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
1750                     (entry = fs->entry)->start < end) {
1751                         if (end - entry->start < size)
1752                                 start = entry->start;
1753                         else
1754                                 start = end - size;
1755                         pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
1756                         pstart = OFF_TO_IDX(entry->offset) + atop(start -
1757                             entry->start);
1758                         m_next = vm_page_find_least(first_object, pstart);
1759                         pend = OFF_TO_IDX(entry->offset) + atop(end -
1760                             entry->start);
1761                         while ((m = m_next) != NULL && m->pindex < pend) {
1762                                 m_next = TAILQ_NEXT(m, listq);
1763                                 if (!vm_page_all_valid(m) ||
1764                                     vm_page_busied(m))
1765                                         continue;
1766
1767                                 /*
1768                                  * Don't clear PGA_REFERENCED, since it would
1769                                  * likely represent a reference by a different
1770                                  * process.
1771                                  *
1772                                  * Typically, at this point, prefetched pages
1773                                  * are still in the inactive queue.  Only
1774                                  * pages that triggered page faults are in the
1775                                  * active queue.  The test for whether the page
1776                                  * is in the inactive queue is racy; in the
1777                                  * worst case we will requeue the page
1778                                  * unnecessarily.
1779                                  */
1780                                 if (!vm_page_inactive(m))
1781                                         vm_page_deactivate(m);
1782                         }
1783                 }
1784                 VM_OBJECT_RUNLOCK(first_object);
1785         }
1786 }
1787
1788 /*
1789  * vm_fault_prefault provides a quick way of clustering
1790  * page faults into a process's address space.  It is a "cousin"
1791  * of vm_map_pmap_enter, except it runs at page fault time instead
1792  * of mmap time.
1793  */
1794 static void
1795 vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
1796     int backward, int forward, bool obj_locked)
1797 {
1798         pmap_t pmap;
1799         vm_map_entry_t entry;
1800         vm_object_t backing_object, lobject;
1801         vm_offset_t addr, starta;
1802         vm_pindex_t pindex;
1803         vm_page_t m;
1804         int i;
1805
1806         pmap = fs->map->pmap;
1807         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
1808                 return;
1809
1810         entry = fs->entry;
1811
1812         if (addra < backward * PAGE_SIZE) {
1813                 starta = entry->start;
1814         } else {
1815                 starta = addra - backward * PAGE_SIZE;
1816                 if (starta < entry->start)
1817                         starta = entry->start;
1818         }
1819
1820         /*
1821          * Generate the sequence of virtual addresses that are candidates for
1822          * prefaulting in an outward spiral from the faulting virtual address,
1823          * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
1824          * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
1825          * If the candidate address doesn't have a backing physical page, then
1826          * the loop immediately terminates.
1827          */
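        /*
         * Concretely, loop index i maps onto that spiral as follows: i == 0
         * probes addra - PAGE_SIZE, i == 1 probes addra + PAGE_SIZE, i == 2
         * probes addra - 2 * PAGE_SIZE, and so on.  Candidates that fall
         * outside [starta, entry->end) or past the "forward" window are
         * simply skipped.
         */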
1828         for (i = 0; i < 2 * imax(backward, forward); i++) {
1829                 addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
1830                     PAGE_SIZE);
1831                 if (addr > addra + forward * PAGE_SIZE)
1832                         addr = 0;
1833
1834                 if (addr < starta || addr >= entry->end)
1835                         continue;
1836
1837                 if (!pmap_is_prefaultable(pmap, addr))
1838                         continue;
1839
1840                 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
1841                 lobject = entry->object.vm_object;
1842                 if (!obj_locked)
1843                         VM_OBJECT_RLOCK(lobject);
1844                 while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
1845                     lobject->type == OBJT_DEFAULT &&
1846                     (backing_object = lobject->backing_object) != NULL) {
1847                         KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
1848                             0, ("vm_fault_prefault: unaligned object offset"));
1849                         pindex += lobject->backing_object_offset >> PAGE_SHIFT;
1850                         VM_OBJECT_RLOCK(backing_object);
1851                         if (!obj_locked || lobject != entry->object.vm_object)
1852                                 VM_OBJECT_RUNLOCK(lobject);
1853                         lobject = backing_object;
1854                 }
1855                 if (m == NULL) {
1856                         if (!obj_locked || lobject != entry->object.vm_object)
1857                                 VM_OBJECT_RUNLOCK(lobject);
1858                         break;
1859                 }
1860                 if (vm_page_all_valid(m) &&
1861                     (m->flags & PG_FICTITIOUS) == 0)
1862                         pmap_enter_quick(pmap, addr, m, entry->protection);
1863                 if (!obj_locked || lobject != entry->object.vm_object)
1864                         VM_OBJECT_RUNLOCK(lobject);
1865         }
1866 }
1867
1868 /*
1869  * Hold each of the physical pages that are mapped by the specified range of
1870  * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
1871  * and allow the specified types of access, "prot".  If all of the implied
1872  * pages are successfully held, then the number of held pages is returned
1873  * together with pointers to those pages in the array "ma".  However, if any
1874  * of the pages cannot be held, -1 is returned.
1875  */
1876 int
1877 vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
1878     vm_prot_t prot, vm_page_t *ma, int max_count)
1879 {
1880         vm_offset_t end, va;
1881         vm_page_t *mp;
1882         int count;
1883         boolean_t pmap_failed;
1884
1885         if (len == 0)
1886                 return (0);
1887         end = round_page(addr + len);
1888         addr = trunc_page(addr);
1889
1890         if (!vm_map_range_valid(map, addr, end))
1891                 return (-1);
1892
1893         if (atop(end - addr) > max_count)
1894                 panic("vm_fault_quick_hold_pages: count > max_count");
1895         count = atop(end - addr);
1896
1897         /*
1898          * Most likely, the physical pages are resident in the pmap, so it is
1899          * faster to try pmap_extract_and_hold() first.
1900          */
1901         pmap_failed = FALSE;
1902         for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
1903                 *mp = pmap_extract_and_hold(map->pmap, va, prot);
1904                 if (*mp == NULL)
1905                         pmap_failed = TRUE;
1906                 else if ((prot & VM_PROT_WRITE) != 0 &&
1907                     (*mp)->dirty != VM_PAGE_BITS_ALL) {
1908                         /*
1909                          * Explicitly dirty the physical page.  Otherwise, the
1910                          * caller's changes may go unnoticed because they are
1911                          * performed through an unmanaged mapping or by a DMA
1912                          * operation.
1913                          *
1914                          * The object lock is not held here.
1915                          * See vm_page_clear_dirty_mask().
1916                          */
1917                         vm_page_dirty(*mp);
1918                 }
1919         }
1920         if (pmap_failed) {
1921                 /*
1922                  * One or more pages could not be held by the pmap.  Either no
1923                  * page was mapped at the specified virtual address or that
1924                  * mapping had insufficient permissions.  Attempt to fault in
1925                  * and hold these pages.
1926                  *
1927                  * If vm_fault_disable_pagefaults() was called,
1928                  * i.e., TDP_NOFAULTING is set, we must not sleep nor
1929                  * acquire MD VM locks, which means we must not call
1930                  * vm_fault().  Some (out of tree) callers mark
1931                  * too wide a code area with vm_fault_disable_pagefaults()
1932  * already; use the VM_PROT_QUICK_NOFAULT flag to request
1933                  * the proper behaviour explicitly.
1934                  */
1935                 if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
1936                     (curthread->td_pflags & TDP_NOFAULTING) != 0)
1937                         goto error;
1938                 for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
1939                         if (*mp == NULL && vm_fault(map, va, prot,
1940                             VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
1941                                 goto error;
1942         }
1943         return (count);
1944 error:  
1945         for (mp = ma; mp < ma + count; mp++)
1946                 if (*mp != NULL)
1947                         vm_page_unwire(*mp, PQ_INACTIVE);
1948         return (-1);
1949 }
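
#if 0
/*
 * An illustrative sketch only; the buffer size and names below are
 * hypothetical and do not come from this file.  A typical caller wires a
 * small user buffer for I/O and releases the pages with
 * vm_page_unhold_pages() once the transfer completes.
 */
static int
example_hold_user_buffer(vm_map_t map, vm_offset_t uaddr, vm_size_t len)
{
        vm_page_t ma[8];
        int count;

        /* The range must span at most nitems(ma) pages or the call panics. */
        if (atop(round_page(uaddr + len) - trunc_page(uaddr)) > nitems(ma))
                return (EINVAL);
        count = vm_fault_quick_hold_pages(map, uaddr, len,
            VM_PROT_READ | VM_PROT_WRITE, ma, nitems(ma));
        if (count == -1)
                return (EFAULT);
        /* ... perform the copy or DMA using the held pages ... */
        vm_page_unhold_pages(ma, count);
        return (0);
}
#endif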
1950
1951 /*
1952  *      Routine:
1953  *              vm_fault_copy_entry
1954  *      Function:
1955  *              Create a new shadow object backing dst_entry with a private
1956  *              copy of all underlying pages.  When src_entry is equal to
1957  *              dst_entry, the function implements COW for a wired-down map
1958  *              entry.  Otherwise, it forks the wired entry into dst_map.
1959  *
1960  *      In/out conditions:
1961  *              The source and destination maps must be locked for write.
1962  *              The source map entry must be wired down (or be a sharing map
1963  *              entry corresponding to a main map entry that is wired down).
1964  */
1965 void
1966 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1967     vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
1968     vm_ooffset_t *fork_charge)
1969 {
1970         vm_object_t backing_object, dst_object, object, src_object;
1971         vm_pindex_t dst_pindex, pindex, src_pindex;
1972         vm_prot_t access, prot;
1973         vm_offset_t vaddr;
1974         vm_page_t dst_m;
1975         vm_page_t src_m;
1976         boolean_t upgrade;
1977
1978 #ifdef  lint
1979         src_map++;
1980 #endif  /* lint */
1981
1982         upgrade = src_entry == dst_entry;
1983         access = prot = dst_entry->protection;
1984
1985         src_object = src_entry->object.vm_object;
1986         src_pindex = OFF_TO_IDX(src_entry->offset);
1987
1988         if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
1989                 dst_object = src_object;
1990                 vm_object_reference(dst_object);
1991         } else {
1992                 /*
1993                  * Create the top-level object for the destination entry.
1994                  * Doesn't actually shadow anything - we copy the pages
1995                  * directly.
1996                  */
1997                 dst_object = vm_object_allocate_anon(atop(dst_entry->end -
1998                     dst_entry->start), NULL, NULL, 0);
1999 #if VM_NRESERVLEVEL > 0
2000                 dst_object->flags |= OBJ_COLORED;
2001                 dst_object->pg_color = atop(dst_entry->start);
2002 #endif
2003                 dst_object->domain = src_object->domain;
2004                 dst_object->charge = dst_entry->end - dst_entry->start;
2005         }
2006
2007         VM_OBJECT_WLOCK(dst_object);
2008         KASSERT(upgrade || dst_entry->object.vm_object == NULL,
2009             ("vm_fault_copy_entry: vm_object not NULL"));
2010         if (src_object != dst_object) {
2011                 dst_entry->object.vm_object = dst_object;
2012                 dst_entry->offset = 0;
2013                 dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
2014         }
2015         if (fork_charge != NULL) {
2016                 KASSERT(dst_entry->cred == NULL,
2017                     ("vm_fault_copy_entry: leaked swp charge"));
2018                 dst_object->cred = curthread->td_ucred;
2019                 crhold(dst_object->cred);
2020                 *fork_charge += dst_object->charge;
2021         } else if ((dst_object->type == OBJT_DEFAULT ||
2022             (dst_object->flags & OBJ_SWAP) != 0) &&
2023             dst_object->cred == NULL) {
2024                 KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
2025                     dst_entry));
2026                 dst_object->cred = dst_entry->cred;
2027                 dst_entry->cred = NULL;
2028         }
2029
2030         /*
2031          * If not an upgrade, then enter the mappings in the pmap as
2032          * read and/or execute accesses.  Otherwise, enter them as
2033          * write accesses.
2034          *
2035          * A writeable large page mapping is only created if all of
2036          * the constituent small page mappings are modified. Marking
2037          * PTEs as modified on inception allows promotion to happen
2038  * without taking a potentially large number of soft faults.
2039          */
2040         if (!upgrade)
2041                 access &= ~VM_PROT_WRITE;
2042
2043         /*
2044          * Loop through all of the virtual pages within the entry's
2045          * range, copying each page from the source object to the
2046          * destination object.  Since the source is wired, those pages
2047          * must exist.  In contrast, the destination is pageable.
2048          * Since the destination object doesn't share any backing storage
2049          * with the source object, all of its pages must be dirtied,
2050          * regardless of whether they can be written.
2051          */
2052         for (vaddr = dst_entry->start, dst_pindex = 0;
2053             vaddr < dst_entry->end;
2054             vaddr += PAGE_SIZE, dst_pindex++) {
2055 again:
2056                 /*
2057                  * Find the page in the source object, and copy it in.
2058                  * Because the source is wired down, the page will be
2059                  * in memory.
2060                  */
2061                 if (src_object != dst_object)
2062                         VM_OBJECT_RLOCK(src_object);
2063                 object = src_object;
2064                 pindex = src_pindex + dst_pindex;
2065                 while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
2066                     (backing_object = object->backing_object) != NULL) {
2067                         /*
2068                          * Unless the source mapping is read-only or
2069                          * it is presently being upgraded from
2070                          * read-only, the first object in the shadow
2071                          * chain should provide all of the pages.  In
2072                          * other words, this loop body should never be
2073                          * executed when the source mapping is already
2074                          * read/write.
2075                          */
2076                         KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
2077                             upgrade,
2078                             ("vm_fault_copy_entry: main object missing page"));
2079
2080                         VM_OBJECT_RLOCK(backing_object);
2081                         pindex += OFF_TO_IDX(object->backing_object_offset);
2082                         if (object != dst_object)
2083                                 VM_OBJECT_RUNLOCK(object);
2084                         object = backing_object;
2085                 }
2086                 KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));
2087
2088                 if (object != dst_object) {
2089                         /*
2090                          * Allocate a page in the destination object.
2091                          */
2092                         dst_m = vm_page_alloc(dst_object, (src_object ==
2093                             dst_object ? src_pindex : 0) + dst_pindex,
2094                             VM_ALLOC_NORMAL);
2095                         if (dst_m == NULL) {
2096                                 VM_OBJECT_WUNLOCK(dst_object);
2097                                 VM_OBJECT_RUNLOCK(object);
2098                                 vm_wait(dst_object);
2099                                 VM_OBJECT_WLOCK(dst_object);
2100                                 goto again;
2101                         }
2102
2103                         /*
2104                          * See the comment in vm_fault_cow().
2105                          */
2106                         if (src_object == dst_object &&
2107                             (object->flags & OBJ_ONEMAPPING) == 0)
2108                                 pmap_remove_all(src_m);
2109                         pmap_copy_page(src_m, dst_m);
2110                         VM_OBJECT_RUNLOCK(object);
2111                         dst_m->dirty = dst_m->valid = src_m->valid;
2112                 } else {
2113                         dst_m = src_m;
2114                         if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
2115                                 goto again;
2116                         if (dst_m->pindex >= dst_object->size) {
2117                                 /*
2118                                  * We are upgrading.  The index can fall
2119                                  * out of bounds if the object type is
2120                                  * vnode and the file was truncated.
2121                                  */
2122                                 vm_page_xunbusy(dst_m);
2123                                 break;
2124                         }
2125                 }
2126                 VM_OBJECT_WUNLOCK(dst_object);
2127
2128                 /*
2129                  * Enter it in the pmap. If a wired, copy-on-write
2130                  * mapping is being replaced by a write-enabled
2131                  * mapping, then wire that new mapping.
2132                  *
2133                  * The page can be invalid if the user called
2134                  * msync(MS_INVALIDATE) or truncated the backing vnode
2135                  * or shared memory object.  In this case, do not
2136                  * insert it into pmap, but still do the copy so that
2137                  * all copies of the wired map entry have similar
2138                  * backing pages.
2139                  */
2140                 if (vm_page_all_valid(dst_m)) {
2141                         pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
2142                             access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
2143                 }
2144
2145                 /*
2146                  * Mark it no longer busy, and put it on the active list.
2147                  */
2148                 VM_OBJECT_WLOCK(dst_object);
2149                 
2150                 if (upgrade) {
2151                         if (src_m != dst_m) {
2152                                 vm_page_unwire(src_m, PQ_INACTIVE);
2153                                 vm_page_wire(dst_m);
2154                         } else {
2155                                 KASSERT(vm_page_wired(dst_m),
2156                                     ("dst_m %p is not wired", dst_m));
2157                         }
2158                 } else {
2159                         vm_page_activate(dst_m);
2160                 }
2161                 vm_page_xunbusy(dst_m);
2162         }
2163         VM_OBJECT_WUNLOCK(dst_object);
2164         if (upgrade) {
2165                 dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
2166                 vm_object_deallocate(src_object);
2167         }
2168 }
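
/*
 * Usage note (illustrative forms only; the variable names are hypothetical):
 * the upgrade case passes the same entry as both source and destination,
 * typically with a NULL fork charge,
 *
 *      vm_fault_copy_entry(map, map, entry, entry, NULL);
 *
 * while forking a wired entry passes distinct entries and accumulates the
 * charge for the newly allocated object in *fork_charge,
 *
 *      vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
 *          &fork_charge);
 */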
2169
2170 /*
2171  * Block entry into the machine-independent layer's page fault handler by
2172  * the calling thread.  Subsequent calls to vm_fault() by that thread will
2173  * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
2174  * spurious page faults. 
2175  */
2176 int
2177 vm_fault_disable_pagefaults(void)
2178 {
2179
2180         return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
2181 }
2182
2183 void
2184 vm_fault_enable_pagefaults(int save)
2185 {
2186
2187         curthread_pflags_restore(save);
2188 }
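
#if 0
/*
 * An illustrative sketch only; the function and variable names are
 * hypothetical.  A caller that must not enter the fault handler, for
 * instance while holding locks that paging might need, brackets the copy
 * with the pair above; an EFAULT return may simply mean the copy should be
 * retried on a path that is allowed to fault.
 */
static int
example_copyin_nofault(const void *uaddr, void *kaddr, size_t len)
{
        int error, save;

        save = vm_fault_disable_pagefaults();
        error = copyin(uaddr, kaddr, len);
        vm_fault_enable_pagefaults(save);
        return (error);
}
#endif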