[FreeBSD/FreeBSD.git] / sys/vm/vm_fault.c
1 /*-
2  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  *
11  *
12  * This code is derived from software contributed to Berkeley by
13  * The Mach Operating System project at Carnegie-Mellon University.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. All advertising materials mentioning features or use of this software
24  *    must display the following acknowledgement:
25  *      This product includes software developed by the University of
26  *      California, Berkeley and its contributors.
27  * 4. Neither the name of the University nor the names of its contributors
28  *    may be used to endorse or promote products derived from this software
29  *    without specific prior written permission.
30  *
31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  *
43  *      from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
44  *
45  *
46  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
47  * All rights reserved.
48  *
49  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
50  *
51  * Permission to use, copy, modify and distribute this software and
52  * its documentation is hereby granted, provided that both the copyright
53  * notice and this permission notice appear in all copies of the
54  * software, derivative works or modified versions, and any portions
55  * thereof, and that both notices appear in supporting documentation.
56  *
57  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
58  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
59  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
60  *
61  * Carnegie Mellon requests users of this software to return to
62  *
63  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
64  *  School of Computer Science
65  *  Carnegie Mellon University
66  *  Pittsburgh PA 15213-3890
67  *
68  * any improvements or extensions that they make and grant Carnegie the
69  * rights to redistribute these changes.
70  */
71
72 /*
73  *      Page fault handling module.
74  */
75
76 #include <sys/cdefs.h>
77 __FBSDID("$FreeBSD$");
78
79 #include "opt_ktrace.h"
80 #include "opt_vm.h"
81
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/kernel.h>
85 #include <sys/lock.h>
86 #include <sys/mman.h>
87 #include <sys/mutex.h>
88 #include <sys/proc.h>
89 #include <sys/racct.h>
90 #include <sys/refcount.h>
91 #include <sys/resourcevar.h>
92 #include <sys/rwlock.h>
93 #include <sys/signalvar.h>
94 #include <sys/sysctl.h>
95 #include <sys/sysent.h>
96 #include <sys/vmmeter.h>
97 #include <sys/vnode.h>
98 #ifdef KTRACE
99 #include <sys/ktrace.h>
100 #endif
101
102 #include <vm/vm.h>
103 #include <vm/vm_param.h>
104 #include <vm/pmap.h>
105 #include <vm/vm_map.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_page.h>
108 #include <vm/vm_pageout.h>
109 #include <vm/vm_kern.h>
110 #include <vm/vm_pager.h>
111 #include <vm/vm_extern.h>
112 #include <vm/vm_reserv.h>
113
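/*
 * Number of pages that vm_fault_prefault() considers behind (PFBAK) and
 * ahead (PFFOR) of the faulting address.
 */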
114 #define PFBAK 4
115 #define PFFOR 4
116
117 #define VM_FAULT_READ_DEFAULT   (1 + VM_FAULT_READ_AHEAD_INIT)
118 #define VM_FAULT_READ_MAX       (1 + VM_FAULT_READ_AHEAD_MAX)
119
120 #define VM_FAULT_DONTNEED_MIN   1048576
121
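/*
 * State shared among the fault handling routines for a single page fault.
 */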
122 struct faultstate {
123         vm_page_t m;
124         vm_object_t object;
125         vm_pindex_t pindex;
126         vm_page_t first_m;
127         vm_object_t     first_object;
128         vm_pindex_t first_pindex;
129         vm_map_t map;
130         vm_map_entry_t entry;
131         int map_generation;
132         bool lookup_still_valid;
133         struct vnode *vp;
134 };
135
136 static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
137             int ahead);
138 static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
139             int backward, int forward, bool obj_locked);
140
141 static int vm_pfault_oom_attempts = 3;
142 SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
143     &vm_pfault_oom_attempts, 0,
144     "Number of page allocation attempts in page fault handler before it "
145     "triggers OOM handling");
146
147 static int vm_pfault_oom_wait = 10;
148 SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
149     &vm_pfault_oom_wait, 0,
150     "Number of seconds to wait for free pages before retrying "
151     "the page fault handler");
152
153 static inline void
154 fault_page_release(vm_page_t *mp)
155 {
156         vm_page_t m;
157
158         m = *mp;
159         if (m != NULL) {
160                 /*
161                  * We are likely to loop around again and attempt to busy
162                  * this page.  Deactivating it leaves it available for
163                  * pageout while optimizing fault restarts.
164                  */
165                 vm_page_lock(m);
166                 vm_page_deactivate(m);
167                 vm_page_unlock(m);
168                 vm_page_xunbusy(m);
169                 *mp = NULL;
170         }
171 }
172
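/*
 * Free a page that was allocated during fault processing unless it has
 * been wired in the meantime, in which case only its busy lock is dropped.
 */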
173 static inline void
174 fault_page_free(vm_page_t *mp)
175 {
176         vm_page_t m;
177
178         m = *mp;
179         if (m != NULL) {
180                 VM_OBJECT_ASSERT_WLOCKED(m->object);
181                 if (!vm_page_wired(m))
182                         vm_page_free(m);
183                 else
184                         vm_page_xunbusy(m);
185                 *mp = NULL;
186         }
187 }
188
189 static inline void
190 unlock_map(struct faultstate *fs)
191 {
192
193         if (fs->lookup_still_valid) {
194                 vm_map_lookup_done(fs->map, fs->entry);
195                 fs->lookup_still_valid = false;
196         }
197 }
198
199 static void
200 unlock_vp(struct faultstate *fs)
201 {
202
203         if (fs->vp != NULL) {
204                 vput(fs->vp);
205                 fs->vp = NULL;
206         }
207 }
208
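/*
 * Release the resources held in the fault state: the pages, the
 * paging-in-progress counts, the reference on the top-level object, and
 * the map and vnode locks.
 */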
209 static void
210 fault_deallocate(struct faultstate *fs)
211 {
212
213         fault_page_release(&fs->m);
214         vm_object_pip_wakeup(fs->object);
215         if (fs->object != fs->first_object) {
216                 VM_OBJECT_WLOCK(fs->first_object);
217                 fault_page_free(&fs->first_m);
218                 VM_OBJECT_WUNLOCK(fs->first_object);
219                 vm_object_pip_wakeup(fs->first_object);
220         }
221         vm_object_deallocate(fs->first_object);
222         unlock_map(fs);
223         unlock_vp(fs);
224 }
225
226 static void
227 unlock_and_deallocate(struct faultstate *fs)
228 {
229
230         VM_OBJECT_WUNLOCK(fs->object);
231         fault_deallocate(fs);
232 }
233
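/*
 * Dirty the page, and manage its PGA_NOSYNC flag, when the fault writes
 * to it or when VM_FAULT_DIRTY is requested.
 */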
234 static void
235 vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
236     vm_prot_t fault_type, int fault_flags)
237 {
238         bool need_dirty;
239
240         if (((prot & VM_PROT_WRITE) == 0 &&
241             (fault_flags & VM_FAULT_DIRTY) == 0) ||
242             (m->oflags & VPO_UNMANAGED) != 0)
243                 return;
244
245         VM_PAGE_OBJECT_BUSY_ASSERT(m);
246
247         need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
248             (fault_flags & VM_FAULT_WIRE) == 0) ||
249             (fault_flags & VM_FAULT_DIRTY) != 0;
250
251         vm_object_set_writeable_dirty(m->object);
252
253         /*
254          * If the fault is a write, we know that this page is being
255          * written NOW so dirty it explicitly to save on
256          * pmap_is_modified() calls later.
257          *
258          * Also, since the page is now dirty, we can possibly tell
259          * the pager to release any swap backing the page.
260          */
261         if (need_dirty && vm_page_set_dirty(m) == 0) {
262                 /*
263                  * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
264                  * if the page is already dirty to prevent data written with
265                  * the expectation of being synced from not being synced.
266                  * Likewise if this entry does not request NOSYNC then make
267                  * sure the page isn't marked NOSYNC.  Applications sharing
268                  * data should use the same flags to avoid ping ponging.
269                  */
270                 if ((entry->eflags & MAP_ENTRY_NOSYNC) != 0)
271                         vm_page_aflag_set(m, PGA_NOSYNC);
272                 else
273                         vm_page_aflag_clear(m, PGA_NOSYNC);
274         }
275
276 }
277
278 /*
279  * Unlocks fs.first_object and fs.map on success.
280  */
281 static int
282 vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
283     int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
284 {
285         vm_page_t m, m_map;
286 #if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
287     __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
288     VM_NRESERVLEVEL > 0
289         vm_page_t m_super;
290         int flags;
291 #endif
292         int psind, rv;
293
294         MPASS(fs->vp == NULL);
295         vm_object_busy(fs->first_object);
296         m = vm_page_lookup(fs->first_object, fs->first_pindex);
297         /* A busy page can be mapped for read|execute access. */
298         if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
299             vm_page_busied(m)) || !vm_page_all_valid(m)) {
300                 rv = KERN_FAILURE;
301                 goto out;
302         }
303         m_map = m;
304         psind = 0;
305 #if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
306     __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
307     VM_NRESERVLEVEL > 0
308         if ((m->flags & PG_FICTITIOUS) == 0 &&
309             (m_super = vm_reserv_to_superpage(m)) != NULL &&
310             rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
311             roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
312             (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
313             (pagesizes[m_super->psind] - 1)) && !wired &&
314             pmap_ps_enabled(fs->map->pmap)) {
315                 flags = PS_ALL_VALID;
316                 if ((prot & VM_PROT_WRITE) != 0) {
317                         /*
318                          * Create a superpage mapping allowing write access
319                          * only if none of the constituent pages are busy and
320                          * all of them are already dirty (except possibly for
321                          * the page that was faulted on).
322                          */
323                         flags |= PS_NONE_BUSY;
324                         if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
325                                 flags |= PS_ALL_DIRTY;
326                 }
327                 if (vm_page_ps_test(m_super, flags, m)) {
328                         m_map = m_super;
329                         psind = m_super->psind;
330                         vaddr = rounddown2(vaddr, pagesizes[psind]);
331                         /* Preset the modified bit for dirty superpages. */
332                         if ((flags & PS_ALL_DIRTY) != 0)
333                                 fault_type |= VM_PROT_WRITE;
334                 }
335         }
336 #endif
337         rv = pmap_enter(fs->map->pmap, vaddr, m_map, prot, fault_type |
338             PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), psind);
339         if (rv != KERN_SUCCESS)
340                 goto out;
341         if (m_hold != NULL) {
342                 *m_hold = m;
343                 vm_page_wire(m);
344         }
345         vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags);
346         if (psind == 0 && !wired)
347                 vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
348         VM_OBJECT_RUNLOCK(fs->first_object);
349         vm_map_lookup_done(fs->map, fs->entry);
350         curthread->td_ru.ru_minflt++;
351
352 out:
353         vm_object_unbusy(fs->first_object);
354         return (rv);
355 }
356
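/*
 * Reacquire the map read lock.  The top-level object lock is dropped
 * around a blocking acquisition to preserve the usual lock ordering.
 */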
357 static void
358 vm_fault_restore_map_lock(struct faultstate *fs)
359 {
360
361         VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
362         MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);
363
364         if (!vm_map_trylock_read(fs->map)) {
365                 VM_OBJECT_WUNLOCK(fs->first_object);
366                 vm_map_lock_read(fs->map);
367                 VM_OBJECT_WLOCK(fs->first_object);
368         }
369         fs->lookup_still_valid = true;
370 }
371
372 static void
373 vm_fault_populate_check_page(vm_page_t m)
374 {
375
376         /*
377          * Check each page to ensure that the pager is obeying the
378          * interface: the page must be installed in the object, fully
379          * valid, and exclusively busied.
380          */
381         MPASS(m != NULL);
382         MPASS(vm_page_all_valid(m));
383         MPASS(vm_page_xbusied(m));
384 }
385
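/*
 * Deactivate and unbusy the pages in the index range [first, last],
 * undoing the pager's populate work for pages that will not be mapped.
 */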
386 static void
387 vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
388     vm_pindex_t last)
389 {
390         vm_page_t m;
391         vm_pindex_t pidx;
392
393         VM_OBJECT_ASSERT_WLOCKED(object);
394         MPASS(first <= last);
395         for (pidx = first, m = vm_page_lookup(object, pidx);
396             pidx <= last; pidx++, m = vm_page_next(m)) {
397                 vm_fault_populate_check_page(m);
398                 vm_page_lock(m);
399                 vm_page_deactivate(m);
400                 vm_page_unlock(m);
401                 vm_page_xunbusy(m);
402         }
403 }
404
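/*
 * Handle a fault against an OBJ_POPULATE object by asking the pager's
 * populate() method to instantiate a range of pages around the faulting
 * index, and then map that range, clipped to the map entry.
 */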
405 static int
406 vm_fault_populate(struct faultstate *fs, vm_prot_t prot, int fault_type,
407     int fault_flags, boolean_t wired, vm_page_t *m_hold)
408 {
409         struct mtx *m_mtx;
410         vm_offset_t vaddr;
411         vm_page_t m;
412         vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
413         int i, npages, psind, rv;
414
415         MPASS(fs->object == fs->first_object);
416         VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
417         MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);
418         MPASS(fs->first_object->backing_object == NULL);
419         MPASS(fs->lookup_still_valid);
420
421         pager_first = OFF_TO_IDX(fs->entry->offset);
422         pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
423         unlock_map(fs);
424         unlock_vp(fs);
425
426         /*
427          * Call the pager (driver) populate() method.
428          *
429          * There is no guarantee that the method will be called again
430          * if the current fault is for read, and a future fault is
431          * for write.  Report the entry's maximum allowed protection
432          * to the driver.
433          */
434         rv = vm_pager_populate(fs->first_object, fs->first_pindex,
435             fault_type, fs->entry->max_protection, &pager_first, &pager_last);
436
437         VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
438         if (rv == VM_PAGER_BAD) {
439                 /*
440                  * VM_PAGER_BAD is the backdoor for a pager to request
441                  * normal fault handling.
442                  */
443                 vm_fault_restore_map_lock(fs);
444                 if (fs->map->timestamp != fs->map_generation)
445                         return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
446                 return (KERN_NOT_RECEIVER);
447         }
448         if (rv != VM_PAGER_OK)
449                 return (KERN_FAILURE); /* AKA SIGSEGV */
450
451         /* Ensure that the driver is obeying the interface. */
452         MPASS(pager_first <= pager_last);
453         MPASS(fs->first_pindex <= pager_last);
454         MPASS(fs->first_pindex >= pager_first);
455         MPASS(pager_last < fs->first_object->size);
456
457         vm_fault_restore_map_lock(fs);
458         if (fs->map->timestamp != fs->map_generation) {
459                 vm_fault_populate_cleanup(fs->first_object, pager_first,
460                     pager_last);
461                 return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
462         }
463
464         /*
465          * The map is unchanged after our last unlock.  Process the fault.
466          *
467          * The range [pager_first, pager_last] that is given to the
468          * pager is only a hint.  The pager may populate any range
469          * within the object that includes the requested page index.
470          * In case the pager expanded the range, clip it to fit into
471          * the map entry.
472          */
473         map_first = OFF_TO_IDX(fs->entry->offset);
474         if (map_first > pager_first) {
475                 vm_fault_populate_cleanup(fs->first_object, pager_first,
476                     map_first - 1);
477                 pager_first = map_first;
478         }
479         map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
480         if (map_last < pager_last) {
481                 vm_fault_populate_cleanup(fs->first_object, map_last + 1,
482                     pager_last);
483                 pager_last = map_last;
484         }
485         for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
486             pidx <= pager_last;
487             pidx += npages, m = vm_page_next(&m[npages - 1])) {
488                 vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
489 #if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
490     __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)
491                 psind = m->psind;
492                 if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
493                     pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
494                     !pmap_ps_enabled(fs->map->pmap) || wired))
495                         psind = 0;
496 #else
497                 psind = 0;
498 #endif          
499                 npages = atop(pagesizes[psind]);
500                 for (i = 0; i < npages; i++) {
501                         vm_fault_populate_check_page(&m[i]);
502                         vm_fault_dirty(fs->entry, &m[i], prot, fault_type,
503                             fault_flags);
504                 }
505                 VM_OBJECT_WUNLOCK(fs->first_object);
506                 rv = pmap_enter(fs->map->pmap, vaddr, m, prot, fault_type |
507                     (wired ? PMAP_ENTER_WIRED : 0), psind);
508 #if defined(__amd64__)
509                 if (psind > 0 && rv == KERN_FAILURE) {
510                         for (i = 0; i < npages; i++) {
511                                 rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
512                                     &m[i], prot, fault_type |
513                                     (wired ? PMAP_ENTER_WIRED : 0), 0);
514                                 MPASS(rv == KERN_SUCCESS);
515                         }
516                 }
517 #else
518                 MPASS(rv == KERN_SUCCESS);
519 #endif
520                 VM_OBJECT_WLOCK(fs->first_object);
521                 m_mtx = NULL;
522                 for (i = 0; i < npages; i++) {
523                         if ((fault_flags & VM_FAULT_WIRE) != 0) {
524                                 vm_page_wire(&m[i]);
525                         } else {
526                                 vm_page_change_lock(&m[i], &m_mtx);
527                                 vm_page_activate(&m[i]);
528                         }
529                         if (m_hold != NULL && m[i].pindex == fs->first_pindex) {
530                                 *m_hold = &m[i];
531                                 vm_page_wire(&m[i]);
532                         }
533                         vm_page_xunbusy(&m[i]);
534                 }
535                 if (m_mtx != NULL)
536                         mtx_unlock(m_mtx);
537         }
538         curthread->td_ru.ru_majflt++;
539         return (KERN_SUCCESS);
540 }
541
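/*
 * How KERN_PROTECTION_FAILURE is translated into a signal (see
 * vm_fault_trap()): 0 autodetects from the process ABI, 1 always delivers
 * the historical SIGBUS, any other value always delivers SIGSEGV.
 */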
542 static int prot_fault_translation;
543 SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
544     &prot_fault_translation, 0,
545     "Control signal to deliver on protection fault");
546
547 /* compat definition to keep common code for signal translation */
548 #define UCODE_PAGEFLT   12
549 #ifdef T_PAGEFLT
550 _Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
551 #endif
552
553 /*
554  *      vm_fault_trap:
555  *
556  *      Handle a page fault occurring at the given address,
557  *      requiring the given permissions, in the map specified.
558  *      If successful, the page is inserted into the
559  *      associated physical map.
560  *
561  *      NOTE: the given address should be truncated to the
562  *      proper page address.
563  *
564  *      KERN_SUCCESS is returned if the page fault is handled; otherwise,
565  *      a standard error specifying why the fault is fatal is returned.
566  *
567  *      The map in question must be referenced, and remains so.
568  *      Caller may hold no locks.
569  */
570 int
571 vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
572     int fault_flags, int *signo, int *ucode)
573 {
574         int result;
575
576         MPASS(signo == NULL || ucode != NULL);
577 #ifdef KTRACE
578         if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
579                 ktrfault(vaddr, fault_type);
580 #endif
581         result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
582             NULL);
583         KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
584             result == KERN_INVALID_ADDRESS ||
585             result == KERN_RESOURCE_SHORTAGE ||
586             result == KERN_PROTECTION_FAILURE ||
587             result == KERN_OUT_OF_BOUNDS,
588             ("Unexpected Mach error %d from vm_fault()", result));
589 #ifdef KTRACE
590         if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
591                 ktrfaultend(result);
592 #endif
593         if (result != KERN_SUCCESS && signo != NULL) {
594                 switch (result) {
595                 case KERN_FAILURE:
596                 case KERN_INVALID_ADDRESS:
597                         *signo = SIGSEGV;
598                         *ucode = SEGV_MAPERR;
599                         break;
600                 case KERN_RESOURCE_SHORTAGE:
601                         *signo = SIGBUS;
602                         *ucode = BUS_OOMERR;
603                         break;
604                 case KERN_OUT_OF_BOUNDS:
605                         *signo = SIGBUS;
606                         *ucode = BUS_OBJERR;
607                         break;
608                 case KERN_PROTECTION_FAILURE:
609                         if (prot_fault_translation == 0) {
610                                 /*
611                                  * Autodetect.  This check also covers
612                                  * the images without the ABI-tag ELF
613                                  * note.
614                                  */
615                                 if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
616                                     curproc->p_osrel >= P_OSREL_SIGSEGV) {
617                                         *signo = SIGSEGV;
618                                         *ucode = SEGV_ACCERR;
619                                 } else {
620                                         *signo = SIGBUS;
621                                         *ucode = UCODE_PAGEFLT;
622                                 }
623                         } else if (prot_fault_translation == 1) {
624                                 /* Always compat mode. */
625                                 *signo = SIGBUS;
626                                 *ucode = UCODE_PAGEFLT;
627                         } else {
628                                 /* Always SIGSEGV mode. */
629                                 *signo = SIGSEGV;
630                                 *ucode = SEGV_ACCERR;
631                         }
632                         break;
633                 default:
634                         KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
635                             result));
636                         break;
637                 }
638         }
639         return (result);
640 }
641
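/*
 * Lock the vnode backing an OBJT_VNODE object.  If the lock cannot be
 * acquired without sleeping, all fault state is dropped first and
 * KERN_RESOURCE_SHORTAGE is returned so that the caller retries the fault.
 */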
642 static int
643 vm_fault_lock_vnode(struct faultstate *fs)
644 {
645         struct vnode *vp;
646         int error, locked;
647
648         if (fs->object->type != OBJT_VNODE)
649                 return (KERN_SUCCESS);
650         vp = fs->object->handle;
651         if (vp == fs->vp) {
652                 ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
653                 return (KERN_SUCCESS);
654         }
655
656         /*
657          * Perform an unlock in case the desired vnode changed while
658          * the map was unlocked during a retry.
659          */
660         unlock_vp(fs);
661
662         locked = VOP_ISLOCKED(vp);
663         if (locked != LK_EXCLUSIVE)
664                 locked = LK_SHARED;
665
666         /*
667          * We must not sleep acquiring the vnode lock while we have
668          * the page exclusive busied or the object's
669          * paging-in-progress count incremented.  Otherwise, we could
670          * deadlock.
671          */
672         error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT, curthread);
673         if (error == 0) {
674                 fs->vp = vp;
675                 return (KERN_SUCCESS);
676         }
677
678         vhold(vp);
679         unlock_and_deallocate(fs);
680         error = vget(vp, locked | LK_RETRY | LK_CANRECURSE, curthread);
681         vdrop(vp);
682         fs->vp = vp;
683         KASSERT(error == 0, ("vm_fault: vget failed %d", error));
684         return (KERN_RESOURCE_SHORTAGE);
685 }
686
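/*
 * The heart of fault handling: find or create the page backing vaddr and
 * enter it into the pmap.  If m_hold is not NULL, the page is additionally
 * wired and returned to the caller through it.
 */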
687 int
688 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
689     int fault_flags, vm_page_t *m_hold)
690 {
691         struct faultstate fs;
692         struct domainset *dset;
693         vm_object_t next_object, retry_object;
694         vm_offset_t e_end, e_start;
695         vm_pindex_t retry_pindex;
696         vm_prot_t prot, retry_prot;
697         int ahead, alloc_req, behind, cluster_offset, era, faultcount;
698         int nera, oom, result, rv;
699         u_char behavior;
700         boolean_t wired;        /* Passed by reference. */
701         bool dead, hardfault, is_first_object_locked;
702
703         VM_CNT_INC(v_vm_faults);
704
705         if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
706                 return (KERN_PROTECTION_FAILURE);
707
708         fs.vp = NULL;
709         faultcount = 0;
710         nera = -1;
711         hardfault = false;
712
713 RetryFault:
714         oom = 0;
715 RetryFault_oom:
716
717         /*
718          * Find the backing store object and offset into it to begin the
719          * search.
720          */
721         fs.map = map;
722         result = vm_map_lookup(&fs.map, vaddr, fault_type |
723             VM_PROT_FAULT_LOOKUP, &fs.entry, &fs.first_object,
724             &fs.first_pindex, &prot, &wired);
725         if (result != KERN_SUCCESS) {
726                 unlock_vp(&fs);
727                 return (result);
728         }
729
730         fs.map_generation = fs.map->timestamp;
731
732         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
733                 panic("%s: fault on nofault entry, addr: %#lx",
734                     __func__, (u_long)vaddr);
735         }
736
737         if (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION &&
738             fs.entry->wiring_thread != curthread) {
739                 vm_map_unlock_read(fs.map);
740                 vm_map_lock(fs.map);
741                 if (vm_map_lookup_entry(fs.map, vaddr, &fs.entry) &&
742                     (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
743                         unlock_vp(&fs);
744                         fs.entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
745                         vm_map_unlock_and_wait(fs.map, 0);
746                 } else
747                         vm_map_unlock(fs.map);
748                 goto RetryFault;
749         }
750
751         MPASS((fs.entry->eflags & MAP_ENTRY_GUARD) == 0);
752
753         if (wired)
754                 fault_type = prot | (fault_type & VM_PROT_COPY);
755         else
756                 KASSERT((fault_flags & VM_FAULT_WIRE) == 0,
757                     ("!wired && VM_FAULT_WIRE"));
758
759         /*
760          * Try to avoid lock contention on the top-level object through
761          * special-case handling of some types of page faults, specifically,
762          * those that are mapping an existing page from the top-level object.
763          * Under this condition, a read lock on the object suffices, allowing
764          * multiple page faults of a similar type to run in parallel.
765          */
766         if (fs.vp == NULL /* avoid locked vnode leak */ &&
767             (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
768                 VM_OBJECT_RLOCK(fs.first_object);
769                 rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type,
770                     fault_flags, wired, m_hold);
771                 if (rv == KERN_SUCCESS)
772                         return (rv);
773                 if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
774                         VM_OBJECT_RUNLOCK(fs.first_object);
775                         VM_OBJECT_WLOCK(fs.first_object);
776                 }
777         } else {
778                 VM_OBJECT_WLOCK(fs.first_object);
779         }
780
781         /*
782          * Make a reference to this object to prevent its disposal while we
783          * are messing with it.  Once we have the reference, the map is free
784          * to be diddled.  Since objects reference their shadows (and copies),
785          * they will stay around as well.
786          *
787          * Bump the paging-in-progress count to prevent size changes (e.g. 
788          * truncation operations) during I/O.
789          */
790         vm_object_reference_locked(fs.first_object);
791         vm_object_pip_add(fs.first_object, 1);
792
793         fs.lookup_still_valid = true;
794
795         fs.m = fs.first_m = NULL;
796
797         /*
798          * Search for the page at object/offset.
799          */
800         fs.object = fs.first_object;
801         fs.pindex = fs.first_pindex;
802         while (TRUE) {
803                 KASSERT(fs.m == NULL,
804                     ("page still set %p at loop start", fs.m));
805                 /*
806                  * If the object is marked for imminent termination,
807                  * we retry here, since the collapse pass has raced
808                  * with us.  Otherwise, if we see terminally dead
809                  * object, return fail.
810                  */
811                 if ((fs.object->flags & OBJ_DEAD) != 0) {
812                         dead = fs.object->type == OBJT_DEAD;
813                         unlock_and_deallocate(&fs);
814                         if (dead)
815                                 return (KERN_PROTECTION_FAILURE);
816                         pause("vmf_de", 1);
817                         goto RetryFault;
818                 }
819
820                 /*
821                  * See if page is resident
822                  */
823                 fs.m = vm_page_lookup(fs.object, fs.pindex);
824                 if (fs.m != NULL) {
825                         /*
826                          * Wait/Retry if the page is busy.  We have to do this
827                          * if the page is either exclusive or shared busy
828                          * because the vm_pager may be using read busy for
829                          * pageouts (and even pageins if it is the vnode
830                          * pager), and we could end up trying to pagein and
831                          * pageout the same page simultaneously.
832                          *
833                          * We can theoretically allow the busy case on a read
834                          * fault if the page is marked valid, but since such
835                          * pages are typically already pmap'd, putting that
836                  * special case in might be more effort than it is
837                          * worth.  We cannot under any circumstances mess
838                          * around with a shared busied page except, perhaps,
839                          * to pmap it.
840                          */
841                         if (vm_page_tryxbusy(fs.m) == 0) {
842                                 /*
843                                  * Reference the page before unlocking and
844                                  * sleeping so that the page daemon is less
845                                  * likely to reclaim it.
846                                  */
847                                 vm_page_aflag_set(fs.m, PGA_REFERENCED);
848                                 if (fs.object != fs.first_object) {
849                                         fault_page_release(&fs.first_m);
850                                         vm_object_pip_wakeup(fs.first_object);
851                                 }
852                                 unlock_map(&fs);
853                                 vm_object_pip_wakeup(fs.object);
854                                 if (fs.m == vm_page_lookup(fs.object,
855                                     fs.pindex)) {
856                                         vm_page_sleep_if_busy(fs.m, "vmpfw");
857                                 }
858                                 VM_OBJECT_WUNLOCK(fs.object);
859                                 VM_CNT_INC(v_intrans);
860                                 vm_object_deallocate(fs.first_object);
861                                 goto RetryFault;
862                         }
863
864                         /*
865                          * The page is marked busy for other processes and the
866                          * pagedaemon.  If it still isn't completely valid
867                          * (readable), jump to readrest, else break out (we
868                          * found the page).
869                          */
870                         if (!vm_page_all_valid(fs.m))
871                                 goto readrest;
872                         break; /* break to PAGE HAS BEEN FOUND */
873                 }
874                 KASSERT(fs.m == NULL, ("fs.m should be NULL, not %p", fs.m));
875
876                 /*
877                  * Page is not resident.  If the pager might contain the page
878                  * or this is the beginning of the search, allocate a new
879                  * page.  (Default objects are zero-fill, so there is no real
880                  * pager for them.)
881                  */
882                 if (fs.object->type != OBJT_DEFAULT ||
883                     fs.object == fs.first_object) {
884                         if ((fs.object->flags & OBJ_SIZEVNLOCK) != 0) {
885                                 rv = vm_fault_lock_vnode(&fs);
886                                 MPASS(rv == KERN_SUCCESS ||
887                                     rv == KERN_RESOURCE_SHORTAGE);
888                                 if (rv == KERN_RESOURCE_SHORTAGE)
889                                         goto RetryFault;
890                         }
891                         if (fs.pindex >= fs.object->size) {
892                                 unlock_and_deallocate(&fs);
893                                 return (KERN_OUT_OF_BOUNDS);
894                         }
895
896                         if (fs.object == fs.first_object &&
897                             (fs.first_object->flags & OBJ_POPULATE) != 0 &&
898                             fs.first_object->shadow_count == 0) {
899                                 rv = vm_fault_populate(&fs, prot, fault_type,
900                                     fault_flags, wired, m_hold);
901                                 switch (rv) {
902                                 case KERN_SUCCESS:
903                                 case KERN_FAILURE:
904                                         unlock_and_deallocate(&fs);
905                                         return (rv);
906                                 case KERN_RESOURCE_SHORTAGE:
907                                         unlock_and_deallocate(&fs);
908                                         goto RetryFault;
909                                 case KERN_NOT_RECEIVER:
910                                         /*
911                                          * Pager's populate() method
912                                          * returned VM_PAGER_BAD.
913                                          */
914                                         break;
915                                 default:
916                                         panic("inconsistent return codes");
917                                 }
918                         }
919
920                         /*
921                          * Allocate a new page for this object/offset pair.
922                          *
923                          * Unlocked read of the p_flag is harmless. At
924                          * worst, the P_KILLED might not be observed
925                          * there, and allocation can fail, causing
926                          * restart and new reading of the p_flag.
927                          */
928                         dset = fs.object->domain.dr_policy;
929                         if (dset == NULL)
930                                 dset = curthread->td_domain.dr_policy;
931                         if (!vm_page_count_severe_set(&dset->ds_mask) ||
932                             P_KILLED(curproc)) {
933 #if VM_NRESERVLEVEL > 0
934                                 vm_object_color(fs.object, atop(vaddr) -
935                                     fs.pindex);
936 #endif
937                                 alloc_req = P_KILLED(curproc) ?
938                                     VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
939                                 if (fs.object->type != OBJT_VNODE &&
940                                     fs.object->backing_object == NULL)
941                                         alloc_req |= VM_ALLOC_ZERO;
942                                 fs.m = vm_page_alloc(fs.object, fs.pindex,
943                                     alloc_req);
944                         }
945                         if (fs.m == NULL) {
946                                 unlock_and_deallocate(&fs);
947                                 if (vm_pfault_oom_attempts < 0 ||
948                                     oom < vm_pfault_oom_attempts) {
949                                         oom++;
950                                         vm_waitpfault(dset,
951                                             vm_pfault_oom_wait * hz);
952                                         goto RetryFault_oom;
953                                 }
954                                 if (bootverbose)
955                                         printf(
956         "proc %d (%s) failed to alloc page on fault, starting OOM\n",
957                                             curproc->p_pid, curproc->p_comm);
958                                 vm_pageout_oom(VM_OOM_MEM_PF);
959                                 goto RetryFault;
960                         }
961                 }
962
963 readrest:
964                 /*
965                  * At this point, we have either allocated a new page or found
966                  * an existing page that is only partially valid.
967                  *
968                  * We hold a reference on the current object and the page is
969                  * exclusive busied.
970                  */
971
972                 /*
973                  * If the pager for the current object might have the page,
974                  * then determine the number of additional pages to read and
975                  * potentially reprioritize previously read pages for earlier
976                  * reclamation.  These operations should only be performed
977                  * once per page fault.  Even if the current pager doesn't
978                  * have the page, the number of additional pages to read will
979                  * apply to subsequent objects in the shadow chain.
980                  */
981                 if (fs.object->type != OBJT_DEFAULT && nera == -1 &&
982                     !P_KILLED(curproc)) {
983                         KASSERT(fs.lookup_still_valid, ("map unlocked"));
984                         era = fs.entry->read_ahead;
985                         behavior = vm_map_entry_behavior(fs.entry);
986                         if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
987                                 nera = 0;
988                         } else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
989                                 nera = VM_FAULT_READ_AHEAD_MAX;
990                                 if (vaddr == fs.entry->next_read)
991                                         vm_fault_dontneed(&fs, vaddr, nera);
992                         } else if (vaddr == fs.entry->next_read) {
993                                 /*
994                                  * This is a sequential fault.  Arithmetically
995                                  * increase the requested number of pages in
996                                  * the read-ahead window.  The requested
997                                  * number of pages is "# of sequential faults
998                                  * x (read ahead min + 1) + read ahead min"
999                                  */
1000                                 nera = VM_FAULT_READ_AHEAD_MIN;
1001                                 if (era > 0) {
1002                                         nera += era + 1;
1003                                         if (nera > VM_FAULT_READ_AHEAD_MAX)
1004                                                 nera = VM_FAULT_READ_AHEAD_MAX;
1005                                 }
1006                                 if (era == VM_FAULT_READ_AHEAD_MAX)
1007                                         vm_fault_dontneed(&fs, vaddr, nera);
1008                         } else {
1009                                 /*
1010                                  * This is a non-sequential fault.
1011                                  */
1012                                 nera = 0;
1013                         }
1014                         if (era != nera) {
1015                                 /*
1016                                  * A read lock on the map suffices to update
1017                                  * the read ahead count safely.
1018                                  */
1019                                 fs.entry->read_ahead = nera;
1020                         }
1021
1022                         /*
1023                          * Prepare for unlocking the map.  Save the map
1024                          * entry's start and end addresses, which are used to
1025                          * optimize the size of the pager operation below.
1026                          * Even if the map entry's addresses change after
1027                          * unlocking the map, using the saved addresses is
1028                          * safe.
1029                          */
1030                         e_start = fs.entry->start;
1031                         e_end = fs.entry->end;
1032                 }
1033
1034                 /*
1035                  * Call the pager to retrieve the page if there is a chance
1036                  * that the pager has it, and potentially retrieve additional
1037                  * pages at the same time.
1038                  */
1039                 if (fs.object->type != OBJT_DEFAULT) {
1040                         /*
1041                          * Release the map lock before locking the vnode or
1042                          * sleeping in the pager.  (If the current object has
1043                          * a shadow, then an earlier iteration of this loop
1044                          * may have already unlocked the map.)
1045                          */
1046                         unlock_map(&fs);
1047
1048                         rv = vm_fault_lock_vnode(&fs);
1049                         MPASS(rv == KERN_SUCCESS ||
1050                             rv == KERN_RESOURCE_SHORTAGE);
1051                         if (rv == KERN_RESOURCE_SHORTAGE)
1052                                 goto RetryFault;
1053                         KASSERT(fs.vp == NULL || !fs.map->system_map,
1054                             ("vm_fault: vnode-backed object mapped by system map"));
1055
1056                         /*
1057                          * Page in the requested page and hint the pager
1058                          * that it may bring up surrounding pages.
1059                          */
1060                         if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
1061                             P_KILLED(curproc)) {
1062                                 behind = 0;
1063                                 ahead = 0;
1064                         } else {
1065                                 /* Is this a sequential fault? */
1066                                 if (nera > 0) {
1067                                         behind = 0;
1068                                         ahead = nera;
1069                                 } else {
1070                                         /*
1071                                          * Request a cluster of pages that is
1072                                          * aligned to a VM_FAULT_READ_DEFAULT
1073                                          * page offset boundary within the
1074                                          * object.  Alignment to a page offset
1075                                          * boundary is more likely to coincide
1076                                          * with the underlying file system
1077                                          * block than alignment to a virtual
1078                                          * address boundary.
1079                                          */
1080                                         cluster_offset = fs.pindex %
1081                                             VM_FAULT_READ_DEFAULT;
1082                                         behind = ulmin(cluster_offset,
1083                                             atop(vaddr - e_start));
1084                                         ahead = VM_FAULT_READ_DEFAULT - 1 -
1085                                             cluster_offset;
1086                                 }
1087                                 ahead = ulmin(ahead, atop(e_end - vaddr) - 1);
1088                         }
1089                         rv = vm_pager_get_pages(fs.object, &fs.m, 1,
1090                             &behind, &ahead);
1091                         if (rv == VM_PAGER_OK) {
1092                                 faultcount = behind + 1 + ahead;
1093                                 hardfault = true;
1094                                 break; /* break to PAGE HAS BEEN FOUND */
1095                         }
1096                         if (rv == VM_PAGER_ERROR)
1097                                 printf("vm_fault: pager read error, pid %d (%s)\n",
1098                                     curproc->p_pid, curproc->p_comm);
1099
1100                         /*
1101                          * If an I/O error occurred or the requested page was
1102                          * outside the range of the pager, clean up and return
1103                          * an error.
1104                          */
1105                         if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
1106                                 fault_page_free(&fs.m);
1107                                 unlock_and_deallocate(&fs);
1108                                 return (KERN_OUT_OF_BOUNDS);
1109                         }
1110
1111                 }
1112
1113                 /*
1114                  * The requested page does not exist at this object/
1115                  * offset.  Remove the invalid page from the object,
1116                  * waking up anyone waiting for it, and continue on to
1117                  * the next object.  However, if this is the top-level
1118                  * object, we must leave the busy page in place to
1119                  * prevent another process from rushing past us, and
1120                  * inserting the page in that object at the same time
1121                  * that we are.
1122                  */
1123                 if (fs.object == fs.first_object) {
1124                         fs.first_m = fs.m;
1125                         fs.m = NULL;
1126                 } else
1127                         fault_page_free(&fs.m);
1128
1129                 /*
1130                  * Move on to the next object.  Lock the next object before
1131                  * unlocking the current one.
1132                  */
1133                 next_object = fs.object->backing_object;
1134                 if (next_object == NULL) {
1135                         /*
1136                          * If there's no object left, fill the page in the top
1137                          * object with zeros.
1138                          */
1139                         if (fs.object != fs.first_object) {
1140                                 vm_object_pip_wakeup(fs.object);
1141                                 VM_OBJECT_WUNLOCK(fs.object);
1142
1143                                 fs.object = fs.first_object;
1144                                 fs.pindex = fs.first_pindex;
1145                                 VM_OBJECT_WLOCK(fs.object);
1146                         }
1147                         MPASS(fs.first_m != NULL);
1148                         MPASS(fs.m == NULL);
1149                         fs.m = fs.first_m;
1150                         fs.first_m = NULL;
1151
1152                         /*
1153                          * Zero the page if necessary and mark it valid.
1154                          */
1155                         if ((fs.m->flags & PG_ZERO) == 0) {
1156                                 pmap_zero_page(fs.m);
1157                         } else {
1158                                 VM_CNT_INC(v_ozfod);
1159                         }
1160                         VM_CNT_INC(v_zfod);
1161                         vm_page_valid(fs.m);
1162                         /* Don't try to prefault neighboring pages. */
1163                         faultcount = 1;
1164                         break;  /* break to PAGE HAS BEEN FOUND */
1165                 } else {
1166                         MPASS(fs.first_m != NULL);
1167                         KASSERT(fs.object != next_object,
1168                             ("object loop %p", next_object));
1169                         VM_OBJECT_WLOCK(next_object);
1170                         vm_object_pip_add(next_object, 1);
1171                         if (fs.object != fs.first_object)
1172                                 vm_object_pip_wakeup(fs.object);
1173                         fs.pindex +=
1174                             OFF_TO_IDX(fs.object->backing_object_offset);
1175                         VM_OBJECT_WUNLOCK(fs.object);
1176                         fs.object = next_object;
1177                 }
1178         }
1179
1180         vm_page_assert_xbusied(fs.m);
1181
1182         /*
1183          * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
1184          * is held.]
1185          */
1186
1187         /*
1188          * If the page is being written, but isn't already owned by the
1189          * top-level object, we have to copy it into a new page owned by the
1190          * top-level object.
1191          */
1192         if (fs.object != fs.first_object) {
1193                 /*
1194                  * We only really need to copy if we want to write it.
1195                  */
1196                 if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
1197                         /*
1198                          * This allows pages to be virtually copied from a 
1199                          * backing_object into the first_object, where the 
1200                          * backing object has no other refs to it, and cannot
1201                          * gain any more refs.  Instead of a bcopy, we just 
1202                          * move the page from the backing object to the 
1203                          * first object.  Note that we must mark the page 
1204                          * dirty in the first object so that it will go out 
1205                          * to swap when needed.
1206                          */
1207                         is_first_object_locked = false;
1208                         if (
1209                                 /*
1210                                  * Only one shadow object
1211                                  */
1212                                 (fs.object->shadow_count == 1) &&
1213                                 /*
1214                                  * No COW refs, except us
1215                                  */
1216                                 (fs.object->ref_count == 1) &&
1217                                 /*
1218                                  * No one else can look this object up
1219                                  */
1220                                 (fs.object->handle == NULL) &&
1221                                 /*
1222                                  * No other ways to look the object up
1223                                  */
1224                                 ((fs.object->flags & OBJ_ANON) != 0) &&
1225                             (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
1226                                 /*
1227                                  * We don't chase down the shadow chain
1228                                  */
1229                             fs.object == fs.first_object->backing_object) {
1230
1231                                 (void)vm_page_remove(fs.m);
1232                                 vm_page_replace_checked(fs.m, fs.first_object,
1233                                     fs.first_pindex, fs.first_m);
1234                                 vm_page_free(fs.first_m);
1235                                 vm_page_dirty(fs.m);
1236 #if VM_NRESERVLEVEL > 0
1237                                 /*
1238                                  * Rename the reservation.
1239                                  */
1240                                 vm_reserv_rename(fs.m, fs.first_object,
1241                                     fs.object, OFF_TO_IDX(
1242                                     fs.first_object->backing_object_offset));
1243 #endif
1244                                 VM_OBJECT_WUNLOCK(fs.object);
1245                                 fs.first_m = fs.m;
1246                                 fs.m = NULL;
1247                                 VM_CNT_INC(v_cow_optim);
1248                         } else {
1249                                 VM_OBJECT_WUNLOCK(fs.object);
1250                                 /*
1251                  * Oh, well, let's copy it.
1252                                  */
1253                                 pmap_copy_page(fs.m, fs.first_m);
1254                                 vm_page_valid(fs.first_m);
1255                                 if (wired && (fault_flags &
1256                                     VM_FAULT_WIRE) == 0) {
1257                                         vm_page_wire(fs.first_m);
1258                                         vm_page_unwire(fs.m, PQ_INACTIVE);
1259                                 }
1260                                 /*
1261                                  * We no longer need the old page or object.
1262                                  */
1263                                 fault_page_release(&fs.m);
1264                         }
1265                         /*
1266                          * fs.object != fs.first_object due to the
1267                          * above conditional.
1268                          */
1269                         vm_object_pip_wakeup(fs.object);
1270
1271                         /*
1272                          * We only try to prefault read-only mappings to the
1273                          * neighboring pages when this copy-on-write fault is
1274                          * a hard fault.  In other cases, trying to prefault
1275                          * is typically wasted effort.
1276                          */
1277                         if (faultcount == 0)
1278                                 faultcount = 1;
1279
1280                         /*
1281                          * Only use the new page below...
1282                          */
1283                         fs.object = fs.first_object;
1284                         fs.pindex = fs.first_pindex;
1285                         fs.m = fs.first_m;
1286                         if (!is_first_object_locked)
1287                                 VM_OBJECT_WLOCK(fs.object);
1288                         VM_CNT_INC(v_cow_faults);
1289                         curthread->td_cow++;
1290                 } else {
1291                         prot &= ~VM_PROT_WRITE;
1292                 }
1293         }
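        /*
         * Illustrative sketch (not used by the code above; the helper
         * name is hypothetical): the conditions that let the COW fault
         * move the page instead of copying it, collected into a single
         * predicate mirroring the four tests on fs.object above:
         *
         *      static bool
         *      cow_can_move_page(vm_object_t obj)
         *      {
         *              return (obj->shadow_count == 1 &&
         *                  obj->ref_count == 1 &&
         *                  obj->handle == NULL &&
         *                  (obj->flags & OBJ_ANON) != 0);
         *      }
         */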
1294
1295         /*
1296          * We must verify that the maps have not changed since our last
1297          * lookup.
1298          */
1299         if (!fs.lookup_still_valid) {
1300                 if (!vm_map_trylock_read(fs.map)) {
1301                         unlock_and_deallocate(&fs);
1302                         goto RetryFault;
1303                 }
1304                 fs.lookup_still_valid = true;
1305                 if (fs.map->timestamp != fs.map_generation) {
1306                         result = vm_map_lookup_locked(&fs.map, vaddr, fault_type,
1307                             &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);
1308
1309                         /*
1310                          * If we don't need the page any longer, put it on the inactive
1311                          * list (the easiest thing to do here).  If no one needs it,
1312                          * pageout will grab it eventually.
1313                          */
1314                         if (result != KERN_SUCCESS) {
1315                                 unlock_and_deallocate(&fs);
1316
1317                                 /*
1318                  * If a retry of the map lookup would have blocked,
1319                  * then retry the fault from the start.
1320                                  */
1321                                 if (result == KERN_FAILURE)
1322                                         goto RetryFault;
1323                                 return (result);
1324                         }
1325                         if ((retry_object != fs.first_object) ||
1326                             (retry_pindex != fs.first_pindex)) {
1327                                 unlock_and_deallocate(&fs);
1328                                 goto RetryFault;
1329                         }
1330
1331                         /*
1332                          * Check whether the protection has changed or the object has
1333                          * been copied while we left the map unlocked. Changing from
1334                          * read to write permission is OK - we leave the page
1335                          * write-protected, and catch the write fault. Changing from
1336                          * write to read permission means that we can't mark the page
1337                          * write-enabled after all.
1338                          */
1339                         prot &= retry_prot;
1340                         fault_type &= retry_prot;
1341                         if (prot == 0) {
1342                                 unlock_and_deallocate(&fs);
1343                                 goto RetryFault;
1344                         }
1345
1346                         /* Reassert because wired may have changed. */
1347                         KASSERT(wired || (fault_flags & VM_FAULT_WIRE) == 0,
1348                             ("!wired && VM_FAULT_WIRE"));
1349                 }
1350         }
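        /*
         * A concrete instance of the recheck above: if the entry was
         * mprotect()ed from VM_PROT_READ | VM_PROT_WRITE down to
         * VM_PROT_READ while the map was unlocked, then "prot &=
         * retry_prot" leaves only VM_PROT_READ, the page is entered
         * read-only below, and a subsequent write simply faults again.
         */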
1351
1352         /*
1353          * If the page was filled by a pager, save the virtual address that
1354          * should be faulted on next under a sequential access pattern to the
1355          * map entry.  A read lock on the map suffices to update this address
1356          * safely.
1357          */
1358         if (hardfault)
1359                 fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;
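        /*
         * Concretely, with 4KB pages and ahead == 7, the address recorded
         * above is vaddr + ptoa(7) + PAGE_SIZE == vaddr + 0x8000, i.e. the
         * first byte past the readahead window that was just filled.
         */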
1360
1361         vm_page_assert_xbusied(fs.m);
1362         vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags);
1363
1364         /*
1365          * Page must be completely valid or it is not fit to
1366          * map into user space.  vm_pager_get_pages() ensures this.
1367          */
1368         KASSERT(vm_page_all_valid(fs.m),
1369             ("vm_fault: page %p partially invalid", fs.m));
1370         VM_OBJECT_WUNLOCK(fs.object);
1371
1372         /*
1373          * Put this page into the physical map.  We had to do the unlock above
1374          * because pmap_enter() may sleep.  We don't put the page
1375          * back on the active queue until later so that the pageout daemon
1376          * won't find it (yet).
1377          */
1378         pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
1379             fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
1380         if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 &&
1381             wired == 0)
1382                 vm_fault_prefault(&fs, vaddr,
1383                     faultcount > 0 ? behind : PFBAK,
1384                     faultcount > 0 ? ahead : PFFOR, false);
1385
1386         /*
1387          * If the page is not wired down, then put it where the pageout daemon
1388          * can find it.
1389          */
1390         if ((fault_flags & VM_FAULT_WIRE) != 0) {
1391                 vm_page_wire(fs.m);
1392         } else {
1393                 vm_page_lock(fs.m);
1394                 vm_page_activate(fs.m);
1395                 vm_page_unlock(fs.m);
1396         }
1397         if (m_hold != NULL) {
1398                 *m_hold = fs.m;
1399                 vm_page_wire(fs.m);
1400         }
1401         vm_page_xunbusy(fs.m);
1402         fs.m = NULL;
1403
1404         /*
1405          * Unlock everything, and return
1406          */
1407         fault_deallocate(&fs);
1408         if (hardfault) {
1409                 VM_CNT_INC(v_io_faults);
1410                 curthread->td_ru.ru_majflt++;
1411 #ifdef RACCT
1412                 if (racct_enable && fs.object->type == OBJT_VNODE) {
1413                         PROC_LOCK(curproc);
1414                         if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
1415                                 racct_add_force(curproc, RACCT_WRITEBPS,
1416                                     PAGE_SIZE + behind * PAGE_SIZE);
1417                                 racct_add_force(curproc, RACCT_WRITEIOPS, 1);
1418                         } else {
1419                                 racct_add_force(curproc, RACCT_READBPS,
1420                                     PAGE_SIZE + ahead * PAGE_SIZE);
1421                                 racct_add_force(curproc, RACCT_READIOPS, 1);
1422                         }
1423                         PROC_UNLOCK(curproc);
1424                 }
1425 #endif
1426         } else
1427                 curthread->td_ru.ru_minflt++;
1428
1429         return (KERN_SUCCESS);
1430 }
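/*
 * Illustrative sketch, not part of this file: a machine-dependent trap
 * handler resolves a user-mode fault roughly along these lines.  The fault
 * address "far" and fault type "ftype" are hypothetical names, and the
 * signal delivery on failure is elided.
 */
#if 0
	rv = vm_fault(&curproc->p_vmspace->vm_map, trunc_page(far), ftype,
	    VM_FAULT_NORMAL, NULL);
	if (rv != KERN_SUCCESS) {
		/* Map rv to SIGSEGV or SIGBUS and post the signal. */
	}
#endif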
1431
1432 /*
1433  * Speed up the reclamation of pages that precede the faulting pindex within
1434  * the first object of the shadow chain.  Essentially, perform the equivalent
1435  * of madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
1436  * the faulting pindex by the cluster size when the pages read by vm_fault()
1437  * cross a cluster-size boundary.  The cluster size is the greater of the
1438  * smallest superpage size and VM_FAULT_DONTNEED_MIN.
1439  *
1440  * When "fs->first_object" is a shadow object, the pages in the backing object
1441  * that precede the faulting pindex are deactivated by vm_fault().  So, this
1442  * function must only be concerned with pages in the first object.
1443  */
1444 static void
1445 vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
1446 {
1447         vm_map_entry_t entry;
1448         vm_object_t first_object, object;
1449         vm_offset_t end, start;
1450         vm_page_t m, m_next;
1451         vm_pindex_t pend, pstart;
1452         vm_size_t size;
1453
1454         object = fs->object;
1455         VM_OBJECT_ASSERT_WLOCKED(object);
1456         first_object = fs->first_object;
1457         if (first_object != object) {
1458                 if (!VM_OBJECT_TRYWLOCK(first_object)) {
1459                         VM_OBJECT_WUNLOCK(object);
1460                         VM_OBJECT_WLOCK(first_object);
1461                         VM_OBJECT_WLOCK(object);
1462                 }
1463         }
1464         /* Neither fictitious nor unmanaged pages can be reclaimed. */
1465         if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
1466                 size = VM_FAULT_DONTNEED_MIN;
1467                 if (MAXPAGESIZES > 1 && size < pagesizes[1])
1468                         size = pagesizes[1];
1469                 end = rounddown2(vaddr, size);
1470                 if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
1471                     (entry = fs->entry)->start < end) {
1472                         if (end - entry->start < size)
1473                                 start = entry->start;
1474                         else
1475                                 start = end - size;
1476                         pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
1477                         pstart = OFF_TO_IDX(entry->offset) + atop(start -
1478                             entry->start);
1479                         m_next = vm_page_find_least(first_object, pstart);
1480                         pend = OFF_TO_IDX(entry->offset) + atop(end -
1481                             entry->start);
1482                         while ((m = m_next) != NULL && m->pindex < pend) {
1483                                 m_next = TAILQ_NEXT(m, listq);
1484                                 if (!vm_page_all_valid(m) ||
1485                                     vm_page_busied(m))
1486                                         continue;
1487
1488                                 /*
1489                                  * Don't clear PGA_REFERENCED, since it would
1490                                  * likely represent a reference by a different
1491                                  * process.
1492                                  *
1493                                  * Typically, at this point, prefetched pages
1494                                  * are still in the inactive queue.  Only
1495                                  * pages that triggered page faults are in the
1496                                  * active queue.
1497                                  */
1498                                 vm_page_lock(m);
1499                                 if (!vm_page_inactive(m))
1500                                         vm_page_deactivate(m);
1501                                 vm_page_unlock(m);
1502                         }
1503                 }
1504         }
1505         if (first_object != object)
1506                 VM_OBJECT_WUNLOCK(first_object);
1507 }
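/*
 * Worked example for the cluster arithmetic above (illustrative): on amd64,
 * pagesizes[1] is 2MB, so, assuming VM_FAULT_DONTNEED_MIN is no larger, the
 * cluster size is 2MB.  For vaddr == 0x40301000, end == rounddown2(vaddr,
 * 2MB) == 0x40200000, and the pages advised MADV_DONTNEED lie in the 2MB
 * preceding "end", clipped to the map entry's start.
 */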
1508
1509 /*
1510  * vm_fault_prefault provides a quick way of clustering
1511  * page faults into a process's address space.  It is a "cousin"
1512  * of vm_map_pmap_enter, except it runs at page fault time instead
1513  * of mmap time.
1514  */
1515 static void
1516 vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
1517     int backward, int forward, bool obj_locked)
1518 {
1519         pmap_t pmap;
1520         vm_map_entry_t entry;
1521         vm_object_t backing_object, lobject;
1522         vm_offset_t addr, starta;
1523         vm_pindex_t pindex;
1524         vm_page_t m;
1525         int i;
1526
1527         pmap = fs->map->pmap;
1528         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
1529                 return;
1530
1531         entry = fs->entry;
1532
1533         if (addra < backward * PAGE_SIZE) {
1534                 starta = entry->start;
1535         } else {
1536                 starta = addra - backward * PAGE_SIZE;
1537                 if (starta < entry->start)
1538                         starta = entry->start;
1539         }
1540
1541         /*
1542          * Generate the sequence of virtual addresses that are candidates for
1543          * prefaulting in an outward spiral from the faulting virtual address,
1544          * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
1545          * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
1546          * If the candidate address doesn't have a backing physical page, then
1547          * the loop immediately terminates.
1548          */
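        /*
         * For instance, with backward == 2 and forward == 2 the loop
         * below visits addra - PAGE_SIZE, addra + PAGE_SIZE,
         * addra - 2 * PAGE_SIZE, and addra + 2 * PAGE_SIZE, in that
         * order.
         */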
1549         for (i = 0; i < 2 * imax(backward, forward); i++) {
1550                 addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
1551                     PAGE_SIZE);
1552                 if (addr > addra + forward * PAGE_SIZE)
1553                         addr = 0;
1554
1555                 if (addr < starta || addr >= entry->end)
1556                         continue;
1557
1558                 if (!pmap_is_prefaultable(pmap, addr))
1559                         continue;
1560
1561                 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
1562                 lobject = entry->object.vm_object;
1563                 if (!obj_locked)
1564                         VM_OBJECT_RLOCK(lobject);
1565                 while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
1566                     lobject->type == OBJT_DEFAULT &&
1567                     (backing_object = lobject->backing_object) != NULL) {
1568                         KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
1569                             0, ("vm_fault_prefault: unaligned object offset"));
1570                         pindex += lobject->backing_object_offset >> PAGE_SHIFT;
1571                         VM_OBJECT_RLOCK(backing_object);
1572                         if (!obj_locked || lobject != entry->object.vm_object)
1573                                 VM_OBJECT_RUNLOCK(lobject);
1574                         lobject = backing_object;
1575                 }
1576                 if (m == NULL) {
1577                         if (!obj_locked || lobject != entry->object.vm_object)
1578                                 VM_OBJECT_RUNLOCK(lobject);
1579                         break;
1580                 }
1581                 if (vm_page_all_valid(m) &&
1582                     (m->flags & PG_FICTITIOUS) == 0)
1583                         pmap_enter_quick(pmap, addr, m, entry->protection);
1584                 if (!obj_locked || lobject != entry->object.vm_object)
1585                         VM_OBJECT_RUNLOCK(lobject);
1586         }
1587 }
1588
1589 /*
1590  * Hold each of the physical pages that are mapped by the specified range of
1591  * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
1592  * and allow the specified types of access, "prot".  If all of the implied
1593  * pages are successfully held, then the number of held pages is returned
1594  * together with pointers to those pages in the array "ma".  However, if any
1595  * of the pages cannot be held, -1 is returned.
1596  */
1597 int
1598 vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
1599     vm_prot_t prot, vm_page_t *ma, int max_count)
1600 {
1601         vm_offset_t end, va;
1602         vm_page_t *mp;
1603         int count;
1604         boolean_t pmap_failed;
1605
1606         if (len == 0)
1607                 return (0);
1608         end = round_page(addr + len);
1609         addr = trunc_page(addr);
1610
1611         /*
1612          * Check for illegal addresses.
1613          */
1614         if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map))
1615                 return (-1);
1616
1617         if (atop(end - addr) > max_count)
1618                 panic("vm_fault_quick_hold_pages: count > max_count");
1619         count = atop(end - addr);
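        /*
         * For example, addr == 0x12340ff0 with len == 0x20 crosses a page
         * boundary: addr truncates to 0x12340000, end rounds up to
         * 0x12342000, and count == atop(0x2000) == 2.
         */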
1620
1621         /*
1622          * Most likely, the physical pages are resident in the pmap, so it is
1623          * faster to try pmap_extract_and_hold() first.
1624          */
1625         pmap_failed = FALSE;
1626         for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
1627                 *mp = pmap_extract_and_hold(map->pmap, va, prot);
1628                 if (*mp == NULL)
1629                         pmap_failed = TRUE;
1630                 else if ((prot & VM_PROT_WRITE) != 0 &&
1631                     (*mp)->dirty != VM_PAGE_BITS_ALL) {
1632                         /*
1633                          * Explicitly dirty the physical page.  Otherwise, the
1634                          * caller's changes may go unnoticed because they are
1635                          * performed through an unmanaged mapping or by a DMA
1636                          * operation.
1637                          *
1638                          * The object lock is not held here.
1639                          * See vm_page_clear_dirty_mask().
1640                          */
1641                         vm_page_dirty(*mp);
1642                 }
1643         }
1644         if (pmap_failed) {
1645                 /*
1646                  * One or more pages could not be held by the pmap.  Either no
1647                  * page was mapped at the specified virtual address or that
1648                  * mapping had insufficient permissions.  Attempt to fault in
1649                  * and hold these pages.
1650                  *
1651                  * If vm_fault_disable_pagefaults() was called,
1652                  * i.e., TDP_NOFAULTING is set, we must not sleep nor
1653                  * acquire MD VM locks, which means we must not call
1654          * vm_fault().  Some (out-of-tree) callers already mark
1655          * too wide a code area with vm_fault_disable_pagefaults();
1656          * such callers should pass the VM_PROT_QUICK_NOFAULT flag
1657          * to request the proper behaviour explicitly.
1658                  */
1659                 if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
1660                     (curthread->td_pflags & TDP_NOFAULTING) != 0)
1661                         goto error;
1662                 for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
1663                         if (*mp == NULL && vm_fault(map, va, prot,
1664                             VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
1665                                 goto error;
1666         }
1667         return (count);
1668 error:
1669         for (mp = ma; mp < ma + count; mp++)
1670                 if (*mp != NULL)
1671                         vm_page_unwire(*mp, PQ_INACTIVE);
1672         return (-1);
1673 }
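/*
 * Illustrative sketch of a typical consumer (the helper below is
 * hypothetical): wire down a user buffer for a transfer and release the
 * pages with vm_page_unhold_pages() when done.
 */
#if 0
static int
example_hold_user_buffer(vm_map_t map, vm_offset_t uaddr, vm_size_t len)
{
	vm_page_t ma[16];
	int count;

	count = vm_fault_quick_hold_pages(map, uaddr, len, VM_PROT_READ,
	    ma, nitems(ma));
	if (count == -1)
		return (EFAULT);
	/* ... perform the transfer against the held pages ... */
	vm_page_unhold_pages(ma, count);
	return (0);
}
#endif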
1674
1675 /*
1676  *      Routine:
1677  *              vm_fault_copy_entry
1678  *      Function:
1679  *              Create new shadow object backing dst_entry with private copy of
1680  *              all underlying pages. When src_entry is equal to dst_entry,
1681  *              the function implements COW for a wired-down map entry.
1682  *              Otherwise, it forks the wired entry into dst_map.
1683  *
1684  *      In/out conditions:
1685  *              The source and destination maps must be locked for write.
1686  *              The source map entry must be wired down (or be a sharing map
1687  *              entry corresponding to a main map entry that is wired down).
1688  */
1689 void
1690 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1691     vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
1692     vm_ooffset_t *fork_charge)
1693 {
1694         vm_object_t backing_object, dst_object, object, src_object;
1695         vm_pindex_t dst_pindex, pindex, src_pindex;
1696         vm_prot_t access, prot;
1697         vm_offset_t vaddr;
1698         vm_page_t dst_m;
1699         vm_page_t src_m;
1700         boolean_t upgrade;
1701
1702 #ifdef  lint
1703         src_map++;
1704 #endif  /* lint */
1705
1706         upgrade = src_entry == dst_entry;
1707         access = prot = dst_entry->protection;
1708
1709         src_object = src_entry->object.vm_object;
1710         src_pindex = OFF_TO_IDX(src_entry->offset);
1711
1712         if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
1713                 dst_object = src_object;
1714                 vm_object_reference(dst_object);
1715         } else {
1716                 /*
1717                  * Create the top-level object for the destination entry.
1718                  * Doesn't actually shadow anything - we copy the pages
1719                  * directly.
1720                  */
1721                 dst_object = vm_object_allocate_anon(atop(dst_entry->end -
1722                     dst_entry->start), NULL, NULL, 0);
1723 #if VM_NRESERVLEVEL > 0
1724                 dst_object->flags |= OBJ_COLORED;
1725                 dst_object->pg_color = atop(dst_entry->start);
1726 #endif
1727                 dst_object->domain = src_object->domain;
1728                 dst_object->charge = dst_entry->end - dst_entry->start;
1729         }
1730
1731         VM_OBJECT_WLOCK(dst_object);
1732         KASSERT(upgrade || dst_entry->object.vm_object == NULL,
1733             ("vm_fault_copy_entry: vm_object not NULL"));
1734         if (src_object != dst_object) {
1735                 dst_entry->object.vm_object = dst_object;
1736                 dst_entry->offset = 0;
1737                 dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
1738         }
1739         if (fork_charge != NULL) {
1740                 KASSERT(dst_entry->cred == NULL,
1741                     ("vm_fault_copy_entry: leaked swp charge"));
1742                 dst_object->cred = curthread->td_ucred;
1743                 crhold(dst_object->cred);
1744                 *fork_charge += dst_object->charge;
1745         } else if ((dst_object->type == OBJT_DEFAULT ||
1746             dst_object->type == OBJT_SWAP) &&
1747             dst_object->cred == NULL) {
1748                 KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
1749                     dst_entry));
1750                 dst_object->cred = dst_entry->cred;
1751                 dst_entry->cred = NULL;
1752         }
1753
1754         /*
1755          * If not an upgrade, then enter the mappings in the pmap as
1756          * read and/or execute accesses.  Otherwise, enter them as
1757          * write accesses.
1758          *
1759          * A writeable large page mapping is only created if all of
1760          * the constituent small page mappings are modified. Marking
1761          * PTEs as modified on inception allows promotion to happen
1762          * without taking a potentially large number of soft faults.
1763          */
1764         if (!upgrade)
1765                 access &= ~VM_PROT_WRITE;
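	/*
	 * For example, when forking a wired VM_PROT_READ | VM_PROT_WRITE
	 * entry (upgrade == false), the mappings below are entered with
	 * access == VM_PROT_READ, so the first write through them takes a
	 * soft fault; an upgrade keeps VM_PROT_WRITE, creating the PTEs
	 * already marked modified.
	 */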
1766
1767         /*
1768          * Loop through all of the virtual pages within the entry's
1769          * range, copying each page from the source object to the
1770          * destination object.  Since the source is wired, those pages
1771          * must exist.  In contrast, the destination is pageable.
1772          * Since the destination object doesn't share any backing storage
1773          * with the source object, all of its pages must be dirtied,
1774          * regardless of whether they can be written.
1775          */
1776         for (vaddr = dst_entry->start, dst_pindex = 0;
1777             vaddr < dst_entry->end;
1778             vaddr += PAGE_SIZE, dst_pindex++) {
1779 again:
1780                 /*
1781                  * Find the page in the source object, and copy it in.
1782                  * Because the source is wired down, the page will be
1783                  * in memory.
1784                  */
1785                 if (src_object != dst_object)
1786                         VM_OBJECT_RLOCK(src_object);
1787                 object = src_object;
1788                 pindex = src_pindex + dst_pindex;
1789                 while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
1790                     (backing_object = object->backing_object) != NULL) {
1791                         /*
1792                          * Unless the source mapping is read-only or
1793                          * it is presently being upgraded from
1794                          * read-only, the first object in the shadow
1795                          * chain should provide all of the pages.  In
1796                          * other words, this loop body should never be
1797                          * executed when the source mapping is already
1798                          * read/write.
1799                          */
1800                         KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
1801                             upgrade,
1802                             ("vm_fault_copy_entry: main object missing page"));
1803
1804                         VM_OBJECT_RLOCK(backing_object);
1805                         pindex += OFF_TO_IDX(object->backing_object_offset);
1806                         if (object != dst_object)
1807                                 VM_OBJECT_RUNLOCK(object);
1808                         object = backing_object;
1809                 }
1810                 KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));
1811
1812                 if (object != dst_object) {
1813                         /*
1814                          * Allocate a page in the destination object.
1815                          */
1816                         dst_m = vm_page_alloc(dst_object, (src_object ==
1817                             dst_object ? src_pindex : 0) + dst_pindex,
1818                             VM_ALLOC_NORMAL);
1819                         if (dst_m == NULL) {
1820                                 VM_OBJECT_WUNLOCK(dst_object);
1821                                 VM_OBJECT_RUNLOCK(object);
1822                                 vm_wait(dst_object);
1823                                 VM_OBJECT_WLOCK(dst_object);
1824                                 goto again;
1825                         }
1826                         pmap_copy_page(src_m, dst_m);
1827                         VM_OBJECT_RUNLOCK(object);
1828                         dst_m->dirty = dst_m->valid = src_m->valid;
1829                 } else {
1830                         dst_m = src_m;
1831                         if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
1832                                 goto again;
1833                         if (dst_m->pindex >= dst_object->size) {
1834                                 /*
1835                                  * We are upgrading.  The index can fall
1836                                  * out of bounds if the object type is
1837                                  * vnode and the file was truncated.
1838                                  */
1839                                 vm_page_xunbusy(dst_m);
1840                                 break;
1841                         }
1842                 }
1843                 VM_OBJECT_WUNLOCK(dst_object);
1844
1845                 /*
1846                  * Enter it in the pmap. If a wired, copy-on-write
1847                  * mapping is being replaced by a write-enabled
1848                  * mapping, then wire that new mapping.
1849                  *
1850                  * The page can be invalid if the user called
1851                  * msync(MS_INVALIDATE) or truncated the backing vnode
1852                  * or shared memory object.  In this case, do not
1853                  * insert it into pmap, but still do the copy so that
1854                  * all copies of the wired map entry have similar
1855                  * backing pages.
1856                  */
1857                 if (vm_page_all_valid(dst_m)) {
1858                         pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
1859                             access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
1860                 }
1861
1862                 /*
1863                  * Mark it no longer busy, and put it on the active list.
1864                  */
1865                 VM_OBJECT_WLOCK(dst_object);
1866
1867                 if (upgrade) {
1868                         if (src_m != dst_m) {
1869                                 vm_page_unwire(src_m, PQ_INACTIVE);
1870                                 vm_page_wire(dst_m);
1871                         } else {
1872                                 KASSERT(vm_page_wired(dst_m),
1873                                     ("dst_m %p is not wired", dst_m));
1874                         }
1875                 } else {
1876                         vm_page_lock(dst_m);
1877                         vm_page_activate(dst_m);
1878                         vm_page_unlock(dst_m);
1879                 }
1880                 vm_page_xunbusy(dst_m);
1881         }
1882         VM_OBJECT_WUNLOCK(dst_object);
1883         if (upgrade) {
1884                 dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
1885                 vm_object_deallocate(src_object);
1886         }
1887 }
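/*
 * Usage note (illustrative; the real call sites live in vm_map.c): the
 * COW-upgrade form passes the same map and entry twice, roughly
 *
 *	vm_fault_copy_entry(map, map, entry, entry, NULL);
 *
 * whereas forking a wired entry passes distinct source and destination
 * entries together with a fork_charge accumulator.
 */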
1888
1889 /*
1890  * Block entry into the machine-independent layer's page fault handler by
1891  * the calling thread.  Subsequent calls to vm_fault() by that thread will
1892  * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
1893  * spurious page faults.
1894  */
1895 int
1896 vm_fault_disable_pagefaults(void)
1897 {
1898
1899         return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
1900 }
1901
1902 void
1903 vm_fault_enable_pagefaults(int save)
1904 {
1905
1906         curthread_pflags_restore(save);
1907 }
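/*
 * Illustrative usage of the pair above (variable names are hypothetical):
 * bracket a region that must not recurse into vm_fault(), e.g. a uiomove()
 * performed while locks are held.  Under TDP_NOFAULTING the copy fails
 * with EFAULT instead of faulting, and the caller can fall back to a path
 * that may safely sleep.
 */
#if 0
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = uiomove(buf, len, uio);
	vm_fault_enable_pagefaults(save);
	if (error == EFAULT) {
		/* Retry via a fault-tolerant slow path. */
	}
#endif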