/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 * Copyright (c) 2003 Peter Wemm
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#define	AMD64_NPT_AWARE

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 *	Manages physical address maps.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	soon as possible.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidation expensive,
 *	this module may delay invalidation or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and of when physical maps must be made correct.
 */
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/bitstring.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/turnstile.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
static __inline boolean_t
pmap_type_guest(pmap_t pmap)

	return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));

static __inline boolean_t
pmap_emulate_ad_bits(pmap_t pmap)

	return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0);

static __inline pt_entry_t
pmap_valid_bit(pmap_t pmap)

	switch (pmap->pm_type) {
		if (pmap_emulate_ad_bits(pmap))
			mask = EPT_PG_EMUL_V;
		panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type);

static __inline pt_entry_t
pmap_rw_bit(pmap_t pmap)

	switch (pmap->pm_type) {
		if (pmap_emulate_ad_bits(pmap))
			mask = EPT_PG_EMUL_RW;
		panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type);

static __inline pt_entry_t
pmap_global_bit(pmap_t pmap)

	switch (pmap->pm_type) {
		panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type);

static __inline pt_entry_t
pmap_accessed_bit(pmap_t pmap)

	switch (pmap->pm_type) {
		if (pmap_emulate_ad_bits(pmap))
		panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type);

static __inline pt_entry_t
pmap_modified_bit(pmap_t pmap)

	switch (pmap->pm_type) {
		if (pmap_emulate_ad_bits(pmap))
		panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type);
#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE	extern inline
#endif
#else
#define PMAP_INLINE
#endif

#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif
#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])

#define	NPV_LIST_LOCKS	MAXCPU

#define	PHYS_TO_PV_LIST_LOCK(pa)	\
			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
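/*
 * The PV list locks form a fixed pool of NPV_LIST_LOCKS rwlocks.  A
 * physical address is hashed to a lock by its 2MB superpage index
 * (pa_index()) modulo the pool size, so all 4KB pages within the same
 * 2MB frame share one lock.
 */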
#define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
	struct rwlock **_lockp = (lockp);		\
	struct rwlock *_new_lock;			\
							\
	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
	if (_new_lock != *_lockp) {			\
		if (*_lockp != NULL)			\
			rw_wunlock(*_lockp);		\
		*_lockp = _new_lock;			\

#define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))

#define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
	struct rwlock **_lockp = (lockp);		\
							\
	if (*_lockp != NULL) {				\
		rw_wunlock(*_lockp);			\

#define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
struct pmap kernel_pmap_store;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
    "Number of kernel page table pages allocated on bootup");

vm_paddr_t dmaplimit;
vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

static int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
    "Is page attribute table fully functional?");

static int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0, "Are large page mappings enabled?");

#define	PAT_INDEX_SIZE	8
static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */

static u_int64_t	KPTphys;	/* phys addr of kernel level 1 */
static u_int64_t	KPDphys;	/* phys addr of kernel level 2 */
u_int64_t		KPDPphys;	/* phys addr of kernel level 3 */
u_int64_t		KPML4phys;	/* phys addr of kernel level 4 */

static u_int64_t	DMPDphys;	/* phys addr of direct mapped level 2 */
static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
static int		ndmpdpphys;	/* number of DMPDPphys pages */
/*
 * pmap_mapdev() support prior to full pmap initialization (e.g., for the
 * early console).
 */
#define	PMAP_PREINIT_MAPPING_COUNT	8
static struct pmap_preinit_mapping {
} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
static int pmap_initialized;
/*
 * Data for the pv entry allocation mechanism.
 * Updates to pv_invl_gen are protected by the pv_list_locks[]
 * elements, but reads are not.
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static struct mtx __exclusive_cache_line pv_chunks_mutex;
static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
static u_long pv_invl_gen[NPV_LIST_LOCKS];
static struct md_page *pv_table;
static struct md_page pv_dummy;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1 = NULL;

static vm_offset_t qframe = 0;
static struct mtx qframe_mtx;

static int pmap_flags = PMAP_PDE_SUPERPAGE;	/* flags for x86 pmaps */

int pmap_pcid_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pmap_pcid_enabled, 0, "Is TLB Context ID enabled ?");
int invpcid_works = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
    "Is the invpcid instruction available ?");
pmap_pcid_save_cnt_proc(SYSCTL_HANDLER_ARGS)
		res += cpuid_to_pcpu[i]->pc_pm_save_cnt;
	return (sysctl_handle_64(oidp, &res, 0, req));
SYSCTL_PROC(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLTYPE_U64 | CTLFLAG_RW |
    CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU",
    "Count of saved TLB context on switch");

static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
    LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
static struct mtx invl_gen_mtx;
static u_long pmap_invl_gen = 0;
/* Fake lock object to satisfy turnstiles interface. */
static struct lock_object invl_gen_ts = {

	return (curthread->td_md.md_invl_gen.gen == 0);

#define	PMAP_ASSERT_NOT_IN_DI() \
    KASSERT(pmap_not_in_di(), ("DI already started"))
/*
 * Start a new Delayed Invalidation (DI) block of code, executed by
 * the current thread.  Within a DI block, the current thread may
 * destroy both the page table and PV list entries for a mapping and
 * then release the corresponding PV list lock before ensuring that
 * the mapping is flushed from the TLBs of any processors with the
 * pmap active.
 */
pmap_delayed_invl_started(void)
	struct pmap_invl_gen *invl_gen;

	invl_gen = &curthread->td_md.md_invl_gen;
	PMAP_ASSERT_NOT_IN_DI();
	mtx_lock(&invl_gen_mtx);
	if (LIST_EMPTY(&pmap_invl_gen_tracker))
		currgen = pmap_invl_gen;
	else
		currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen;
	invl_gen->gen = currgen + 1;
	LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link);
	mtx_unlock(&invl_gen_mtx);
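/*
 * Illustrative usage (a sketch, not lifted verbatim from any one caller):
 * a remove-style operation brackets its work as
 *
 *	pmap_delayed_invl_started();
 *	... destroy PTEs and PV entries, calling pmap_delayed_invl_page(m)
 *	    for each page whose mapping is removed ...
 *	pmap_delayed_invl_finished();
 *
 * so that a later pmap_delayed_invl_wait() on any of those pages blocks
 * until the corresponding TLB flushes have completed.
 */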
/*
 * Finish the DI block, previously started by the current thread.  All
 * required TLB flushes for the pages marked by
 * pmap_delayed_invl_page() must be finished before this function is
 * called.
 *
 * This function works by bumping the global DI generation number to
 * the generation number of the current thread's DI, unless there is a
 * pending DI that started earlier.  In the latter case, bumping the
 * global DI generation number would incorrectly signal that the
 * earlier DI had finished.  Instead, this function bumps the earlier
 * DI's generation number to match the generation number of the
 * current thread's DI.
 */
pmap_delayed_invl_finished(void)
	struct pmap_invl_gen *invl_gen, *next;
	struct turnstile *ts;

	invl_gen = &curthread->td_md.md_invl_gen;
	KASSERT(invl_gen->gen != 0, ("missed invl_started"));
	mtx_lock(&invl_gen_mtx);
	next = LIST_NEXT(invl_gen, link);
		turnstile_chain_lock(&invl_gen_ts);
		ts = turnstile_lookup(&invl_gen_ts);
		pmap_invl_gen = invl_gen->gen;
			turnstile_broadcast(ts, TS_SHARED_QUEUE);
			turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&invl_gen_ts);
		next->gen = invl_gen->gen;
	LIST_REMOVE(invl_gen, link);
	mtx_unlock(&invl_gen_mtx);
static long invl_wait;
SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait, CTLFLAG_RD, &invl_wait, 0,
    "Number of times DI invalidation blocked pmap_remove_all/write");

pmap_delayed_invl_genp(vm_page_t m)

	return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]);

/*
 * Ensure that all currently executing DI blocks, that need to flush
 * TLB for the given page m, actually flushed the TLB at the time the
 * function returned.  If the page m has an empty PV list and we call
 * pmap_delayed_invl_wait(), upon its return we know that no CPU has a
 * valid mapping for the page m in either its page table or TLB.
 *
 * This function works by blocking until the global DI generation
 * number catches up with the generation number associated with the
 * given page m and its PV list.  Since this function's callers
 * typically own an object lock and sometimes own a page lock, it
 * cannot sleep.  Instead, it blocks on a turnstile to relinquish the
 * processor.
 */
pmap_delayed_invl_wait(vm_page_t m)
	struct turnstile *ts;
	bool accounted = false;

	m_gen = pmap_delayed_invl_genp(m);
	while (*m_gen > pmap_invl_gen) {
			atomic_add_long(&invl_wait, 1);
		ts = turnstile_trywait(&invl_gen_ts);
		if (*m_gen > pmap_invl_gen)
			turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
		else
			turnstile_cancel(ts);

/*
 * Mark the page m's PV list as participating in the current thread's
 * DI block.  Any threads concurrently using m's PV list to remove or
 * restrict all mappings to m will wait for the current thread's DI
 * block to complete before proceeding.
 *
 * The function works by setting the DI generation number for m's PV
 * list to at least the DI generation number of the current thread.
 * This forces a caller of pmap_delayed_invl_wait() to block until the
 * current thread calls pmap_delayed_invl_finished().
 */
pmap_delayed_invl_page(vm_page_t m)

	rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED);
	gen = curthread->td_md.md_invl_gen.gen;
	m_gen = pmap_delayed_invl_genp(m);
static caddr_t crashdumpmap;

/*
 * Internal flags for pmap_enter()'s helper functions.
 */
#define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
#define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */
598 static void free_pv_chunk(struct pv_chunk *pc);
599 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
600 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
601 static int popcnt_pc_map_pq(uint64_t *map);
602 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
603 static void reserve_pv_entries(pmap_t pmap, int needed,
604 struct rwlock **lockp);
605 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
606 struct rwlock **lockp);
607 static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
608 u_int flags, struct rwlock **lockp);
609 #if VM_NRESERVLEVEL > 0
610 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
611 struct rwlock **lockp);
613 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
614 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
617 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
618 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
619 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
620 vm_offset_t va, struct rwlock **lockp);
621 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
623 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
624 vm_prot_t prot, struct rwlock **lockp);
625 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
626 u_int flags, vm_page_t m, struct rwlock **lockp);
627 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
628 vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
629 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
630 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
631 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
633 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
634 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
635 #if VM_NRESERVLEVEL > 0
636 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
637 struct rwlock **lockp);
639 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
641 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask);
642 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
643 struct spglist *free, struct rwlock **lockp);
644 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
645 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
646 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
647 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
648 struct spglist *free);
649 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
650 pd_entry_t *pde, struct spglist *free,
651 struct rwlock **lockp);
652 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
653 vm_page_t m, struct rwlock **lockp);
654 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
656 static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);
658 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
659 struct rwlock **lockp);
660 static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va,
661 struct rwlock **lockp);
662 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
663 struct rwlock **lockp);
665 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
666 struct spglist *free);
667 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
668 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
/*
 * Move the kernel virtual free pointer to the next
 * 2MB.  This is used to help improve performance
 * by using a large (2MB) page for much of the kernel
 * (.text, .data, .bss)
 */
pmap_kmem_choose(vm_offset_t addr)
	vm_offset_t newaddr = addr;

	newaddr = roundup2(addr, NBPDR);
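	/*
	 * NBPDR is 2MB, so this rounds the free pointer up to the next
	 * superpage boundary; e.g. roundup2(0xffffffff80a01000, NBPDR)
	 * yields 0xffffffff80c00000.
	 */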
/********************/
/* Inline functions */
/********************/

/* Return a non-clipped PD index for a given VA */
static __inline vm_pindex_t
pmap_pde_pindex(vm_offset_t va)
	return (va >> PDRSHIFT);
/* Return a pointer to the PML4 slot that corresponds to a VA */
static __inline pml4_entry_t *
pmap_pml4e(pmap_t pmap, vm_offset_t va)

	return (&pmap->pm_pml4[pmap_pml4e_index(va)]);

/* Return a pointer to the PDP slot that corresponds to a VA */
static __inline pdp_entry_t *
pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)

	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
	return (&pdpe[pmap_pdpe_index(va)]);

/* Return a pointer to the PDP slot that corresponds to a VA */
static __inline pdp_entry_t *
pmap_pdpe(pmap_t pmap, vm_offset_t va)

	PG_V = pmap_valid_bit(pmap);
	pml4e = pmap_pml4e(pmap, va);
	if ((*pml4e & PG_V) == 0)
		return (NULL);
	return (pmap_pml4e_to_pdpe(pml4e, va));

/* Return a pointer to the PD slot that corresponds to a VA */
static __inline pd_entry_t *
pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)

	pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
	return (&pde[pmap_pde_index(va)]);

/* Return a pointer to the PD slot that corresponds to a VA */
static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)

	PG_V = pmap_valid_bit(pmap);
	pdpe = pmap_pdpe(pmap, va);
	if (pdpe == NULL || (*pdpe & PG_V) == 0)
		return (NULL);
	return (pmap_pdpe_to_pde(pdpe, va));

/* Return a pointer to the PT slot that corresponds to a VA */
static __inline pt_entry_t *
pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)

	pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
	return (&pte[pmap_pte_index(va)]);

/* Return a pointer to the PT slot that corresponds to a VA */
static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)

	PG_V = pmap_valid_bit(pmap);
	pde = pmap_pde(pmap, va);
	if (pde == NULL || (*pde & PG_V) == 0)
		return (NULL);
	if ((*pde & PG_PS) != 0)	/* compat with i386 pmap_pte() */
		return ((pt_entry_t *)pde);
	return (pmap_pde_to_pte(pde, va));
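/*
 * Taken together, the helpers above implement the four-level walk:
 * pmap_pte() descends PML4E -> PDPE -> PDE -> PTE through pmap_pml4e(),
 * pmap_pml4e_to_pdpe(), pmap_pdpe_to_pde() and pmap_pde_to_pte(),
 * returning NULL as soon as an intermediate level is not valid.  A 2MB
 * mapping (PG_PS set in the PDE) is returned as if it were a PTE for
 * compatibility with the i386 pmap_pte().
 */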
pmap_resident_count_inc(pmap_t pmap, int count)

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->pm_stats.resident_count += count;

pmap_resident_count_dec(pmap_t pmap, int count)

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->pm_stats.resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->pm_stats.resident_count, count));
	pmap->pm_stats.resident_count -= count;

PMAP_INLINE pt_entry_t *
vtopte(vm_offset_t va)
	u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);

	KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopte on a uva/gpa 0x%0lx", va));

	return (PTmap + ((va >> PAGE_SHIFT) & mask));
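/*
 * vtopte() relies on the recursive PML4 slot: PTmap is the base of the
 * 512GB window in which the page tables themselves are visible, and the
 * low NPML4EPGSHIFT + NPDPEPGSHIFT + NPDEPGSHIFT + NPTEPGSHIFT (= 36)
 * bits of va >> PAGE_SHIFT select the PTE within that window.  vtopde()
 * below does the same one level up.
 */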
static __inline pd_entry_t *
vtopde(vm_offset_t va)
	u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);

	KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va));

	return (PDmap + ((va >> PDRSHIFT) & mask));

allocpages(vm_paddr_t *firstaddr, int n)

	bzero((void *)ret, n * PAGE_SIZE);
	*firstaddr += n * PAGE_SIZE;

CTASSERT(powerof2(NDMPML4E));

/* number of kernel PDP slots */
#define	NKPDPE(ptpgs)		howmany(ptpgs, NPDEPG)
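/*
 * Each PD page holds NPDEPG (512) entries, one per page table page, so
 * NKPDPE(ptpgs) is the number of kernel PD pages (and hence PDP slots)
 * needed to reach ptpgs page table pages; e.g. howmany(40, 512) == 1.
 */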
nkpt_init(vm_paddr_t addr)

	pt_pages = howmany(addr, 1 << PDRSHIFT);
	pt_pages += NKPDPE(pt_pages);

	/*
	 * Add some slop beyond the bare minimum required for bootstrapping
	 * the kernel.
	 *
	 * This is quite important when allocating KVA for kernel modules.
	 * The modules are required to be linked in the negative 2GB of
	 * the address space.  If we run out of KVA in this region then
	 * pmap_growkernel() will need to allocate page table pages to map
	 * the entire 512GB of KVA space which is an unnecessary tax on
	 * physical memory.
	 *
	 * Secondly, device memory mapped as part of setting up the low-
	 * level console(s) is taken from KVA, starting at virtual_avail.
	 * This is because cninit() is called after pmap_bootstrap() but
	 * before vm_init() and pmap_init().  20MB for a frame buffer is
	 * typical today.
	 */
	pt_pages += 32;		/* 64MB additional slop. */
868 create_pagetables(vm_paddr_t *firstaddr)
870 int i, j, ndm1g, nkpdpe;
876 /* Allocate page table pages for the direct map */
877 ndmpdp = howmany(ptoa(Maxmem), NBPDP);
878 if (ndmpdp < 4) /* Minimum 4GB of dirmap */
880 ndmpdpphys = howmany(ndmpdp, NPDPEPG);
881 if (ndmpdpphys > NDMPML4E) {
883 * Each NDMPML4E allows 512 GB, so limit to that,
884 * and then readjust ndmpdp and ndmpdpphys.
886 printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
887 Maxmem = atop(NDMPML4E * NBPML4);
888 ndmpdpphys = NDMPML4E;
889 ndmpdp = NDMPML4E * NPDEPG;
891 DMPDPphys = allocpages(firstaddr, ndmpdpphys);
893 if ((amd_feature & AMDID_PAGE1GB) != 0)
894 ndm1g = ptoa(Maxmem) >> PDPSHIFT;
896 DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
897 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
900 KPML4phys = allocpages(firstaddr, 1);
901 KPDPphys = allocpages(firstaddr, NKPML4E);
904 * Allocate the initial number of kernel page table pages required to
905 * bootstrap. We defer this until after all memory-size dependent
906 * allocations are done (e.g. direct map), so that we don't have to
907 * build in too much slop in our estimate.
909 * Note that when NKPML4E > 1, we have an empty page underneath
910 * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed)
911 * pages. (pmap_enter requires a PD page to exist for each KPML4E.)
913 nkpt_init(*firstaddr);
914 nkpdpe = NKPDPE(nkpt);
916 KPTphys = allocpages(firstaddr, nkpt);
917 KPDphys = allocpages(firstaddr, nkpdpe);
919 /* Fill in the underlying page table pages */
920 /* Nominally read-only (but really R/W) from zero to physfree */
921 /* XXX not fully used, underneath 2M pages */
922 pt_p = (pt_entry_t *)KPTphys;
923 for (i = 0; ptoa(i) < *firstaddr; i++)
924 pt_p[i] = ptoa(i) | X86_PG_RW | X86_PG_V | X86_PG_G;
926 /* Now map the page tables at their location within PTmap */
927 pd_p = (pd_entry_t *)KPDphys;
928 for (i = 0; i < nkpt; i++)
929 pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
931 /* Map from zero to end of allocations under 2M pages */
932 /* This replaces some of the KPTphys entries above */
933 for (i = 0; (i << PDRSHIFT) < *firstaddr; i++)
934 pd_p[i] = (i << PDRSHIFT) | X86_PG_RW | X86_PG_V | PG_PS |
937 /* And connect up the PD to the PDP (leaving room for L4 pages) */
938 pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE));
939 for (i = 0; i < nkpdpe; i++)
940 pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V |
944 * Now, set up the direct map region using 2MB and/or 1GB pages. If
945 * the end of physical memory is not aligned to a 1GB page boundary,
946 * then the residual physical memory is mapped with 2MB pages. Later,
947 * if pmap_mapdev{_attr}() uses the direct map for non-write-back
948 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
949 * that are partially used.
951 pd_p = (pd_entry_t *)DMPDphys;
952 for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
953 pd_p[j] = (vm_paddr_t)i << PDRSHIFT;
954 /* Preset PG_M and PG_A because demotion expects it. */
955 pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | X86_PG_G |
956 X86_PG_M | X86_PG_A | pg_nx;
958 pdp_p = (pdp_entry_t *)DMPDPphys;
959 for (i = 0; i < ndm1g; i++) {
960 pdp_p[i] = (vm_paddr_t)i << PDPSHIFT;
961 /* Preset PG_M and PG_A because demotion expects it. */
962 pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | X86_PG_G |
963 X86_PG_M | X86_PG_A | pg_nx;
965 for (j = 0; i < ndmpdp; i++, j++) {
966 pdp_p[i] = DMPDphys + ptoa(j);
967 pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_U;
	/* And recursively map PML4 to itself in order to get PTmap */
	p4_p = (pml4_entry_t *)KPML4phys;
	p4_p[PML4PML4I] = KPML4phys;
	p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | PG_U;

	/* Connect the Direct Map slot(s) up to the PML4. */
	for (i = 0; i < ndmpdpphys; i++) {
		p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
		p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | PG_U;
	}

	/* Connect the KVA slots up to the PML4 */
	for (i = 0; i < NKPML4E; i++) {
		p4_p[KPML4BASE + i] = KPDPphys + ptoa(i);
		p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V | PG_U;
	}
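	/*
	 * At this point the kernel PML4 holds three kinds of entries: the
	 * recursive slot (PML4PML4I) that exposes the page tables at PTmap,
	 * ndmpdpphys slots starting at DMPML4I for the direct map, and
	 * NKPML4E slots starting at KPML4BASE for the kernel virtual
	 * address space.
	 */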
989 * Bootstrap the system enough to run with virtual memory.
991 * On amd64 this is called after mapping has already been enabled
992 * and just syncs the pmap module with what has already been done.
993 * [We can't call it easily with mapping off since the kernel is not
994 * mapped with PA == VA, hence we would have to relocate every address
995 * from the linked base (virtual) address "KERNBASE" to the actual
996 * (physical) address starting relative to 0]
999 pmap_bootstrap(vm_paddr_t *firstaddr)
1006 * Create an initial set of page tables to run the kernel in.
1008 create_pagetables(firstaddr);
1011 * Add a physical memory segment (vm_phys_seg) corresponding to the
1012 * preallocated kernel page table pages so that vm_page structures
1013 * representing these pages will be created. The vm_page structures
1014 * are required for promotion of the corresponding kernel virtual
1015 * addresses to superpage mappings.
1017 vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1019 virtual_avail = (vm_offset_t) KERNBASE + *firstaddr;
1020 virtual_avail = pmap_kmem_choose(virtual_avail);
1022 virtual_end = VM_MAX_KERNEL_ADDRESS;
1025 /* XXX do %cr0 as well */
1026 load_cr4(rcr4() | CR4_PGE);
1027 load_cr3(KPML4phys);
1028 if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
1029 load_cr4(rcr4() | CR4_SMEP);
1032 * Initialize the kernel pmap (which is statically allocated).
1034 PMAP_LOCK_INIT(kernel_pmap);
1035 kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
1036 kernel_pmap->pm_cr3 = KPML4phys;
1037 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
1038 TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1039 kernel_pmap->pm_flags = pmap_flags;
1042 * Initialize the TLB invalidations generation number lock.
1044 mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF);
1047 * Reserve some special page table entries/VA space for temporary
1050 #define SYSMAP(c, p, v, n) \
1051 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
1057 * Crashdump maps. The first page is reused as CMAP1 for the
1060 SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
1061 CADDR1 = crashdumpmap;
1066 * Initialize the PAT MSR.
1067 * pmap_init_pat() clears and sets CR4_PGE, which, as a
1068 * side-effect, invalidates stale PG_G TLB entries that might
1069 * have been created in our pre-boot environment.
1073 /* Initialize TLB Context Id. */
1074 TUNABLE_INT_FETCH("vm.pmap.pcid_enabled", &pmap_pcid_enabled);
1075 if ((cpu_feature2 & CPUID2_PCID) != 0 && pmap_pcid_enabled) {
1076 /* Check for INVPCID support */
1077 invpcid_works = (cpu_stdext_feature & CPUID_STDEXT_INVPCID)
1079 for (i = 0; i < MAXCPU; i++) {
1080 kernel_pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN;
1081 kernel_pmap->pm_pcids[i].pm_gen = 1;
1083 PCPU_SET(pcid_next, PMAP_PCID_KERN + 1);
1084 PCPU_SET(pcid_gen, 1);
1086 * pcpu area for APs is zeroed during AP startup.
1087 * pc_pcid_next and pc_pcid_gen are initialized by AP
1088 * during pcpu setup.
1090 load_cr4(rcr4() | CR4_PCIDE);
1092 pmap_pcid_enabled = 0;
1097 * Setup the PAT MSR.
1102 int pat_table[PAT_INDEX_SIZE];
1107 /* Bail if this CPU doesn't implement PAT. */
1108 if ((cpu_feature & CPUID_PAT) == 0)
1111 /* Set default PAT index table. */
1112 for (i = 0; i < PAT_INDEX_SIZE; i++)
1114 pat_table[PAT_WRITE_BACK] = 0;
1115 pat_table[PAT_WRITE_THROUGH] = 1;
1116 pat_table[PAT_UNCACHEABLE] = 3;
1117 pat_table[PAT_WRITE_COMBINING] = 3;
1118 pat_table[PAT_WRITE_PROTECTED] = 3;
1119 pat_table[PAT_UNCACHED] = 3;
1121 /* Initialize default PAT entries. */
1122 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
1123 PAT_VALUE(1, PAT_WRITE_THROUGH) |
1124 PAT_VALUE(2, PAT_UNCACHED) |
1125 PAT_VALUE(3, PAT_UNCACHEABLE) |
1126 PAT_VALUE(4, PAT_WRITE_BACK) |
1127 PAT_VALUE(5, PAT_WRITE_THROUGH) |
1128 PAT_VALUE(6, PAT_UNCACHED) |
1129 PAT_VALUE(7, PAT_UNCACHEABLE);
1133 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
1134 * Program 5 and 6 as WP and WC.
1135 * Leave 4 and 7 as WB and UC.
1137 pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
1138 pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
1139 PAT_VALUE(6, PAT_WRITE_COMBINING);
1140 pat_table[PAT_UNCACHED] = 2;
1141 pat_table[PAT_WRITE_PROTECTED] = 5;
1142 pat_table[PAT_WRITE_COMBINING] = 6;
1145 * Just replace PAT Index 2 with WC instead of UC-.
1147 pat_msr &= ~PAT_MASK(2);
1148 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
1149 pat_table[PAT_WRITE_COMBINING] = 2;
1154 load_cr4(cr4 & ~CR4_PGE);
1156 /* Disable caches (CD = 1, NW = 0). */
1158 load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1160 /* Flushes caches and TLBs. */
1164 /* Update PAT and index table. */
1165 wrmsr(MSR_PAT, pat_msr);
1166 for (i = 0; i < PAT_INDEX_SIZE; i++)
1167 pat_index[i] = pat_table[i];
1169 /* Flush caches and TLBs again. */
1173 /* Restore caches and PGE. */
1179 * Initialize a vm_page's machine-dependent fields.
1182 pmap_page_init(vm_page_t m)
1185 TAILQ_INIT(&m->md.pv_list);
1186 m->md.pat_mode = PAT_WRITE_BACK;
1190 * Initialize the pmap module.
1191 * Called by vm_init, to initialize any structures that the pmap
1192 * system needs to map virtual memory.
1197 struct pmap_preinit_mapping *ppim;
1200 int error, i, pv_npg;
1203 * Initialize the vm page array entries for the kernel pmap's
1206 for (i = 0; i < nkpt; i++) {
1207 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
1208 KASSERT(mpte >= vm_page_array &&
1209 mpte < &vm_page_array[vm_page_array_size],
1210 ("pmap_init: page table page is out of range"));
1211 mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
1212 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
1216 * If the kernel is running on a virtual machine, then it must assume
1217 * that MCA is enabled by the hypervisor. Moreover, the kernel must
1218 * be prepared for the hypervisor changing the vendor and family that
1219 * are reported by CPUID. Consequently, the workaround for AMD Family
1220 * 10h Erratum 383 is enabled if the processor's feature set does not
1221 * include at least one feature that is only supported by older Intel
1222 * or newer AMD processors.
1224 if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
1225 (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
1226 CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
1228 workaround_erratum383 = 1;
1231 * Are large page mappings enabled?
1233 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
1234 if (pg_ps_enabled) {
1235 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1236 ("pmap_init: can't assign to pagesizes[1]"));
1237 pagesizes[1] = NBPDR;
1241 * Initialize the pv chunk list mutex.
1243 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
1246 * Initialize the pool of pv list locks.
1248 for (i = 0; i < NPV_LIST_LOCKS; i++)
1249 rw_init(&pv_list_locks[i], "pmap pv list");
1252 * Calculate the size of the pv head table for superpages.
1254 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
1257 * Allocate memory for the pv head table for superpages.
1259 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1261 pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
1263 for (i = 0; i < pv_npg; i++)
1264 TAILQ_INIT(&pv_table[i].pv_list);
1265 TAILQ_INIT(&pv_dummy.pv_list);
1267 pmap_initialized = 1;
1268 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
1269 ppim = pmap_preinit_mapping + i;
1272 /* Make the direct map consistent */
1273 if (ppim->pa < dmaplimit && ppim->pa + ppim->sz < dmaplimit) {
1274 (void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
1275 ppim->sz, ppim->mode);
1279 printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
1280 ppim->pa, ppim->va, ppim->sz, ppim->mode);
1283 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
1284 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
1285 (vmem_addr_t *)&qframe);
1287 panic("qframe allocation failed");
1290 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
1291 "2MB page mapping counters");
1293 static u_long pmap_pde_demotions;
1294 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
1295 &pmap_pde_demotions, 0, "2MB page demotions");
1297 static u_long pmap_pde_mappings;
1298 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
1299 &pmap_pde_mappings, 0, "2MB page mappings");
1301 static u_long pmap_pde_p_failures;
1302 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
1303 &pmap_pde_p_failures, 0, "2MB page promotion failures");
1305 static u_long pmap_pde_promotions;
1306 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
1307 &pmap_pde_promotions, 0, "2MB page promotions");
1309 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD, 0,
1310 "1GB page mapping counters");
1312 static u_long pmap_pdpe_demotions;
1313 SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
1314 &pmap_pdpe_demotions, 0, "1GB page demotions");
/***************************************************
 * Low level helper routines.....
 ***************************************************/

pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
	int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT;

	switch (pmap->pm_type) {
		/* Verify that both PAT bits are not set at the same time */
		KASSERT((entry & x86_pat_bits) != x86_pat_bits,
		    ("Invalid PAT bits in entry %#lx", entry));

		/* Swap the PAT bits if one of them is set */
		if ((entry & x86_pat_bits) != 0)
			entry ^= x86_pat_bits;
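			/*
			 * XORing with both positions moves the PAT attribute
			 * bit between its 4KB encoding (X86_PG_PTE_PAT, bit 7)
			 * and its 2MB/1GB encoding (X86_PG_PDE_PAT, bit 12),
			 * which is the swap needed when an entry changes page
			 * size.
			 */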
		/*
		 * Nothing to do - the memory attributes are represented
		 * the same way for regular pages and superpages.
		 */
	panic("pmap_switch_pat_bits: bad pm_type %d", pmap->pm_type);

/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
	int cache_bits, pat_flag, pat_idx;

	if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
		panic("Unknown caching mode %d\n", mode);

	switch (pmap->pm_type) {
		/* The PAT bit is different for PTE's and PDE's. */
		pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;

		/* Map the caching mode to a PAT index. */
		pat_idx = pat_index[mode];

		/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
			cache_bits |= pat_flag;
			cache_bits |= PG_NC_PCD;
			cache_bits |= PG_NC_PWT;
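		/*
		 * A PAT index is encoded in an x86 PTE/PDE as three bits:
		 * the PAT flag carries bit 2 of the index, PG_NC_PCD bit 1,
		 * and PG_NC_PWT bit 0; the ORs above set whichever of those
		 * bits the selected pat_idx requires.
		 */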
		cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
	panic("unsupported pmap type %d", pmap->pm_type);

	return (cache_bits);
1392 pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
1396 switch (pmap->pm_type) {
1399 mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
1402 mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
1405 panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type);
1412 pmap_ps_enabled(pmap_t pmap)
1415 return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
1419 pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
1422 switch (pmap->pm_type) {
1429 * This is a little bogus since the generation number is
1430 * supposed to be bumped up when a region of the address
1431 * space is invalidated in the page tables.
1433 * In this case the old PDE entry is valid but yet we want
1434 * to make sure that any mappings using the old entry are
1435 * invalidated in the TLB.
1437 * The reason this works as expected is because we rendezvous
1438 * "all" host cpus and force any vcpu context to exit as a
1441 atomic_add_acq_long(&pmap->pm_eptgen, 1);
1444 panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type);
1446 pde_store(pde, newpde);
1450 * After changing the page size for the specified virtual address in the page
1451 * table, flush the corresponding entries from the processor's TLB. Only the
1452 * calling processor's TLB is affected.
1454 * The calling thread must be pinned to a processor.
1457 pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
1461 if (pmap_type_guest(pmap))
1464 KASSERT(pmap->pm_type == PT_X86,
1465 ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type));
1467 PG_G = pmap_global_bit(pmap);
1469 if ((newpde & PG_PS) == 0)
1470 /* Demotion: flush a specific 2MB page mapping. */
1472 else if ((newpde & PG_G) == 0)
1474 * Promotion: flush every 4KB page mapping from the TLB
1475 * because there are too many to flush individually.
1480 * Promotion: flush every 4KB page mapping from the TLB,
1481 * including any global (PG_G) mappings.
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 *
 * N.B.: Before calling any of the following TLB invalidation functions,
 * the calling processor must ensure that all stores updating a non-
 * kernel page table are globally performed.  Otherwise, another
 * processor could cache an old, pre-update entry without being
 * invalidated.  This can happen one of two ways: (1) The pmap becomes
 * active on another processor after its pm_active field is checked by
 * one of the following functions but before a store updating the page
 * table is globally performed.  (2) The pmap becomes active on another
 * processor before its pm_active field is checked but due to
 * speculative loads one of the following functions still reads the
 * pmap as inactive on the other processor.
 *
 * The kernel page table is exempt because its pm_active field is
 * immutable.  The kernel page table is always active on every
 * processor.
 */
1509 * Interrupt the cpus that are executing in the guest context.
1510 * This will force the vcpu to exit and the cached EPT mappings
1511 * will be invalidated by the host before the next vmresume.
1513 static __inline void
1514 pmap_invalidate_ept(pmap_t pmap)
1519 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
1520 ("pmap_invalidate_ept: absurd pm_active"));
1523 * The TLB mappings associated with a vcpu context are not
1524 * flushed each time a different vcpu is chosen to execute.
1526 * This is in contrast with a process's vtop mappings that
1527 * are flushed from the TLB on each context switch.
1529 * Therefore we need to do more than just a TLB shootdown on
1530 * the active cpus in 'pmap->pm_active'. To do this we keep
1531 * track of the number of invalidations performed on this pmap.
1533 * Each vcpu keeps a cache of this counter and compares it
1534 * just before a vmresume. If the counter is out-of-date an
1535 * invept will be done to flush stale mappings from the TLB.
1537 atomic_add_acq_long(&pmap->pm_eptgen, 1);
1540 * Force the vcpu to exit and trap back into the hypervisor.
1542 ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK;
1543 ipi_selected(pmap->pm_active, ipinum);
1548 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1553 if (pmap_type_guest(pmap)) {
1554 pmap_invalidate_ept(pmap);
1558 KASSERT(pmap->pm_type == PT_X86,
1559 ("pmap_invalidate_page: invalid type %d", pmap->pm_type));
1562 if (pmap == kernel_pmap) {
1566 cpuid = PCPU_GET(cpuid);
1567 if (pmap == PCPU_GET(curpmap))
1569 else if (pmap_pcid_enabled)
1570 pmap->pm_pcids[cpuid].pm_gen = 0;
1571 if (pmap_pcid_enabled) {
1574 pmap->pm_pcids[i].pm_gen = 0;
1577 mask = &pmap->pm_active;
1579 smp_masked_invlpg(*mask, va);
1583 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
1584 #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE)
1587 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1593 if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
1594 pmap_invalidate_all(pmap);
1598 if (pmap_type_guest(pmap)) {
1599 pmap_invalidate_ept(pmap);
1603 KASSERT(pmap->pm_type == PT_X86,
1604 ("pmap_invalidate_range: invalid type %d", pmap->pm_type));
1607 cpuid = PCPU_GET(cpuid);
1608 if (pmap == kernel_pmap) {
1609 for (addr = sva; addr < eva; addr += PAGE_SIZE)
1613 if (pmap == PCPU_GET(curpmap)) {
1614 for (addr = sva; addr < eva; addr += PAGE_SIZE)
1616 } else if (pmap_pcid_enabled) {
1617 pmap->pm_pcids[cpuid].pm_gen = 0;
1619 if (pmap_pcid_enabled) {
1622 pmap->pm_pcids[i].pm_gen = 0;
1625 mask = &pmap->pm_active;
1627 smp_masked_invlpg_range(*mask, sva, eva);
1632 pmap_invalidate_all(pmap_t pmap)
1635 struct invpcid_descr d;
1638 if (pmap_type_guest(pmap)) {
1639 pmap_invalidate_ept(pmap);
1643 KASSERT(pmap->pm_type == PT_X86,
1644 ("pmap_invalidate_all: invalid type %d", pmap->pm_type));
1647 if (pmap == kernel_pmap) {
1648 if (pmap_pcid_enabled && invpcid_works) {
1649 bzero(&d, sizeof(d));
1650 invpcid(&d, INVPCID_CTXGLOB);
1656 cpuid = PCPU_GET(cpuid);
1657 if (pmap == PCPU_GET(curpmap)) {
1658 if (pmap_pcid_enabled) {
1659 if (invpcid_works) {
1660 d.pcid = pmap->pm_pcids[cpuid].pm_pcid;
1663 invpcid(&d, INVPCID_CTX);
1665 load_cr3(pmap->pm_cr3 | pmap->pm_pcids
1666 [PCPU_GET(cpuid)].pm_pcid);
1671 } else if (pmap_pcid_enabled) {
1672 pmap->pm_pcids[cpuid].pm_gen = 0;
1674 if (pmap_pcid_enabled) {
1677 pmap->pm_pcids[i].pm_gen = 0;
1680 mask = &pmap->pm_active;
1682 smp_masked_invltlb(*mask, pmap);
1687 pmap_invalidate_cache(void)
1697 cpuset_t invalidate; /* processors that invalidate their TLB */
1702 u_int store; /* processor that updates the PDE */
1706 pmap_update_pde_action(void *arg)
1708 struct pde_action *act = arg;
1710 if (act->store == PCPU_GET(cpuid))
1711 pmap_update_pde_store(act->pmap, act->pde, act->newpde);
1715 pmap_update_pde_teardown(void *arg)
1717 struct pde_action *act = arg;
1719 if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
1720 pmap_update_pde_invalidate(act->pmap, act->va, act->newpde);
1724 * Change the page size for the specified virtual address in a way that
1725 * prevents any possibility of the TLB ever having two entries that map the
1726 * same virtual address using different page sizes. This is the recommended
1727 * workaround for Erratum 383 on AMD Family 10h processors. It prevents a
1728 * machine check exception for a TLB state that is improperly diagnosed as a
1732 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1734 struct pde_action act;
1735 cpuset_t active, other_cpus;
1739 cpuid = PCPU_GET(cpuid);
1740 other_cpus = all_cpus;
1741 CPU_CLR(cpuid, &other_cpus);
1742 if (pmap == kernel_pmap || pmap_type_guest(pmap))
1745 active = pmap->pm_active;
1747 if (CPU_OVERLAP(&active, &other_cpus)) {
1749 act.invalidate = active;
1753 act.newpde = newpde;
1754 CPU_SET(cpuid, &active);
1755 smp_rendezvous_cpus(active,
1756 smp_no_rendezvous_barrier, pmap_update_pde_action,
1757 pmap_update_pde_teardown, &act);
1759 pmap_update_pde_store(pmap, pde, newpde);
1760 if (CPU_ISSET(cpuid, &active))
1761 pmap_update_pde_invalidate(pmap, va, newpde);
1767 * Normal, non-SMP, invalidation functions.
1770 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1773 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
1777 KASSERT(pmap->pm_type == PT_X86,
1778 ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
1780 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
1782 else if (pmap_pcid_enabled)
1783 pmap->pm_pcids[0].pm_gen = 0;
1787 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1791 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
1795 KASSERT(pmap->pm_type == PT_X86,
1796 ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
1798 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
1799 for (addr = sva; addr < eva; addr += PAGE_SIZE)
1801 } else if (pmap_pcid_enabled) {
1802 pmap->pm_pcids[0].pm_gen = 0;
1807 pmap_invalidate_all(pmap_t pmap)
1809 struct invpcid_descr d;
1811 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
1815 KASSERT(pmap->pm_type == PT_X86,
1816 ("pmap_invalidate_all: unknown type %d", pmap->pm_type));
1818 if (pmap == kernel_pmap) {
1819 if (pmap_pcid_enabled && invpcid_works) {
1820 bzero(&d, sizeof(d));
1821 invpcid(&d, INVPCID_CTXGLOB);
1825 } else if (pmap == PCPU_GET(curpmap)) {
1826 if (pmap_pcid_enabled) {
1827 if (invpcid_works) {
1828 d.pcid = pmap->pm_pcids[0].pm_pcid;
1831 invpcid(&d, INVPCID_CTX);
1833 load_cr3(pmap->pm_cr3 | pmap->pm_pcids[0].
1839 } else if (pmap_pcid_enabled) {
1840 pmap->pm_pcids[0].pm_gen = 0;
1845 pmap_invalidate_cache(void)
1852 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1855 pmap_update_pde_store(pmap, pde, newpde);
1856 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
1857 pmap_update_pde_invalidate(pmap, va, newpde);
1859 pmap->pm_pcids[0].pm_gen = 0;
pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)

	/*
	 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
	 * by a promotion that did not invalidate the 512 4KB page mappings
	 * that might exist in the TLB.  Consequently, at this point, the TLB
	 * may hold both 4KB and 2MB page mappings for the address range [va,
	 * va + NBPDR).  Therefore, the entire range must be invalidated here.
	 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
	 * 4KB page mappings for the address range [va, va + NBPDR), and so a
	 * single INVLPG suffices to invalidate the 2MB page mapping from the
	 * TLB.
	 */
	if ((pde & PG_PROMOTED) != 0)
		pmap_invalidate_range(pmap, va, va + NBPDR - 1);
	else
		pmap_invalidate_page(pmap, va);
1884 #define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
1887 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
1891 sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
1893 KASSERT((sva & PAGE_MASK) == 0,
1894 ("pmap_invalidate_cache_range: sva not page-aligned"));
1895 KASSERT((eva & PAGE_MASK) == 0,
1896 ("pmap_invalidate_cache_range: eva not page-aligned"));
1899 if ((cpu_feature & CPUID_SS) != 0 && !force)
1900 ; /* If "Self Snoop" is supported and allowed, do nothing. */
1901 else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
1902 eva - sva < PMAP_CLFLUSH_THRESHOLD) {
1904 * XXX: Some CPUs fault, hang, or trash the local APIC
1905 * registers if we use CLFLUSH on the local APIC
1906 * range. The local APIC is always uncached, so we
1907 * don't need to flush for that range anyway.
1909 if (pmap_kextract(sva) == lapic_paddr)
1913 * Otherwise, do per-cache line flush. Use the sfence
1914 * instruction to insure that previous stores are
1915 * included in the write-back. The processor
1916 * propagates flush to other processors in the cache
1920 for (; sva < eva; sva += cpu_clflush_line_size)
1923 } else if ((cpu_feature & CPUID_CLFSH) != 0 &&
1924 eva - sva < PMAP_CLFLUSH_THRESHOLD) {
1925 if (pmap_kextract(sva) == lapic_paddr)
1928 * Writes are ordered by CLFLUSH on Intel CPUs.
1930 if (cpu_vendor_id != CPU_VENDOR_INTEL)
1932 for (; sva < eva; sva += cpu_clflush_line_size)
1934 if (cpu_vendor_id != CPU_VENDOR_INTEL)
1939 * No targeted cache flush methods are supported by CPU,
1940 * or the supplied range is bigger than 2MB.
1941 * Globally invalidate cache.
1943 pmap_invalidate_cache();
1948 * Remove the specified set of pages from the data and instruction caches.
1950 * In contrast to pmap_invalidate_cache_range(), this function does not
1951 * rely on the CPU's self-snoop feature, because it is intended for use
1952 * when moving pages into a different cache domain.
1955 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
1957 vm_offset_t daddr, eva;
1961 useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
1962 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
1963 ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
1964 pmap_invalidate_cache();
1968 else if (cpu_vendor_id != CPU_VENDOR_INTEL)
1970 for (i = 0; i < count; i++) {
1971 daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
1972 eva = daddr + PAGE_SIZE;
1973 for (; daddr < eva; daddr += cpu_clflush_line_size) {
1982 else if (cpu_vendor_id != CPU_VENDOR_INTEL)
1988 * Routine: pmap_extract
1990 * Extract the physical page address associated
1991 * with the given map/virtual_address pair.
1994 pmap_extract(pmap_t pmap, vm_offset_t va)
1998 pt_entry_t *pte, PG_V;
2002 PG_V = pmap_valid_bit(pmap);
2004 pdpe = pmap_pdpe(pmap, va);
2005 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
2006 if ((*pdpe & PG_PS) != 0)
2007 pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
2009 pde = pmap_pdpe_to_pde(pdpe, va);
2010 if ((*pde & PG_V) != 0) {
2011 if ((*pde & PG_PS) != 0) {
2012 pa = (*pde & PG_PS_FRAME) |
2015 pte = pmap_pde_to_pte(pde, va);
2016 pa = (*pte & PG_FRAME) |
2027 * Routine: pmap_extract_and_hold
2029 * Atomically extract and hold the physical page
2030 * with the given pmap and virtual address pair
2031 * if that mapping permits the given protection.
2034 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
2036 pd_entry_t pde, *pdep;
2037 pt_entry_t pte, PG_RW, PG_V;
2043 PG_RW = pmap_rw_bit(pmap);
2044 PG_V = pmap_valid_bit(pmap);
2047 pdep = pmap_pde(pmap, va);
2048 if (pdep != NULL && (pde = *pdep)) {
2050 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
2051 if (vm_page_pa_tryrelock(pmap, (pde &
2052 PG_PS_FRAME) | (va & PDRMASK), &pa))
2054 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
2059 pte = *pmap_pde_to_pte(pdep, va);
2061 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
2062 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
2065 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
2076 pmap_kextract(vm_offset_t va)
2081 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
2082 pa = DMAP_TO_PHYS(va);
2086 pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
2089 * Beware of a concurrent promotion that changes the
2090 * PDE at this point! For example, vtopte() must not
2091 * be used to access the PTE because it would use the
2092 * new PDE. It is, however, safe to use the old PDE
2093 * because the page table page is preserved by the
2096 pa = *pmap_pde_to_pte(&pde, va);
2097 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
2103 /***************************************************
2104 * Low level mapping routines.....
2105 ***************************************************/
2108 * Add a wired page to the kva.
2109 * Note: not SMP coherent.
2112 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
2117 pte_store(pte, pa | X86_PG_RW | X86_PG_V | X86_PG_G);
2120 static __inline void
2121 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
2127 cache_bits = pmap_cache_bits(kernel_pmap, mode, 0);
2128 pte_store(pte, pa | X86_PG_RW | X86_PG_V | X86_PG_G | cache_bits);
2132 * Remove a page from the kernel pagetables.
2133 * Note: not SMP coherent.
2136 pmap_kremove(vm_offset_t va)
2145 * Used to map a range of physical addresses into kernel
2146 * virtual address space.
2148 * The value passed in '*virt' is a suggested virtual address for
2149 * the mapping. Architectures which can support a direct-mapped
2150 * physical to virtual region can return the appropriate address
2151 * within that region, leaving '*virt' unchanged. Other
2152 * architectures should map the pages starting at '*virt' and
2153 * update '*virt' with the first usable address after the mapped
2157 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
2159 return PHYS_TO_DMAP(start);
2164 * Add a list of wired pages to the kva
2165 * this routine is only used for temporary
2166 * kernel mappings that do not need to have
2167 * page modification or references recorded.
2168 * Note that old mappings are simply written
2169 * over. The page *must* be wired.
2170 * Note: SMP coherent. Uses a ranged shootdown IPI.
2173 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
2175 pt_entry_t *endpte, oldpte, pa, *pte;
2181 endpte = pte + count;
2182 while (pte < endpte) {
2184 cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
2185 pa = VM_PAGE_TO_PHYS(m) | cache_bits;
2186 if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) {
2188 pte_store(pte, pa | X86_PG_G | X86_PG_RW | X86_PG_V);
2192 if (__predict_false((oldpte & X86_PG_V) != 0))
2193 pmap_invalidate_range(kernel_pmap, sva, sva + count *
2198 * This routine tears out page mappings from the
2199 * kernel -- it is meant only for temporary mappings.
2200 * Note: SMP coherent. Uses a ranged shootdown IPI.
2203 pmap_qremove(vm_offset_t sva, int count)
2208 while (count-- > 0) {
2209 KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
2213 pmap_invalidate_range(kernel_pmap, sva, va);
2216 /***************************************************
2217 * Page table page management routines.....
2218 ***************************************************/
2219 static __inline void
2220 pmap_free_zero_pages(struct spglist *free)
2225 for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
2226 SLIST_REMOVE_HEAD(free, plinks.s.ss);
2227 /* Preserve the page's PG_ZERO setting. */
2228 vm_page_free_toq(m);
2230 atomic_subtract_int(&vm_cnt.v_wire_count, count);
2234 * Schedule the specified unused page table page to be freed. Specifically,
2235 * add the page to the specified list of pages that will be released to the
2236 * physical memory manager after the TLB has been updated.
2238 static __inline void
2239 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
2240 boolean_t set_PG_ZERO)
2244 m->flags |= PG_ZERO;
2246 m->flags &= ~PG_ZERO;
2247 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
2251 * Inserts the specified page table page into the specified pmap's collection
2252 * of idle page table pages. Each of a pmap's page table pages is responsible
2253 * for mapping a distinct range of virtual addresses. The pmap's collection is
2254 * ordered by this virtual address range.
2257 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
2260 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2261 return (vm_radix_insert(&pmap->pm_root, mpte));
2265 * Removes the page table page mapping the specified virtual address from the
2266 * specified pmap's collection of idle page table pages, and returns it.
2267 * Otherwise, returns NULL if there is no page table page corresponding to the
2268 * specified virtual address.
2270 static __inline vm_page_t
2271 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
2274 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2275 return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
2279 * Decrements a page table page's wire count, which is used to record the
2280 * number of valid page table entries within the page. If the wire count
2281 * drops to zero, then the page table page is unmapped. Returns TRUE if the
2282 * page table page was unmapped and FALSE otherwise.
2284 static inline boolean_t
2285 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2289 if (m->wire_count == 0) {
2290 _pmap_unwire_ptp(pmap, va, m, free);
2297 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2300 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2302 * unmap the page table page
2304 if (m->pindex >= (NUPDE + NUPDPE)) {
2307 pml4 = pmap_pml4e(pmap, va);
2309 } else if (m->pindex >= NUPDE) {
2312 pdp = pmap_pdpe(pmap, va);
2317 pd = pmap_pde(pmap, va);
2320 pmap_resident_count_dec(pmap, 1);
2321 if (m->pindex < NUPDE) {
2322 /* We just released a PT, unhold the matching PD */
2325 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
2326 pmap_unwire_ptp(pmap, va, pdpg, free);
2328 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
2329 /* We just released a PD, unhold the matching PDP */
2332 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
2333 pmap_unwire_ptp(pmap, va, pdppg, free);
2337 * Put page on a list so that it is released after
2338 * *ALL* TLB shootdown is done
2340 pmap_add_delayed_free_list(m, free, TRUE);
2344 * After removing a page table entry, this routine is used to
2345 * conditionally free the page, and manage the hold/wire counts.
2348 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
2349 struct spglist *free)
2353 if (va >= VM_MAXUSER_ADDRESS)
2355 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
2356 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
2357 return (pmap_unwire_ptp(pmap, va, mpte, free));
2361 pmap_pinit0(pmap_t pmap)
2365 PMAP_LOCK_INIT(pmap);
2366 pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
2367 pmap->pm_cr3 = KPML4phys;
2368 pmap->pm_root.rt_root = 0;
2369 CPU_ZERO(&pmap->pm_active);
2370 TAILQ_INIT(&pmap->pm_pvchunk);
2371 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2372 pmap->pm_flags = pmap_flags;
2374 pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE;
2375 pmap->pm_pcids[i].pm_gen = 0;
2377 PCPU_SET(curpmap, kernel_pmap);
2378 pmap_activate(curthread);
2379 CPU_FILL(&kernel_pmap->pm_active);
2383 pmap_pinit_pml4(vm_page_t pml4pg)
2385 pml4_entry_t *pm_pml4;
2388 pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
2390 /* Wire in kernel global address entries. */
2391 for (i = 0; i < NKPML4E; i++) {
2392 pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
2395 for (i = 0; i < ndmpdpphys; i++) {
2396 pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
2400 /* install self-referential address mapping entry(s) */
2401 pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
2402 X86_PG_A | X86_PG_M;
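/*
 * The self-referential slot makes the page-table pages themselves
 * visible in the virtual address space: a translation that uses index
 * PML4PML4I at one step of the walk lands back on this PML4 page, so
 * the remaining steps resolve to page directory pointer, page
 * directory, or page table pages instead of ordinary data pages.  This
 * is what lets vtopte() and vtopde() compute the virtual address of a
 * mapping's PTE or PDE from "va" using only shifts and masks.
 */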
2406 * Initialize a preallocated and zeroed pmap structure,
2407 * such as one in a vmspace structure.
2410 pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
2413 vm_paddr_t pml4phys;
2417 * allocate the page directory page
2419 pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2420 VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
2422 pml4phys = VM_PAGE_TO_PHYS(pml4pg);
2423 pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(pml4phys);
2425 pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE;
2426 pmap->pm_pcids[i].pm_gen = 0;
2428 pmap->pm_cr3 = ~0; /* initialize to an invalid value */
2430 if ((pml4pg->flags & PG_ZERO) == 0)
2431 pagezero(pmap->pm_pml4);
2434 * Do not install the host kernel mappings in the nested page
2435 * tables. These mappings are meaningless in the guest physical address space.
2438 if ((pmap->pm_type = pm_type) == PT_X86) {
2439 pmap->pm_cr3 = pml4phys;
2440 pmap_pinit_pml4(pml4pg);
2443 pmap->pm_root.rt_root = 0;
2444 CPU_ZERO(&pmap->pm_active);
2445 TAILQ_INIT(&pmap->pm_pvchunk);
2446 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2447 pmap->pm_flags = flags;
2448 pmap->pm_eptgen = 0;
2454 pmap_pinit(pmap_t pmap)
2457 return (pmap_pinit_type(pmap, PT_X86, pmap_flags));
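/*
 * Ordinary process pmaps are created through this wrapper with the
 * PT_X86 type; pmaps of the other types (for example PT_EPT, used for
 * nested page tables) are created by their owners calling
 * pmap_pinit_type() directly.
 */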
2461 * This routine is called if the desired page table page does not exist.
2463 * If page table page allocation fails, this routine may sleep before
2464 * returning NULL. It sleeps only if a lock pointer was given.
2466 * Note: If a page allocation fails at page table level two or three,
2467 * one or two pages may be held during the wait, only to be released
2468 * afterwards. This conservative approach is easily argued to avoid race conditions.
2472 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
2474 vm_page_t m, pdppg, pdpg;
2475 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
2477 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2479 PG_A = pmap_accessed_bit(pmap);
2480 PG_M = pmap_modified_bit(pmap);
2481 PG_V = pmap_valid_bit(pmap);
2482 PG_RW = pmap_rw_bit(pmap);
2485 * Allocate a page table page.
2487 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
2488 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
2489 if (lockp != NULL) {
2490 RELEASE_PV_LIST_LOCK(lockp);
2492 PMAP_ASSERT_NOT_IN_DI();
2498 * Indicate the need to retry. While waiting, the page table
2499 * page may have been allocated.
2503 if ((m->flags & PG_ZERO) == 0)
2507 * Map the pagetable page into the process address space, if
2508 * it isn't already there.
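 *
 * The page table page index encodes the level of the page being
 * allocated: indices below NUPDE identify page table pages, indices in
 * [NUPDE, NUPDE + NUPDPE) identify page directory pages, and indices at
 * or above NUPDE + NUPDPE identify page directory pointer pages, as the
 * tests below reflect.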
2511 if (ptepindex >= (NUPDE + NUPDPE)) {
2513 vm_pindex_t pml4index;
2515 /* Wire up a new PDPE page */
2516 pml4index = ptepindex - (NUPDE + NUPDPE);
2517 pml4 = &pmap->pm_pml4[pml4index];
2518 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
2520 } else if (ptepindex >= NUPDE) {
2521 vm_pindex_t pml4index;
2522 vm_pindex_t pdpindex;
2526 /* Wire up a new PDE page */
2527 pdpindex = ptepindex - NUPDE;
2528 pml4index = pdpindex >> NPML4EPGSHIFT;
2530 pml4 = &pmap->pm_pml4[pml4index];
2531 if ((*pml4 & PG_V) == 0) {
2532 /* Have to allocate a new pdp, recurse */
2533 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
2536 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
2537 vm_page_free_zero(m);
2541 /* Add reference to pdp page */
2542 pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
2543 pdppg->wire_count++;
2545 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
2547 /* Now find the pdp page */
2548 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
2549 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
2552 vm_pindex_t pml4index;
2553 vm_pindex_t pdpindex;
2558 /* Wire up a new PTE page */
2559 pdpindex = ptepindex >> NPDPEPGSHIFT;
2560 pml4index = pdpindex >> NPML4EPGSHIFT;
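/*
 * Worked example, assuming 512 entries per page at each level (both
 * shifts equal to 9): a caller that passed ptepindex =
 * pmap_pde_pindex(va) for va = 0x400000000000 supplies ptepindex =
 * va >> 21 = 0x2000000, giving pdpindex = 0x10000 and pml4index = 0x80.
 */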
2562 /* First, find the pdp and check that it is valid. */
2563 pml4 = &pmap->pm_pml4[pml4index];
2564 if ((*pml4 & PG_V) == 0) {
2565 /* Have to allocate a new pd, recurse */
2566 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
2569 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
2570 vm_page_free_zero(m);
2573 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
2574 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
2576 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
2577 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
2578 if ((*pdp & PG_V) == 0) {
2579 /* Have to allocate a new pd, recurse */
2580 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
2583 atomic_subtract_int(&vm_cnt.v_wire_count,
2585 vm_page_free_zero(m);
2589 /* Add reference to the pd page */
2590 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
2594 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
2596 /* Now we know where the page directory page is */
2597 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
2598 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
2601 pmap_resident_count_inc(pmap, 1);
2607 pmap_allocpde(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
2609 vm_pindex_t pdpindex, ptepindex;
2610 pdp_entry_t *pdpe, PG_V;
2613 PG_V = pmap_valid_bit(pmap);
2616 pdpe = pmap_pdpe(pmap, va);
2617 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
2618 /* Add a reference to the pd page. */
2619 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
2622 /* Allocate a pd page. */
2623 ptepindex = pmap_pde_pindex(va);
2624 pdpindex = ptepindex >> NPDPEPGSHIFT;
2625 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
2626 if (pdpg == NULL && lockp != NULL)
2633 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
2635 vm_pindex_t ptepindex;
2636 pd_entry_t *pd, PG_V;
2639 PG_V = pmap_valid_bit(pmap);
2642 * Calculate pagetable page index
2644 ptepindex = pmap_pde_pindex(va);
2647 * Get the page directory entry
2649 pd = pmap_pde(pmap, va);
2652 * This supports switching from a 2MB page to a 4KB page.
2655 if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
2656 if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) {
2658 * Invalidation of the 2MB page mapping may have caused
2659 * the deallocation of the underlying PD page.
2666 * If the page table page is mapped, we just increment the
2667 * hold count, and activate it.
2669 if (pd != NULL && (*pd & PG_V) != 0) {
2670 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
2674 * Here if the pte page isn't mapped, or if it has been deallocated.
2677 m = _pmap_allocpte(pmap, ptepindex, lockp);
2678 if (m == NULL && lockp != NULL)
2685 /***************************************************
2686 * Pmap allocation/deallocation routines.
2687 ***************************************************/
2690 * Release any resources held by the given physical map.
2691 * Called when a pmap initialized by pmap_pinit is being released.
2692 * Should only be called if the map contains no valid mappings.
2695 pmap_release(pmap_t pmap)
2700 KASSERT(pmap->pm_stats.resident_count == 0,
2701 ("pmap_release: pmap resident count %ld != 0",
2702 pmap->pm_stats.resident_count));
2703 KASSERT(vm_radix_is_empty(&pmap->pm_root),
2704 ("pmap_release: pmap has reserved page table page(s)"));
2705 KASSERT(CPU_EMPTY(&pmap->pm_active),
2706 ("releasing active pmap %p", pmap));
2708 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4));
2710 for (i = 0; i < NKPML4E; i++) /* KVA */
2711 pmap->pm_pml4[KPML4BASE + i] = 0;
2712 for (i = 0; i < ndmpdpphys; i++)/* Direct Map */
2713 pmap->pm_pml4[DMPML4I + i] = 0;
2714 pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */
2717 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
2718 vm_page_free_zero(m);
2722 kvm_size(SYSCTL_HANDLER_ARGS)
2724 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
2726 return sysctl_handle_long(oidp, &ksize, 0, req);
2728 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
2729 0, 0, kvm_size, "LU", "Size of KVM");
2732 kvm_free(SYSCTL_HANDLER_ARGS)
2734 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
2736 return sysctl_handle_long(oidp, &kfree, 0, req);
2738 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
2739 0, 0, kvm_free, "LU", "Amount of KVM free");
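/*
 * Both values are read-only and can be inspected from userland with
 * sysctl(8), e.g. "sysctl vm.kvm_size vm.kvm_free".
 */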
2742 * grow the number of kernel page table entries, if needed
2745 pmap_growkernel(vm_offset_t addr)
2749 pd_entry_t *pde, newpdir;
2752 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
2755 * Return if "addr" is within the range of kernel page table pages
2756 * that were preallocated during pmap bootstrap. Moreover, leave
2757 * "kernel_vm_end" and the kernel page table as they were.
2759 * The correctness of this action is based on the following
2760 * argument: vm_map_insert() allocates contiguous ranges of the
2761 * kernel virtual address space. It calls this function if a range
2762 * ends after "kernel_vm_end". If the kernel is mapped between
2763 * "kernel_vm_end" and "addr", then the range cannot begin at
2764 * "kernel_vm_end". In fact, its beginning address cannot be less
2765 * than the kernel. Thus, there is no immediate need to allocate
2766 * any new kernel page table pages between "kernel_vm_end" and
2769 if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR)
2772 addr = roundup2(addr, NBPDR);
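/*
 * From here on the kernel page table grows in NBPDR (2MB) steps: each
 * iteration of the loop below either installs a missing page directory
 * page behind an invalid PDP entry, or allocates one new page table
 * page, stores it in the PDE, and advances kernel_vm_end by one
 * 2MB-aligned step.
 */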
2773 if (addr - 1 >= kernel_map->max_offset)
2774 addr = kernel_map->max_offset;
2775 while (kernel_vm_end < addr) {
2776 pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
2777 if ((*pdpe & X86_PG_V) == 0) {
2778 /* We need a new PDP entry */
2779 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
2780 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
2781 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2783 panic("pmap_growkernel: no memory to grow kernel");
2784 if ((nkpg->flags & PG_ZERO) == 0)
2785 pmap_zero_page(nkpg);
2786 paddr = VM_PAGE_TO_PHYS(nkpg);
2787 *pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
2788 X86_PG_A | X86_PG_M);
2789 continue; /* try again */
2791 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
2792 if ((*pde & X86_PG_V) != 0) {
2793 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
2794 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
2795 kernel_vm_end = kernel_map->max_offset;
2801 nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
2802 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2805 panic("pmap_growkernel: no memory to grow kernel");
2806 if ((nkpg->flags & PG_ZERO) == 0)
2807 pmap_zero_page(nkpg);
2808 paddr = VM_PAGE_TO_PHYS(nkpg);
2809 newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
2810 pde_store(pde, newpdir);
2812 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
2813 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
2814 kernel_vm_end = kernel_map->max_offset;
2821 /***************************************************
2822 * page management routines.
2823 ***************************************************/
2825 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
2826 CTASSERT(_NPCM == 3);
2827 CTASSERT(_NPCPV == 168);
2829 static __inline struct pv_chunk *
2830 pv_to_chunk(pv_entry_t pv)
2833 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
2836 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
2838 #define PC_FREE0 0xfffffffffffffffful
2839 #define PC_FREE1 0xfffffffffffffffful
2840 #define PC_FREE2 0x000000fffffffffful
2842 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
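/*
 * A pv_chunk occupies exactly one page (see the CTASSERTs above): after
 * the chunk header (roughly an 8-byte pmap pointer, two 16-byte list
 * links, and the three 8-byte map words, i.e. 64 bytes), 4032 bytes
 * remain, which at 24 bytes per pv entry is 168 entries.  That is why
 * _NPCPV is 168 and why PC_FREE2 has only its low 40 bits set
 * (64 + 64 + 40 = 168).
 */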
2845 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
2847 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
2848 "Current number of pv entry chunks");
2849 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
2850 "Current number of pv entry chunks allocated");
2851 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
2852 "Current number of pv entry chunks frees");
2853 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
2854 "Number of times tried to get a chunk page but failed.");
2856 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
2857 static int pv_entry_spare;
2859 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
2860 "Current number of pv entry frees");
2861 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
2862 "Current number of pv entry allocs");
2863 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
2864 "Current number of pv entries");
2865 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
2866 "Current number of spare pv entries");
2870 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di)
2875 pmap_invalidate_all(pmap);
2876 if (pmap != locked_pmap)
2879 pmap_delayed_invl_finished();
2883 * We are in a serious low memory condition. Resort to
2884 * drastic measures to free some pages so we can allocate
2885 * another pv entry chunk.
2887 * Returns NULL if PV entries were reclaimed from the specified pmap.
2889 * We do not, however, unmap 2mpages because subsequent accesses will
2890 * allocate per-page pv entries until repromotion occurs, thereby
2891 * exacerbating the shortage of free pv entries.
2894 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
2896 struct pv_chunk *pc, *pc_marker, *pc_marker_end;
2897 struct pv_chunk_header pc_marker_b, pc_marker_end_b;
2898 struct md_page *pvh;
2900 pmap_t next_pmap, pmap;
2901 pt_entry_t *pte, tpte;
2902 pt_entry_t PG_G, PG_A, PG_M, PG_RW;
2906 struct spglist free;
2908 int bit, field, freed;
2910 static int active_reclaims = 0;
2912 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
2913 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
2916 PG_G = PG_A = PG_M = PG_RW = 0;
2918 bzero(&pc_marker_b, sizeof(pc_marker_b));
2919 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
2920 pc_marker = (struct pv_chunk *)&pc_marker_b;
2921 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
2924 * A delayed invalidation block should already be active if
2925 * pmap_advise() or pmap_remove() called this function by way
2926 * of pmap_demote_pde_locked().
2928 start_di = pmap_not_in_di();
2930 mtx_lock(&pv_chunks_mutex);
2932 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
2933 TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
2934 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
2935 SLIST_EMPTY(&free)) {
2936 next_pmap = pc->pc_pmap;
2937 if (next_pmap == NULL) {
2939 * The next chunk is a marker. However, it is
2940 * not our marker, so active_reclaims must be
2941 * > 1. Consequently, the next_chunk code
2942 * will not rotate the pv_chunks list.
2946 mtx_unlock(&pv_chunks_mutex);
2949 * A pv_chunk can only be removed from the pc_lru list
2950 * when both pv_chunks_mutex is owned and the
2951 * corresponding pmap is locked.
2953 if (pmap != next_pmap) {
2954 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
2957 /* Avoid deadlock and lock recursion. */
2958 if (pmap > locked_pmap) {
2959 RELEASE_PV_LIST_LOCK(lockp);
2962 pmap_delayed_invl_started();
2963 mtx_lock(&pv_chunks_mutex);
2965 } else if (pmap != locked_pmap) {
2966 if (PMAP_TRYLOCK(pmap)) {
2968 pmap_delayed_invl_started();
2969 mtx_lock(&pv_chunks_mutex);
2972 pmap = NULL; /* pmap is not locked */
2973 mtx_lock(&pv_chunks_mutex);
2974 pc = TAILQ_NEXT(pc_marker, pc_lru);
2976 pc->pc_pmap != next_pmap)
2980 } else if (start_di)
2981 pmap_delayed_invl_started();
2982 PG_G = pmap_global_bit(pmap);
2983 PG_A = pmap_accessed_bit(pmap);
2984 PG_M = pmap_modified_bit(pmap);
2985 PG_RW = pmap_rw_bit(pmap);
2989 * Destroy every non-wired, 4 KB page mapping in the chunk.
2992 for (field = 0; field < _NPCM; field++) {
2993 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2994 inuse != 0; inuse &= ~(1UL << bit)) {
2996 pv = &pc->pc_pventry[field * 64 + bit];
2998 pde = pmap_pde(pmap, va);
2999 if ((*pde & PG_PS) != 0)
3001 pte = pmap_pde_to_pte(pde, va);
3002 if ((*pte & PG_W) != 0)
3004 tpte = pte_load_clear(pte);
3005 if ((tpte & PG_G) != 0)
3006 pmap_invalidate_page(pmap, va);
3007 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
3008 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3010 if ((tpte & PG_A) != 0)
3011 vm_page_aflag_set(m, PGA_REFERENCED);
3012 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3013 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3015 if (TAILQ_EMPTY(&m->md.pv_list) &&
3016 (m->flags & PG_FICTITIOUS) == 0) {
3017 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3018 if (TAILQ_EMPTY(&pvh->pv_list)) {
3019 vm_page_aflag_clear(m,
3023 pmap_delayed_invl_page(m);
3024 pc->pc_map[field] |= 1UL << bit;
3025 pmap_unuse_pt(pmap, va, *pde, &free);
3030 mtx_lock(&pv_chunks_mutex);
3033 /* Every freed mapping is for a 4 KB page. */
3034 pmap_resident_count_dec(pmap, freed);
3035 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
3036 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
3037 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
3038 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3039 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
3040 pc->pc_map[2] == PC_FREE2) {
3041 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
3042 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
3043 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
3044 /* Entire chunk is free; return it. */
3045 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
3046 dump_drop_page(m_pc->phys_addr);
3047 mtx_lock(&pv_chunks_mutex);
3048 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
3051 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3052 mtx_lock(&pv_chunks_mutex);
3053 /* One freed pv entry in locked_pmap is sufficient. */
3054 if (pmap == locked_pmap)
3057 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
3058 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
3059 if (active_reclaims == 1 && pmap != NULL) {
3061 * Rotate the pv chunks list so that we do not
3062 * scan the same pv chunks that could not be
3063 * freed (because they contained a wired
3064 * and/or superpage mapping) on every
3065 * invocation of reclaim_pv_chunk().
3067 while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
3068 MPASS(pc->pc_pmap != NULL);
3069 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
3070 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
3074 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
3075 TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
3077 mtx_unlock(&pv_chunks_mutex);
3078 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
3079 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
3080 m_pc = SLIST_FIRST(&free);
3081 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
3082 /* Recycle a freed page table page. */
3083 m_pc->wire_count = 1;
3085 pmap_free_zero_pages(&free);
3090 * free the pv_entry back to the free list
3093 free_pv_entry(pmap_t pmap, pv_entry_t pv)
3095 struct pv_chunk *pc;
3096 int idx, field, bit;
3098 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3099 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
3100 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
3101 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
3102 pc = pv_to_chunk(pv);
3103 idx = pv - &pc->pc_pventry[0];
3106 pc->pc_map[field] |= 1ul << bit;
3107 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
3108 pc->pc_map[2] != PC_FREE2) {
3109 /* 98% of the time, pc is already at the head of the list. */
3110 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
3111 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3112 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3116 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3121 free_pv_chunk(struct pv_chunk *pc)
3125 mtx_lock(&pv_chunks_mutex);
3126 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
3127 mtx_unlock(&pv_chunks_mutex);
3128 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
3129 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
3130 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
3131 /* Entire chunk is free; return it. */
3132 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
3133 dump_drop_page(m->phys_addr);
3134 vm_page_unwire(m, PQ_NONE);
3139 * Returns a new PV entry, allocating a new PV chunk from the system when
3140 * needed. If this PV chunk allocation fails and a PV list lock pointer was
3141 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
3144 * The given PV list lock may be released.
3147 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
3151 struct pv_chunk *pc;
3154 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3155 PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
3157 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3159 for (field = 0; field < _NPCM; field++) {
3160 if (pc->pc_map[field]) {
3161 bit = bsfq(pc->pc_map[field]);
3165 if (field < _NPCM) {
3166 pv = &pc->pc_pventry[field * 64 + bit];
3167 pc->pc_map[field] &= ~(1ul << bit);
3168 /* If this was the last item, move it to tail */
3169 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
3170 pc->pc_map[2] == 0) {
3171 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3172 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
3175 PV_STAT(atomic_add_long(&pv_entry_count, 1));
3176 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
3180 /* No free items, allocate another chunk */
3181 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
3184 if (lockp == NULL) {
3185 PV_STAT(pc_chunk_tryfail++);
3188 m = reclaim_pv_chunk(pmap, lockp);
3192 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
3193 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
3194 dump_add_page(m->phys_addr);
3195 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
3197 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
3198 pc->pc_map[1] = PC_FREE1;
3199 pc->pc_map[2] = PC_FREE2;
3200 mtx_lock(&pv_chunks_mutex);
3201 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
3202 mtx_unlock(&pv_chunks_mutex);
3203 pv = &pc->pc_pventry[0];
3204 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3205 PV_STAT(atomic_add_long(&pv_entry_count, 1));
3206 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
3211 * Returns the number of one bits within the given PV chunk map.
3213 * The errata for Intel processors state that "POPCNT Instruction May
3214 * Take Longer to Execute Than Expected". It is believed that the
3215 * issue is the spurious dependency on the destination register.
3216 * Provide a hint to the register rename logic that the destination
3217 * value is overwritten, by clearing it, as suggested in the
3218 * optimization manual. It should be cheap for unaffected processors
3221 * Reference numbers for the errata are
3222 * 4th Gen Core: HSD146
3223 * 5th Gen Core: BDM85
3224 * 6th Gen Core: SKL029
3227 popcnt_pc_map_pq(uint64_t *map)
3231 __asm __volatile("xorl %k0,%k0;popcntq %2,%0;"
3232 "xorl %k1,%k1;popcntq %3,%1;addl %k1,%k0;"
3233 "xorl %k1,%k1;popcntq %4,%1;addl %k1,%k0"
3234 : "=&r" (result), "=&r" (tmp)
3235 : "m" (map[0]), "m" (map[1]), "m" (map[2]));
3240 * Ensure that the number of spare PV entries in the specified pmap meets or
3241 * exceeds the given count, "needed".
3243 * The given PV list lock may be released.
3246 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
3248 struct pch new_tail;
3249 struct pv_chunk *pc;
3253 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3254 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
3257 * Newly allocated PV chunks must be stored in a private list until
3258 * the required number of PV chunks have been allocated. Otherwise,
3259 * reclaim_pv_chunk() could recycle one of these chunks. In
3260 * contrast, these chunks must be added to the pmap upon allocation.
3262 TAILQ_INIT(&new_tail);
3265 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
3267 if ((cpu_feature2 & CPUID2_POPCNT) == 0)
3268 bit_count((bitstr_t *)pc->pc_map, 0,
3269 sizeof(pc->pc_map) * NBBY, &free);
3272 free = popcnt_pc_map_pq(pc->pc_map);
3276 if (avail >= needed)
3279 for (; avail < needed; avail += _NPCPV) {
3280 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
3283 m = reclaim_pv_chunk(pmap, lockp);
3287 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
3288 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
3289 dump_add_page(m->phys_addr);
3290 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
3292 pc->pc_map[0] = PC_FREE0;
3293 pc->pc_map[1] = PC_FREE1;
3294 pc->pc_map[2] = PC_FREE2;
3295 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3296 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
3297 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
3299 if (!TAILQ_EMPTY(&new_tail)) {
3300 mtx_lock(&pv_chunks_mutex);
3301 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
3302 mtx_unlock(&pv_chunks_mutex);
3307 * First find and then remove the pv entry for the specified pmap and virtual
3308 * address from the specified pv list. Returns the pv entry if found and NULL
3309 * otherwise. This operation can be performed on pv lists for either 4KB or
3310 * 2MB page mappings.
3312 static __inline pv_entry_t
3313 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3317 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3318 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
3319 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
3328 * After demotion from a 2MB page mapping to 512 4KB page mappings,
3329 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
3330 * entries for each of the 4KB page mappings.
3333 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3334 struct rwlock **lockp)
3336 struct md_page *pvh;
3337 struct pv_chunk *pc;
3339 vm_offset_t va_last;
3343 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3344 KASSERT((pa & PDRMASK) == 0,
3345 ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
3346 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3349 * Transfer the 2mpage's pv entry for this mapping to the first
3350 * page's pv list. Once this transfer begins, the pv list lock
3351 * must not be released until the last pv entry is reinstantiated.
3353 pvh = pa_to_pvh(pa);
3354 va = trunc_2mpage(va);
3355 pv = pmap_pvh_remove(pvh, pmap, va);
3356 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
3357 m = PHYS_TO_VM_PAGE(pa);
3358 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3360 /* Instantiate the remaining NPTEPG - 1 pv entries. */
3361 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
3362 va_last = va + NBPDR - PAGE_SIZE;
3364 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3365 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
3366 pc->pc_map[2] != 0, ("pmap_pv_demote_pde: missing spare"));
3367 for (field = 0; field < _NPCM; field++) {
3368 while (pc->pc_map[field]) {
3369 bit = bsfq(pc->pc_map[field]);
3370 pc->pc_map[field] &= ~(1ul << bit);
3371 pv = &pc->pc_pventry[field * 64 + bit];
3375 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3376 ("pmap_pv_demote_pde: page %p is not managed", m));
3377 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3383 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3384 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3387 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
3388 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3389 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3391 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
3392 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
3395 #if VM_NRESERVLEVEL > 0
3397 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
3398 * replace the many pv entries for the 4KB page mappings by a single pv entry
3399 * for the 2MB page mapping.
3402 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3403 struct rwlock **lockp)
3405 struct md_page *pvh;
3407 vm_offset_t va_last;
3410 KASSERT((pa & PDRMASK) == 0,
3411 ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
3412 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3415 * Transfer the first page's pv entry for this mapping to the 2mpage's
3416 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
3417 * a transfer avoids the possibility that get_pv_entry() calls
3418 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
3419 * mappings that is being promoted.
3421 m = PHYS_TO_VM_PAGE(pa);
3422 va = trunc_2mpage(va);
3423 pv = pmap_pvh_remove(&m->md, pmap, va);
3424 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
3425 pvh = pa_to_pvh(pa);
3426 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3428 /* Free the remaining NPTEPG - 1 pv entries. */
3429 va_last = va + NBPDR - PAGE_SIZE;
3433 pmap_pvh_free(&m->md, pmap, va);
3434 } while (va < va_last);
3436 #endif /* VM_NRESERVLEVEL > 0 */
3439 * First find and then destroy the pv entry for the specified pmap and virtual
3440 * address. This operation can be performed on pv lists for either 4KB or 2MB
3444 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3448 pv = pmap_pvh_remove(pvh, pmap, va);
3449 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
3450 free_pv_entry(pmap, pv);
3454 * Conditionally create the PV entry for a 4KB page mapping if the required
3455 * memory can be allocated without resorting to reclamation.
3458 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
3459 struct rwlock **lockp)
3463 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3464 /* Pass NULL instead of the lock pointer to disable reclamation. */
3465 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
3467 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3468 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3476 * Create the PV entry for a 2MB page mapping. Always returns true unless the
3477 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
3478 * false if the PV entry cannot be allocated without resorting to reclamation.
3481 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags,
3482 struct rwlock **lockp)
3484 struct md_page *pvh;
3488 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3489 /* Pass NULL instead of the lock pointer to disable reclamation. */
3490 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
3491 NULL : lockp)) == NULL)
3494 pa = pde & PG_PS_FRAME;
3495 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3496 pvh = pa_to_pvh(pa);
3497 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3503 * Fills a page table page with mappings to consecutive physical pages.
3506 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
3510 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
3512 newpte += PAGE_SIZE;
3517 * Tries to demote a 2MB page mapping. If demotion fails, the 2MB page
3518 * mapping is invalidated.
3521 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
3523 struct rwlock *lock;
3527 rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
3534 pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
3535 struct rwlock **lockp)
3537 pd_entry_t newpde, oldpde;
3538 pt_entry_t *firstpte, newpte;
3539 pt_entry_t PG_A, PG_G, PG_M, PG_RW, PG_V;
3542 struct spglist free;
3546 PG_G = pmap_global_bit(pmap);
3547 PG_A = pmap_accessed_bit(pmap);
3548 PG_M = pmap_modified_bit(pmap);
3549 PG_RW = pmap_rw_bit(pmap);
3550 PG_V = pmap_valid_bit(pmap);
3551 PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
3553 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3555 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
3556 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
3557 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
3559 KASSERT((oldpde & PG_W) == 0,
3560 ("pmap_demote_pde: page table page for a wired mapping"
3564 * Invalidate the 2MB page mapping and return "failure" if the
3565 * mapping was never accessed or the allocation of the new
3566 * page table page fails. If the 2MB page mapping belongs to
3567 * the direct map region of the kernel's address space, then
3568 * the page allocation request specifies the highest possible
3569 * priority (VM_ALLOC_INTERRUPT). Otherwise, the priority is
3570 * normal. Page table pages are preallocated for every other
3571 * part of the kernel address space, so the direct map region
3572 * is the only part of the kernel address space that must be handled here.
3575 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
3576 pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
3577 DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
3578 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
3580 sva = trunc_2mpage(va);
3581 pmap_remove_pde(pmap, pde, sva, &free, lockp);
3582 if ((oldpde & PG_G) == 0)
3583 pmap_invalidate_pde_page(pmap, sva, oldpde);
3584 pmap_free_zero_pages(&free);
3585 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
3586 " in pmap %p", va, pmap);
3589 if (va < VM_MAXUSER_ADDRESS)
3590 pmap_resident_count_inc(pmap, 1);
3592 mptepa = VM_PAGE_TO_PHYS(mpte);
3593 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
3594 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
3595 KASSERT((oldpde & PG_A) != 0,
3596 ("pmap_demote_pde: oldpde is missing PG_A"));
3597 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
3598 ("pmap_demote_pde: oldpde is missing PG_M"));
3599 newpte = oldpde & ~PG_PS;
3600 newpte = pmap_swap_pat(pmap, newpte);
3603 * If the page table page is new, initialize it.
3605 if (mpte->wire_count == 1) {
3606 mpte->wire_count = NPTEPG;
3607 pmap_fill_ptp(firstpte, newpte);
3609 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
3610 ("pmap_demote_pde: firstpte and newpte map different physical"
3614 * If the mapping has changed attributes, update the page table
3617 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
3618 pmap_fill_ptp(firstpte, newpte);
3621 * The spare PV entries must be reserved prior to demoting the
3622 * mapping, that is, prior to changing the PDE. Otherwise, the state
3623 * of the PDE and the PV lists will be inconsistent, which can result
3624 * in reclaim_pv_chunk() attempting to remove a PV entry from the
3625 * wrong PV list and pmap_pv_demote_pde() failing to find the expected
3626 * PV entry for the 2MB page mapping that is being demoted.
3628 if ((oldpde & PG_MANAGED) != 0)
3629 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
3632 * Demote the mapping. This pmap is locked. The old PDE has
3633 * PG_A set. If the old PDE has PG_RW set, it also has PG_M
3634 * set. Thus, there is no danger of a race with another
3635 * processor changing the setting of PG_A and/or PG_M between
3636 * the read above and the store below.
3638 if (workaround_erratum383)
3639 pmap_update_pde(pmap, va, pde, newpde);
3641 pde_store(pde, newpde);
3644 * Invalidate a stale recursive mapping of the page table page.
3646 if (va >= VM_MAXUSER_ADDRESS)
3647 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
3650 * Demote the PV entry.
3652 if ((oldpde & PG_MANAGED) != 0)
3653 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp);
3655 atomic_add_long(&pmap_pde_demotions, 1);
3656 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx"
3657 " in pmap %p", va, pmap);
3662 * pmap_remove_kernel_pde: Remove a kernel superpage mapping.
3665 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
3671 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
3672 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3673 mpte = pmap_remove_pt_page(pmap, va);
3675 panic("pmap_remove_kernel_pde: Missing pt page.");
3677 mptepa = VM_PAGE_TO_PHYS(mpte);
3678 newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
3681 * Initialize the page table page.
3683 pagezero((void *)PHYS_TO_DMAP(mptepa));
3686 * Demote the mapping.
3688 if (workaround_erratum383)
3689 pmap_update_pde(pmap, va, pde, newpde);
3691 pde_store(pde, newpde);
3694 * Invalidate a stale recursive mapping of the page table page.
3696 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
3700 * pmap_remove_pde: unmap a 2MB superpage from a process's address space
3703 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
3704 struct spglist *free, struct rwlock **lockp)
3706 struct md_page *pvh;
3708 vm_offset_t eva, va;
3710 pt_entry_t PG_G, PG_A, PG_M, PG_RW;
3712 PG_G = pmap_global_bit(pmap);
3713 PG_A = pmap_accessed_bit(pmap);
3714 PG_M = pmap_modified_bit(pmap);
3715 PG_RW = pmap_rw_bit(pmap);
3717 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3718 KASSERT((sva & PDRMASK) == 0,
3719 ("pmap_remove_pde: sva is not 2mpage aligned"));
3720 oldpde = pte_load_clear(pdq);
3722 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
3723 if ((oldpde & PG_G) != 0)
3724 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
3725 pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
3726 if (oldpde & PG_MANAGED) {
3727 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
3728 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
3729 pmap_pvh_free(pvh, pmap, sva);
3731 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
3732 va < eva; va += PAGE_SIZE, m++) {
3733 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
3736 vm_page_aflag_set(m, PGA_REFERENCED);
3737 if (TAILQ_EMPTY(&m->md.pv_list) &&
3738 TAILQ_EMPTY(&pvh->pv_list))
3739 vm_page_aflag_clear(m, PGA_WRITEABLE);
3740 pmap_delayed_invl_page(m);
3743 if (pmap == kernel_pmap) {
3744 pmap_remove_kernel_pde(pmap, pdq, sva);
3746 mpte = pmap_remove_pt_page(pmap, sva);
3748 pmap_resident_count_dec(pmap, 1);
3749 KASSERT(mpte->wire_count == NPTEPG,
3750 ("pmap_remove_pde: pte page wire count error"));
3751 mpte->wire_count = 0;
3752 pmap_add_delayed_free_list(mpte, free, FALSE);
3755 return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
3759 * pmap_remove_pte: unmap a single 4KB page from a process's address space
3762 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
3763 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
3765 struct md_page *pvh;
3766 pt_entry_t oldpte, PG_A, PG_M, PG_RW;
3769 PG_A = pmap_accessed_bit(pmap);
3770 PG_M = pmap_modified_bit(pmap);
3771 PG_RW = pmap_rw_bit(pmap);
3773 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3774 oldpte = pte_load_clear(ptq);
3776 pmap->pm_stats.wired_count -= 1;
3777 pmap_resident_count_dec(pmap, 1);
3778 if (oldpte & PG_MANAGED) {
3779 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
3780 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3783 vm_page_aflag_set(m, PGA_REFERENCED);
3784 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3785 pmap_pvh_free(&m->md, pmap, va);
3786 if (TAILQ_EMPTY(&m->md.pv_list) &&
3787 (m->flags & PG_FICTITIOUS) == 0) {
3788 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3789 if (TAILQ_EMPTY(&pvh->pv_list))
3790 vm_page_aflag_clear(m, PGA_WRITEABLE);
3792 pmap_delayed_invl_page(m);
3794 return (pmap_unuse_pt(pmap, va, ptepde, free));
3798 * Remove a single page from a process address space
3801 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
3802 struct spglist *free)
3804 struct rwlock *lock;
3805 pt_entry_t *pte, PG_V;
3807 PG_V = pmap_valid_bit(pmap);
3808 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3809 if ((*pde & PG_V) == 0)
3811 pte = pmap_pde_to_pte(pde, va);
3812 if ((*pte & PG_V) == 0)
3815 pmap_remove_pte(pmap, pte, va, *pde, free, &lock);
3818 pmap_invalidate_page(pmap, va);
3822 * Removes the specified range of addresses from the page table page.
3825 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
3826 pd_entry_t *pde, struct spglist *free, struct rwlock **lockp)
3828 pt_entry_t PG_G, *pte;
3832 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3833 PG_G = pmap_global_bit(pmap);
3836 for (pte = pmap_pde_to_pte(pde, sva); sva != eva; pte++,
3840 pmap_invalidate_range(pmap, va, sva);
3845 if ((*pte & PG_G) == 0)
3849 if (pmap_remove_pte(pmap, pte, sva, *pde, free, lockp)) {
3855 pmap_invalidate_range(pmap, va, sva);
3860 * Remove the given range of addresses from the specified map.
3862 * It is assumed that the start and end are properly
3863 * rounded to the page size.
3866 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3868 struct rwlock *lock;
3869 vm_offset_t va_next;
3870 pml4_entry_t *pml4e;
3872 pd_entry_t ptpaddr, *pde;
3873 pt_entry_t PG_G, PG_V;
3874 struct spglist free;
3877 PG_G = pmap_global_bit(pmap);
3878 PG_V = pmap_valid_bit(pmap);
3881 * Perform an unsynchronized read. This is, however, safe.
3883 if (pmap->pm_stats.resident_count == 0)
3889 pmap_delayed_invl_started();
3893 * Special handling for removing a single page: a very
3894 * common operation that is easy to short-circuit.
3897 if (sva + PAGE_SIZE == eva) {
3898 pde = pmap_pde(pmap, sva);
3899 if (pde && (*pde & PG_PS) == 0) {
3900 pmap_remove_page(pmap, sva, pde, &free);
3906 for (; sva < eva; sva = va_next) {
3908 if (pmap->pm_stats.resident_count == 0)
3911 pml4e = pmap_pml4e(pmap, sva);
3912 if ((*pml4e & PG_V) == 0) {
3913 va_next = (sva + NBPML4) & ~PML4MASK;
3919 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
3920 if ((*pdpe & PG_V) == 0) {
3921 va_next = (sva + NBPDP) & ~PDPMASK;
3928 * Calculate index for next page table.
3930 va_next = (sva + NBPDR) & ~PDRMASK;
3934 pde = pmap_pdpe_to_pde(pdpe, sva);
3938 * Weed out invalid mappings.
3944 * Check for large page.
3946 if ((ptpaddr & PG_PS) != 0) {
3948 * Are we removing the entire large page? If not,
3949 * demote the mapping and fall through.
3951 if (sva + NBPDR == va_next && eva >= va_next) {
3953 * The TLB entry for a PG_G mapping is
3954 * invalidated by pmap_remove_pde().
3956 if ((ptpaddr & PG_G) == 0)
3958 pmap_remove_pde(pmap, pde, sva, &free, &lock);
3960 } else if (!pmap_demote_pde_locked(pmap, pde, sva,
3962 /* The large page mapping was destroyed. */
3969 * Limit our scan to either the end of the va represented
3970 * by the current page table page, or to the end of the
3971 * range being removed.
3976 if (pmap_remove_ptes(pmap, sva, va_next, pde, &free, &lock))
3983 pmap_invalidate_all(pmap);
3985 pmap_delayed_invl_finished();
3986 pmap_free_zero_pages(&free);
3990 * Routine: pmap_remove_all
3992 * Removes this physical page from
3993 * all physical maps in which it resides.
3994 * Reflects back modify bits to the pager.
3997 * Original versions of this routine were very
3998 * inefficient because they iteratively called
3999 * pmap_remove (slow...)
4003 pmap_remove_all(vm_page_t m)
4005 struct md_page *pvh;
4008 struct rwlock *lock;
4009 pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW;
4012 struct spglist free;
4013 int pvh_gen, md_gen;
4015 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4016 ("pmap_remove_all: page %p is not managed", m));
4018 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4019 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
4020 pa_to_pvh(VM_PAGE_TO_PHYS(m));
4023 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
4025 if (!PMAP_TRYLOCK(pmap)) {
4026 pvh_gen = pvh->pv_gen;
4030 if (pvh_gen != pvh->pv_gen) {
4037 pde = pmap_pde(pmap, va);
4038 (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
4041 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4043 if (!PMAP_TRYLOCK(pmap)) {
4044 pvh_gen = pvh->pv_gen;
4045 md_gen = m->md.pv_gen;
4049 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4055 PG_A = pmap_accessed_bit(pmap);
4056 PG_M = pmap_modified_bit(pmap);
4057 PG_RW = pmap_rw_bit(pmap);
4058 pmap_resident_count_dec(pmap, 1);
4059 pde = pmap_pde(pmap, pv->pv_va);
4060 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
4061 " a 2mpage in page %p's pv list", m));
4062 pte = pmap_pde_to_pte(pde, pv->pv_va);
4063 tpte = pte_load_clear(pte);
4065 pmap->pm_stats.wired_count--;
4067 vm_page_aflag_set(m, PGA_REFERENCED);
4070 * Update the vm_page_t clean and reference bits.
4072 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
4074 pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
4075 pmap_invalidate_page(pmap, pv->pv_va);
4076 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4078 free_pv_entry(pmap, pv);
4081 vm_page_aflag_clear(m, PGA_WRITEABLE);
4083 pmap_delayed_invl_wait(m);
4084 pmap_free_zero_pages(&free);
4088 * pmap_protect_pde: apply the requested protection to a 2mpage mapping in a process
4091 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
4093 pd_entry_t newpde, oldpde;
4094 vm_offset_t eva, va;
4096 boolean_t anychanged;
4097 pt_entry_t PG_G, PG_M, PG_RW;
4099 PG_G = pmap_global_bit(pmap);
4100 PG_M = pmap_modified_bit(pmap);
4101 PG_RW = pmap_rw_bit(pmap);
4103 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4104 KASSERT((sva & PDRMASK) == 0,
4105 ("pmap_protect_pde: sva is not 2mpage aligned"));
4108 oldpde = newpde = *pde;
4109 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
4110 (PG_MANAGED | PG_M | PG_RW)) {
4112 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4113 va < eva; va += PAGE_SIZE, m++)
4116 if ((prot & VM_PROT_WRITE) == 0)
4117 newpde &= ~(PG_RW | PG_M);
4118 if ((prot & VM_PROT_EXECUTE) == 0)
4120 if (newpde != oldpde) {
4122 * As an optimization to future operations on this PDE, clear
4123 * PG_PROMOTED. The impending invalidation will remove any
4124 * lingering 4KB page mappings from the TLB.
4126 if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
4128 if ((oldpde & PG_G) != 0)
4129 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
4133 return (anychanged);
4137 * Set the physical protection on the
4138 * specified range of this map as requested.
4141 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4143 vm_offset_t va_next;
4144 pml4_entry_t *pml4e;
4146 pd_entry_t ptpaddr, *pde;
4147 pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V;
4148 boolean_t anychanged;
4150 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4151 if (prot == VM_PROT_NONE) {
4152 pmap_remove(pmap, sva, eva);
4156 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
4157 (VM_PROT_WRITE|VM_PROT_EXECUTE))
4160 PG_G = pmap_global_bit(pmap);
4161 PG_M = pmap_modified_bit(pmap);
4162 PG_V = pmap_valid_bit(pmap);
4163 PG_RW = pmap_rw_bit(pmap);
4167 * Although this function delays and batches the invalidation
4168 * of stale TLB entries, it does not need to call
4169 * pmap_delayed_invl_started() and
4170 * pmap_delayed_invl_finished(), because it does not
4171 * ordinarily destroy mappings. Stale TLB entries from
4172 * protection-only changes need only be invalidated before the
4173 * pmap lock is released, because protection-only changes do
4174 * not destroy PV entries. Even operations that iterate over
4175 * a physical page's PV list of mappings, like
4176 * pmap_remove_write(), acquire the pmap lock for each
4177 * mapping. Consequently, for protection-only changes, the
4178 * pmap lock suffices to synchronize both page table and TLB
4181 * This function only destroys a mapping if pmap_demote_pde()
4182 * fails. In that case, stale TLB entries are immediately
4187 for (; sva < eva; sva = va_next) {
4189 pml4e = pmap_pml4e(pmap, sva);
4190 if ((*pml4e & PG_V) == 0) {
4191 va_next = (sva + NBPML4) & ~PML4MASK;
4197 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
4198 if ((*pdpe & PG_V) == 0) {
4199 va_next = (sva + NBPDP) & ~PDPMASK;
4205 va_next = (sva + NBPDR) & ~PDRMASK;
4209 pde = pmap_pdpe_to_pde(pdpe, sva);
4213 * Weed out invalid mappings.
4219 * Check for large page.
4221 if ((ptpaddr & PG_PS) != 0) {
4223 * Are we protecting the entire large page? If not,
4224 * demote the mapping and fall through.
4226 if (sva + NBPDR == va_next && eva >= va_next) {
4228 * The TLB entry for a PG_G mapping is
4229 * invalidated by pmap_protect_pde().
4231 if (pmap_protect_pde(pmap, pde, sva, prot))
4234 } else if (!pmap_demote_pde(pmap, pde, sva)) {
4236 * The large page mapping was destroyed.
4245 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
4247 pt_entry_t obits, pbits;
4251 obits = pbits = *pte;
4252 if ((pbits & PG_V) == 0)
4255 if ((prot & VM_PROT_WRITE) == 0) {
4256 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
4257 (PG_MANAGED | PG_M | PG_RW)) {
4258 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
4261 pbits &= ~(PG_RW | PG_M);
4263 if ((prot & VM_PROT_EXECUTE) == 0)
4266 if (pbits != obits) {
4267 if (!atomic_cmpset_long(pte, obits, pbits))
4270 pmap_invalidate_page(pmap, sva);
4277 pmap_invalidate_all(pmap);
4281 #if VM_NRESERVLEVEL > 0
4283 * Tries to promote the 512, contiguous 4KB page mappings that are within a
4284 * single page table page (PTP) to a single 2MB page mapping. For promotion
4285 * to occur, two conditions must be met: (1) the 4KB page mappings must map
4286 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
4287 * identical characteristics.
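 * In practice "identical characteristics" means that all 512 PTEs agree
 * in the attribute bits selected by PG_PTE_PROMOTE, that each PTE is
 * valid, has been accessed, and maps the expected 4KB physical page,
 * and that a PTE which is writeable but not yet modified has its PG_RW
 * bit cleared first, which, as noted below, is safe to do without a TLB
 * invalidation.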
4290 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
4291 struct rwlock **lockp)
4294 pt_entry_t *firstpte, oldpte, pa, *pte;
4295 pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V;
4299 PG_A = pmap_accessed_bit(pmap);
4300 PG_G = pmap_global_bit(pmap);
4301 PG_M = pmap_modified_bit(pmap);
4302 PG_V = pmap_valid_bit(pmap);
4303 PG_RW = pmap_rw_bit(pmap);
4304 PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
4306 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4309 * Examine the first PTE in the specified PTP. Abort if this PTE is
4310 * either invalid, unused, or does not map the first 4KB physical page
4311 * within a 2MB page.
4313 firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
4316 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
4317 atomic_add_long(&pmap_pde_p_failures, 1);
4318 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
4319 " in pmap %p", va, pmap);
4322 if ((newpde & (PG_M | PG_RW)) == PG_RW) {
4324 * When PG_M is already clear, PG_RW can be cleared without
4325 * a TLB invalidation.
4327 if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW))
4333 * Examine each of the other PTEs in the specified PTP. Abort if this
4334 * PTE maps an unexpected 4KB physical page or does not have identical
4335 * characteristics to the first PTE.
4337 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
4338 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
4341 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
4342 atomic_add_long(&pmap_pde_p_failures, 1);
4343 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
4344 " in pmap %p", va, pmap);
4347 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
4349 * When PG_M is already clear, PG_RW can be cleared
4350 * without a TLB invalidation.
4352 if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
4355 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
4356 " in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
4357 (va & ~PDRMASK), pmap);
4359 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
4360 atomic_add_long(&pmap_pde_p_failures, 1);
4361 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
4362 " in pmap %p", va, pmap);
4369 * Save the page table page in its current state until the PDE
4370 * mapping the superpage is demoted by pmap_demote_pde() or
4371 * destroyed by pmap_remove_pde().
4373 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
4374 KASSERT(mpte >= vm_page_array &&
4375 mpte < &vm_page_array[vm_page_array_size],
4376 ("pmap_promote_pde: page table page is out of range"));
4377 KASSERT(mpte->pindex == pmap_pde_pindex(va),
4378 ("pmap_promote_pde: page table page's pindex is wrong"));
4379 if (pmap_insert_pt_page(pmap, mpte)) {
4380 atomic_add_long(&pmap_pde_p_failures, 1);
4382 "pmap_promote_pde: failure for va %#lx in pmap %p", va,
4388 * Promote the pv entries.
4390 if ((newpde & PG_MANAGED) != 0)
4391 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp);
4394 * Propagate the PAT index to its proper position.
4396 newpde = pmap_swap_pat(pmap, newpde);
4399 * Map the superpage.
4401 if (workaround_erratum383)
4402 pmap_update_pde(pmap, va, pde, PG_PS | newpde);
4404 pde_store(pde, PG_PROMOTED | PG_PS | newpde);
4406 atomic_add_long(&pmap_pde_promotions, 1);
4407 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
4408 " in pmap %p", va, pmap);
4410 #endif /* VM_NRESERVLEVEL > 0 */
4413 * Insert the given physical page (p) at
4414 * the specified virtual address (v) in the
4415 * target physical map with the protection requested.
4417 * If specified, the page will be wired down, meaning
4418 * that the related pte can not be reclaimed.
4420 * NB: This is the only routine which MAY NOT lazy-evaluate
4421 * or lose information. That is, this routine must actually
4422 * insert this page into the given map NOW.
4424 * When destroying both a page table and PV entry, this function
4425 * performs the TLB invalidation before releasing the PV list
4426 * lock, so we do not need pmap_delayed_invl_page() calls here.
4429 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
4430 u_int flags, int8_t psind)
4432 struct rwlock *lock;
4434 pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V;
4435 pt_entry_t newpte, origpte;
4442 PG_A = pmap_accessed_bit(pmap);
4443 PG_G = pmap_global_bit(pmap);
4444 PG_M = pmap_modified_bit(pmap);
4445 PG_V = pmap_valid_bit(pmap);
4446 PG_RW = pmap_rw_bit(pmap);
4448 va = trunc_page(va);
4449 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
4450 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
4451 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
4453 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
4454 va >= kmi.clean_eva,
4455 ("pmap_enter: managed mapping within the clean submap"));
4456 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
4457 VM_OBJECT_ASSERT_LOCKED(m->object);
4458 KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
4459 ("pmap_enter: flags %u has reserved bits set", flags));
4460 pa = VM_PAGE_TO_PHYS(m);
4461 newpte = (pt_entry_t)(pa | PG_A | PG_V);
4462 if ((flags & VM_PROT_WRITE) != 0)
4464 if ((prot & VM_PROT_WRITE) != 0)
4466 KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
4467 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
4468 if ((prot & VM_PROT_EXECUTE) == 0)
4470 if ((flags & PMAP_ENTER_WIRED) != 0)
4472 if (va < VM_MAXUSER_ADDRESS)
4474 if (pmap == kernel_pmap)
4476 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
4479 * Set modified bit gratuitously for writeable mappings if
4480 * the page is unmanaged. We do not want to take a fault
4481 * to do the dirty bit accounting for these mappings.
4483 if ((m->oflags & VPO_UNMANAGED) != 0) {
4484 if ((newpte & PG_RW) != 0)
4487 newpte |= PG_MANAGED;
4492 /* Assert the required virtual and physical alignment. */
4493 KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
4494 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
4495 rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
4501 * In the case that a page table page is not
4502 * resident, we are creating it here.
4505 pde = pmap_pde(pmap, va);
4506 if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
4507 pmap_demote_pde_locked(pmap, pde, va, &lock))) {
4508 pte = pmap_pde_to_pte(pde, va);
4509 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
4510 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
4513 } else if (va < VM_MAXUSER_ADDRESS) {
4515 * Here if the pte page isn't mapped, or if it has been
4518 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
4519 mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va),
4520 nosleep ? NULL : &lock);
4521 if (mpte == NULL && nosleep) {
4522 rv = KERN_RESOURCE_SHORTAGE;
4527 panic("pmap_enter: invalid page directory va=%#lx", va);
4532 * Is the specified virtual address already mapped?
4534 if ((origpte & PG_V) != 0) {
4536 * Wiring change, just update stats. We don't worry about
4537 * wiring PT pages as they remain resident as long as there
4538 * are valid mappings in them. Hence, if a user page is wired,
4539 * the PT page will be also.
4541 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
4542 pmap->pm_stats.wired_count++;
4543 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
4544 pmap->pm_stats.wired_count--;
4547 * Remove the extra PT page reference.
4551 KASSERT(mpte->wire_count > 0,
4552 ("pmap_enter: missing reference to page table page,"
4557 * Has the physical page changed?
4559 opa = origpte & PG_FRAME;
4562 * No, might be a protection or wiring change.
4564 if ((origpte & PG_MANAGED) != 0 &&
4565 (newpte & PG_RW) != 0)
4566 vm_page_aflag_set(m, PGA_WRITEABLE);
4567 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
4573 * Increment the counters.
4575 if ((newpte & PG_W) != 0)
4576 pmap->pm_stats.wired_count++;
4577 pmap_resident_count_inc(pmap, 1);
4581 * Enter on the PV list if part of our managed memory.
4583 if ((newpte & PG_MANAGED) != 0) {
4584 pv = get_pv_entry(pmap, &lock);
4586 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
4587 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4589 if ((newpte & PG_RW) != 0)
4590 vm_page_aflag_set(m, PGA_WRITEABLE);
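/*
 * Update the PTE: either overwrite an existing mapping or install
 * a brand-new one.
 */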
4596 if ((origpte & PG_V) != 0) {
4598 origpte = pte_load_store(pte, newpte);
4599 opa = origpte & PG_FRAME;
4601 if ((origpte & PG_MANAGED) != 0) {
4602 om = PHYS_TO_VM_PAGE(opa);
4603 if ((origpte & (PG_M | PG_RW)) == (PG_M |
4606 if ((origpte & PG_A) != 0)
4607 vm_page_aflag_set(om, PGA_REFERENCED);
4608 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
4609 pmap_pvh_free(&om->md, pmap, va);
4610 if ((om->aflags & PGA_WRITEABLE) != 0 &&
4611 TAILQ_EMPTY(&om->md.pv_list) &&
4612 ((om->flags & PG_FICTITIOUS) != 0 ||
4613 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
4614 vm_page_aflag_clear(om, PGA_WRITEABLE);
4616 } else if ((newpte & PG_M) == 0 && (origpte & (PG_M |
4617 PG_RW)) == (PG_M | PG_RW)) {
4618 if ((origpte & PG_MANAGED) != 0)
4622 * Although the PTE may still have PG_RW set, TLB
4623 * invalidation may nonetheless be required because
4624 * the PTE no longer has PG_M set.
4626 } else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
4628 * This PTE change does not require TLB invalidation.
4632 if ((origpte & PG_A) != 0)
4633 pmap_invalidate_page(pmap, va);
4635 pte_store(pte, newpte);
4639 #if VM_NRESERVLEVEL > 0
4641 * If both the page table page and the reservation are fully
4642 * populated, then attempt promotion.
4644 if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
4645 pmap_ps_enabled(pmap) &&
4646 (m->flags & PG_FICTITIOUS) == 0 &&
4647 vm_reserv_level_iffullpop(m) == 0)
4648 pmap_promote_pde(pmap, pde, va, &lock);
4660 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
4661 * if successful. Returns false if (1) a page table page cannot be allocated
4662 * without sleeping, (2) a mapping already exists at the specified virtual
4663 * address, or (3) a PV entry cannot be allocated without reclaiming another PV entry.
4667 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
4668 struct rwlock **lockp)
4673 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4674 PG_V = pmap_valid_bit(pmap);
4675 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
4677 if ((m->oflags & VPO_UNMANAGED) == 0)
4678 newpde |= PG_MANAGED;
4679 if ((prot & VM_PROT_EXECUTE) == 0)
4681 if (va < VM_MAXUSER_ADDRESS)
4683 return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
4684 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
4689 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
4690 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
4691 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
4692 * a mapping already exists at the specified virtual address. Returns
4693 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
4694 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
4695 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
4697 * The parameter "m" is only used when creating a managed, writeable mapping.
4700 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
4701 vm_page_t m, struct rwlock **lockp)
4703 struct spglist free;
4704 pd_entry_t oldpde, *pde;
4705 pt_entry_t PG_G, PG_RW, PG_V;
4708 PG_G = pmap_global_bit(pmap);
4709 PG_RW = pmap_rw_bit(pmap);
4710 KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
4711 ("pmap_enter_pde: newpde is missing PG_M"));
4712 PG_V = pmap_valid_bit(pmap);
4713 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4715 if ((pdpg = pmap_allocpde(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
4716 NULL : lockp)) == NULL) {
4717 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
4718 " in pmap %p", va, pmap);
4719 return (KERN_RESOURCE_SHORTAGE);
4721 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4722 pde = &pde[pmap_pde_index(va)];
4724 if ((oldpde & PG_V) != 0) {
4725 KASSERT(pdpg->wire_count > 1,
4726 ("pmap_enter_pde: pdpg's wire count is too low"));
4727 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
4729 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
4730 " in pmap %p", va, pmap);
4731 return (KERN_FAILURE);
4733 /* Break the existing mapping(s). */
4735 if ((oldpde & PG_PS) != 0) {
4737 * The reference to the PD page that was acquired by
4738 * pmap_allocpde() ensures that it won't be freed.
4739 * However, if the PDE resulted from a promotion, then
4740 * a reserved PT page could be freed.
4742 (void)pmap_remove_pde(pmap, pde, va, &free, lockp);
4743 if ((oldpde & PG_G) == 0)
4744 pmap_invalidate_pde_page(pmap, va, oldpde);
4746 pmap_delayed_invl_started();
4747 if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
4749 pmap_invalidate_all(pmap);
4750 pmap_delayed_invl_finished();
4752 pmap_free_zero_pages(&free);
4753 if (va >= VM_MAXUSER_ADDRESS) {
4754 mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
4755 if (pmap_insert_pt_page(pmap, mt)) {
4757 * XXX Currently, this can't happen because
4758 * we do not perform pmap_enter(psind == 1)
4759 * on the kernel pmap.
4761 panic("pmap_enter_pde: trie insert failed");
4764 KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
4767 if ((newpde & PG_MANAGED) != 0) {
4769 * Abort this mapping if its PV entry could not be created.
4771 if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
4773 if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
4775 * Although "va" is not mapped, paging-
4776 * structure caches could nonetheless have
4777 * entries that refer to the freed page table
4778 * pages. Invalidate those entries.
4780 pmap_invalidate_page(pmap, va);
4781 pmap_free_zero_pages(&free);
4783 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
4784 " in pmap %p", va, pmap);
4785 return (KERN_RESOURCE_SHORTAGE);
4787 if ((newpde & PG_RW) != 0) {
4788 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4789 vm_page_aflag_set(mt, PGA_WRITEABLE);
4794 * Increment counters.
4796 if ((newpde & PG_W) != 0)
4797 pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
4798 pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
4801 * Map the superpage. (This is not a promoted mapping; there will not
4802 * be any lingering 4KB page mappings in the TLB.)
4804 pde_store(pde, newpde);
4806 atomic_add_long(&pmap_pde_mappings, 1);
4807 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
4808 " in pmap %p", va, pmap);
4809 return (KERN_SUCCESS);
4813 * Maps a sequence of resident pages belonging to the same object.
4814 * The sequence begins with the given page m_start. This page is
4815 * mapped at the given virtual address start. Each subsequent page is
4816 * mapped at a virtual address that is offset from start by the same
4817 * amount as the page is offset from m_start within the object. The
4818 * last page in the sequence is the page with the largest offset from
4819 * m_start that can be mapped at a virtual address less than the given
4820 * virtual address end. Not every virtual page between start and end
4821 * is mapped; only those for which a resident page exists with the
4822 * corresponding offset from m_start are mapped.
4825 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
4826 vm_page_t m_start, vm_prot_t prot)
4828 struct rwlock *lock;
4831 vm_pindex_t diff, psize;
4833 VM_OBJECT_ASSERT_LOCKED(m_start->object);
4835 psize = atop(end - start);
4840 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
4841 va = start + ptoa(diff);
4842 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
4843 m->psind == 1 && pmap_ps_enabled(pmap) &&
4844 pmap_enter_2mpage(pmap, va, m, prot, &lock))
4845 m = &m[NBPDR / PAGE_SIZE - 1];
4847 mpte = pmap_enter_quick_locked(pmap, va, m, prot,
4849 m = TAILQ_NEXT(m, listq);
4857 * this code makes some *MAJOR* assumptions:
4858 * 1. Current pmap & pmap exists.
4859 * 2. Not wired.
4860 * 3. Read access.
4861 * 4. No page table pages.
4862 * but is *MUCH* faster than pmap_enter...
4866 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
4868 struct rwlock *lock;
4872 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
4879 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
4880 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
4882 struct spglist free;
4883 pt_entry_t *pte, PG_V;
4886 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
4887 (m->oflags & VPO_UNMANAGED) != 0,
4888 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
4889 PG_V = pmap_valid_bit(pmap);
4890 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4893 * In the case that a page table page is not
4894 * resident, we are creating it here.
4896 if (va < VM_MAXUSER_ADDRESS) {
4897 vm_pindex_t ptepindex;
4901 * Calculate pagetable page index
4903 ptepindex = pmap_pde_pindex(va);
4904 if (mpte && (mpte->pindex == ptepindex)) {
4908 * Get the page directory entry
4910 ptepa = pmap_pde(pmap, va);
4913 * If the page table page is mapped, we just increment
4914 * the hold count, and activate it. Otherwise, we
4915 * attempt to allocate a page table page. If this
4916 * attempt fails, we don't retry. Instead, we give up.
4918 if (ptepa && (*ptepa & PG_V) != 0) {
4921 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
4925 * Pass NULL instead of the PV list lock
4926 * pointer, because we don't intend to sleep.
4928 mpte = _pmap_allocpte(pmap, ptepindex, NULL);
4933 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
4934 pte = &pte[pmap_pte_index(va)];
4948 * Enter on the PV list if part of our managed memory.
4950 if ((m->oflags & VPO_UNMANAGED) == 0 &&
4951 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
4954 if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
4956 * Although "va" is not mapped, paging-
4957 * structure caches could nonetheless have
4958 * entries that refer to the freed page table
4959 * pages. Invalidate those entries.
4961 pmap_invalidate_page(pmap, va);
4962 pmap_free_zero_pages(&free);
4970 * Increment counters
4972 pmap_resident_count_inc(pmap, 1);
4974 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0);
4975 if ((prot & VM_PROT_EXECUTE) == 0)
4979 * Now validate mapping with RO protection
4981 if ((m->oflags & VPO_UNMANAGED) != 0)
4982 pte_store(pte, pa | PG_V | PG_U);
4984 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
4989 * Make a temporary mapping for a physical address. This is only intended
4990 * to be used for panic dumps.
4993 pmap_kenter_temporary(vm_paddr_t pa, int i)
4997 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
4998 pmap_kenter(va, pa);
5000 return ((void *)crashdumpmap);
5004 * This code maps large physical mmap regions into the
5005 * processor address space. Note that some shortcuts
5006 * are taken, but the code works.
5009 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
5010 vm_pindex_t pindex, vm_size_t size)
5013 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
5014 vm_paddr_t pa, ptepa;
5018 PG_A = pmap_accessed_bit(pmap);
5019 PG_M = pmap_modified_bit(pmap);
5020 PG_V = pmap_valid_bit(pmap);
5021 PG_RW = pmap_rw_bit(pmap);
5023 VM_OBJECT_ASSERT_WLOCKED(object);
5024 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
5025 ("pmap_object_init_pt: non-device object"));
5026 if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
5027 if (!pmap_ps_enabled(pmap))
5029 if (!vm_object_populate(object, pindex, pindex + atop(size)))
5031 p = vm_page_lookup(object, pindex);
5032 KASSERT(p->valid == VM_PAGE_BITS_ALL,
5033 ("pmap_object_init_pt: invalid page %p", p));
5034 pat_mode = p->md.pat_mode;
5037 * Abort the mapping if the first page is not physically
5038 * aligned to a 2MB page boundary.
5040 ptepa = VM_PAGE_TO_PHYS(p);
5041 if (ptepa & (NBPDR - 1))
5045 * Skip the first page. Abort the mapping if the rest of
5046 * the pages are not physically contiguous or have differing
5047 * memory attributes.
5049 p = TAILQ_NEXT(p, listq);
5050 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
5052 KASSERT(p->valid == VM_PAGE_BITS_ALL,
5053 ("pmap_object_init_pt: invalid page %p", p));
5054 if (pa != VM_PAGE_TO_PHYS(p) ||
5055 pat_mode != p->md.pat_mode)
5057 p = TAILQ_NEXT(p, listq);
5061 * Map using 2MB pages. Since "ptepa" is 2M aligned and
5062 * "size" is a multiple of 2M, adding the PAT setting to "pa"
5063 * will not affect the termination of this loop.
5066 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
5067 pa < ptepa + size; pa += NBPDR) {
5068 pdpg = pmap_allocpde(pmap, addr, NULL);
5071 * The creation of mappings below is only an
5072 * optimization. If a page directory page
5073 * cannot be allocated without blocking,
5074 * continue on to the next mapping rather than blocking.
5080 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
5081 pde = &pde[pmap_pde_index(addr)];
5082 if ((*pde & PG_V) == 0) {
5083 pde_store(pde, pa | PG_PS | PG_M | PG_A |
5084 PG_U | PG_RW | PG_V);
5085 pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
5086 atomic_add_long(&pmap_pde_mappings, 1);
5088 /* Continue on if the PDE is already valid. */
5090 KASSERT(pdpg->wire_count > 0,
5091 ("pmap_object_init_pt: missing reference "
5092 "to page directory page, va: 0x%lx", addr));
5101 * Clear the wired attribute from the mappings for the specified range of
5102 * addresses in the given pmap. Every valid mapping within that range
5103 * must have the wired attribute set. In contrast, invalid mappings
5104 * cannot have the wired attribute set, so they are ignored.
5106 * The wired attribute of the page table entry is not a hardware
5107 * feature, so there is no need to invalidate any TLB entries.
5108 * Since pmap_demote_pde() for the wired entry must never fail,
5109 * pmap_delayed_invl_started()/finished() calls around the
5110 * function are not needed.
5113 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5115 vm_offset_t va_next;
5116 pml4_entry_t *pml4e;
5119 pt_entry_t *pte, PG_V;
5121 PG_V = pmap_valid_bit(pmap);
5123 for (; sva < eva; sva = va_next) {
5124 pml4e = pmap_pml4e(pmap, sva);
5125 if ((*pml4e & PG_V) == 0) {
5126 va_next = (sva + NBPML4) & ~PML4MASK;
5131 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
5132 if ((*pdpe & PG_V) == 0) {
5133 va_next = (sva + NBPDP) & ~PDPMASK;
5138 va_next = (sva + NBPDR) & ~PDRMASK;
5141 pde = pmap_pdpe_to_pde(pdpe, sva);
5142 if ((*pde & PG_V) == 0)
5144 if ((*pde & PG_PS) != 0) {
5145 if ((*pde & PG_W) == 0)
5146 panic("pmap_unwire: pde %#jx is missing PG_W",
5150 * Are we unwiring the entire large page? If not,
5151 * demote the mapping and fall through.
5153 if (sva + NBPDR == va_next && eva >= va_next) {
5154 atomic_clear_long(pde, PG_W);
5155 pmap->pm_stats.wired_count -= NBPDR /
5158 } else if (!pmap_demote_pde(pmap, pde, sva))
5159 panic("pmap_unwire: demotion failed");
5163 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
5165 if ((*pte & PG_V) == 0)
5167 if ((*pte & PG_W) == 0)
5168 panic("pmap_unwire: pte %#jx is missing PG_W",
5172 * PG_W must be cleared atomically. Although the pmap
5173 * lock synchronizes access to PG_W, another processor
5174 * could be setting PG_M and/or PG_A concurrently.
5176 atomic_clear_long(pte, PG_W);
5177 pmap->pm_stats.wired_count--;
5184 * Copy the range specified by src_addr/len
5185 * from the source map to the range dst_addr/len
5186 * in the destination map.
5188 * This routine is only advisory and need not do anything.
5192 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
5193 vm_offset_t src_addr)
5195 struct rwlock *lock;
5196 struct spglist free;
5198 vm_offset_t end_addr = src_addr + len;
5199 vm_offset_t va_next;
5200 vm_page_t dst_pdpg, dstmpte, srcmpte;
5201 pt_entry_t PG_A, PG_M, PG_V;
5203 if (dst_addr != src_addr)
5206 if (dst_pmap->pm_type != src_pmap->pm_type)
5210 * EPT page table entries that require emulation of A/D bits are
5211 * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although
5212 * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit
5213 * (aka EPT_PG_EXECUTE) could still be set. Since some EPT
5214 * implementations flag an EPT misconfiguration for exec-only
5215 * mappings we skip this function entirely for emulated pmaps.
5217 if (pmap_emulate_ad_bits(dst_pmap))
5221 if (dst_pmap < src_pmap) {
5222 PMAP_LOCK(dst_pmap);
5223 PMAP_LOCK(src_pmap);
5225 PMAP_LOCK(src_pmap);
5226 PMAP_LOCK(dst_pmap);
5229 PG_A = pmap_accessed_bit(dst_pmap);
5230 PG_M = pmap_modified_bit(dst_pmap);
5231 PG_V = pmap_valid_bit(dst_pmap);
5233 for (addr = src_addr; addr < end_addr; addr = va_next) {
5234 pt_entry_t *src_pte, *dst_pte;
5235 pml4_entry_t *pml4e;
5237 pd_entry_t srcptepaddr, *pde;
5239 KASSERT(addr < UPT_MIN_ADDRESS,
5240 ("pmap_copy: invalid to pmap_copy page tables"));
5242 pml4e = pmap_pml4e(src_pmap, addr);
5243 if ((*pml4e & PG_V) == 0) {
5244 va_next = (addr + NBPML4) & ~PML4MASK;
5250 pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
5251 if ((*pdpe & PG_V) == 0) {
5252 va_next = (addr + NBPDP) & ~PDPMASK;
5258 va_next = (addr + NBPDR) & ~PDRMASK;
5262 pde = pmap_pdpe_to_pde(pdpe, addr);
5264 if (srcptepaddr == 0)
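/*
 * Copy a 2MB mapping wholesale, but only if it lies entirely
 * within the range being copied.
 */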
5267 if (srcptepaddr & PG_PS) {
5268 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
5270 dst_pdpg = pmap_allocpde(dst_pmap, addr, NULL);
5271 if (dst_pdpg == NULL)
5273 pde = (pd_entry_t *)
5274 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
5275 pde = &pde[pmap_pde_index(addr)];
5276 if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
5277 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
5278 PMAP_ENTER_NORECLAIM, &lock))) {
5279 *pde = srcptepaddr & ~PG_W;
5280 pmap_resident_count_inc(dst_pmap, NBPDR / PAGE_SIZE);
5281 atomic_add_long(&pmap_pde_mappings, 1);
5283 dst_pdpg->wire_count--;
5287 srcptepaddr &= PG_FRAME;
5288 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
5289 KASSERT(srcmpte->wire_count > 0,
5290 ("pmap_copy: source page table page is unused"));
5292 if (va_next > end_addr)
5295 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
5296 src_pte = &src_pte[pmap_pte_index(addr)];
5298 while (addr < va_next) {
5302 * We only virtually copy managed pages.
5304 if ((ptetemp & PG_MANAGED) != 0) {
5305 if (dstmpte != NULL &&
5306 dstmpte->pindex == pmap_pde_pindex(addr))
5307 dstmpte->wire_count++;
5308 else if ((dstmpte = pmap_allocpte(dst_pmap,
5309 addr, NULL)) == NULL)
5311 dst_pte = (pt_entry_t *)
5312 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
5313 dst_pte = &dst_pte[pmap_pte_index(addr)];
5314 if (*dst_pte == 0 &&
5315 pmap_try_insert_pv_entry(dst_pmap, addr,
5316 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
5319 * Clear the wired, modified, and
5320 * accessed (referenced) bits
5323 *dst_pte = ptetemp & ~(PG_W | PG_M |
5325 pmap_resident_count_inc(dst_pmap, 1);
5328 if (pmap_unwire_ptp(dst_pmap, addr,
5331 * Although "addr" is not
5332 * mapped, paging-structure
5333 * caches could nonetheless
5334 * have entries that refer to
5335 * the freed page table pages.
5336 * Invalidate those entries.
5338 pmap_invalidate_page(dst_pmap,
5340 pmap_free_zero_pages(&free);
5344 if (dstmpte->wire_count >= srcmpte->wire_count)
5354 PMAP_UNLOCK(src_pmap);
5355 PMAP_UNLOCK(dst_pmap);
5359 * Zero the specified hardware page.
5362 pmap_zero_page(vm_page_t m)
5364 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5366 pagezero((void *)va);
5370 * Zero an area within a single hardware page. off and size must not
5371 * cover an area beyond a single hardware page.
5374 pmap_zero_page_area(vm_page_t m, int off, int size)
5376 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5378 if (off == 0 && size == PAGE_SIZE)
5379 pagezero((void *)va);
5381 bzero((char *)va + off, size);
5385 * Copy 1 specified hardware page to another.
5388 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
5390 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
5391 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
5393 pagecopy((void *)src, (void *)dst);
5396 int unmapped_buf_allowed = 1;
5399 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
5400 vm_offset_t b_offset, int xfersize)
5404 vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
5408 while (xfersize > 0) {
5409 a_pg_offset = a_offset & PAGE_MASK;
5410 pages[0] = ma[a_offset >> PAGE_SHIFT];
5411 b_pg_offset = b_offset & PAGE_MASK;
5412 pages[1] = mb[b_offset >> PAGE_SHIFT];
5413 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
5414 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
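/*
 * Obtain kernel addresses for both pages; a page that is not
 * covered by the direct map receives a transient mapping, which
 * is torn down again below.
 */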
5415 mapped = pmap_map_io_transient(pages, vaddr, 2, FALSE);
5416 a_cp = (char *)vaddr[0] + a_pg_offset;
5417 b_cp = (char *)vaddr[1] + b_pg_offset;
5418 bcopy(a_cp, b_cp, cnt);
5419 if (__predict_false(mapped))
5420 pmap_unmap_io_transient(pages, vaddr, 2, FALSE);
5428 * Returns true if the pmap's pv is one of the first
5429 * 16 pvs linked to from this page. This count may
5430 * be changed upwards or downwards in the future; it
5431 * is only necessary that true be returned for a small
5432 * subset of pmaps for proper page aging.
5435 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
5437 struct md_page *pvh;
5438 struct rwlock *lock;
5443 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5444 ("pmap_page_exists_quick: page %p is not managed", m));
5446 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5448 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5449 if (PV_PMAP(pv) == pmap) {
5457 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
5458 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5459 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5460 if (PV_PMAP(pv) == pmap) {
5474 * pmap_page_wired_mappings:
5476 * Return the number of managed mappings to the given physical page that are wired.
5480 pmap_page_wired_mappings(vm_page_t m)
5482 struct rwlock *lock;
5483 struct md_page *pvh;
5487 int count, md_gen, pvh_gen;
5489 if ((m->oflags & VPO_UNMANAGED) != 0)
5491 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5495 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5497 if (!PMAP_TRYLOCK(pmap)) {
5498 md_gen = m->md.pv_gen;
5502 if (md_gen != m->md.pv_gen) {
5507 pte = pmap_pte(pmap, pv->pv_va);
5508 if ((*pte & PG_W) != 0)
5512 if ((m->flags & PG_FICTITIOUS) == 0) {
5513 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5514 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5516 if (!PMAP_TRYLOCK(pmap)) {
5517 md_gen = m->md.pv_gen;
5518 pvh_gen = pvh->pv_gen;
5522 if (md_gen != m->md.pv_gen ||
5523 pvh_gen != pvh->pv_gen) {
5528 pte = pmap_pde(pmap, pv->pv_va);
5529 if ((*pte & PG_W) != 0)
5539 * Returns TRUE if the given page is mapped individually or as part of
5540 * a 2mpage. Otherwise, returns FALSE.
5543 pmap_page_is_mapped(vm_page_t m)
5545 struct rwlock *lock;
5548 if ((m->oflags & VPO_UNMANAGED) != 0)
5550 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5552 rv = !TAILQ_EMPTY(&m->md.pv_list) ||
5553 ((m->flags & PG_FICTITIOUS) == 0 &&
5554 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
5560 * Destroy all managed, non-wired mappings in the given user-space
5561 * pmap. This pmap cannot be active on any processor besides the current one.
5564 * This function cannot be applied to the kernel pmap. Moreover, it
5565 * is not intended for general use. It is only to be used during
5566 * process termination. Consequently, it can be implemented in ways
5567 * that make it faster than pmap_remove(). First, it can more quickly
5568 * destroy mappings by iterating over the pmap's collection of PV
5569 * entries, rather than searching the page table. Second, it doesn't
5570 * have to test and clear the page table entries atomically, because
5571 * no processor is currently accessing the user address space. In
5572 * particular, a page table entry's dirty bit won't change state once
5573 * this function starts.
5575 * Although this function destroys all of the pmap's managed,
5576 * non-wired mappings, it can delay and batch the invalidation of TLB
5577 * entries without calling pmap_delayed_invl_started() and
5578 * pmap_delayed_invl_finished(). Because the pmap is not active on
5579 * any other processor, none of these TLB entries will ever be used
5580 * before their eventual invalidation. Consequently, there is no need
5581 * for either pmap_remove_all() or pmap_remove_write() to wait for
5582 * that eventual TLB invalidation.
5585 pmap_remove_pages(pmap_t pmap)
5588 pt_entry_t *pte, tpte;
5589 pt_entry_t PG_M, PG_RW, PG_V;
5590 struct spglist free;
5591 vm_page_t m, mpte, mt;
5593 struct md_page *pvh;
5594 struct pv_chunk *pc, *npc;
5595 struct rwlock *lock;
5597 uint64_t inuse, bitmask;
5598 int allfree, field, freed, idx;
5599 boolean_t superpage;
5603 * Assert that the given pmap is only active on the current
5604 * CPU. Unfortunately, we cannot block another CPU from
5605 * activating the pmap while this function is executing.
5607 KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
5610 cpuset_t other_cpus;
5612 other_cpus = all_cpus;
5614 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
5615 CPU_AND(&other_cpus, &pmap->pm_active);
5617 KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
5622 PG_M = pmap_modified_bit(pmap);
5623 PG_V = pmap_valid_bit(pmap);
5624 PG_RW = pmap_rw_bit(pmap);
5628 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
5631 for (field = 0; field < _NPCM; field++) {
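/*
 * A clear bit in pc_map denotes an allocated pv entry, so
 * "inuse" identifies the entries to examine in this word.
 */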
5632 inuse = ~pc->pc_map[field] & pc_freemask[field];
5633 while (inuse != 0) {
5635 bitmask = 1UL << bit;
5636 idx = field * 64 + bit;
5637 pv = &pc->pc_pventry[idx];
5640 pte = pmap_pdpe(pmap, pv->pv_va);
5642 pte = pmap_pdpe_to_pde(pte, pv->pv_va);
5644 if ((tpte & (PG_PS | PG_V)) == PG_V) {
5647 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
5649 pte = &pte[pmap_pte_index(pv->pv_va)];
5653 * Keep track whether 'tpte' is a
5654 * superpage explicitly instead of
5655 * relying on PG_PS being set.
5657 * This is because PG_PS is numerically
5658 * identical to PG_PTE_PAT and thus a
5659 * regular page could be mistaken for a superpage.
5665 if ((tpte & PG_V) == 0) {
5666 panic("bad pte va %lx pte %lx",
5671 * We cannot remove wired pages from a process' mapping at this time
5679 pa = tpte & PG_PS_FRAME;
5681 pa = tpte & PG_FRAME;
5683 m = PHYS_TO_VM_PAGE(pa);
5684 KASSERT(m->phys_addr == pa,
5685 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
5686 m, (uintmax_t)m->phys_addr,
5689 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
5690 m < &vm_page_array[vm_page_array_size],
5691 ("pmap_remove_pages: bad tpte %#jx",
5697 * Update the vm_page_t clean/reference bits.
5699 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5701 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
5707 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5710 pc->pc_map[field] |= bitmask;
5712 pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
5713 pvh = pa_to_pvh(tpte & PG_PS_FRAME);
5714 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5716 if (TAILQ_EMPTY(&pvh->pv_list)) {
5717 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
5718 if ((mt->aflags & PGA_WRITEABLE) != 0 &&
5719 TAILQ_EMPTY(&mt->md.pv_list))
5720 vm_page_aflag_clear(mt, PGA_WRITEABLE);
5722 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
5724 pmap_resident_count_dec(pmap, 1);
5725 KASSERT(mpte->wire_count == NPTEPG,
5726 ("pmap_remove_pages: pte page wire count error"));
5727 mpte->wire_count = 0;
5728 pmap_add_delayed_free_list(mpte, &free, FALSE);
5731 pmap_resident_count_dec(pmap, 1);
5732 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5734 if ((m->aflags & PGA_WRITEABLE) != 0 &&
5735 TAILQ_EMPTY(&m->md.pv_list) &&
5736 (m->flags & PG_FICTITIOUS) == 0) {
5737 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5738 if (TAILQ_EMPTY(&pvh->pv_list))
5739 vm_page_aflag_clear(m, PGA_WRITEABLE);
5742 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
5746 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5747 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5748 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5750 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5756 pmap_invalidate_all(pmap);
5758 pmap_free_zero_pages(&free);
5762 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
5764 struct rwlock *lock;
5766 struct md_page *pvh;
5767 pt_entry_t *pte, mask;
5768 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
5770 int md_gen, pvh_gen;
5774 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5777 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5779 if (!PMAP_TRYLOCK(pmap)) {
5780 md_gen = m->md.pv_gen;
5784 if (md_gen != m->md.pv_gen) {
5789 pte = pmap_pte(pmap, pv->pv_va);
5792 PG_M = pmap_modified_bit(pmap);
5793 PG_RW = pmap_rw_bit(pmap);
5794 mask |= PG_RW | PG_M;
5797 PG_A = pmap_accessed_bit(pmap);
5798 PG_V = pmap_valid_bit(pmap);
5799 mask |= PG_V | PG_A;
5801 rv = (*pte & mask) == mask;
5806 if ((m->flags & PG_FICTITIOUS) == 0) {
5807 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5808 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5810 if (!PMAP_TRYLOCK(pmap)) {
5811 md_gen = m->md.pv_gen;
5812 pvh_gen = pvh->pv_gen;
5816 if (md_gen != m->md.pv_gen ||
5817 pvh_gen != pvh->pv_gen) {
5822 pte = pmap_pde(pmap, pv->pv_va);
5825 PG_M = pmap_modified_bit(pmap);
5826 PG_RW = pmap_rw_bit(pmap);
5827 mask |= PG_RW | PG_M;
5830 PG_A = pmap_accessed_bit(pmap);
5831 PG_V = pmap_valid_bit(pmap);
5832 mask |= PG_V | PG_A;
5834 rv = (*pte & mask) == mask;
5848 * Return whether or not the specified physical page was modified
5849 * in any physical maps.
5852 pmap_is_modified(vm_page_t m)
5855 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5856 ("pmap_is_modified: page %p is not managed", m));
5859 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
5860 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
5861 * is clear, no PTEs can have PG_M set.
5863 VM_OBJECT_ASSERT_WLOCKED(m->object);
5864 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
5866 return (pmap_page_test_mappings(m, FALSE, TRUE));
5870 * pmap_is_prefaultable:
5872 * Return whether or not the specified virtual address is eligible for prefault.
5876 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
5879 pt_entry_t *pte, PG_V;
5882 PG_V = pmap_valid_bit(pmap);
5885 pde = pmap_pde(pmap, addr);
5886 if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
5887 pte = pmap_pde_to_pte(pde, addr);
5888 rv = (*pte & PG_V) == 0;
5895 * pmap_is_referenced:
5897 * Return whether or not the specified physical page was referenced
5898 * in any physical maps.
5901 pmap_is_referenced(vm_page_t m)
5904 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5905 ("pmap_is_referenced: page %p is not managed", m));
5906 return (pmap_page_test_mappings(m, TRUE, FALSE));
5910 * Clear the write and modified bits in each of the given page's mappings.
5913 pmap_remove_write(vm_page_t m)
5915 struct md_page *pvh;
5917 struct rwlock *lock;
5918 pv_entry_t next_pv, pv;
5920 pt_entry_t oldpte, *pte, PG_M, PG_RW;
5922 int pvh_gen, md_gen;
5924 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5925 ("pmap_remove_write: page %p is not managed", m));
5928 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
5929 * set by another thread while the object is locked. Thus,
5930 * if PGA_WRITEABLE is clear, no page table entries need updating.
5932 VM_OBJECT_ASSERT_WLOCKED(m->object);
5933 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
5935 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5936 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5937 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5940 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5942 if (!PMAP_TRYLOCK(pmap)) {
5943 pvh_gen = pvh->pv_gen;
5947 if (pvh_gen != pvh->pv_gen) {
5953 PG_RW = pmap_rw_bit(pmap);
5955 pde = pmap_pde(pmap, va);
5956 if ((*pde & PG_RW) != 0)
5957 (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
5958 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
5959 ("inconsistent pv lock %p %p for page %p",
5960 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
5963 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5965 if (!PMAP_TRYLOCK(pmap)) {
5966 pvh_gen = pvh->pv_gen;
5967 md_gen = m->md.pv_gen;
5971 if (pvh_gen != pvh->pv_gen ||
5972 md_gen != m->md.pv_gen) {
5978 PG_M = pmap_modified_bit(pmap);
5979 PG_RW = pmap_rw_bit(pmap);
5980 pde = pmap_pde(pmap, pv->pv_va);
5981 KASSERT((*pde & PG_PS) == 0,
5982 ("pmap_remove_write: found a 2mpage in page %p's pv list",
5984 pte = pmap_pde_to_pte(pde, pv->pv_va);
5987 if (oldpte & PG_RW) {
5988 if (!atomic_cmpset_long(pte, oldpte, oldpte &
5991 if ((oldpte & PG_M) != 0)
5993 pmap_invalidate_page(pmap, pv->pv_va);
5998 vm_page_aflag_clear(m, PGA_WRITEABLE);
5999 pmap_delayed_invl_wait(m);
6002 static __inline boolean_t
6003 safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
6006 if (!pmap_emulate_ad_bits(pmap))
6009 KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));
6012 * XWR = 010 or 110 will cause an unconditional EPT misconfiguration
6013 * so we don't let the referenced (aka EPT_PG_READ) bit to be cleared
6014 * if the EPT_PG_WRITE bit is set.
6016 if ((pte & EPT_PG_WRITE) != 0)
6020 * XWR = 100 is allowed only if the PMAP_SUPPORTS_EXEC_ONLY is set.
6022 if ((pte & EPT_PG_EXECUTE) == 0 ||
6023 ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
6030 * pmap_ts_referenced:
6032 * Return a count of reference bits for a page, clearing those bits.
6033 * It is not necessary for every reference bit to be cleared, but it
6034 * is necessary that 0 only be returned when there are truly no
6035 * reference bits set.
6037 * As an optimization, update the page's dirty field if a modified bit is
6038 * found while counting reference bits. This opportunistic update can be
6039 * performed at low cost and can eliminate the need for some future calls
6040 * to pmap_is_modified(). However, since this function stops after
6041 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
6042 * dirty pages. Those dirty pages will only be detected by a future call
6043 * to pmap_is_modified().
6045 * A DI block is not needed within this function, because
6046 * invalidations are performed before the PV list lock is
6050 pmap_ts_referenced(vm_page_t m)
6052 struct md_page *pvh;
6055 struct rwlock *lock;
6056 pd_entry_t oldpde, *pde;
6057 pt_entry_t *pte, PG_A, PG_M, PG_RW;
6060 int cleared, md_gen, not_cleared, pvh_gen;
6061 struct spglist free;
6064 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6065 ("pmap_ts_referenced: page %p is not managed", m));
6068 pa = VM_PAGE_TO_PHYS(m);
6069 lock = PHYS_TO_PV_LIST_LOCK(pa);
6070 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
6074 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
6075 goto small_mappings;
6081 if (!PMAP_TRYLOCK(pmap)) {
6082 pvh_gen = pvh->pv_gen;
6086 if (pvh_gen != pvh->pv_gen) {
6091 PG_A = pmap_accessed_bit(pmap);
6092 PG_M = pmap_modified_bit(pmap);
6093 PG_RW = pmap_rw_bit(pmap);
6095 pde = pmap_pde(pmap, pv->pv_va);
6097 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
6099 * Although "oldpde" is mapping a 2MB page, because
6100 * this function is called at a 4KB page granularity,
6101 * we only update the 4KB page under test.
6105 if ((oldpde & PG_A) != 0) {
6107 * Since this reference bit is shared by 512 4KB
6108 * pages, it should not be cleared every time it is
6109 * tested. Apply a simple "hash" function on the
6110 * physical page number, the virtual superpage number,
6111 * and the pmap address to select one 4KB page out of
6112 * the 512 on which testing the reference bit will
6113 * result in clearing that reference bit. This
6114 * function is designed to avoid the selection of the
6115 * same 4KB page for every 2MB page mapping.
6117 * On demotion, a mapping that hasn't been referenced
6118 * is simply destroyed. To avoid the possibility of a
6119 * subsequent page fault on a demoted wired mapping,
6120 * always leave its reference bit set. Moreover,
6121 * since the superpage is wired, the current state of
6122 * its reference bit won't affect page replacement.
6124 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
6125 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
6126 (oldpde & PG_W) == 0) {
6127 if (safe_to_clear_referenced(pmap, oldpde)) {
6128 atomic_clear_long(pde, PG_A);
6129 pmap_invalidate_page(pmap, pv->pv_va);
6131 } else if (pmap_demote_pde_locked(pmap, pde,
6132 pv->pv_va, &lock)) {
6134 * Remove the mapping to a single page
6135 * so that a subsequent access may
6136 * repromote. Since the underlying
6137 * page table page is fully populated,
6138 * this removal never frees a page table page.
6142 va += VM_PAGE_TO_PHYS(m) - (oldpde &
6144 pte = pmap_pde_to_pte(pde, va);
6145 pmap_remove_pte(pmap, pte, va, *pde,
6147 pmap_invalidate_page(pmap, va);
6153 * The superpage mapping was removed
6154 * entirely and therefore 'pv' is no longer valid.
6162 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
6163 ("inconsistent pv lock %p %p for page %p",
6164 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
6169 /* Rotate the PV list if it has more than one entry. */
6170 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
6171 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
6172 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
6175 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
6177 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
6179 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
6186 if (!PMAP_TRYLOCK(pmap)) {
6187 pvh_gen = pvh->pv_gen;
6188 md_gen = m->md.pv_gen;
6192 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
6197 PG_A = pmap_accessed_bit(pmap);
6198 PG_M = pmap_modified_bit(pmap);
6199 PG_RW = pmap_rw_bit(pmap);
6200 pde = pmap_pde(pmap, pv->pv_va);
6201 KASSERT((*pde & PG_PS) == 0,
6202 ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
6204 pte = pmap_pde_to_pte(pde, pv->pv_va);
6205 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6207 if ((*pte & PG_A) != 0) {
6208 if (safe_to_clear_referenced(pmap, *pte)) {
6209 atomic_clear_long(pte, PG_A);
6210 pmap_invalidate_page(pmap, pv->pv_va);
6212 } else if ((*pte & PG_W) == 0) {
6214 * Wired pages cannot be paged out so
6215 * doing accessed bit emulation for
6216 * them is wasted effort. We do the
6217 * hard work for unwired pages only.
6219 pmap_remove_pte(pmap, pte, pv->pv_va,
6220 *pde, &free, &lock);
6221 pmap_invalidate_page(pmap, pv->pv_va);
6226 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
6227 ("inconsistent pv lock %p %p for page %p",
6228 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
6233 /* Rotate the PV list if it has more than one entry. */
6234 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
6235 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
6236 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
6239 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
6240 not_cleared < PMAP_TS_REFERENCED_MAX);
6243 pmap_free_zero_pages(&free);
6244 return (cleared + not_cleared);
6248 * Apply the given advice to the specified range of addresses within the
6249 * given pmap. Depending on the advice, clear the referenced and/or
6250 * modified flags in each mapping and set the mapped page's dirty field.
6253 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
6255 struct rwlock *lock;
6256 pml4_entry_t *pml4e;
6258 pd_entry_t oldpde, *pde;
6259 pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
6260 vm_offset_t va, va_next;
6262 boolean_t anychanged;
6264 if (advice != MADV_DONTNEED && advice != MADV_FREE)
6268 * A/D bit emulation requires an alternate code path when clearing
6269 * the modified and accessed bits below. Since this function is
6270 * advisory in nature we skip it entirely for pmaps that require
6271 * A/D bit emulation.
6273 if (pmap_emulate_ad_bits(pmap))
6276 PG_A = pmap_accessed_bit(pmap);
6277 PG_G = pmap_global_bit(pmap);
6278 PG_M = pmap_modified_bit(pmap);
6279 PG_V = pmap_valid_bit(pmap);
6280 PG_RW = pmap_rw_bit(pmap);
6282 pmap_delayed_invl_started();
6284 for (; sva < eva; sva = va_next) {
6285 pml4e = pmap_pml4e(pmap, sva);
6286 if ((*pml4e & PG_V) == 0) {
6287 va_next = (sva + NBPML4) & ~PML4MASK;
6292 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6293 if ((*pdpe & PG_V) == 0) {
6294 va_next = (sva + NBPDP) & ~PDPMASK;
6299 va_next = (sva + NBPDR) & ~PDRMASK;
6302 pde = pmap_pdpe_to_pde(pdpe, sva);
6304 if ((oldpde & PG_V) == 0)
6306 else if ((oldpde & PG_PS) != 0) {
6307 if ((oldpde & PG_MANAGED) == 0)
6310 if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) {
6315 * The large page mapping was destroyed.
6321 * Unless the page mappings are wired, remove the
6322 * mapping to a single page so that a subsequent
6323 * access may repromote. Since the underlying page
6324 * table page is fully populated, this removal never
6325 * frees a page table page.
6327 if ((oldpde & PG_W) == 0) {
6328 pte = pmap_pde_to_pte(pde, sva);
6329 KASSERT((*pte & PG_V) != 0,
6330 ("pmap_advise: invalid PTE"));
6331 pmap_remove_pte(pmap, pte, sva, *pde, NULL,
6341 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
6343 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
6345 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
6346 if (advice == MADV_DONTNEED) {
6348 * Future calls to pmap_is_modified()
6349 * can be avoided by making the page dirty now.
6352 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
6355 atomic_clear_long(pte, PG_M | PG_A);
6356 } else if ((*pte & PG_A) != 0)
6357 atomic_clear_long(pte, PG_A);
6361 if ((*pte & PG_G) != 0) {
6368 if (va != va_next) {
6369 pmap_invalidate_range(pmap, va, sva);
6374 pmap_invalidate_range(pmap, va, sva);
6377 pmap_invalidate_all(pmap);
6379 pmap_delayed_invl_finished();
6383 * Clear the modify bits on the specified physical page.
6386 pmap_clear_modify(vm_page_t m)
6388 struct md_page *pvh;
6390 pv_entry_t next_pv, pv;
6391 pd_entry_t oldpde, *pde;
6392 pt_entry_t oldpte, *pte, PG_M, PG_RW, PG_V;
6393 struct rwlock *lock;
6395 int md_gen, pvh_gen;
6397 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6398 ("pmap_clear_modify: page %p is not managed", m));
6399 VM_OBJECT_ASSERT_WLOCKED(m->object);
6400 KASSERT(!vm_page_xbusied(m),
6401 ("pmap_clear_modify: page %p is exclusive busied", m));
6404 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
6405 * If the object containing the page is locked and the page is not
6406 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
6408 if ((m->aflags & PGA_WRITEABLE) == 0)
6410 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
6411 pa_to_pvh(VM_PAGE_TO_PHYS(m));
6412 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6415 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
6417 if (!PMAP_TRYLOCK(pmap)) {
6418 pvh_gen = pvh->pv_gen;
6422 if (pvh_gen != pvh->pv_gen) {
6427 PG_M = pmap_modified_bit(pmap);
6428 PG_V = pmap_valid_bit(pmap);
6429 PG_RW = pmap_rw_bit(pmap);
6431 pde = pmap_pde(pmap, va);
6433 if ((oldpde & PG_RW) != 0) {
6434 if (pmap_demote_pde_locked(pmap, pde, va, &lock)) {
6435 if ((oldpde & PG_W) == 0) {
6437 * Write protect the mapping to a
6438 * single page so that a subsequent
6439 * write access may repromote.
6441 va += VM_PAGE_TO_PHYS(m) - (oldpde &
6443 pte = pmap_pde_to_pte(pde, va);
6445 if ((oldpte & PG_V) != 0) {
6446 while (!atomic_cmpset_long(pte,
6448 oldpte & ~(PG_M | PG_RW)))
6451 pmap_invalidate_page(pmap, va);
6458 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
6460 if (!PMAP_TRYLOCK(pmap)) {
6461 md_gen = m->md.pv_gen;
6462 pvh_gen = pvh->pv_gen;
6466 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
6471 PG_M = pmap_modified_bit(pmap);
6472 PG_RW = pmap_rw_bit(pmap);
6473 pde = pmap_pde(pmap, pv->pv_va);
6474 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
6475 " a 2mpage in page %p's pv list", m));
6476 pte = pmap_pde_to_pte(pde, pv->pv_va);
6477 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
6478 atomic_clear_long(pte, PG_M);
6479 pmap_invalidate_page(pmap, pv->pv_va);
6487 * Miscellaneous support routines follow
6490 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
6491 static __inline void
6492 pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask)
6497 * The cache mode bits are all in the low 32-bits of the
6498 * PTE, so we can just spin on updating the low 32-bits.
6501 opte = *(u_int *)pte;
6502 npte = opte & ~mask;
6504 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
6507 /* Adjust the cache mode for a 2MB page mapped via a PDE. */
6508 static __inline void
6509 pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask)
6514 * The cache mode bits are all in the low 32-bits of the
6515 * PDE, so we can just spin on updating the low 32-bits.
6518 opde = *(u_int *)pde;
6519 npde = opde & ~mask;
6521 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
6525 * Map a set of physical memory pages into the kernel virtual
6526 * address space. Return a pointer to where it is mapped. This
6527 * routine is intended to be used for mapping device memory, NOT real memory.
6531 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
6533 struct pmap_preinit_mapping *ppim;
6534 vm_offset_t va, offset;
6538 offset = pa & PAGE_MASK;
6539 size = round_page(offset + size);
6540 pa = trunc_page(pa);
6542 if (!pmap_initialized) {
6544 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
6545 ppim = pmap_preinit_mapping + i;
6546 if (ppim->va == 0) {
6550 ppim->va = virtual_avail;
6551 virtual_avail += size;
6557 panic("%s: too many preinit mappings", __func__);
6560 * If we have a preinit mapping, re-use it.
6562 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
6563 ppim = pmap_preinit_mapping + i;
6564 if (ppim->pa == pa && ppim->sz == size &&
6566 return ((void *)(ppim->va + offset));
6569 * If the specified range of physical addresses fits within
6570 * the direct map window, use the direct map.
6572 if (pa < dmaplimit && pa + size < dmaplimit) {
6573 va = PHYS_TO_DMAP(pa);
6574 if (!pmap_change_attr(va, size, mode))
6575 return ((void *)(va + offset));
6577 va = kva_alloc(size);
6579 panic("%s: Couldn't allocate KVA", __func__);
6581 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
6582 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
6583 pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
6584 pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
6585 return ((void *)(va + offset));
6589 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
6592 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
6596 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
6599 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6603 pmap_unmapdev(vm_offset_t va, vm_size_t size)
6605 struct pmap_preinit_mapping *ppim;
6609 /* If pmap_mapdev() returned a direct map address, there is nothing to unmap. */
6610 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
6612 offset = va & PAGE_MASK;
6613 size = round_page(offset + size);
6614 va = trunc_page(va);
6615 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
6616 ppim = pmap_preinit_mapping + i;
6617 if (ppim->va == va && ppim->sz == size) {
6618 if (pmap_initialized)
6624 if (va + size == virtual_avail)
6629 if (pmap_initialized)
6634 * Tries to demote a 1GB page mapping.
6637 pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
6639 pdp_entry_t newpdpe, oldpdpe;
6640 pd_entry_t *firstpde, newpde, *pde;
6641 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
6645 PG_A = pmap_accessed_bit(pmap);
6646 PG_M = pmap_modified_bit(pmap);
6647 PG_V = pmap_valid_bit(pmap);
6648 PG_RW = pmap_rw_bit(pmap);
6650 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6652 KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
6653 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
6654 if ((pdpg = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT |
6655 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
6656 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
6657 " in pmap %p", va, pmap);
6660 pdpgpa = VM_PAGE_TO_PHYS(pdpg);
6661 firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
6662 newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
6663 KASSERT((oldpdpe & PG_A) != 0,
6664 ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
6665 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
6666 ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
6670 * Initialize the page directory page.
6672 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
6678 * Demote the mapping.
6683 * Invalidate a stale recursive mapping of the page directory page.
6685 pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
6687 pmap_pdpe_demotions++;
6688 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
6689 " in pmap %p", va, pmap);
6694 * Sets the memory attribute for the specified page.
6697 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
6700 m->md.pat_mode = ma;
6703 * If "m" is a normal page, update its direct mapping. This update
6704 * can be relied upon to perform any cache operations that are
6705 * required for data coherence.
6707 if ((m->flags & PG_FICTITIOUS) == 0 &&
6708 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
6710 panic("memory attribute change on the direct map failed");
6714 * Changes the specified virtual address range's memory type to that given by
6715 * the parameter "mode". The specified virtual address range must be
6716 * completely contained within either the direct map or the kernel map. If
6717 * the virtual address range is contained within the kernel map, then the
6718 * memory type for each of the corresponding ranges of the direct map is also
6719 * changed. (The corresponding ranges of the direct map are those ranges that
6720 * map the same physical pages as the specified virtual address range.) These
6721 * changes to the direct map are necessary because Intel describes the
6722 * behavior of their processors as "undefined" if two or more mappings to the
6723 * same physical page have different memory types.
6725 * Returns zero if the change completed successfully, and either EINVAL or
6726 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
6727 * of the virtual address range was not mapped, and ENOMEM is returned if
6728 * there was insufficient memory available to complete the change. In the
6729 * latter case, the memory type may have been changed on some part of the
6730 * virtual address range or the direct map.
6733 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
6737 PMAP_LOCK(kernel_pmap);
6738 error = pmap_change_attr_locked(va, size, mode);
6739 PMAP_UNLOCK(kernel_pmap);
6744 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
6746 vm_offset_t base, offset, tmpva;
6747 vm_paddr_t pa_start, pa_end, pa_end1;
6751 int cache_bits_pte, cache_bits_pde, error;
6754 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
6755 base = trunc_page(va);
6756 offset = va & PAGE_MASK;
6757 size = round_page(offset + size);
6760 * Only supported on kernel virtual addresses, including the direct
6761 * map but excluding the recursive map.
6763 if (base < DMAP_MIN_ADDRESS)
6766 cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
6767 cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
6771 * Pages that aren't mapped aren't supported. Also break down 2MB pages
6772 * into 4KB pages if required.
6774 for (tmpva = base; tmpva < base + size; ) {
6775 pdpe = pmap_pdpe(kernel_pmap, tmpva);
6776 if (pdpe == NULL || *pdpe == 0)
6778 if (*pdpe & PG_PS) {
6780 * If the current 1GB page already has the required
6781 * memory type, then we need not demote this page. Just
6782 * increment tmpva to the next 1GB page frame.
6784 if ((*pdpe & X86_PG_PDE_CACHE) == cache_bits_pde) {
6785 tmpva = trunc_1gpage(tmpva) + NBPDP;
6790 * If the current offset aligns with a 1GB page frame
6791 * and there is at least 1GB left within the range, then
6792 * we need not break down this page into 2MB pages.
6794 if ((tmpva & PDPMASK) == 0 &&
6795 tmpva + PDPMASK < base + size) {
6799 if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
6802 pde = pmap_pdpe_to_pde(pdpe, tmpva);
6807 * If the current 2MB page already has the required
6808 * memory type, then we need not demote this page. Just
6809 * increment tmpva to the next 2MB page frame.
6811 if ((*pde & X86_PG_PDE_CACHE) == cache_bits_pde) {
6812 tmpva = trunc_2mpage(tmpva) + NBPDR;
6817 * If the current offset aligns with a 2MB page frame
6818 * and there is at least 2MB left within the range, then
6819 * we need not break down this page into 4KB pages.
6821 if ((tmpva & PDRMASK) == 0 &&
6822 tmpva + PDRMASK < base + size) {
6826 if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
6829 pte = pmap_pde_to_pte(pde, tmpva);
6837 * Ok, all the pages exist, so run through them updating their
6838 * cache mode if required.
6840 pa_start = pa_end = 0;
6841 for (tmpva = base; tmpva < base + size; ) {
6842 pdpe = pmap_pdpe(kernel_pmap, tmpva);
6843 if (*pdpe & PG_PS) {
6844 if ((*pdpe & X86_PG_PDE_CACHE) != cache_bits_pde) {
6845 pmap_pde_attr(pdpe, cache_bits_pde,
6849 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6850 (*pdpe & PG_PS_FRAME) < dmaplimit) {
6851 if (pa_start == pa_end) {
6852 /* Start physical address run. */
6853 pa_start = *pdpe & PG_PS_FRAME;
6854 pa_end = pa_start + NBPDP;
6855 } else if (pa_end == (*pdpe & PG_PS_FRAME))
6858 /* Run ended, update direct map. */
6859 error = pmap_change_attr_locked(
6860 PHYS_TO_DMAP(pa_start),
6861 pa_end - pa_start, mode);
6864 /* Start physical address run. */
6865 pa_start = *pdpe & PG_PS_FRAME;
6866 pa_end = pa_start + NBPDP;
6869 tmpva = trunc_1gpage(tmpva) + NBPDP;
6872 pde = pmap_pdpe_to_pde(pdpe, tmpva);
6874 if ((*pde & X86_PG_PDE_CACHE) != cache_bits_pde) {
6875 pmap_pde_attr(pde, cache_bits_pde,
6879 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6880 (*pde & PG_PS_FRAME) < dmaplimit) {
6881 if (pa_start == pa_end) {
6882 /* Start physical address run. */
6883 pa_start = *pde & PG_PS_FRAME;
6884 pa_end = pa_start + NBPDR;
6885 } else if (pa_end == (*pde & PG_PS_FRAME))
6888 /* Run ended, update direct map. */
6889 error = pmap_change_attr_locked(
6890 PHYS_TO_DMAP(pa_start),
6891 pa_end - pa_start, mode);
6894 /* Start physical address run. */
6895 pa_start = *pde & PG_PS_FRAME;
6896 pa_end = pa_start + NBPDR;
6899 tmpva = trunc_2mpage(tmpva) + NBPDR;
6901 pte = pmap_pde_to_pte(pde, tmpva);
6902 if ((*pte & X86_PG_PTE_CACHE) != cache_bits_pte) {
6903 pmap_pte_attr(pte, cache_bits_pte,
6907 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6908 (*pte & PG_FRAME) < dmaplimit) {
6909 if (pa_start == pa_end) {
6910 /* Start physical address run. */
6911 pa_start = *pte & PG_FRAME;
6912 pa_end = pa_start + PAGE_SIZE;
6913 } else if (pa_end == (*pte & PG_FRAME))
6914 pa_end += PAGE_SIZE;
6916 /* Run ended, update direct map. */
6917 error = pmap_change_attr_locked(
6918 PHYS_TO_DMAP(pa_start),
6919 pa_end - pa_start, mode);
6922 /* Start physical address run. */
6923 pa_start = *pte & PG_FRAME;
6924 pa_end = pa_start + PAGE_SIZE;
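/*
 * Update the direct map for any remaining run of physical pages,
 * clipping the run to the end of the direct map.
 */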
6930 if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
6931 pa_end1 = MIN(pa_end, dmaplimit);
6932 if (pa_start != pa_end1)
6933 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
6934 pa_end1 - pa_start, mode);
6938 * Flush the CPU caches if required, so that no data remains cached
6939 * with the old memory attributes.
6942 pmap_invalidate_range(kernel_pmap, base, tmpva);
6943 pmap_invalidate_cache_range(base, tmpva, FALSE);
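
/*
 * Illustrative sketch, not part of the original file: a typical consumer of
 * the attribute-change path above is a driver that remaps an existing kernel
 * mapping (a framebuffer, for example) as write-combining.  It goes through
 * the public pmap_change_attr() wrapper, which takes the kernel pmap lock and
 * calls pmap_change_attr_locked().  The function and parameter names below
 * are hypothetical.
 */
static __unused int
example_set_range_write_combining(vm_offset_t example_va, vm_size_t example_size)
{

	/* Returns 0 on success, or EINVAL/ENOMEM as described above. */
	return (pmap_change_attr(example_va, example_size,
	    PAT_WRITE_COMBINING));
}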

/*
 * Demotes any mapping within the direct map region that covers more than the
 * specified range of physical addresses.  This range's size must be a power
 * of two and its starting address must be a multiple of its size.  Since the
 * demotion does not change any attributes of the mapping, a TLB invalidation
 * is not mandatory.  The caller may, however, request a TLB invalidation.
 */
void
pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
{
	pdp_entry_t *pdpe;
	pd_entry_t *pde;
	vm_offset_t va;
	boolean_t changed;

	if (len == 0)
		return;
	KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
	KASSERT((base & (len - 1)) == 0,
	    ("pmap_demote_DMAP: base is not a multiple of len"));
	if (len < NBPDP && base < dmaplimit) {
		va = PHYS_TO_DMAP(base);
		changed = FALSE;
		PMAP_LOCK(kernel_pmap);
		pdpe = pmap_pdpe(kernel_pmap, va);
		if ((*pdpe & X86_PG_V) == 0)
			panic("pmap_demote_DMAP: invalid PDPE");
		if ((*pdpe & PG_PS) != 0) {
			if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
				panic("pmap_demote_DMAP: PDPE failed");
			changed = TRUE;
		}
		if (len < NBPDR) {
			pde = pmap_pdpe_to_pde(pdpe, va);
			if ((*pde & X86_PG_V) == 0)
				panic("pmap_demote_DMAP: invalid PDE");
			if ((*pde & PG_PS) != 0) {
				if (!pmap_demote_pde(kernel_pmap, pde, va))
					panic("pmap_demote_DMAP: PDE failed");
				changed = TRUE;
			}
		}
		if (changed && invalidate)
			pmap_invalidate_page(kernel_pmap, va);
		PMAP_UNLOCK(kernel_pmap);
	}
}
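
/*
 * Illustrative sketch, not part of the original file: a caller that is about
 * to change the memory attributes of a physical range (MTRR-style code, for
 * instance) would first demote any direct map superpages that extend beyond
 * that range, requesting the TLB invalidation because an attribute update
 * follows immediately.  The function name is hypothetical.
 */
static __unused void
example_demote_dmap_before_attr_change(vm_paddr_t base, vm_size_t len)
{

	/* pmap_demote_DMAP() asserts these same conditions. */
	KASSERT(powerof2(len), ("example: len is not a power of 2"));
	KASSERT((base & (len - 1)) == 0, ("example: base not aligned to len"));
	pmap_demote_DMAP(base, len, TRUE);
}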

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pd_entry_t *pdep;
	pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
	vm_paddr_t pa;
	int val;

	PG_A = pmap_accessed_bit(pmap);
	PG_M = pmap_modified_bit(pmap);
	PG_V = pmap_valid_bit(pmap);
	PG_RW = pmap_rw_bit(pmap);

	PMAP_LOCK(pmap);
retry:
	pdep = pmap_pde(pmap, addr);
	if (pdep != NULL && (*pdep & PG_V)) {
		if (*pdep & PG_PS) {
			pte = *pdep;
			/* Compute the physical address of the 4KB page. */
			pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
			    PG_FRAME;
			val = MINCORE_SUPER;
		} else {
			pte = *pmap_pde_to_pte(pdep, addr);
			pa = pte & PG_FRAME;
			val = 0;
		}
	} else {
		pte = 0;
		pa = 0;
		val = 0;
	}
	if ((pte & PG_V) != 0) {
		val |= MINCORE_INCORE;
		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((pte & PG_A) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}

static uint64_t
pmap_pcid_alloc(pmap_t pmap, u_int cpuid)
{
	uint32_t gen, new_gen, pcid_next;

	CRITICAL_ASSERT(curthread);
	gen = PCPU_GET(pcid_gen);
	if (pmap->pm_pcids[cpuid].pm_pcid == PMAP_PCID_KERN ||
	    pmap->pm_pcids[cpuid].pm_gen == gen)
		return (CR3_PCID_SAVE);
	pcid_next = PCPU_GET(pcid_next);
	KASSERT(pcid_next <= PMAP_PCID_OVERMAX, ("cpu %d pcid_next %#x",
	    cpuid, pcid_next));
	if (pcid_next == PMAP_PCID_OVERMAX) {
		new_gen = gen + 1;
		if (new_gen == 0)
			new_gen = 1;
		PCPU_SET(pcid_gen, new_gen);
		pcid_next = PMAP_PCID_KERN + 1;
	} else {
		new_gen = gen;
	}
	pmap->pm_pcids[cpuid].pm_pcid = pcid_next;
	pmap->pm_pcids[cpuid].pm_gen = new_gen;
	PCPU_SET(pcid_next, pcid_next + 1);
	return (0);
}
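
/*
 * Illustrative sketch, not part of the original file: a pmap's per-CPU PCID
 * may be reused without a TLB flush only while its recorded generation
 * matches the CPU's current pcid_gen; bumping pcid_gen on overflow therefore
 * invalidates every cached PCID on that CPU at once.  This helper is
 * hypothetical and assumes it runs pinned to the CPU it queries.
 */
static __unused boolean_t
example_pcid_is_current(pmap_t pmap, u_int cpuid)
{

	return (pmap->pm_pcids[cpuid].pm_gen == PCPU_GET(pcid_gen));
}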

void
pmap_activate_sw(struct thread *td)
{
	pmap_t oldpmap, pmap;
	uint64_t cached, cr3;
	register_t rflags;
	u_int cpuid;

	oldpmap = PCPU_GET(curpmap);
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	if (oldpmap == pmap)
		return;
	cpuid = PCPU_GET(cpuid);
#ifdef SMP
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
	CPU_SET(cpuid, &pmap->pm_active);
#endif
	cr3 = rcr3();
	if (pmap_pcid_enabled) {
		cached = pmap_pcid_alloc(pmap, cpuid);
		KASSERT(pmap->pm_pcids[cpuid].pm_pcid >= 0 &&
		    pmap->pm_pcids[cpuid].pm_pcid < PMAP_PCID_OVERMAX,
		    ("pmap %p cpu %d pcid %#x", pmap, cpuid,
		    pmap->pm_pcids[cpuid].pm_pcid));
		KASSERT(pmap->pm_pcids[cpuid].pm_pcid != PMAP_PCID_KERN ||
		    pmap == kernel_pmap,
		    ("non-kernel pmap thread %p pmap %p cpu %d pcid %#x",
		    td, pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid));

		/*
		 * If the INVPCID instruction is not available,
		 * invltlb_pcid_handler() is used to handle the
		 * invalidate_all IPI, which checks for curpmap ==
		 * smp_tlb_pmap.  The operation sequence below has a
		 * window where %CR3 is loaded with the new pmap's
		 * PML4 address, but the curpmap value is not yet
		 * updated.  This causes an invltlb IPI handler run
		 * between the two updates to execute as a NOP, which
		 * leaves stale TLB entries.
		 *
		 * Note that the most typical use of
		 * pmap_activate_sw(), from the context switch, is
		 * immune to this race, because interrupts are
		 * disabled (while the thread lock is owned), and the
		 * IPI happens after curpmap is updated.  Protect
		 * other callers in a similar way, by disabling
		 * interrupts around the %cr3 register reload and the
		 * curpmap assignment.
		 */
		if (!invpcid_works)
			rflags = intr_disable();

		if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3) {
			load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
			    cached);
			if (cached)
				PCPU_INC(pm_save_cnt);
		}
		PCPU_SET(curpmap, pmap);
		if (!invpcid_works)
			intr_restore(rflags);
	} else if (cr3 != pmap->pm_cr3) {
		load_cr3(pmap->pm_cr3);
		PCPU_SET(curpmap, pmap);
	}
#ifdef SMP
	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
#else
	CPU_CLR(cpuid, &oldpmap->pm_active);
#endif
}

void
pmap_activate(struct thread *td)
{

	critical_enter();
	pmap_activate_sw(td);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBPDR)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & PDRMASK;
	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
	    (*addr & PDRMASK) == superpage_offset)
		return;
	if ((*addr & PDRMASK) < superpage_offset)
		*addr = (*addr & ~PDRMASK) + superpage_offset;
	else
		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}
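
/*
 * Illustrative sketch, not part of the original file, using hypothetical
 * values: for an object offset of 0x1ff000 and a mapping hint of 0x800200000,
 * pmap_align_superpage() moves the hint to 0x8003ff000 so that the virtual
 * address and the offset share the same 2MB phase, which allows the mapping
 * to be promoted to a superpage later.
 */
static __unused vm_offset_t
example_align_superpage_hint(void)
{
	vm_offset_t addr;

	/* 4MB is large enough that at least one full 2MB page remains. */
	addr = 0x800200000UL;
	pmap_align_superpage(NULL, 0x1ff000, &addr, 4 * 1024 * 1024);
	/* addr is now 0x8003ff000. */
	return (addr);
}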

#ifdef INVARIANTS
static unsigned long num_dirty_emulations;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW,
    &num_dirty_emulations, 0, NULL);

static unsigned long num_accessed_emulations;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW,
    &num_accessed_emulations, 0, NULL);

static unsigned long num_superpage_accessed_emulations;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW,
    &num_superpage_accessed_emulations, 0, NULL);

static unsigned long ad_emulation_superpage_promotions;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW,
    &ad_emulation_superpage_promotions, 0, NULL);
#endif	/* INVARIANTS */

int
pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
{
	int rv;
	struct rwlock *lock;
#if VM_NRESERVLEVEL > 0
	vm_page_t m, mpte;
#endif
	pd_entry_t *pde;
	pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;

	KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE,
	    ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype));

	if (!pmap_emulate_ad_bits(pmap))
		return (-1);

	PG_A = pmap_accessed_bit(pmap);
	PG_M = pmap_modified_bit(pmap);
	PG_V = pmap_valid_bit(pmap);
	PG_RW = pmap_rw_bit(pmap);

	rv = -1;
	lock = NULL;
	PMAP_LOCK(pmap);

	pde = pmap_pde(pmap, va);
	if (pde == NULL || (*pde & PG_V) == 0)
		goto done;

	if ((*pde & PG_PS) != 0) {
		if (ftype == VM_PROT_READ) {
#ifdef INVARIANTS
			atomic_add_long(&num_superpage_accessed_emulations, 1);
#endif
			*pde |= PG_A;
			rv = 0;
		}
		goto done;
	}

	pte = pmap_pde_to_pte(pde, va);
	if ((*pte & PG_V) == 0)
		goto done;

	if (ftype == VM_PROT_WRITE) {
		if ((*pte & PG_RW) == 0)
			goto done;
		/*
		 * Set the modified and accessed bits simultaneously.
		 *
		 * Intel EPT PTEs that do software emulation of A/D bits map
		 * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively.
		 * An EPT misconfiguration is triggered if the PTE is writable
		 * but not readable (WR=10).  This is avoided by setting PG_A
		 * and PG_M simultaneously.
		 */
		*pte |= PG_M | PG_A;
	} else {
		*pte |= PG_A;
	}

#if VM_NRESERVLEVEL > 0
	/* try to promote the mapping */
	if (va < VM_MAXUSER_ADDRESS)
		mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
	else
		mpte = NULL;

	m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);

	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
	    pmap_ps_enabled(pmap) &&
	    (m->flags & PG_FICTITIOUS) == 0 &&
	    vm_reserv_level_iffullpop(m) == 0) {
		pmap_promote_pde(pmap, pde, va, &lock);
#ifdef INVARIANTS
		atomic_add_long(&ad_emulation_superpage_promotions, 1);
#endif
	}
#endif

#ifdef INVARIANTS
	if (ftype == VM_PROT_WRITE)
		atomic_add_long(&num_dirty_emulations, 1);
	else
		atomic_add_long(&num_accessed_emulations, 1);
#endif
	rv = 0;		/* success */
done:
	if (lock != NULL)
		rw_wunlock(lock);
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
{
	pml4_entry_t *pml4;
	pdp_entry_t *pdp;
	pd_entry_t *pde;
	pt_entry_t *pte, PG_V;
	int idx;

	idx = 0;
	PG_V = pmap_valid_bit(pmap);
	PMAP_LOCK(pmap);

	pml4 = pmap_pml4e(pmap, va);
	ptr[idx++] = *pml4;
	if ((*pml4 & PG_V) == 0)
		goto done;

	pdp = pmap_pml4e_to_pdpe(pml4, va);
	ptr[idx++] = *pdp;
	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0)
		goto done;

	pde = pmap_pdpe_to_pde(pdp, va);
	ptr[idx++] = *pde;
	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0)
		goto done;

	pte = pmap_pde_to_pte(pde, va);
	ptr[idx++] = *pte;

done:
	PMAP_UNLOCK(pmap);
	*num = idx;
}

/*
 * Get the kernel virtual address of a set of physical pages.  If there are
 * physical addresses not covered by the DMAP perform a transient mapping
 * that will be removed when calling pmap_unmap_io_transient.
 *
 * \param page        The pages the caller wishes to obtain the virtual
 *                    address on the kernel memory map.
 * \param vaddr       On return contains the kernel virtual memory address
 *                    of the pages passed in the page parameter.
 * \param count       Number of pages passed in.
 * \param can_fault   TRUE if the thread using the mapped pages can take
 *                    page faults, FALSE otherwise.
 *
 * \returns TRUE if the caller must call pmap_unmap_io_transient when
 *          finished or FALSE otherwise.
 *
 */
boolean_t
pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
    boolean_t can_fault)
{
	vm_paddr_t paddr;
	boolean_t needs_mapping;
	pt_entry_t *pte;
	int cache_bits, error, i;

	/*
	 * Allocate any KVA space that we need, this is done in a separate
	 * loop to prevent calling vmem_alloc while pinned.
	 */
	needs_mapping = FALSE;
	for (i = 0; i < count; i++) {
		paddr = VM_PAGE_TO_PHYS(page[i]);
		if (__predict_false(paddr >= dmaplimit)) {
			error = vmem_alloc(kernel_arena, PAGE_SIZE,
			    M_BESTFIT | M_WAITOK, &vaddr[i]);
			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
			needs_mapping = TRUE;
		} else {
			vaddr[i] = PHYS_TO_DMAP(paddr);
		}
	}

	/* Exit early if everything is covered by the DMAP */
	if (!needs_mapping)
		return (FALSE);

	/*
	 * NB:  The sequence of updating a page table followed by accesses
	 * to the corresponding pages used in the !DMAP case is subject to
	 * the situation described in the "AMD64 Architecture Programmer's
	 * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special
	 * Coherency Considerations".  Therefore, issuing the INVLPG right
	 * after modifying the PTE bits is crucial.
	 */
	if (!can_fault)
		sched_pin();
	for (i = 0; i < count; i++) {
		paddr = VM_PAGE_TO_PHYS(page[i]);
		if (paddr >= dmaplimit) {
			if (can_fault) {
				/*
				 * Slow path: since we can get page faults
				 * while mappings are active, don't pin the
				 * thread to the CPU; instead add a global
				 * mapping visible to all CPUs.
				 */
				pmap_qenter(vaddr[i], &page[i], 1);
			} else {
				pte = vtopte(vaddr[i]);
				cache_bits = pmap_cache_bits(kernel_pmap,
				    page[i]->md.pat_mode, 0);
				pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
				    cache_bits);
				invlpg(vaddr[i]);
			}
		}
	}

	return (needs_mapping);
}

void
pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
    boolean_t can_fault)
{
	vm_paddr_t paddr;
	int i;

	if (!can_fault)
		sched_unpin();
	for (i = 0; i < count; i++) {
		paddr = VM_PAGE_TO_PHYS(page[i]);
		if (paddr >= dmaplimit) {
			if (can_fault)
				pmap_qremove(vaddr[i], 1);
			vmem_free(kernel_arena, vaddr[i], PAGE_SIZE);
		}
	}
}
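
/*
 * Illustrative sketch, not part of the original file: the two routines above
 * bracket CPU access to arbitrary physical pages.  The hypothetical helper
 * below copies at most one page's worth of data out of a vm_page_t; the name
 * and the bcopy() destination are assumptions.
 */
static __unused void
example_copy_from_page(vm_page_t m, void *dst, size_t len)
{
	vm_offset_t va;
	boolean_t mapped;

	KASSERT(len <= PAGE_SIZE, ("example: copy crosses the page"));
	mapped = pmap_map_io_transient(&m, &va, 1, FALSE);
	bcopy((void *)va, dst, len);
	if (mapped)
		pmap_unmap_io_transient(&m, &va, 1, FALSE);
}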

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
	vm_paddr_t paddr;

	paddr = VM_PAGE_TO_PHYS(m);
	if (paddr < dmaplimit)
		return (PHYS_TO_DMAP(paddr));
	mtx_lock_spin(&qframe_mtx);
	KASSERT(*vtopte(qframe) == 0, ("qframe busy"));
	pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
	    X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0));
	return (qframe);
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

	if (addr != qframe)
		return;
	pte_store(vtopte(qframe), 0);
	invlpg(qframe);
	mtx_unlock_spin(&qframe_mtx);
}
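
/*
 * Illustrative sketch, not part of the original file: the quick-enter KPI
 * maps a single page for a short, non-sleeping access and must be paired
 * with pmap_quick_remove_page().  On amd64 it normally resolves to the
 * direct map and only falls back to the qframe slot for pages above
 * dmaplimit.  The helper name is hypothetical.
 */
static __unused void
example_zero_page_quick(vm_page_t m)
{
	vm_offset_t va;

	va = pmap_quick_enter_page(m);
	pagezero((void *)va);
	pmap_quick_remove_page(va);
}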

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kdb.h>
#include <ddb/ddb.h>

DB_SHOW_COMMAND(pte, pmap_print_pte)
{
	pmap_t pmap;
	pml4_entry_t *pml4;
	pdp_entry_t *pdp;
	pd_entry_t *pde;
	pt_entry_t *pte, PG_V;
	vm_offset_t va;

	if (!have_addr) {
		db_printf("show pte addr\n");
		return;
	}
	va = (vm_offset_t)addr;

	if (kdb_thread != NULL)
		pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
	else
		pmap = PCPU_GET(curpmap);

	PG_V = pmap_valid_bit(pmap);
	pml4 = pmap_pml4e(pmap, va);
	db_printf("VA %#016lx pml4e %#016lx", va, *pml4);
	if ((*pml4 & PG_V) == 0) {
		db_printf("\n");
		return;
	}
	pdp = pmap_pml4e_to_pdpe(pml4, va);
	db_printf(" pdpe %#016lx", *pdp);
	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) {
		db_printf("\n");
		return;
	}
	pde = pmap_pdpe_to_pde(pdp, va);
	db_printf(" pde %#016lx", *pde);
	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) {
		db_printf("\n");
		return;
	}
	pte = pmap_pde_to_pte(pde, va);
	db_printf(" pte %#016lx\n", *pte);
}

DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap)
{
	vm_paddr_t a;

	if (have_addr) {
		a = (vm_paddr_t)addr;
		db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a));
	} else {
		db_printf("show phys2dmap addr\n");
	}
}
#endif