2 * SPDX-License-Identifier: BSD-4-Clause
4 * Copyright (c) 1991 Regents of the University of California.
6 * Copyright (c) 1994 John S. Dyson
8 * Copyright (c) 1994 David Greenman
10 * Copyright (c) 2003 Peter Wemm
11 * All rights reserved.
12 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
13 * All rights reserved.
15 * This code is derived from software contributed to Berkeley by
16 * the Systems Programming Group of the University of Utah Computer
17 * Science Department and William Jolitz of UUNET Technologies Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
22 * 1. Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * 2. Redistributions in binary form must reproduce the above copyright
25 * notice, this list of conditions and the following disclaimer in the
26 * documentation and/or other materials provided with the distribution.
27 * 3. All advertising materials mentioning features or use of this software
28 * must display the following acknowledgement:
29 * This product includes software developed by the University of
30 * California, Berkeley and its contributors.
31 * 4. Neither the name of the University nor the names of its contributors
32 * may be used to endorse or promote products derived from this software
33 * without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
47 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
50 * Copyright (c) 2003 Networks Associates Technology, Inc.
51 * Copyright (c) 2014-2020 The FreeBSD Foundation
52 * All rights reserved.
54 * This software was developed for the FreeBSD Project by Jake Burkholder,
55 * Safeport Network Services, and Network Associates Laboratories, the
56 * Security Research Division of Network Associates, Inc. under
57 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
58 * CHATS research program.
60 * Portions of this software were developed by
61 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
62 * the FreeBSD Foundation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
67 * 1. Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * 2. Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in the
71 * documentation and/or other materials provided with the distribution.
73 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
86 #define AMD64_NPT_AWARE
88 #include <sys/cdefs.h>
89 __FBSDID("$FreeBSD$");
/*
 *	Manages physical address maps.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidations expensive,
 *	this module may delay invalidation or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and as to when physical maps must be made correct.
 */
111 #include "opt_pmap.h"
114 #include <sys/param.h>
115 #include <sys/bitstring.h>
117 #include <sys/systm.h>
118 #include <sys/kernel.h>
120 #include <sys/lock.h>
121 #include <sys/malloc.h>
122 #include <sys/mman.h>
123 #include <sys/mutex.h>
124 #include <sys/proc.h>
125 #include <sys/rangeset.h>
126 #include <sys/rwlock.h>
127 #include <sys/sbuf.h>
130 #include <sys/turnstile.h>
131 #include <sys/vmem.h>
132 #include <sys/vmmeter.h>
133 #include <sys/sched.h>
134 #include <sys/sysctl.h>
142 #include <vm/vm_param.h>
143 #include <vm/vm_kern.h>
144 #include <vm/vm_page.h>
145 #include <vm/vm_map.h>
146 #include <vm/vm_object.h>
147 #include <vm/vm_extern.h>
148 #include <vm/vm_pageout.h>
149 #include <vm/vm_pager.h>
150 #include <vm/vm_phys.h>
151 #include <vm/vm_radix.h>
152 #include <vm/vm_reserv.h>
153 #include <vm/vm_dumpset.h>
156 #include <machine/intr_machdep.h>
157 #include <x86/apicvar.h>
158 #include <x86/ifunc.h>
159 #include <machine/cpu.h>
160 #include <machine/cputypes.h>
161 #include <machine/intr_machdep.h>
162 #include <machine/md_var.h>
163 #include <machine/pcb.h>
164 #include <machine/specialreg.h>
166 #include <machine/smp.h>
168 #include <machine/sysarch.h>
169 #include <machine/tss.h>
172 #define PMAP_MEMDOM MAXMEMDOM
174 #define PMAP_MEMDOM 1
static __inline boolean_t
pmap_type_guest(pmap_t pmap)
{
	return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
}

static __inline boolean_t
pmap_emulate_ad_bits(pmap_t pmap)
{
	return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0);
}
191 static __inline pt_entry_t
192 pmap_valid_bit(pmap_t pmap)
196 switch (pmap->pm_type) {
202 if (pmap_emulate_ad_bits(pmap))
203 mask = EPT_PG_EMUL_V;
208 panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type);
214 static __inline pt_entry_t
215 pmap_rw_bit(pmap_t pmap)
219 switch (pmap->pm_type) {
225 if (pmap_emulate_ad_bits(pmap))
226 mask = EPT_PG_EMUL_RW;
231 panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type);
237 static pt_entry_t pg_g;
239 static __inline pt_entry_t
240 pmap_global_bit(pmap_t pmap)
244 switch (pmap->pm_type) {
253 panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type);
259 static __inline pt_entry_t
260 pmap_accessed_bit(pmap_t pmap)
264 switch (pmap->pm_type) {
270 if (pmap_emulate_ad_bits(pmap))
276 panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type);
282 static __inline pt_entry_t
283 pmap_modified_bit(pmap_t pmap)
287 switch (pmap->pm_type) {
293 if (pmap_emulate_ad_bits(pmap))
299 panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type);
static __inline pt_entry_t
pmap_pku_mask_bit(pmap_t pmap)
{
	return (pmap->pm_type == PT_X86 ? X86_PG_PKU_MASK : 0);
}
312 #if !defined(DIAGNOSTIC)
313 #ifdef __GNUC_GNU_INLINE__
314 #define PMAP_INLINE __attribute__((__gnu_inline__)) inline
316 #define PMAP_INLINE extern inline
323 #define PV_STAT(x) do { x ; } while (0)
325 #define PV_STAT(x) do { } while (0)
330 #define pa_index(pa) ({ \
331 KASSERT((pa) <= vm_phys_segs[vm_phys_nsegs - 1].end, \
332 ("address %lx beyond the last segment", (pa))); \
335 #define pa_to_pmdp(pa) (&pv_table[pa_index(pa)])
336 #define pa_to_pvh(pa) (&(pa_to_pmdp(pa)->pv_page))
337 #define PHYS_TO_PV_LIST_LOCK(pa) ({ \
338 struct rwlock *_lock; \
339 if (__predict_false((pa) > pmap_last_pa)) \
340 _lock = &pv_dummy_large.pv_lock; \
342 _lock = &(pa_to_pmdp(pa)->pv_lock); \
346 #define pa_index(pa) ((pa) >> PDRSHIFT)
347 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
349 #define NPV_LIST_LOCKS MAXCPU
351 #define PHYS_TO_PV_LIST_LOCK(pa) \
352 (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
355 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
356 struct rwlock **_lockp = (lockp); \
357 struct rwlock *_new_lock; \
359 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
360 if (_new_lock != *_lockp) { \
361 if (*_lockp != NULL) \
362 rw_wunlock(*_lockp); \
363 *_lockp = _new_lock; \
368 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
369 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
371 #define RELEASE_PV_LIST_LOCK(lockp) do { \
372 struct rwlock **_lockp = (lockp); \
374 if (*_lockp != NULL) { \
375 rw_wunlock(*_lockp); \
380 #define VM_PAGE_TO_PV_LIST_LOCK(m) \
381 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
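/*
 * Illustrative sketch (not code from this file): callers that traverse PV
 * lists keep a single cursor lock and retarget it with the macros above,
 * roughly as follows:
 *
 *	struct rwlock *lock = NULL;
 *
 *	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 *	... manipulate m's PV list with the lock write-held ...
 *	RELEASE_PV_LIST_LOCK(&lock);
 */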
383 struct pmap kernel_pmap_store;
385 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
386 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
389 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
390 "Number of kernel page table pages allocated on bootup");
393 vm_paddr_t dmaplimit;
394 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
397 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
398 "VM/pmap parameters");
400 static int pg_ps_enabled = 1;
401 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
402 &pg_ps_enabled, 0, "Are large page mappings enabled?");
404 int __read_frequently la57 = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, la57, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &la57, 0,
    "5-level paging for host is enabled");
410 pmap_is_la57(pmap_t pmap)
412 if (pmap->pm_type == PT_X86)
414 return (false); /* XXXKIB handle EPT */
417 #define PAT_INDEX_SIZE 8
418 static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
420 static u_int64_t KPTphys; /* phys addr of kernel level 1 */
421 static u_int64_t KPDphys; /* phys addr of kernel level 2 */
422 u_int64_t KPDPphys; /* phys addr of kernel level 3 */
423 u_int64_t KPML4phys; /* phys addr of kernel level 4 */
u_int64_t KPML5phys;	/* phys addr of kernel level 5, if supported */
427 static pml4_entry_t *kernel_pml4;
428 static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
429 static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
430 static int ndmpdpphys; /* number of DMPDPphys pages */
432 static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */
435 * pmap_mapdev support pre initialization (i.e. console)
437 #define PMAP_PREINIT_MAPPING_COUNT 8
438 static struct pmap_preinit_mapping {
443 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
444 static int pmap_initialized;
447 * Data for the pv entry allocation mechanism.
448 * Updates to pv_invl_gen are protected by the pv list lock but reads are not.
452 pc_to_domain(struct pv_chunk *pc)
455 return (vm_phys_domain(DMAP_TO_PHYS((vm_offset_t)pc)));
459 pc_to_domain(struct pv_chunk *pc __unused)
466 struct pv_chunks_list {
468 TAILQ_HEAD(pch, pv_chunk) pvc_list;
470 } __aligned(CACHE_LINE_SIZE);
472 struct pv_chunks_list __exclusive_cache_line pv_chunks[PMAP_MEMDOM];
475 struct pmap_large_md_page {
476 struct rwlock pv_lock;
477 struct md_page pv_page;
480 __exclusive_cache_line static struct pmap_large_md_page pv_dummy_large;
481 #define pv_dummy pv_dummy_large.pv_page
482 __read_mostly static struct pmap_large_md_page *pv_table;
483 __read_mostly vm_paddr_t pmap_last_pa;
485 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
486 static u_long pv_invl_gen[NPV_LIST_LOCKS];
487 static struct md_page *pv_table;
488 static struct md_page pv_dummy;
492 * All those kernel PT submaps that BSD is so fond of
494 pt_entry_t *CMAP1 = NULL;
496 static vm_offset_t qframe = 0;
497 static struct mtx qframe_mtx;
499 static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */
501 static vmem_t *large_vmem;
502 static u_int lm_ents;
503 #define PMAP_ADDRESS_IN_LARGEMAP(va) ((va) >= LARGEMAP_MIN_ADDRESS && \
504 (va) < LARGEMAP_MIN_ADDRESS + NBPML4 * (u_long)lm_ents)
506 int pmap_pcid_enabled = 1;
507 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
508 &pmap_pcid_enabled, 0, "Is TLB Context ID enabled ?");
509 int invpcid_works = 0;
510 SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
511 "Is the invpcid instruction available ?");
513 int __read_frequently pti = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pti, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pti, 0,
    "Page Table Isolation enabled");
517 static vm_object_t pti_obj;
518 static pml4_entry_t *pti_pml4;
519 static vm_pindex_t pti_pg_idx;
520 static bool pti_finalized;
522 struct pmap_pkru_range {
523 struct rs_el pkru_rs_el;
528 static uma_zone_t pmap_pkru_ranges_zone;
529 static bool pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
530 static pt_entry_t pmap_pkru_get(pmap_t pmap, vm_offset_t va);
531 static void pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
532 static void *pkru_dup_range(void *ctx, void *data);
533 static void pkru_free_range(void *ctx, void *node);
534 static int pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap);
535 static int pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva);
536 static void pmap_pkru_deassign_all(pmap_t pmap);
539 pmap_pcid_save_cnt_proc(SYSCTL_HANDLER_ARGS)
546 res += cpuid_to_pcpu[i]->pc_pm_save_cnt;
548 return (sysctl_handle_64(oidp, &res, 0, req));
550 SYSCTL_PROC(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLTYPE_U64 | CTLFLAG_RD |
551 CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU",
552 "Count of saved TLB context on switch");
554 static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
555 LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
556 static struct mtx invl_gen_mtx;
557 /* Fake lock object to satisfy turnstiles interface. */
558 static struct lock_object invl_gen_ts = {
561 static struct pmap_invl_gen pmap_invl_gen_head = {
565 static u_long pmap_invl_gen = 1;
566 static int pmap_invl_waiters;
567 static struct callout pmap_invl_callout;
568 static bool pmap_invl_callout_inited;
570 #define PMAP_ASSERT_NOT_IN_DI() \
571 KASSERT(pmap_not_in_di(), ("DI already started"))
578 if ((cpu_feature2 & CPUID2_CX16) == 0)
581 TUNABLE_INT_FETCH("vm.pmap.di_locked", &tun);
586 sysctl_pmap_di_locked(SYSCTL_HANDLER_ARGS)
590 locked = pmap_di_locked();
591 return (sysctl_handle_int(oidp, &locked, 0, req));
593 SYSCTL_PROC(_vm_pmap, OID_AUTO, di_locked, CTLTYPE_INT | CTLFLAG_RDTUN |
594 CTLFLAG_MPSAFE, 0, 0, sysctl_pmap_di_locked, "",
595 "Locked delayed invalidation");
597 static bool pmap_not_in_di_l(void);
598 static bool pmap_not_in_di_u(void);
599 DEFINE_IFUNC(, bool, pmap_not_in_di, (void))
602 return (pmap_di_locked() ? pmap_not_in_di_l : pmap_not_in_di_u);
606 pmap_not_in_di_l(void)
608 struct pmap_invl_gen *invl_gen;
610 invl_gen = &curthread->td_md.md_invl_gen;
611 return (invl_gen->gen == 0);
615 pmap_thread_init_invl_gen_l(struct thread *td)
617 struct pmap_invl_gen *invl_gen;
619 invl_gen = &td->td_md.md_invl_gen;
624 pmap_delayed_invl_wait_block(u_long *m_gen, u_long *invl_gen)
626 struct turnstile *ts;
628 ts = turnstile_trywait(&invl_gen_ts);
629 if (*m_gen > atomic_load_long(invl_gen))
630 turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
632 turnstile_cancel(ts);
636 pmap_delayed_invl_finish_unblock(u_long new_gen)
638 struct turnstile *ts;
640 turnstile_chain_lock(&invl_gen_ts);
641 ts = turnstile_lookup(&invl_gen_ts);
643 pmap_invl_gen = new_gen;
645 turnstile_broadcast(ts, TS_SHARED_QUEUE);
646 turnstile_unpend(ts);
648 turnstile_chain_unlock(&invl_gen_ts);
/*
 * Start a new Delayed Invalidation (DI) block of code, executed by
 * the current thread.  Within a DI block, the current thread may
 * destroy both the page table and PV list entries for a mapping and
 * then release the corresponding PV list lock before ensuring that
 * the mapping is flushed from the TLBs of any processors with the
 * pmap active for the page.
 */
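/*
 * Illustrative usage sketch (not a function from this file): a typical
 * remove path brackets its work in a DI block roughly as follows, so that
 * pmap_delayed_invl_wait() callers cannot observe a destroyed PV entry
 * whose mapping might still be live in some TLB:
 *
 *	pmap_delayed_invl_start();
 *	... destroy PTEs and PV entries, calling pmap_delayed_invl_page(m)
 *	    for each affected page while its PV list lock is write-held ...
 *	... issue the required TLB invalidations ...
 *	pmap_delayed_invl_finish();
 */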
660 pmap_delayed_invl_start_l(void)
662 struct pmap_invl_gen *invl_gen;
665 invl_gen = &curthread->td_md.md_invl_gen;
666 PMAP_ASSERT_NOT_IN_DI();
667 mtx_lock(&invl_gen_mtx);
668 if (LIST_EMPTY(&pmap_invl_gen_tracker))
669 currgen = pmap_invl_gen;
671 currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen;
672 invl_gen->gen = currgen + 1;
673 LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link);
674 mtx_unlock(&invl_gen_mtx);
/*
 * Finish the DI block, previously started by the current thread.  All
 * required TLB flushes for the pages marked by
 * pmap_delayed_invl_page() must be finished before this function is
 * called.
 *
 * This function works by bumping the global DI generation number to
 * the generation number of the current thread's DI, unless there is a
 * pending DI that started earlier.  In the latter case, bumping the
 * global DI generation number would incorrectly signal that the
 * earlier DI had finished.  Instead, this function bumps the earlier
 * DI's generation number to match the generation number of the
 * current thread's DI.
 */
692 pmap_delayed_invl_finish_l(void)
694 struct pmap_invl_gen *invl_gen, *next;
696 invl_gen = &curthread->td_md.md_invl_gen;
697 KASSERT(invl_gen->gen != 0, ("missed invl_start"));
698 mtx_lock(&invl_gen_mtx);
699 next = LIST_NEXT(invl_gen, link);
701 pmap_delayed_invl_finish_unblock(invl_gen->gen);
703 next->gen = invl_gen->gen;
704 LIST_REMOVE(invl_gen, link);
705 mtx_unlock(&invl_gen_mtx);
710 pmap_not_in_di_u(void)
712 struct pmap_invl_gen *invl_gen;
714 invl_gen = &curthread->td_md.md_invl_gen;
715 return (((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) != 0);
719 pmap_thread_init_invl_gen_u(struct thread *td)
721 struct pmap_invl_gen *invl_gen;
723 invl_gen = &td->td_md.md_invl_gen;
725 invl_gen->next = (void *)PMAP_INVL_GEN_NEXT_INVALID;
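/*
 * Informational: in the lockless ("_u") implementation, the
 * PMAP_INVL_GEN_NEXT_INVALID tag encoded in invl_gen->next marks a thread
 * that is not currently inside a DI block; pmap_not_in_di_u() above simply
 * tests for that tag.
 */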
729 pmap_di_load_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *out)
731 uint64_t new_high, new_low, old_high, old_low;
734 old_low = new_low = 0;
735 old_high = new_high = (uintptr_t)0;
737 __asm volatile("lock;cmpxchg16b\t%1"
738 : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
739 : "b"(new_low), "c" (new_high)
742 if ((old_high & PMAP_INVL_GEN_NEXT_INVALID) != 0)
745 out->next = (void *)old_high;
748 out->next = (void *)new_high;
754 pmap_di_store_invl(struct pmap_invl_gen *ptr, struct pmap_invl_gen *old_val,
755 struct pmap_invl_gen *new_val)
757 uint64_t new_high, new_low, old_high, old_low;
760 new_low = new_val->gen;
761 new_high = (uintptr_t)new_val->next;
762 old_low = old_val->gen;
763 old_high = (uintptr_t)old_val->next;
765 __asm volatile("lock;cmpxchg16b\t%1"
766 : "=@cce" (res), "+m" (*ptr), "+a" (old_low), "+d" (old_high)
767 : "b"(new_low), "c" (new_high)
773 static long invl_start_restart;
774 SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_start_restart, CTLFLAG_RD,
775 &invl_start_restart, 0,
777 static long invl_finish_restart;
778 SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_finish_restart, CTLFLAG_RD,
779 &invl_finish_restart, 0,
781 static int invl_max_qlen;
782 SYSCTL_INT(_vm_pmap, OID_AUTO, invl_max_qlen, CTLFLAG_RD,
787 #define di_delay locks_delay
790 pmap_delayed_invl_start_u(void)
792 struct pmap_invl_gen *invl_gen, *p, prev, new_prev;
794 struct lock_delay_arg lda;
802 invl_gen = &td->td_md.md_invl_gen;
803 PMAP_ASSERT_NOT_IN_DI();
804 lock_delay_arg_init(&lda, &di_delay);
805 invl_gen->saved_pri = 0;
806 pri = td->td_base_pri;
809 pri = td->td_base_pri;
811 invl_gen->saved_pri = pri;
818 for (p = &pmap_invl_gen_head;; p = prev.next) {
820 prevl = (uintptr_t)atomic_load_ptr(&p->next);
821 if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
822 PV_STAT(atomic_add_long(&invl_start_restart, 1));
828 prev.next = (void *)prevl;
831 if ((ii = invl_max_qlen) < i)
832 atomic_cmpset_int(&invl_max_qlen, ii, i);
835 if (!pmap_di_load_invl(p, &prev) || prev.next != NULL) {
836 PV_STAT(atomic_add_long(&invl_start_restart, 1));
841 new_prev.gen = prev.gen;
842 new_prev.next = invl_gen;
843 invl_gen->gen = prev.gen + 1;
845 /* Formal fence between store to invl->gen and updating *p. */
846 atomic_thread_fence_rel();
	/*
	 * After inserting an invl_gen element with the invalid bit set,
	 * this thread blocks any other thread trying to enter the
	 * delayed invalidation block.  Do not allow ourselves to be
	 * removed from the CPU, because that causes starvation for
	 * other threads.
	 */
	/*
	 * ABA for *p is not possible here, since p->gen can only
	 * increase.  So if the *p thread finished its DI, then
	 * started a new one and got inserted into the list at the
	 * same place, its gen will appear greater than the
	 * previously read gen.
	 */
863 if (!pmap_di_store_invl(p, &prev, &new_prev)) {
865 PV_STAT(atomic_add_long(&invl_start_restart, 1));
	/*
	 * Here we clear PMAP_INVL_GEN_NEXT_INVALID in
	 * invl_gen->next, allowing other threads to iterate past us.
	 * pmap_di_store_invl() provides a fence between the generation
	 * write and the update of next.
	 */
876 invl_gen->next = NULL;
881 pmap_delayed_invl_finish_u_crit(struct pmap_invl_gen *invl_gen,
882 struct pmap_invl_gen *p)
884 struct pmap_invl_gen prev, new_prev;
	/*
	 * Load invl_gen->gen after setting PMAP_INVL_GEN_NEXT_INVALID
	 * in invl_gen->next.  This prevents larger generations from
	 * propagating to our invl_gen->gen.  The lock prefix in
	 * atomic_set_ptr() works as a seq_cst fence.
	 */
893 mygen = atomic_load_long(&invl_gen->gen);
895 if (!pmap_di_load_invl(p, &prev) || prev.next != invl_gen)
898 KASSERT(prev.gen < mygen,
899 ("invalid di gen sequence %lu %lu", prev.gen, mygen));
900 new_prev.gen = mygen;
901 new_prev.next = (void *)((uintptr_t)invl_gen->next &
902 ~PMAP_INVL_GEN_NEXT_INVALID);
904 /* Formal fence between load of prev and storing update to it. */
905 atomic_thread_fence_rel();
907 return (pmap_di_store_invl(p, &prev, &new_prev));
911 pmap_delayed_invl_finish_u(void)
913 struct pmap_invl_gen *invl_gen, *p;
915 struct lock_delay_arg lda;
919 invl_gen = &td->td_md.md_invl_gen;
920 KASSERT(invl_gen->gen != 0, ("missed invl_start: gen 0"));
921 KASSERT(((uintptr_t)invl_gen->next & PMAP_INVL_GEN_NEXT_INVALID) == 0,
922 ("missed invl_start: INVALID"));
923 lock_delay_arg_init(&lda, &di_delay);
926 for (p = &pmap_invl_gen_head; p != NULL; p = (void *)prevl) {
927 prevl = (uintptr_t)atomic_load_ptr(&p->next);
928 if ((prevl & PMAP_INVL_GEN_NEXT_INVALID) != 0) {
929 PV_STAT(atomic_add_long(&invl_finish_restart, 1));
933 if ((void *)prevl == invl_gen)
	/*
	 * It is legitimate to not find ourselves on the list if a
	 * thread before us finished its DI and started it again.
	 */
941 if (__predict_false(p == NULL)) {
942 PV_STAT(atomic_add_long(&invl_finish_restart, 1));
948 atomic_set_ptr((uintptr_t *)&invl_gen->next,
949 PMAP_INVL_GEN_NEXT_INVALID);
950 if (!pmap_delayed_invl_finish_u_crit(invl_gen, p)) {
951 atomic_clear_ptr((uintptr_t *)&invl_gen->next,
952 PMAP_INVL_GEN_NEXT_INVALID);
954 PV_STAT(atomic_add_long(&invl_finish_restart, 1));
959 if (atomic_load_int(&pmap_invl_waiters) > 0)
960 pmap_delayed_invl_finish_unblock(0);
961 if (invl_gen->saved_pri != 0) {
963 sched_prio(td, invl_gen->saved_pri);
969 DB_SHOW_COMMAND(di_queue, pmap_di_queue)
971 struct pmap_invl_gen *p, *pn;
976 for (p = &pmap_invl_gen_head, first = true; p != NULL; p = pn,
978 nextl = (uintptr_t)atomic_load_ptr(&p->next);
979 pn = (void *)(nextl & ~PMAP_INVL_GEN_NEXT_INVALID);
980 td = first ? NULL : __containerof(p, struct thread,
982 db_printf("gen %lu inv %d td %p tid %d\n", p->gen,
983 (nextl & PMAP_INVL_GEN_NEXT_INVALID) != 0, td,
984 td != NULL ? td->td_tid : -1);
990 static long invl_wait;
991 SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait, CTLFLAG_RD, &invl_wait, 0,
992 "Number of times DI invalidation blocked pmap_remove_all/write");
993 static long invl_wait_slow;
994 SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait_slow, CTLFLAG_RD, &invl_wait_slow, 0,
995 "Number of slow invalidation waits for lockless DI");
1000 pmap_delayed_invl_genp(vm_page_t m)
1005 pa = VM_PAGE_TO_PHYS(m);
1006 if (__predict_false((pa) > pmap_last_pa))
1007 gen = &pv_dummy_large.pv_invl_gen;
1009 gen = &(pa_to_pmdp(pa)->pv_invl_gen);
1015 pmap_delayed_invl_genp(vm_page_t m)
1018 return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]);
1023 pmap_delayed_invl_callout_func(void *arg __unused)
1026 if (atomic_load_int(&pmap_invl_waiters) == 0)
1028 pmap_delayed_invl_finish_unblock(0);
1032 pmap_delayed_invl_callout_init(void *arg __unused)
1035 if (pmap_di_locked())
1037 callout_init(&pmap_invl_callout, 1);
1038 pmap_invl_callout_inited = true;
1040 SYSINIT(pmap_di_callout, SI_SUB_CPU + 1, SI_ORDER_ANY,
1041 pmap_delayed_invl_callout_init, NULL);
/*
 * Ensure that all currently executing DI blocks that need to flush the
 * TLB for the given page m have actually flushed the TLB by the time
 * this function returns.  If the page m has an empty PV list and we call
 * pmap_delayed_invl_wait(), upon its return we know that no CPU has a
 * valid mapping for the page m in either its page table or TLB.
 *
 * This function works by blocking until the global DI generation
 * number catches up with the generation number associated with the
 * given page m and its PV list.  Since this function's callers
 * typically own an object lock and sometimes own a page lock, it
 * cannot sleep.  Instead, it blocks on a turnstile to relinquish the
 * processor.
 */
1058 pmap_delayed_invl_wait_l(vm_page_t m)
1062 bool accounted = false;
1065 m_gen = pmap_delayed_invl_genp(m);
1066 while (*m_gen > pmap_invl_gen) {
1069 atomic_add_long(&invl_wait, 1);
1073 pmap_delayed_invl_wait_block(m_gen, &pmap_invl_gen);
1078 pmap_delayed_invl_wait_u(vm_page_t m)
1081 struct lock_delay_arg lda;
1085 m_gen = pmap_delayed_invl_genp(m);
1086 lock_delay_arg_init(&lda, &di_delay);
1087 while (*m_gen > atomic_load_long(&pmap_invl_gen_head.gen)) {
1088 if (fast || !pmap_invl_callout_inited) {
1089 PV_STAT(atomic_add_long(&invl_wait, 1));
	/*
	 * The page's invalidation generation number
	 * is still below the current thread's number.
	 * Prepare to block so that we do not waste
	 * CPU cycles or, worse, suffer livelock.
	 *
	 * Since it is impossible to block without
	 * racing with pmap_delayed_invl_finish_u(),
	 * prepare for the race by incrementing
	 * pmap_invl_waiters and arming a 1-tick
	 * callout which will unblock us if we lose
	 * the race.
	 */
1106 atomic_add_int(&pmap_invl_waiters, 1);
	/*
	 * Re-check the current thread's invalidation
	 * generation after incrementing
	 * pmap_invl_waiters, so that there is no race
	 * with pmap_delayed_invl_finish_u() setting
	 * the page generation and checking
	 * pmap_invl_waiters.  The only race allowed
	 * is for a missed unblock, which is handled
	 * by the callout.
	 */
1119 atomic_load_long(&pmap_invl_gen_head.gen)) {
1120 callout_reset(&pmap_invl_callout, 1,
1121 pmap_delayed_invl_callout_func, NULL);
1122 PV_STAT(atomic_add_long(&invl_wait_slow, 1));
1123 pmap_delayed_invl_wait_block(m_gen,
1124 &pmap_invl_gen_head.gen);
1126 atomic_add_int(&pmap_invl_waiters, -1);
1131 DEFINE_IFUNC(, void, pmap_thread_init_invl_gen, (struct thread *))
1134 return (pmap_di_locked() ? pmap_thread_init_invl_gen_l :
1135 pmap_thread_init_invl_gen_u);
1138 DEFINE_IFUNC(static, void, pmap_delayed_invl_start, (void))
1141 return (pmap_di_locked() ? pmap_delayed_invl_start_l :
1142 pmap_delayed_invl_start_u);
1145 DEFINE_IFUNC(static, void, pmap_delayed_invl_finish, (void))
1148 return (pmap_di_locked() ? pmap_delayed_invl_finish_l :
1149 pmap_delayed_invl_finish_u);
1152 DEFINE_IFUNC(static, void, pmap_delayed_invl_wait, (vm_page_t))
1155 return (pmap_di_locked() ? pmap_delayed_invl_wait_l :
1156 pmap_delayed_invl_wait_u);
/*
 * Mark the page m's PV list as participating in the current thread's
 * DI block.  Any threads concurrently using m's PV list to remove or
 * restrict all mappings to m will wait for the current thread's DI
 * block to complete before proceeding.
 *
 * The function works by setting the DI generation number for m's PV
 * list to at least the DI generation number of the current thread.
 * This forces a caller of pmap_delayed_invl_wait() to block until the
 * current thread calls pmap_delayed_invl_finish().
 */
1171 pmap_delayed_invl_page(vm_page_t m)
1175 rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED);
1176 gen = curthread->td_md.md_invl_gen.gen;
1179 m_gen = pmap_delayed_invl_genp(m);
1187 static caddr_t crashdumpmap;
1190 * Internal flags for pmap_enter()'s helper functions.
1192 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
1193 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
1196 * Internal flags for pmap_mapdev_internal() and
1197 * pmap_change_props_locked().
1199 #define MAPDEV_FLUSHCACHE 0x00000001 /* Flush cache after mapping. */
1200 #define MAPDEV_SETATTR 0x00000002 /* Modify existing attrs. */
1201 #define MAPDEV_ASSERTVALID 0x00000004 /* Assert mapping validity. */
1203 TAILQ_HEAD(pv_chunklist, pv_chunk);
1205 static void free_pv_chunk(struct pv_chunk *pc);
1206 static void free_pv_chunk_batch(struct pv_chunklist *batch);
1207 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
1208 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
1209 static int popcnt_pc_map_pq(uint64_t *map);
1210 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
1211 static void reserve_pv_entries(pmap_t pmap, int needed,
1212 struct rwlock **lockp);
1213 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1214 struct rwlock **lockp);
1215 static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
1216 u_int flags, struct rwlock **lockp);
1217 #if VM_NRESERVLEVEL > 0
1218 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1219 struct rwlock **lockp);
1221 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
1222 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
1225 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
1226 static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
1227 vm_prot_t prot, int mode, int flags);
1228 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
1229 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
1230 vm_offset_t va, struct rwlock **lockp);
1231 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
1233 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
1234 vm_prot_t prot, struct rwlock **lockp);
1235 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
1236 u_int flags, vm_page_t m, struct rwlock **lockp);
1237 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
1238 vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
1239 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
1240 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted);
1241 static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
1243 static void pmap_invalidate_cache_range_all(vm_offset_t sva,
1245 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
1247 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
1248 static vm_page_t pmap_large_map_getptp_unlocked(void);
1249 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
1250 #if VM_NRESERVLEVEL > 0
1251 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
1252 struct rwlock **lockp);
1254 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
1256 static void pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask);
1257 static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva,
1259 static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
1260 static pd_entry_t *pmap_pti_pde(vm_offset_t va);
1261 static void pmap_pti_wire_pte(void *pte);
1262 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
1263 struct spglist *free, struct rwlock **lockp);
1264 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
1265 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
1266 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
1267 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1268 struct spglist *free);
1269 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1270 pd_entry_t *pde, struct spglist *free,
1271 struct rwlock **lockp);
1272 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
1273 vm_page_t m, struct rwlock **lockp);
1274 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
1276 static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);
1278 static pd_entry_t *pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
1279 struct rwlock **lockp);
1280 static vm_page_t pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex,
1281 struct rwlock **lockp, vm_offset_t va);
1282 static vm_page_t pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex,
1283 struct rwlock **lockp, vm_offset_t va);
1284 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
1285 struct rwlock **lockp);
1287 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
1288 struct spglist *free);
1289 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
1291 /********************/
1292 /* Inline functions */
1293 /********************/
/*
 * Return non-clipped indexes for a given VA, i.e. the page table page
 * indexes at the corresponding levels.
 */
1299 static __inline vm_pindex_t
1300 pmap_pde_pindex(vm_offset_t va)
1302 return (va >> PDRSHIFT);
1305 static __inline vm_pindex_t
1306 pmap_pdpe_pindex(vm_offset_t va)
1308 return (NUPDE + (va >> PDPSHIFT));
1311 static __inline vm_pindex_t
1312 pmap_pml4e_pindex(vm_offset_t va)
1314 return (NUPDE + NUPDPE + (va >> PML4SHIFT));
1317 static __inline vm_pindex_t
1318 pmap_pml5e_pindex(vm_offset_t va)
1320 return (NUPDE + NUPDPE + NUPML4E + (va >> PML5SHIFT));
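/*
 * Informational example: pmap_pde_pindex(va) indexes the PT page that maps
 * va, pmap_pdpe_pindex(va) the PD page, and so on up the hierarchy.  The
 * NUPDE, NUPDPE and NUPML4E offsets keep the index ranges of the different
 * levels disjoint, so page table pages from different levels never share a
 * pindex within a pmap.
 */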
1323 static __inline pml4_entry_t *
1324 pmap_pml5e(pmap_t pmap, vm_offset_t va)
1327 MPASS(pmap_is_la57(pmap));
1328 return (&pmap->pm_pmltop[pmap_pml5e_index(va)]);
1331 static __inline pml4_entry_t *
1332 pmap_pml5e_u(pmap_t pmap, vm_offset_t va)
1335 MPASS(pmap_is_la57(pmap));
1336 return (&pmap->pm_pmltopu[pmap_pml5e_index(va)]);
1339 static __inline pml4_entry_t *
1340 pmap_pml5e_to_pml4e(pml5_entry_t *pml5e, vm_offset_t va)
1342 pml4_entry_t *pml4e;
	/* XXX MPASS(pmap_is_la57(pmap)); */
1345 pml4e = (pml4_entry_t *)PHYS_TO_DMAP(*pml5e & PG_FRAME);
1346 return (&pml4e[pmap_pml4e_index(va)]);
1349 /* Return a pointer to the PML4 slot that corresponds to a VA */
1350 static __inline pml4_entry_t *
1351 pmap_pml4e(pmap_t pmap, vm_offset_t va)
1353 pml5_entry_t *pml5e;
1354 pml4_entry_t *pml4e;
1357 if (pmap_is_la57(pmap)) {
1358 pml5e = pmap_pml5e(pmap, va);
1359 PG_V = pmap_valid_bit(pmap);
1360 if ((*pml5e & PG_V) == 0)
1362 pml4e = (pml4_entry_t *)PHYS_TO_DMAP(*pml5e & PG_FRAME);
1364 pml4e = pmap->pm_pmltop;
1366 return (&pml4e[pmap_pml4e_index(va)]);
1369 static __inline pml4_entry_t *
1370 pmap_pml4e_u(pmap_t pmap, vm_offset_t va)
1372 MPASS(!pmap_is_la57(pmap));
1373 return (&pmap->pm_pmltopu[pmap_pml4e_index(va)]);
1376 /* Return a pointer to the PDP slot that corresponds to a VA */
1377 static __inline pdp_entry_t *
1378 pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
1382 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
1383 return (&pdpe[pmap_pdpe_index(va)]);
1386 /* Return a pointer to the PDP slot that corresponds to a VA */
1387 static __inline pdp_entry_t *
1388 pmap_pdpe(pmap_t pmap, vm_offset_t va)
1390 pml4_entry_t *pml4e;
1393 PG_V = pmap_valid_bit(pmap);
1394 pml4e = pmap_pml4e(pmap, va);
1395 if (pml4e == NULL || (*pml4e & PG_V) == 0)
1397 return (pmap_pml4e_to_pdpe(pml4e, va));
1400 /* Return a pointer to the PD slot that corresponds to a VA */
1401 static __inline pd_entry_t *
1402 pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
1406 KASSERT((*pdpe & PG_PS) == 0,
1407 ("%s: pdpe %#lx is a leaf", __func__, *pdpe));
1408 pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
1409 return (&pde[pmap_pde_index(va)]);
1412 /* Return a pointer to the PD slot that corresponds to a VA */
1413 static __inline pd_entry_t *
1414 pmap_pde(pmap_t pmap, vm_offset_t va)
1419 PG_V = pmap_valid_bit(pmap);
1420 pdpe = pmap_pdpe(pmap, va);
1421 if (pdpe == NULL || (*pdpe & PG_V) == 0)
1423 KASSERT((*pdpe & PG_PS) == 0,
1424 ("pmap_pde for 1G page, pmap %p va %#lx", pmap, va));
1425 return (pmap_pdpe_to_pde(pdpe, va));
1428 /* Return a pointer to the PT slot that corresponds to a VA */
1429 static __inline pt_entry_t *
1430 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
1434 KASSERT((*pde & PG_PS) == 0,
1435 ("%s: pde %#lx is a leaf", __func__, *pde));
1436 pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
1437 return (&pte[pmap_pte_index(va)]);
1440 /* Return a pointer to the PT slot that corresponds to a VA */
1441 static __inline pt_entry_t *
1442 pmap_pte(pmap_t pmap, vm_offset_t va)
1447 PG_V = pmap_valid_bit(pmap);
1448 pde = pmap_pde(pmap, va);
1449 if (pde == NULL || (*pde & PG_V) == 0)
1451 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
1452 return ((pt_entry_t *)pde);
1453 return (pmap_pde_to_pte(pde, va));
1456 static __inline void
1457 pmap_resident_count_inc(pmap_t pmap, int count)
1460 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1461 pmap->pm_stats.resident_count += count;
1464 static __inline void
1465 pmap_resident_count_dec(pmap_t pmap, int count)
1468 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1469 KASSERT(pmap->pm_stats.resident_count >= count,
1470 ("pmap %p resident count underflow %ld %d", pmap,
1471 pmap->pm_stats.resident_count, count));
1472 pmap->pm_stats.resident_count -= count;
1475 PMAP_INLINE pt_entry_t *
1476 vtopte(vm_offset_t va)
1480 KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopte on a uva/gpa 0x%0lx", va));
1483 mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT +
1484 NPML4EPGSHIFT + NPML5EPGSHIFT)) - 1);
1485 return (P5Tmap + ((va >> PAGE_SHIFT) & mask));
1487 mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT +
1488 NPML4EPGSHIFT)) - 1);
1489 return (P4Tmap + ((va >> PAGE_SHIFT) & mask));
1493 static __inline pd_entry_t *
1494 vtopde(vm_offset_t va)
1498 KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va));
1501 mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
1502 NPML4EPGSHIFT + NPML5EPGSHIFT)) - 1);
1503 return (P5Dmap + ((va >> PDRSHIFT) & mask));
1505 mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT +
1506 NPML4EPGSHIFT)) - 1);
1507 return (P4Dmap + ((va >> PDRSHIFT) & mask));
1512 allocpages(vm_paddr_t *firstaddr, int n)
1517 bzero((void *)ret, n * PAGE_SIZE);
1518 *firstaddr += n * PAGE_SIZE;
1522 CTASSERT(powerof2(NDMPML4E));
1524 /* number of kernel PDP slots */
1525 #define NKPDPE(ptpgs) howmany(ptpgs, NPDEPG)
1528 nkpt_init(vm_paddr_t addr)
1535 pt_pages = howmany(addr, 1 << PDRSHIFT);
1536 pt_pages += NKPDPE(pt_pages);
	/*
	 * Add some slop beyond the bare minimum required for bootstrapping
	 * the kernel.
	 *
	 * This is quite important when allocating KVA for kernel modules.
	 * The modules are required to be linked in the negative 2GB of
	 * the address space.  If we run out of KVA in this region then
	 * pmap_growkernel() will need to allocate page table pages to map
	 * the entire 512GB of KVA space which is an unnecessary tax on
	 * physical memory.
	 *
	 * Secondly, device memory mapped as part of setting up the low-
	 * level console(s) is taken from KVA, starting at virtual_avail.
	 * This is because cninit() is called after pmap_bootstrap() but
	 * before vm_init() and pmap_init().  20MB for a frame buffer is
	 * not uncommon.
	 */
1555 pt_pages += 32; /* 64MB additional slop. */
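	/* Informational: each PT page maps 2MB of KVA, so 32 extra pages = 64MB. */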
/*
 * Returns the proper write/execute permission for a physical page that is
 * part of the initial boot allocations.
 *
 * If the page has kernel text, it is marked as read-only.  If the page has
 * kernel read-only data, it is marked as read-only/not-executable.  If the
 * page has only read-write data, it is marked as read-write/not-executable.
 * If the page is below/above the kernel range, it is marked as read-write.
 *
 * This function operates on 2M pages, since we map the kernel space that
 * way.
 */
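/*
 * For example (a sketch of the rules above): a 2M page holding only kernel
 * text is returned with neither X86_PG_RW nor pg_nx set (read-only and
 * executable), a page holding only read-only data gets pg_nx, and a page
 * with read-write data gets X86_PG_RW | pg_nx.
 */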
1572 static inline pt_entry_t
1573 bootaddr_rwx(vm_paddr_t pa)
1577 * The kernel is loaded at a 2MB-aligned address, and memory below that
1578 * need not be executable. The .bss section is padded to a 2MB
1579 * boundary, so memory following the kernel need not be executable
1580 * either. Preloaded kernel modules have their mapping permissions
1581 * fixed up by the linker.
1583 if (pa < trunc_2mpage(btext - KERNBASE) ||
1584 pa >= trunc_2mpage(_end - KERNBASE))
1585 return (X86_PG_RW | pg_nx);
1588 * The linker should ensure that the read-only and read-write
1589 * portions don't share the same 2M page, so this shouldn't
1590 * impact read-only data. However, in any case, any page with
1591 * read-write data needs to be read-write.
1593 if (pa >= trunc_2mpage(brwsection - KERNBASE))
1594 return (X86_PG_RW | pg_nx);
	/*
	 * Mark any 2M page containing kernel text as read-only.  Mark
	 * other pages with read-only data as read-only and not executable.
	 * (It is likely that a small portion of the read-only data section
	 * will be marked as read-only, but executable.  This should be
	 * acceptable since the read-only protection will keep the data
	 * from changing.)  Note that fixups to the .text section will
	 * still work until we set CR0.WP.
	 */
1605 if (pa < round_2mpage(etext - KERNBASE))
1611 create_pagetables(vm_paddr_t *firstaddr)
1613 int i, j, ndm1g, nkpdpe, nkdmpde;
1617 uint64_t DMPDkernphys;
1619 /* Allocate page table pages for the direct map */
1620 ndmpdp = howmany(ptoa(Maxmem), NBPDP);
1621 if (ndmpdp < 4) /* Minimum 4GB of dirmap */
1623 ndmpdpphys = howmany(ndmpdp, NPDPEPG);
1624 if (ndmpdpphys > NDMPML4E) {
1626 * Each NDMPML4E allows 512 GB, so limit to that,
1627 * and then readjust ndmpdp and ndmpdpphys.
1629 printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
1630 Maxmem = atop(NDMPML4E * NBPML4);
1631 ndmpdpphys = NDMPML4E;
1632 ndmpdp = NDMPML4E * NPDEPG;
1634 DMPDPphys = allocpages(firstaddr, ndmpdpphys);
1636 if ((amd_feature & AMDID_PAGE1GB) != 0) {
		/*
		 * Calculate the number of 1G pages that will fully fit in
		 * Maxmem.
		 */
1641 ndm1g = ptoa(Maxmem) >> PDPSHIFT;
1644 * Allocate 2M pages for the kernel. These will be used in
1645 * place of the first one or more 1G pages from ndm1g.
1647 nkdmpde = howmany((vm_offset_t)(brwsection - KERNBASE), NBPDP);
1648 DMPDkernphys = allocpages(firstaddr, nkdmpde);
1651 DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
1652 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
1654 /* Allocate pages */
1655 KPML4phys = allocpages(firstaddr, 1);
1656 KPDPphys = allocpages(firstaddr, NKPML4E);
1659 * Allocate the initial number of kernel page table pages required to
1660 * bootstrap. We defer this until after all memory-size dependent
1661 * allocations are done (e.g. direct map), so that we don't have to
1662 * build in too much slop in our estimate.
1664 * Note that when NKPML4E > 1, we have an empty page underneath
1665 * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed)
1666 * pages. (pmap_enter requires a PD page to exist for each KPML4E.)
1668 nkpt_init(*firstaddr);
1669 nkpdpe = NKPDPE(nkpt);
1671 KPTphys = allocpages(firstaddr, nkpt);
1672 KPDphys = allocpages(firstaddr, nkpdpe);
	/*
	 * Connect the zero-filled PT pages to their PD entries.  This
	 * implicitly maps the PT pages at their correct locations within
	 * the PTmap.
	 */
1679 pd_p = (pd_entry_t *)KPDphys;
1680 for (i = 0; i < nkpt; i++)
1681 pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
	/*
	 * Map from physical address zero to the end of loader preallocated
	 * memory using 2MB pages.  This replaces some of the PD entries
	 * created above.
	 */
1688 for (i = 0; (i << PDRSHIFT) < KERNend; i++)
1689 /* Preset PG_M and PG_A because demotion expects it. */
1690 pd_p[i] = (i << PDRSHIFT) | X86_PG_V | PG_PS | pg_g |
1691 X86_PG_M | X86_PG_A | bootaddr_rwx(i << PDRSHIFT);
1694 * Because we map the physical blocks in 2M pages, adjust firstaddr
1695 * to record the physical blocks we've actually mapped into kernel
1696 * virtual address space.
1698 if (*firstaddr < round_2mpage(KERNend))
1699 *firstaddr = round_2mpage(KERNend);
1701 /* And connect up the PD to the PDP (leaving room for L4 pages) */
1702 pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE));
1703 for (i = 0; i < nkpdpe; i++)
1704 pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
1707 * Now, set up the direct map region using 2MB and/or 1GB pages. If
1708 * the end of physical memory is not aligned to a 1GB page boundary,
1709 * then the residual physical memory is mapped with 2MB pages. Later,
1710 * if pmap_mapdev{_attr}() uses the direct map for non-write-back
1711 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
1712 * that are partially used.
1714 pd_p = (pd_entry_t *)DMPDphys;
1715 for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
1716 pd_p[j] = (vm_paddr_t)i << PDRSHIFT;
1717 /* Preset PG_M and PG_A because demotion expects it. */
1718 pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1719 X86_PG_M | X86_PG_A | pg_nx;
1721 pdp_p = (pdp_entry_t *)DMPDPphys;
1722 for (i = 0; i < ndm1g; i++) {
1723 pdp_p[i] = (vm_paddr_t)i << PDPSHIFT;
1724 /* Preset PG_M and PG_A because demotion expects it. */
1725 pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | pg_g |
1726 X86_PG_M | X86_PG_A | pg_nx;
1728 for (j = 0; i < ndmpdp; i++, j++) {
1729 pdp_p[i] = DMPDphys + ptoa(j);
1730 pdp_p[i] |= X86_PG_RW | X86_PG_V | pg_nx;
1734 * Instead of using a 1G page for the memory containing the kernel,
1735 * use 2M pages with read-only and no-execute permissions. (If using 1G
1736 * pages, this will partially overwrite the PDPEs above.)
1739 pd_p = (pd_entry_t *)DMPDkernphys;
1740 for (i = 0; i < (NPDEPG * nkdmpde); i++)
1741 pd_p[i] = (i << PDRSHIFT) | X86_PG_V | PG_PS | pg_g |
1742 X86_PG_M | X86_PG_A | pg_nx |
1743 bootaddr_rwx(i << PDRSHIFT);
1744 for (i = 0; i < nkdmpde; i++)
1745 pdp_p[i] = (DMPDkernphys + ptoa(i)) | X86_PG_RW |
1749 /* And recursively map PML4 to itself in order to get PTmap */
1750 p4_p = (pml4_entry_t *)KPML4phys;
1751 p4_p[PML4PML4I] = KPML4phys;
1752 p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | pg_nx;
1754 /* Connect the Direct Map slot(s) up to the PML4. */
1755 for (i = 0; i < ndmpdpphys; i++) {
1756 p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
1757 p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | pg_nx;
1760 /* Connect the KVA slots up to the PML4 */
1761 for (i = 0; i < NKPML4E; i++) {
1762 p4_p[KPML4BASE + i] = KPDPphys + ptoa(i);
1763 p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V;
1766 kernel_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
1770 * Bootstrap the system enough to run with virtual memory.
1772 * On amd64 this is called after mapping has already been enabled
1773 * and just syncs the pmap module with what has already been done.
1774 * [We can't call it easily with mapping off since the kernel is not
1775 * mapped with PA == VA, hence we would have to relocate every address
1776 * from the linked base (virtual) address "KERNBASE" to the actual
1777 * (physical) address starting relative to 0]
1780 pmap_bootstrap(vm_paddr_t *firstaddr)
1783 pt_entry_t *pte, *pcpu_pte;
1784 struct region_descriptor r_gdt;
1785 uint64_t cr4, pcpu_phys;
1789 KERNend = *firstaddr;
1790 res = atop(KERNend - (vm_paddr_t)kernphys);
1796 * Create an initial set of page tables to run the kernel in.
1798 create_pagetables(firstaddr);
1800 pcpu_phys = allocpages(firstaddr, MAXCPU);
1803 * Add a physical memory segment (vm_phys_seg) corresponding to the
1804 * preallocated kernel page table pages so that vm_page structures
1805 * representing these pages will be created. The vm_page structures
1806 * are required for promotion of the corresponding kernel virtual
1807 * addresses to superpage mappings.
1809 vm_phys_early_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1812 * Account for the virtual addresses mapped by create_pagetables().
1814 virtual_avail = (vm_offset_t)KERNBASE + round_2mpage(KERNend);
1815 virtual_end = VM_MAX_KERNEL_ADDRESS;
	/*
	 * Enable PG_G global pages, then switch to the kernel page
	 * table from the bootstrap page table.  After the switch, it
	 * is possible to enable SMEP and SMAP since PG_U bits are
	 * correct now.
	 */
1826 load_cr3(KPML4phys);
1827 if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
1829 if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
1834 * Initialize the kernel pmap (which is statically allocated).
1835 * Count bootstrap data as being resident in case any of this data is
1836 * later unmapped (using pmap_remove()) and freed.
1838 PMAP_LOCK_INIT(kernel_pmap);
1839 kernel_pmap->pm_pmltop = kernel_pml4;
1840 kernel_pmap->pm_cr3 = KPML4phys;
1841 kernel_pmap->pm_ucr3 = PMAP_NO_CR3;
1842 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
1843 TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1844 kernel_pmap->pm_stats.resident_count = res;
1845 kernel_pmap->pm_flags = pmap_flags;
1848 * Initialize the TLB invalidations generation number lock.
1850 mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF);
	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
1856 #define SYSMAP(c, p, v, n) \
1857 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
	/*
	 * Crashdump maps.  The first page is reused as CMAP1 for the
	 * memory test.
	 */
1866 SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
1867 CADDR1 = crashdumpmap;
1869 SYSMAP(struct pcpu *, pcpu_pte, __pcpu, MAXCPU);
1872 for (i = 0; i < MAXCPU; i++) {
1873 pcpu_pte[i] = (pcpu_phys + ptoa(i)) | X86_PG_V | X86_PG_RW |
1874 pg_g | pg_nx | X86_PG_M | X86_PG_A;
1878 * Re-initialize PCPU area for BSP after switching.
1879 * Make hardware use gdt and common_tss from the new PCPU.
1881 STAILQ_INIT(&cpuhead);
1882 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
1883 pcpu_init(&__pcpu[0], 0, sizeof(struct pcpu));
1884 amd64_bsp_pcpu_init1(&__pcpu[0]);
1885 amd64_bsp_ist_init(&__pcpu[0]);
1886 __pcpu[0].pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
1888 memcpy(__pcpu[0].pc_gdt, temp_bsp_pcpu.pc_gdt, NGDT *
1889 sizeof(struct user_segment_descriptor));
1890 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&__pcpu[0].pc_common_tss;
1891 ssdtosyssd(&gdt_segs[GPROC0_SEL],
1892 (struct system_segment_descriptor *)&__pcpu[0].pc_gdt[GPROC0_SEL]);
1893 r_gdt.rd_limit = NGDT * sizeof(struct user_segment_descriptor) - 1;
1894 r_gdt.rd_base = (long)__pcpu[0].pc_gdt;
1896 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
1897 ltr(GSEL(GPROC0_SEL, SEL_KPL));
1898 __pcpu[0].pc_dynamic = temp_bsp_pcpu.pc_dynamic;
1899 __pcpu[0].pc_acpi_id = temp_bsp_pcpu.pc_acpi_id;
1902 * Initialize the PAT MSR.
1903 * pmap_init_pat() clears and sets CR4_PGE, which, as a
1904 * side-effect, invalidates stale PG_G TLB entries that might
1905 * have been created in our pre-boot environment.
1909 /* Initialize TLB Context Id. */
1910 if (pmap_pcid_enabled) {
1911 for (i = 0; i < MAXCPU; i++) {
1912 kernel_pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN;
1913 kernel_pmap->pm_pcids[i].pm_gen = 1;
		/*
		 * PMAP_PCID_KERN + 1 is used for initialization of
		 * proc0 pmap.  The pmap's PCID state might be used by
		 * the EFIRT entry point before the first context switch,
		 * so it needs to be valid.
		 */
1922 PCPU_SET(pcid_next, PMAP_PCID_KERN + 2);
1923 PCPU_SET(pcid_gen, 1);
1926 * pcpu area for APs is zeroed during AP startup.
1927 * pc_pcid_next and pc_pcid_gen are initialized by AP
1928 * during pcpu setup.
1930 load_cr4(rcr4() | CR4_PCIDE);
1935 * Setup the PAT MSR.
1944 /* Bail if this CPU doesn't implement PAT. */
1945 if ((cpu_feature & CPUID_PAT) == 0)
1948 /* Set default PAT index table. */
1949 for (i = 0; i < PAT_INDEX_SIZE; i++)
1951 pat_index[PAT_WRITE_BACK] = 0;
1952 pat_index[PAT_WRITE_THROUGH] = 1;
1953 pat_index[PAT_UNCACHEABLE] = 3;
1954 pat_index[PAT_WRITE_COMBINING] = 6;
1955 pat_index[PAT_WRITE_PROTECTED] = 5;
1956 pat_index[PAT_UNCACHED] = 2;
1959 * Initialize default PAT entries.
1960 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
1961 * Program 5 and 6 as WP and WC.
1963 * Leave 4 and 7 as WB and UC. Note that a recursive page table
1964 * mapping for a 2M page uses a PAT value with the bit 3 set due
1965 * to its overload with PG_PS.
1967 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
1968 PAT_VALUE(1, PAT_WRITE_THROUGH) |
1969 PAT_VALUE(2, PAT_UNCACHED) |
1970 PAT_VALUE(3, PAT_UNCACHEABLE) |
1971 PAT_VALUE(4, PAT_WRITE_BACK) |
1972 PAT_VALUE(5, PAT_WRITE_PROTECTED) |
1973 PAT_VALUE(6, PAT_WRITE_COMBINING) |
1974 PAT_VALUE(7, PAT_UNCACHEABLE);
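	/*
	 * Informational example: with the table above, a request for
	 * PAT_WRITE_COMBINING resolves to pat_index[PAT_WRITE_COMBINING] == 6,
	 * which a 4KB page table entry encodes as PAT=1, PCD=1, PWT=0.
	 */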
1978 load_cr4(cr4 & ~CR4_PGE);
1980 /* Disable caches (CD = 1, NW = 0). */
1982 load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1984 /* Flushes caches and TLBs. */
1988 /* Update PAT and index table. */
1989 wrmsr(MSR_PAT, pat_msr);
1991 /* Flush caches and TLBs again. */
1995 /* Restore caches and PGE. */
2000 extern const char la57_trampoline[], la57_trampoline_gdt_desc[],
2001 la57_trampoline_gdt[], la57_trampoline_end[];
2004 pmap_bootstrap_la57(void *arg __unused)
2007 pml5_entry_t *v_pml5;
2008 pml4_entry_t *v_pml4;
2012 vm_page_t m_code, m_pml4, m_pdp, m_pd, m_pt, m_pml5;
2013 void (*la57_tramp)(uint64_t pml5);
2014 struct region_descriptor r_gdt;
2016 if ((cpu_stdext_feature2 & CPUID_STDEXT2_LA57) == 0)
2018 if (!TUNABLE_INT_FETCH("vm.pmap.la57", &la57))
2023 r_gdt.rd_limit = NGDT * sizeof(struct user_segment_descriptor) - 1;
2024 r_gdt.rd_base = (long)__pcpu[0].pc_gdt;
2026 m_code = vm_page_alloc_contig(NULL, 0,
2027 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ,
2028 1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
2029 if ((m_code->flags & PG_ZERO) == 0)
2030 pmap_zero_page(m_code);
2031 v_code = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_code));
2032 m_pml5 = vm_page_alloc_contig(NULL, 0,
2033 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ,
2034 1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
2035 if ((m_pml5->flags & PG_ZERO) == 0)
2036 pmap_zero_page(m_pml5);
2037 KPML5phys = VM_PAGE_TO_PHYS(m_pml5);
2038 v_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(KPML5phys);
2039 m_pml4 = vm_page_alloc_contig(NULL, 0,
2040 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ,
2041 1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
2042 if ((m_pml4->flags & PG_ZERO) == 0)
2043 pmap_zero_page(m_pml4);
2044 v_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
2045 m_pdp = vm_page_alloc_contig(NULL, 0,
2046 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ,
2047 1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
2048 if ((m_pdp->flags & PG_ZERO) == 0)
2049 pmap_zero_page(m_pdp);
2050 v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
2051 m_pd = vm_page_alloc_contig(NULL, 0,
2052 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ,
2053 1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
2054 if ((m_pd->flags & PG_ZERO) == 0)
2055 pmap_zero_page(m_pd);
2056 v_pd = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd));
2057 m_pt = vm_page_alloc_contig(NULL, 0,
2058 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ,
2059 1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
2060 if ((m_pt->flags & PG_ZERO) == 0)
2061 pmap_zero_page(m_pt);
2062 v_pt = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pt));
	/*
	 * Map m_code 1:1.  It appears below 4G in KVA because its physical
	 * address is below 4G.  Since kernel KVA is in the upper half, the
	 * pml4e should be zero and free for temporary use.
	 */
2069 kernel_pmap->pm_pmltop[pmap_pml4e_index(VM_PAGE_TO_PHYS(m_code))] =
2070 VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V | X86_PG_RW | X86_PG_A |
2072 v_pdp[pmap_pdpe_index(VM_PAGE_TO_PHYS(m_code))] =
2073 VM_PAGE_TO_PHYS(m_pd) | X86_PG_V | X86_PG_RW | X86_PG_A |
2075 v_pd[pmap_pde_index(VM_PAGE_TO_PHYS(m_code))] =
2076 VM_PAGE_TO_PHYS(m_pt) | X86_PG_V | X86_PG_RW | X86_PG_A |
2078 v_pt[pmap_pte_index(VM_PAGE_TO_PHYS(m_code))] =
2079 VM_PAGE_TO_PHYS(m_code) | X86_PG_V | X86_PG_RW | X86_PG_A |
	/*
	 * Add a pml5 entry at the top of KVA pointing to the existing pml4
	 * table, entering all existing kernel mappings into the level 5
	 * table.
	 */
2086 v_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
2087 X86_PG_RW | X86_PG_A | X86_PG_M | pg_g;
2090 * Add pml5 entry for 1:1 trampoline mapping after LA57 is turned on.
2092 v_pml5[pmap_pml5e_index(VM_PAGE_TO_PHYS(m_code))] =
2093 VM_PAGE_TO_PHYS(m_pml4) | X86_PG_V | X86_PG_RW | X86_PG_A |
2095 v_pml4[pmap_pml4e_index(VM_PAGE_TO_PHYS(m_code))] =
2096 VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V | X86_PG_RW | X86_PG_A |
	 * Copy and call the 48->57 trampoline, and hope we return here alive.
2102 bcopy(la57_trampoline, v_code, la57_trampoline_end - la57_trampoline);
2103 *(u_long *)(v_code + 2 + (la57_trampoline_gdt_desc - la57_trampoline)) =
2104 la57_trampoline_gdt - la57_trampoline + VM_PAGE_TO_PHYS(m_code);
2105 la57_tramp = (void (*)(uint64_t))VM_PAGE_TO_PHYS(m_code);
2106 la57_tramp(KPML5phys);
	 * The GDT was necessarily reset by the trampoline; switch back to our GDT.
2112 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
2116 ssdtosyssd(&gdt_segs[GPROC0_SEL],
2117 (struct system_segment_descriptor *)&__pcpu[0].pc_gdt[GPROC0_SEL]);
2118 ltr(GSEL(GPROC0_SEL, SEL_KPL));
2121 * Now unmap the trampoline, and free the pages.
2122 * Clear pml5 entry used for 1:1 trampoline mapping.
2124 pte_clear(&v_pml5[pmap_pml5e_index(VM_PAGE_TO_PHYS(m_code))]);
2125 invlpg((vm_offset_t)v_code);
2126 vm_page_free(m_code);
2127 vm_page_free(m_pdp);
	 * Recursively map PML5 to itself in order to get the PTmap and PDmap.
2135 v_pml5[PML5PML5I] = KPML5phys | X86_PG_RW | X86_PG_V | pg_nx;
2137 kernel_pmap->pm_cr3 = KPML5phys;
2138 kernel_pmap->pm_pmltop = v_pml5;
2140 SYSINIT(la57, SI_SUB_KMEM, SI_ORDER_ANY, pmap_bootstrap_la57, NULL);
2143 * Initialize a vm_page's machine-dependent fields.
2146 pmap_page_init(vm_page_t m)
2149 TAILQ_INIT(&m->md.pv_list);
2150 m->md.pat_mode = PAT_WRITE_BACK;
2153 static int pmap_allow_2m_x_ept;
2154 SYSCTL_INT(_vm_pmap, OID_AUTO, allow_2m_x_ept, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
2155 &pmap_allow_2m_x_ept, 0,
2156 "Allow executable superpage mappings in EPT");
2159 pmap_allow_2m_x_ept_recalculate(void)
	 * Errata SKL002 and SKL012S make executable 2MB EPT mappings
	 * unsafe on the CPU models listed below.  Since the EPT format is
	 * only used by Intel CPUs, the vendor check is merely a formality.
2165 if (!(cpu_vendor_id != CPU_VENDOR_INTEL ||
2166 (cpu_ia32_arch_caps & IA32_ARCH_CAP_IF_PSCHANGE_MC_NO) != 0 ||
2167 (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
2168 (CPUID_TO_MODEL(cpu_id) == 0x26 || /* Atoms */
2169 CPUID_TO_MODEL(cpu_id) == 0x27 ||
2170 CPUID_TO_MODEL(cpu_id) == 0x35 ||
2171 CPUID_TO_MODEL(cpu_id) == 0x36 ||
2172 CPUID_TO_MODEL(cpu_id) == 0x37 ||
2173 CPUID_TO_MODEL(cpu_id) == 0x86 ||
2174 CPUID_TO_MODEL(cpu_id) == 0x1c ||
2175 CPUID_TO_MODEL(cpu_id) == 0x4a ||
2176 CPUID_TO_MODEL(cpu_id) == 0x4c ||
2177 CPUID_TO_MODEL(cpu_id) == 0x4d ||
2178 CPUID_TO_MODEL(cpu_id) == 0x5a ||
2179 CPUID_TO_MODEL(cpu_id) == 0x5c ||
2180 CPUID_TO_MODEL(cpu_id) == 0x5d ||
2181 CPUID_TO_MODEL(cpu_id) == 0x5f ||
2182 CPUID_TO_MODEL(cpu_id) == 0x6e ||
2183 CPUID_TO_MODEL(cpu_id) == 0x7a ||
2184 CPUID_TO_MODEL(cpu_id) == 0x57 || /* Knights */
2185 CPUID_TO_MODEL(cpu_id) == 0x85))))
2186 pmap_allow_2m_x_ept = 1;
2187 TUNABLE_INT_FETCH("hw.allow_2m_x_ept", &pmap_allow_2m_x_ept);
2191 pmap_allow_2m_x_page(pmap_t pmap, bool executable)
2194 return (pmap->pm_type != PT_EPT || !executable ||
2195 !pmap_allow_2m_x_ept);
2200 pmap_init_pv_table(void)
2202 struct pmap_large_md_page *pvd;
2204 long start, end, highest, pv_npg;
2205 int domain, i, j, pages;
2208 * We strongly depend on the size being a power of two, so the assert
2209 * is overzealous. However, should the struct be resized to a
2210 * different power of two, the code below needs to be revisited.
2212 CTASSERT((sizeof(*pvd) == 64));
2215 * Calculate the size of the array.
2217 pmap_last_pa = vm_phys_segs[vm_phys_nsegs - 1].end;
2218 pv_npg = howmany(pmap_last_pa, NBPDR);
2219 s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page);
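	/*
	 * Worked example (illustrative numbers only): with 64GB of
	 * physical memory and NBPDR = 2MB, pv_npg = 64GB / 2MB = 32768
	 * entries; at 64 bytes per struct pmap_large_md_page that is
	 * s = 32768 * 64 bytes = 2MB of KVA for the table.
	 */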
2221 pv_table = (struct pmap_large_md_page *)kva_alloc(s);
2222 if (pv_table == NULL)
2223 panic("%s: kva_alloc failed\n", __func__);
2226 * Iterate physical segments to allocate space for respective pages.
2230 for (i = 0; i < vm_phys_nsegs; i++) {
2231 end = vm_phys_segs[i].end / NBPDR;
2232 domain = vm_phys_segs[i].domain;
2237 start = highest + 1;
2238 pvd = &pv_table[start];
2240 pages = end - start + 1;
2241 s = round_page(pages * sizeof(*pvd));
2242 highest = start + (s / sizeof(*pvd)) - 1;
2244 for (j = 0; j < s; j += PAGE_SIZE) {
2245 vm_page_t m = vm_page_alloc_domain(NULL, 0,
2246 domain, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
				panic("vm_page_alloc_domain failed for %lx\n",
				    (vm_offset_t)pvd + j);
2249 pmap_qenter((vm_offset_t)pvd + j, &m, 1);
2252 for (j = 0; j < s / sizeof(*pvd); j++) {
2253 rw_init_flags(&pvd->pv_lock, "pmap pv list", RW_NEW);
2254 TAILQ_INIT(&pvd->pv_page.pv_list);
2255 pvd->pv_page.pv_gen = 0;
2256 pvd->pv_page.pat_mode = 0;
2257 pvd->pv_invl_gen = 0;
2261 pvd = &pv_dummy_large;
2262 rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW);
2263 TAILQ_INIT(&pvd->pv_page.pv_list);
2264 pvd->pv_page.pv_gen = 0;
2265 pvd->pv_page.pat_mode = 0;
2266 pvd->pv_invl_gen = 0;
2270 pmap_init_pv_table(void)
2276 * Initialize the pool of pv list locks.
2278 for (i = 0; i < NPV_LIST_LOCKS; i++)
2279 rw_init(&pv_list_locks[i], "pmap pv list");
2282 * Calculate the size of the pv head table for superpages.
2284 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
2287 * Allocate memory for the pv head table for superpages.
2289 s = (vm_size_t)pv_npg * sizeof(struct md_page);
2291 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
2292 for (i = 0; i < pv_npg; i++)
2293 TAILQ_INIT(&pv_table[i].pv_list);
2294 TAILQ_INIT(&pv_dummy.pv_list);
2299 * Initialize the pmap module.
2300 * Called by vm_init, to initialize any structures that the pmap
2301 * system needs to map virtual memory.
2306 struct pmap_preinit_mapping *ppim;
2308 int error, i, ret, skz63;
2310 /* L1TF, reserve page @0 unconditionally */
2311 vm_page_blacklist_add(0, bootverbose);
2313 /* Detect bare-metal Skylake Server and Skylake-X. */
2314 if (vm_guest == VM_GUEST_NO && cpu_vendor_id == CPU_VENDOR_INTEL &&
2315 CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x55) {
		 * Skylake-X erratum SKZ63: the processor may hang when
		 * executing code in an HLE transaction region between
		 * 0x40000000 and 0x403FFFFF.
2321 * Mark the pages in the range as preallocated. It
2322 * seems to be impossible to distinguish between
2323 * Skylake Server and Skylake X.
2326 TUNABLE_INT_FETCH("hw.skz63_enable", &skz63);
2329 printf("SKZ63: skipping 4M RAM starting "
2330 "at physical 1G\n");
2331 for (i = 0; i < atop(0x400000); i++) {
2332 ret = vm_page_blacklist_add(0x40000000 +
2334 if (!ret && bootverbose)
2335 printf("page at %#lx already used\n",
2336 0x40000000 + ptoa(i));
2342 pmap_allow_2m_x_ept_recalculate();
2345 * Initialize the vm page array entries for the kernel pmap's
2348 PMAP_LOCK(kernel_pmap);
2349 for (i = 0; i < nkpt; i++) {
2350 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
2351 KASSERT(mpte >= vm_page_array &&
2352 mpte < &vm_page_array[vm_page_array_size],
2353 ("pmap_init: page table page is out of range"));
2354 mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
2355 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
2356 mpte->ref_count = 1;
2359 * Collect the page table pages that were replaced by a 2MB
2360 * page in create_pagetables(). They are zero filled.
2362 if ((vm_paddr_t)i << PDRSHIFT < KERNend &&
2363 pmap_insert_pt_page(kernel_pmap, mpte, false))
2364 panic("pmap_init: pmap_insert_pt_page failed");
2366 PMAP_UNLOCK(kernel_pmap);
2370 * If the kernel is running on a virtual machine, then it must assume
2371 * that MCA is enabled by the hypervisor. Moreover, the kernel must
2372 * be prepared for the hypervisor changing the vendor and family that
2373 * are reported by CPUID. Consequently, the workaround for AMD Family
2374 * 10h Erratum 383 is enabled if the processor's feature set does not
2375 * include at least one feature that is only supported by older Intel
2376 * or newer AMD processors.
2378 if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
2379 (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
2380 CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
2382 workaround_erratum383 = 1;
2385 * Are large page mappings enabled?
2387 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
2388 if (pg_ps_enabled) {
2389 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
2390 ("pmap_init: can't assign to pagesizes[1]"));
2391 pagesizes[1] = NBPDR;
2392 if ((amd_feature & AMDID_PAGE1GB) != 0) {
2393 KASSERT(MAXPAGESIZES > 2 && pagesizes[2] == 0,
2394 ("pmap_init: can't assign to pagesizes[2]"));
2395 pagesizes[2] = NBPDP;
2400 * Initialize pv chunk lists.
2402 for (i = 0; i < PMAP_MEMDOM; i++) {
2403 mtx_init(&pv_chunks[i].pvc_lock, "pmap pv chunk list", NULL, MTX_DEF);
2404 TAILQ_INIT(&pv_chunks[i].pvc_list);
2406 pmap_init_pv_table();
2408 pmap_initialized = 1;
2409 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
2410 ppim = pmap_preinit_mapping + i;
2413 /* Make the direct map consistent */
2414 if (ppim->pa < dmaplimit && ppim->pa + ppim->sz <= dmaplimit) {
2415 (void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
2416 ppim->sz, ppim->mode);
2420 printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
2421 ppim->pa, ppim->va, ppim->sz, ppim->mode);
2424 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
2425 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
2426 (vmem_addr_t *)&qframe);
2428 panic("qframe allocation failed");
2431 TUNABLE_INT_FETCH("vm.pmap.large_map_pml4_entries", &lm_ents);
2432 if (lm_ents > LMEPML4I - LMSPML4I + 1)
2433 lm_ents = LMEPML4I - LMSPML4I + 1;
2435 printf("pmap: large map %u PML4 slots (%lu GB)\n",
2436 lm_ents, (u_long)lm_ents * (NBPML4 / 1024 / 1024 / 1024));
2438 large_vmem = vmem_create("large", LARGEMAP_MIN_ADDRESS,
2439 (vmem_size_t)lm_ents * NBPML4, PAGE_SIZE, 0, M_WAITOK);
2440 if (large_vmem == NULL) {
2441 printf("pmap: cannot create large map\n");
2444 for (i = 0; i < lm_ents; i++) {
2445 m = pmap_large_map_getptp_unlocked();
2447 kernel_pml4[LMSPML4I + i] = X86_PG_V |
2448 X86_PG_RW | X86_PG_A | X86_PG_M | pg_nx |
2454 SYSCTL_UINT(_vm_pmap, OID_AUTO, large_map_pml4_entries,
2455 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &lm_ents, 0,
2456 "Maximum number of PML4 entries for use by large map (tunable). "
2457 "Each entry corresponds to 512GB of address space.");
2459 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2460 "2MB page mapping counters");
2462 static u_long pmap_pde_demotions;
2463 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
2464 &pmap_pde_demotions, 0, "2MB page demotions");
2466 static u_long pmap_pde_mappings;
2467 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
2468 &pmap_pde_mappings, 0, "2MB page mappings");
2470 static u_long pmap_pde_p_failures;
2471 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
2472 &pmap_pde_p_failures, 0, "2MB page promotion failures");
2474 static u_long pmap_pde_promotions;
2475 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
2476 &pmap_pde_promotions, 0, "2MB page promotions");
2478 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
2479 "1GB page mapping counters");
2481 static u_long pmap_pdpe_demotions;
2482 SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
2483 &pmap_pdpe_demotions, 0, "1GB page demotions");
2485 /***************************************************
2486 * Low level helper routines.....
2487 ***************************************************/
2490 pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
2492 int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT;
2494 switch (pmap->pm_type) {
2497 /* Verify that both PAT bits are not set at the same time */
2498 KASSERT((entry & x86_pat_bits) != x86_pat_bits,
2499 ("Invalid PAT bits in entry %#lx", entry));
2501 /* Swap the PAT bits if one of them is set */
2502 if ((entry & x86_pat_bits) != 0)
2503 entry ^= x86_pat_bits;
2507 * Nothing to do - the memory attributes are represented
2508 * the same way for regular pages and superpages.
		panic("pmap_swap_pat: bad pm_type %d", pmap->pm_type);
2519 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
2522 return (mode >= 0 && mode < PAT_INDEX_SIZE &&
2523 pat_index[(int)mode] >= 0);
2527 * Determine the appropriate bits to set in a PTE or PDE for a specified
2531 pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
2533 int cache_bits, pat_flag, pat_idx;
2535 if (!pmap_is_valid_memattr(pmap, mode))
2536 panic("Unknown caching mode %d\n", mode);
2538 switch (pmap->pm_type) {
2541 /* The PAT bit is different for PTE's and PDE's. */
2542 pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
2544 /* Map the caching mode to a PAT index. */
2545 pat_idx = pat_index[mode];
2547 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
2550 cache_bits |= pat_flag;
2552 cache_bits |= PG_NC_PCD;
2554 cache_bits |= PG_NC_PWT;
2558 cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
2562 panic("unsupported pmap type %d", pmap->pm_type);
2565 return (cache_bits);
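/*
 * Illustrative sketch (not part of the pmap implementation): how a 3-bit
 * PAT index is spread across the PWT, PCD, and PAT bits of an x86 PTE/PDE.
 * The sk_* names and the standalone form are assumptions made purely for
 * illustration; the real encoding is performed by pmap_cache_bits() above.
 */
#if 0	/* standalone sketch, never compiled here */
#include <stdint.h>

#define	SK_PG_NC_PWT	0x008ULL	/* bit 3: page-level write-through */
#define	SK_PG_NC_PCD	0x010ULL	/* bit 4: page-level cache disable */
#define	SK_PG_PTE_PAT	0x080ULL	/* bit 7 in a 4KB PTE */
#define	SK_PG_PDE_PAT	0x1000ULL	/* bit 12 in a 2MB PDE */

/* Encode a 3-bit PAT index into the cache-control bits of a PTE or PDE. */
static uint64_t
sk_cache_bits(int pat_idx, int is_pde)
{
	uint64_t bits = 0;

	if (pat_idx & 4)		/* top bit selects the PAT bit */
		bits |= is_pde ? SK_PG_PDE_PAT : SK_PG_PTE_PAT;
	if (pat_idx & 2)		/* middle bit selects PCD */
		bits |= SK_PG_NC_PCD;
	if (pat_idx & 1)		/* low bit selects PWT */
		bits |= SK_PG_NC_PWT;
	return (bits);
}
#endif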
2569 pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
2573 switch (pmap->pm_type) {
2576 mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
2579 mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
2582 panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type);
2589 pmap_pat_index(pmap_t pmap, pt_entry_t pte, bool is_pde)
2591 int pat_flag, pat_idx;
2594 switch (pmap->pm_type) {
2597 /* The PAT bit is different for PTE's and PDE's. */
2598 pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
2600 if ((pte & pat_flag) != 0)
2602 if ((pte & PG_NC_PCD) != 0)
2604 if ((pte & PG_NC_PWT) != 0)
2608 if ((pte & EPT_PG_IGNORE_PAT) != 0)
2609 panic("EPT PTE %#lx has no PAT memory type", pte);
2610 pat_idx = (pte & EPT_PG_MEMORY_TYPE(0x7)) >> 3;
2614 /* See pmap_init_pat(). */
2624 pmap_ps_enabled(pmap_t pmap)
2627 return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
2631 pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
2634 switch (pmap->pm_type) {
		 * This is a little bogus since the generation number is
		 * supposed to be bumped up when a region of the address
		 * space is invalidated in the page tables.
		 *
		 * In this case the old PDE entry is valid, yet we want
		 * to make sure that any mappings using the old entry are
		 * invalidated in the TLB.
		 *
		 * The reason this works as expected is because we rendezvous
		 * "all" host cpus and force any vcpu context to exit as a
		 * side effect.
2653 atomic_add_long(&pmap->pm_eptgen, 1);
2656 panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type);
2658 pde_store(pde, newpde);
2662 * After changing the page size for the specified virtual address in the page
2663 * table, flush the corresponding entries from the processor's TLB. Only the
2664 * calling processor's TLB is affected.
2666 * The calling thread must be pinned to a processor.
2669 pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
2673 if (pmap_type_guest(pmap))
2676 KASSERT(pmap->pm_type == PT_X86,
2677 ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type));
2679 PG_G = pmap_global_bit(pmap);
2681 if ((newpde & PG_PS) == 0)
2682 /* Demotion: flush a specific 2MB page mapping. */
2684 else if ((newpde & PG_G) == 0)
2686 * Promotion: flush every 4KB page mapping from the TLB
2687 * because there are too many to flush individually.
2692 * Promotion: flush every 4KB page mapping from the TLB,
2693 * including any global (PG_G) mappings.
2700 * The amd64 pmap uses different approaches to TLB invalidation
2701 * depending on the kernel configuration, available hardware features,
2702 * and known hardware errata. The kernel configuration option that
2703 * has the greatest operational impact on TLB invalidation is PTI,
2704 * which is enabled automatically on affected Intel CPUs. The most
 * impactful hardware features are PCID and, secondarily, the INVPCID
 * instruction.  PCID usage differs significantly between the PTI and
 * non-PTI modes of operation.
2709 * * Kernel Page Table Isolation (PTI or KPTI) is used to mitigate
2710 * the Meltdown bug in some Intel CPUs. Under PTI, each user address
2711 * space is served by two page tables, user and kernel. The user
2712 * page table only maps user space and a kernel trampoline. The
2713 * kernel trampoline includes the entirety of the kernel text but
2714 * only the kernel data that is needed to switch from user to kernel
2715 * mode. The kernel page table maps the user and kernel address
2716 * spaces in their entirety. It is identical to the per-process
2717 * page table used in non-PTI mode.
2719 * User page tables are only used when the CPU is in user mode.
2720 * Consequently, some TLB invalidations can be postponed until the
2721 * switch from kernel to user mode. In contrast, the user
2722 * space part of the kernel page table is used for copyout(9), so
2723 * TLB invalidations on this page table cannot be similarly postponed.
2725 * The existence of a user mode page table for the given pmap is
2726 * indicated by a pm_ucr3 value that differs from PMAP_NO_CR3, in
2727 * which case pm_ucr3 contains the %cr3 register value for the user
2728 * mode page table's root.
2730 * * The pm_active bitmask indicates which CPUs currently have the
2731 * pmap active. A CPU's bit is set on context switch to the pmap, and
2732 * cleared on switching off this CPU. For the kernel page table,
2733 * the pm_active field is immutable and contains all CPUs. The
2734 * kernel page table is always logically active on every processor,
2735 * but not necessarily in use by the hardware, e.g., in PTI mode.
2737 * When requesting invalidation of virtual addresses with
2738 * pmap_invalidate_XXX() functions, the pmap sends shootdown IPIs to
2739 * all CPUs recorded as active in pm_active. Updates to and reads
2740 * from pm_active are not synchronized, and so they may race with
2741 * each other. Shootdown handlers are prepared to handle the race.
2743 * * PCID is an optional feature of the long mode x86 MMU where TLB
2744 * entries are tagged with the 'Process ID' of the address space
2745 * they belong to. This feature provides a limited namespace for
2746 * process identifiers, 12 bits, supporting 4095 simultaneous IDs
2749 * Allocation of a PCID to a pmap is done by an algorithm described
2750 * in section 15.12, "Other TLB Consistency Algorithms", of
2751 * Vahalia's book "Unix Internals". A PCID cannot be allocated for
2752 * the whole lifetime of a pmap in pmap_pinit() due to the limited
2753 * namespace. Instead, a per-CPU, per-pmap PCID is assigned when
2754 * the CPU is about to start caching TLB entries from a pmap,
2755 * i.e., on the context switch that activates the pmap on the CPU.
2757 * The PCID allocator maintains a per-CPU, per-pmap generation
2758 * count, pm_gen, which is incremented each time a new PCID is
2759 * allocated. On TLB invalidation, the generation counters for the
2760 * pmap are zeroed, which signals the context switch code that the
2761 * previously allocated PCID is no longer valid. Effectively,
2762 * zeroing any of these counters triggers a TLB shootdown for the
2763 * given CPU/address space, due to the allocation of a new PCID.
2765 * Zeroing can be performed remotely. Consequently, if a pmap is
2766 * inactive on a CPU, then a TLB shootdown for that pmap and CPU can
2767 * be initiated by an ordinary memory access to reset the target
2768 * CPU's generation count within the pmap. The CPU initiating the
2769 * TLB shootdown does not need to send an IPI to the target CPU.
2771 * * PTI + PCID. The available PCIDs are divided into two sets: PCIDs
2772 * for complete (kernel) page tables, and PCIDs for user mode page
2773 * tables. A user PCID value is obtained from the kernel PCID value
2774 * by setting the highest bit, 11, to 1 (0x800 == PMAP_PCID_USER_PT).
2776 * User space page tables are activated on return to user mode, by
2777 * loading pm_ucr3 into %cr3. If the PCPU(ucr3_load_mask) requests
2778 * clearing bit 63 of the loaded ucr3, this effectively causes
2779 * complete invalidation of the user mode TLB entries for the
2780 * current pmap. In which case, local invalidations of individual
2781 * pages in the user page table are skipped.
2783 * * Local invalidation, all modes. If the requested invalidation is
2784 * for a specific address or the total invalidation of a currently
2785 * active pmap, then the TLB is flushed using INVLPG for a kernel
2786 * page table, and INVPCID(INVPCID_CTXGLOB)/invltlb_glob() for a
2787 * user space page table(s).
2789 * If the INVPCID instruction is available, it is used to flush entries
2790 * from the kernel page table.
2792 * * mode: PTI disabled, PCID present. The kernel reserves PCID 0 for its
2793 * address space, all other 4095 PCIDs are used for user mode spaces
2794 * as described above. A context switch allocates a new PCID if
2795 * the recorded PCID is zero or the recorded generation does not match
2796 * the CPU's generation, effectively flushing the TLB for this address space.
2797 * Total remote invalidation is performed by zeroing pm_gen for all CPUs.
2798 * local user page: INVLPG
2799 * local kernel page: INVLPG
2800 * local user total: INVPCID(CTX)
2801 * local kernel total: INVPCID(CTXGLOB) or invltlb_glob()
2802 * remote user page, inactive pmap: zero pm_gen
2803 * remote user page, active pmap: zero pm_gen + IPI:INVLPG
2804 * (Both actions are required to handle the aforementioned pm_active races.)
2805 * remote kernel page: IPI:INVLPG
2806 * remote user total, inactive pmap: zero pm_gen
2807 * remote user total, active pmap: zero pm_gen + IPI:(INVPCID(CTX) or
2809 * (See note above about pm_active races.)
2810 * remote kernel total: IPI:(INVPCID(CTXGLOB) or invltlb_glob())
2812 * PTI enabled, PCID present.
2813 * local user page: INVLPG for kpt, INVPCID(ADDR) or (INVLPG for ucr3)
2815 * local kernel page: INVLPG
2816 * local user total: INVPCID(CTX) or reload %cr3 for kpt, clear PCID_SAVE
2817 * on loading UCR3 into %cr3 for upt
2818 * local kernel total: INVPCID(CTXGLOB) or invltlb_glob()
2819 * remote user page, inactive pmap: zero pm_gen
2820 * remote user page, active pmap: zero pm_gen + IPI:(INVLPG for kpt,
2821 * INVPCID(ADDR) for upt)
2822 * remote kernel page: IPI:INVLPG
2823 * remote user total, inactive pmap: zero pm_gen
2824 * remote user total, active pmap: zero pm_gen + IPI:(INVPCID(CTX) for kpt,
 * clear PCID_SAVE on loading UCR3 into %cr3 for upt)
2826 * remote kernel total: IPI:(INVPCID(CTXGLOB) or invltlb_glob())
2829 * local user page: INVLPG
2830 * local kernel page: INVLPG
2831 * local user total: reload %cr3
2832 * local kernel total: invltlb_glob()
2833 * remote user page, inactive pmap: -
2834 * remote user page, active pmap: IPI:INVLPG
2835 * remote kernel page: IPI:INVLPG
2836 * remote user total, inactive pmap: -
2837 * remote user total, active pmap: IPI:(reload %cr3)
2838 * remote kernel total: IPI:invltlb_glob()
 * Since the reload of %cr3 with ucr3 on return to user mode causes full
 * TLB invalidation, no specific action is required for the user page table.
2842 * EPT. EPT pmaps do not map KVA, all mappings are userspace.
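/*
 * Illustrative sketch (not part of the pmap implementation): the INVPCID
 * descriptor layout and the invalidation types referred to above.  The
 * sk_* names are assumptions for illustration only; the kernel's own
 * wrapper is invpcid() operating on struct invpcid_descr.
 */
#if 0	/* standalone sketch, never compiled here */
#include <stdint.h>

struct sk_invpcid_descr {
	uint64_t	pcid:12;	/* PCID, used by types 0 and 1 */
	uint64_t	pad:52;		/* must be zero */
	uint64_t	addr;		/* linear address, used by type 0 */
};

#define	SK_INVPCID_ADDR		0	/* one address within one PCID */
#define	SK_INVPCID_CTX		1	/* all non-global entries of one PCID */
#define	SK_INVPCID_CTXGLOB	2	/* everything, including globals */
#define	SK_INVPCID_ALLCTX	3	/* all non-global entries, all PCIDs */

static inline void
sk_invpcid(struct sk_invpcid_descr *d, unsigned long type)
{
	__asm__ __volatile__("invpcid (%0),%1" : : "r" (d), "r" (type)
	    : "memory");
}
#endif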
2848 * Interrupt the cpus that are executing in the guest context.
2849 * This will force the vcpu to exit and the cached EPT mappings
2850 * will be invalidated by the host before the next vmresume.
2852 static __inline void
2853 pmap_invalidate_ept(pmap_t pmap)
2859 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
2860 ("pmap_invalidate_ept: absurd pm_active"));
2863 * The TLB mappings associated with a vcpu context are not
2864 * flushed each time a different vcpu is chosen to execute.
2866 * This is in contrast with a process's vtop mappings that
2867 * are flushed from the TLB on each context switch.
2869 * Therefore we need to do more than just a TLB shootdown on
2870 * the active cpus in 'pmap->pm_active'. To do this we keep
2871 * track of the number of invalidations performed on this pmap.
2873 * Each vcpu keeps a cache of this counter and compares it
2874 * just before a vmresume. If the counter is out-of-date an
2875 * invept will be done to flush stale mappings from the TLB.
2877 * To ensure that all vCPU threads have observed the new counter
2878 * value before returning, we use SMR. Ordering is important here:
2879 * the VMM enters an SMR read section before loading the counter
2880 * and after updating the pm_active bit set. Thus, pm_active is
2881 * a superset of active readers, and any reader that has observed
2882 * the goal has observed the new counter value.
2884 atomic_add_long(&pmap->pm_eptgen, 1);
2886 goal = smr_advance(pmap->pm_eptsmr);
2889 * Force the vcpu to exit and trap back into the hypervisor.
2891 ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK;
2892 ipi_selected(pmap->pm_active, ipinum);
2896 * Ensure that all active vCPUs will observe the new generation counter
2897 * value before executing any more guest instructions.
2899 smr_wait(pmap->pm_eptsmr, goal);
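/*
 * Illustrative sketch (not part of the pmap implementation): the
 * generation-count handshake described above, reduced to its core.  The
 * host bumps a shared counter whenever the EPT structures change; each
 * vcpu compares a cached copy against it just before resuming the guest
 * and flushes its EPT TLB entries when the copy is stale.  All names and
 * the userland atomics are assumptions for illustration; the kernel uses
 * pm_eptgen, SMR, and INVEPT instead.
 */
#if 0	/* standalone sketch, never compiled here */
#include <stdatomic.h>

static _Atomic unsigned long sk_ept_gen;	/* shared, host-updated */

/* Host side: called after any change to the EPT paging structures. */
static void
sk_host_ept_changed(void)
{
	atomic_fetch_add(&sk_ept_gen, 1);
	/* ...then interrupt the vcpus so they pass through sk_vcpu_resume() */
}

/* Guest-entry path: flush stale mappings before resuming the vcpu. */
static void
sk_vcpu_resume(unsigned long *cached_gen)
{
	unsigned long g = atomic_load(&sk_ept_gen);

	if (g != *cached_gen) {
		/* an invept of the stale context would be issued here */
		*cached_gen = g;
	}
}
#endif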
2903 pmap_invalidate_cpu_mask(pmap_t pmap)
2905 return (pmap == kernel_pmap ? all_cpus : pmap->pm_active);
2909 pmap_invalidate_preipi_pcid(pmap_t pmap)
2915 cpuid = PCPU_GET(cpuid);
2916 if (pmap != PCPU_GET(curpmap))
2917 cpuid = 0xffffffff; /* An impossible value */
2921 pmap->pm_pcids[i].pm_gen = 0;
2925 * The fence is between stores to pm_gen and the read of the
2926 * pm_active mask. We need to ensure that it is impossible
2927 * for us to miss the bit update in pm_active and
2928 * simultaneously observe a non-zero pm_gen in
2929 * pmap_activate_sw(), otherwise TLB update is missed.
2930 * Without the fence, IA32 allows such an outcome. Note that
2931 * pm_active is updated by a locked operation, which provides
2932 * the reciprocal fence.
2934 atomic_thread_fence_seq_cst();
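/*
 * Illustrative sketch (not part of the pmap implementation): the
 * store-fence-load pattern described in the comment above.  The initiator
 * first retires the target's PCID generation, then fences, and only then
 * reads the activity state to decide whether an IPI is also needed.  All
 * names and the userland atomics are assumptions for illustration.
 */
#if 0	/* standalone sketch, never compiled here */
#include <stdatomic.h>
#include <stdbool.h>

#define	SK_MAXCPU	256

extern _Atomic int sk_pm_gen[SK_MAXCPU];	/* per-CPU PCID generation */
extern bool sk_cpu_active(int cpu);		/* reads the pm_active bit */
extern void sk_send_invl_ipi(int cpu);

static void
sk_shootdown(int target)
{
	/* 1. Force the target to allocate a fresh PCID on next activation. */
	atomic_store(&sk_pm_gen[target], 0);
	/* 2. Fence: the store must be visible before pm_active is read. */
	atomic_thread_fence(memory_order_seq_cst);
	/* 3. Only a CPU that still has the pmap active also needs an IPI. */
	if (sk_cpu_active(target))
		sk_send_invl_ipi(target);
}
#endif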
2938 pmap_invalidate_preipi_nopcid(pmap_t pmap __unused)
2943 DEFINE_IFUNC(static, void, pmap_invalidate_preipi, (pmap_t))
2945 return (pmap_pcid_enabled ? pmap_invalidate_preipi_pcid :
2946 pmap_invalidate_preipi_nopcid);
2950 pmap_invalidate_page_pcid_cb(pmap_t pmap, vm_offset_t va,
2951 const bool invpcid_works1)
2953 struct invpcid_descr d;
2954 uint64_t kcr3, ucr3;
2959 * Because pm_pcid is recalculated on a context switch, we
2960 * must ensure there is no preemption, not just pinning.
2961 * Otherwise, we might use a stale value below.
2963 CRITICAL_ASSERT(curthread);
2966 * No need to do anything with user page tables invalidation
2967 * if there is no user page table, or invalidation is deferred
2968 * until the return to userspace. ucr3_load_mask is stable
2969 * because we have preemption disabled.
2971 if (pmap->pm_ucr3 == PMAP_NO_CR3 ||
2972 PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK)
2975 cpuid = PCPU_GET(cpuid);
2977 pcid = pmap->pm_pcids[cpuid].pm_pcid;
2978 if (invpcid_works1) {
2979 d.pcid = pcid | PMAP_PCID_USER_PT;
2982 invpcid(&d, INVPCID_ADDR);
2984 kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
2985 ucr3 = pmap->pm_ucr3 | pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
2986 pmap_pti_pcid_invlpg(ucr3, kcr3, va);
2991 pmap_invalidate_page_pcid_invpcid_cb(pmap_t pmap, vm_offset_t va)
2993 pmap_invalidate_page_pcid_cb(pmap, va, true);
2997 pmap_invalidate_page_pcid_noinvpcid_cb(pmap_t pmap, vm_offset_t va)
2999 pmap_invalidate_page_pcid_cb(pmap, va, false);
3003 pmap_invalidate_page_nopcid_cb(pmap_t pmap __unused, vm_offset_t va __unused)
3007 DEFINE_IFUNC(static, void, pmap_invalidate_page_cb, (pmap_t, vm_offset_t))
3009 if (pmap_pcid_enabled)
3010 return (invpcid_works ? pmap_invalidate_page_pcid_invpcid_cb :
3011 pmap_invalidate_page_pcid_noinvpcid_cb);
3012 return (pmap_invalidate_page_nopcid_cb);
3016 pmap_invalidate_page_curcpu_cb(pmap_t pmap, vm_offset_t va,
3017 vm_offset_t addr2 __unused)
3019 if (pmap == kernel_pmap) {
3021 } else if (pmap == PCPU_GET(curpmap)) {
3023 pmap_invalidate_page_cb(pmap, va);
3028 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
3030 if (pmap_type_guest(pmap)) {
3031 pmap_invalidate_ept(pmap);
3035 KASSERT(pmap->pm_type == PT_X86,
3036 ("pmap_invalidate_page: invalid type %d", pmap->pm_type));
3038 pmap_invalidate_preipi(pmap);
3039 smp_masked_invlpg(pmap_invalidate_cpu_mask(pmap), va, pmap,
3040 pmap_invalidate_page_curcpu_cb);
3043 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
3044 #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE)
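/*
 * For example, with 4KB pages the threshold above works out to
 * 4 * 1024 * 4096 bytes = 16MB, i.e. 4096 PTEs; pmap_invalidate_range()
 * falls back to a full TLB invalidation for any larger range.
 */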
3047 pmap_invalidate_range_pcid_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
3048 const bool invpcid_works1)
3050 struct invpcid_descr d;
3051 uint64_t kcr3, ucr3;
3055 CRITICAL_ASSERT(curthread);
3057 if (pmap != PCPU_GET(curpmap) ||
3058 pmap->pm_ucr3 == PMAP_NO_CR3 ||
3059 PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK)
3062 cpuid = PCPU_GET(cpuid);
3064 pcid = pmap->pm_pcids[cpuid].pm_pcid;
3065 if (invpcid_works1) {
3066 d.pcid = pcid | PMAP_PCID_USER_PT;
3068 for (d.addr = sva; d.addr < eva; d.addr += PAGE_SIZE)
3069 invpcid(&d, INVPCID_ADDR);
3071 kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3072 ucr3 = pmap->pm_ucr3 | pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3073 pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
3078 pmap_invalidate_range_pcid_invpcid_cb(pmap_t pmap, vm_offset_t sva,
3081 pmap_invalidate_range_pcid_cb(pmap, sva, eva, true);
3085 pmap_invalidate_range_pcid_noinvpcid_cb(pmap_t pmap, vm_offset_t sva,
3088 pmap_invalidate_range_pcid_cb(pmap, sva, eva, false);
3092 pmap_invalidate_range_nopcid_cb(pmap_t pmap __unused, vm_offset_t sva __unused,
3093 vm_offset_t eva __unused)
3097 DEFINE_IFUNC(static, void, pmap_invalidate_range_cb, (pmap_t, vm_offset_t,
3100 if (pmap_pcid_enabled)
3101 return (invpcid_works ? pmap_invalidate_range_pcid_invpcid_cb :
3102 pmap_invalidate_range_pcid_noinvpcid_cb);
3103 return (pmap_invalidate_range_nopcid_cb);
3107 pmap_invalidate_range_curcpu_cb(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3111 if (pmap == kernel_pmap) {
3112 for (addr = sva; addr < eva; addr += PAGE_SIZE)
3114 } else if (pmap == PCPU_GET(curpmap)) {
3115 for (addr = sva; addr < eva; addr += PAGE_SIZE)
3117 pmap_invalidate_range_cb(pmap, sva, eva);
3122 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3124 if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
3125 pmap_invalidate_all(pmap);
3129 if (pmap_type_guest(pmap)) {
3130 pmap_invalidate_ept(pmap);
3134 KASSERT(pmap->pm_type == PT_X86,
3135 ("pmap_invalidate_range: invalid type %d", pmap->pm_type));
3137 pmap_invalidate_preipi(pmap);
3138 smp_masked_invlpg_range(pmap_invalidate_cpu_mask(pmap), sva, eva, pmap,
3139 pmap_invalidate_range_curcpu_cb);
3143 pmap_invalidate_all_pcid_cb(pmap_t pmap, bool invpcid_works1)
3145 struct invpcid_descr d;
3150 if (pmap == kernel_pmap) {
3151 if (invpcid_works1) {
3152 bzero(&d, sizeof(d));
3153 invpcid(&d, INVPCID_CTXGLOB);
3157 } else if (pmap == PCPU_GET(curpmap)) {
3158 CRITICAL_ASSERT(curthread);
3159 cpuid = PCPU_GET(cpuid);
3161 pcid = pmap->pm_pcids[cpuid].pm_pcid;
3162 if (invpcid_works1) {
3166 invpcid(&d, INVPCID_CTX);
3168 kcr3 = pmap->pm_cr3 | pcid;
3171 if (pmap->pm_ucr3 != PMAP_NO_CR3)
3172 PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
3177 pmap_invalidate_all_pcid_invpcid_cb(pmap_t pmap)
3179 pmap_invalidate_all_pcid_cb(pmap, true);
3183 pmap_invalidate_all_pcid_noinvpcid_cb(pmap_t pmap)
3185 pmap_invalidate_all_pcid_cb(pmap, false);
3189 pmap_invalidate_all_nopcid_cb(pmap_t pmap)
3191 if (pmap == kernel_pmap)
3193 else if (pmap == PCPU_GET(curpmap))
3197 DEFINE_IFUNC(static, void, pmap_invalidate_all_cb, (pmap_t))
3199 if (pmap_pcid_enabled)
3200 return (invpcid_works ? pmap_invalidate_all_pcid_invpcid_cb :
3201 pmap_invalidate_all_pcid_noinvpcid_cb);
3202 return (pmap_invalidate_all_nopcid_cb);
3206 pmap_invalidate_all_curcpu_cb(pmap_t pmap, vm_offset_t addr1 __unused,
3207 vm_offset_t addr2 __unused)
3209 pmap_invalidate_all_cb(pmap);
3213 pmap_invalidate_all(pmap_t pmap)
3215 if (pmap_type_guest(pmap)) {
3216 pmap_invalidate_ept(pmap);
3220 KASSERT(pmap->pm_type == PT_X86,
3221 ("pmap_invalidate_all: invalid type %d", pmap->pm_type));
3223 pmap_invalidate_preipi(pmap);
3224 smp_masked_invltlb(pmap_invalidate_cpu_mask(pmap), pmap,
3225 pmap_invalidate_all_curcpu_cb);
3229 pmap_invalidate_cache_curcpu_cb(pmap_t pmap __unused, vm_offset_t va __unused,
3230 vm_offset_t addr2 __unused)
3236 pmap_invalidate_cache(void)
3239 smp_cache_flush(pmap_invalidate_cache_curcpu_cb);
3243 cpuset_t invalidate; /* processors that invalidate their TLB */
3248 u_int store; /* processor that updates the PDE */
3252 pmap_update_pde_action(void *arg)
3254 struct pde_action *act = arg;
3256 if (act->store == PCPU_GET(cpuid))
3257 pmap_update_pde_store(act->pmap, act->pde, act->newpde);
3261 pmap_update_pde_teardown(void *arg)
3263 struct pde_action *act = arg;
3265 if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
3266 pmap_update_pde_invalidate(act->pmap, act->va, act->newpde);
 * Change the page size for the specified virtual address in a way that
 * prevents any possibility of the TLB ever having two entries that map the
 * same virtual address using different page sizes.  This is the recommended
 * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
 * machine check exception for a TLB state that is improperly diagnosed as a
 * hardware error.
3278 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
3280 struct pde_action act;
3281 cpuset_t active, other_cpus;
3285 cpuid = PCPU_GET(cpuid);
3286 other_cpus = all_cpus;
3287 CPU_CLR(cpuid, &other_cpus);
3288 if (pmap == kernel_pmap || pmap_type_guest(pmap))
3291 active = pmap->pm_active;
3293 if (CPU_OVERLAP(&active, &other_cpus)) {
3295 act.invalidate = active;
3299 act.newpde = newpde;
3300 CPU_SET(cpuid, &active);
3301 smp_rendezvous_cpus(active,
3302 smp_no_rendezvous_barrier, pmap_update_pde_action,
3303 pmap_update_pde_teardown, &act);
3305 pmap_update_pde_store(pmap, pde, newpde);
3306 if (CPU_ISSET(cpuid, &active))
3307 pmap_update_pde_invalidate(pmap, va, newpde);
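/*
 * Illustrative sketch (not part of the pmap implementation): the shape of
 * the rendezvous used by pmap_update_pde() above.  One designated CPU
 * stores the new PDE while every CPU that may cache the old mapping
 * invalidates its TLB; here the two phases are shown sequentially, whereas
 * the kernel runs the callbacks on all CPUs via smp_rendezvous_cpus().
 * All names are assumptions for illustration.
 */
#if 0	/* standalone sketch, never compiled here */
#include <stdbool.h>
#include <stdint.h>

struct sk_pde_action {
	uint64_t	*pde;		/* entry being changed */
	uint64_t	newpde;		/* replacement value */
};

extern bool sk_cpu_must_invalidate(int cpu);	/* in the 'invalidate' set */
extern void sk_local_tlb_flush(int cpu);

static void
sk_update_pde(struct sk_pde_action *act, int ncpus)
{
	int cpu;

	/* "action" phase: the designated CPU installs the new PDE. */
	*act->pde = act->newpde;
	/* "teardown" phase: CPUs that may cache the old mapping flush. */
	for (cpu = 0; cpu < ncpus; cpu++)
		if (sk_cpu_must_invalidate(cpu))
			sk_local_tlb_flush(cpu);
}
#endif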
3313 * Normal, non-SMP, invalidation functions.
3316 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
3318 struct invpcid_descr d;
3319 uint64_t kcr3, ucr3;
3322 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
	KASSERT(pmap->pm_type == PT_X86,
	    ("pmap_invalidate_page: unknown type %d", pmap->pm_type));
3329 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
3331 if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
3332 pmap->pm_ucr3 != PMAP_NO_CR3) {
3334 pcid = pmap->pm_pcids[0].pm_pcid;
3335 if (invpcid_works) {
3336 d.pcid = pcid | PMAP_PCID_USER_PT;
3339 invpcid(&d, INVPCID_ADDR);
3341 kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
3342 ucr3 = pmap->pm_ucr3 | pcid |
3343 PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3344 pmap_pti_pcid_invlpg(ucr3, kcr3, va);
3348 } else if (pmap_pcid_enabled)
3349 pmap->pm_pcids[0].pm_gen = 0;
3353 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3355 struct invpcid_descr d;
3357 uint64_t kcr3, ucr3;
3359 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3363 KASSERT(pmap->pm_type == PT_X86,
3364 ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
3366 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
3367 for (addr = sva; addr < eva; addr += PAGE_SIZE)
3369 if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
3370 pmap->pm_ucr3 != PMAP_NO_CR3) {
3372 if (invpcid_works) {
3373 d.pcid = pmap->pm_pcids[0].pm_pcid |
3377 for (; d.addr < eva; d.addr += PAGE_SIZE)
3378 invpcid(&d, INVPCID_ADDR);
3380 kcr3 = pmap->pm_cr3 | pmap->pm_pcids[0].
3381 pm_pcid | CR3_PCID_SAVE;
3382 ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[0].
3383 pm_pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
3384 pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
3388 } else if (pmap_pcid_enabled) {
3389 pmap->pm_pcids[0].pm_gen = 0;
3394 pmap_invalidate_all(pmap_t pmap)
3396 struct invpcid_descr d;
3397 uint64_t kcr3, ucr3;
3399 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
3403 KASSERT(pmap->pm_type == PT_X86,
3404 ("pmap_invalidate_all: unknown type %d", pmap->pm_type));
3406 if (pmap == kernel_pmap) {
3407 if (pmap_pcid_enabled && invpcid_works) {
3408 bzero(&d, sizeof(d));
3409 invpcid(&d, INVPCID_CTXGLOB);
3413 } else if (pmap == PCPU_GET(curpmap)) {
3414 if (pmap_pcid_enabled) {
3416 if (invpcid_works) {
3417 d.pcid = pmap->pm_pcids[0].pm_pcid;
3420 invpcid(&d, INVPCID_CTX);
3421 if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3422 d.pcid |= PMAP_PCID_USER_PT;
3423 invpcid(&d, INVPCID_CTX);
3426 kcr3 = pmap->pm_cr3 | pmap->pm_pcids[0].pm_pcid;
3427 if (pmap->pm_ucr3 != PMAP_NO_CR3) {
3428 ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[
3429 0].pm_pcid | PMAP_PCID_USER_PT;
3430 pmap_pti_pcid_invalidate(ucr3, kcr3);
3438 } else if (pmap_pcid_enabled) {
3439 pmap->pm_pcids[0].pm_gen = 0;
3444 pmap_invalidate_cache(void)
3451 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
3454 pmap_update_pde_store(pmap, pde, newpde);
3455 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
3456 pmap_update_pde_invalidate(pmap, va, newpde);
3458 pmap->pm_pcids[0].pm_gen = 0;
3463 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
3467 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
3468 * by a promotion that did not invalidate the 512 4KB page mappings
3469 * that might exist in the TLB. Consequently, at this point, the TLB
3470 * may hold both 4KB and 2MB page mappings for the address range [va,
3471 * va + NBPDR). Therefore, the entire range must be invalidated here.
3472 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
3473 * 4KB page mappings for the address range [va, va + NBPDR), and so a
	 * single INVLPG suffices to invalidate the 2MB page mapping from the TLB.
3477 if ((pde & PG_PROMOTED) != 0)
3478 pmap_invalidate_range(pmap, va, va + NBPDR - 1);
3480 pmap_invalidate_page(pmap, va);
3483 DEFINE_IFUNC(, void, pmap_invalidate_cache_range,
3484 (vm_offset_t sva, vm_offset_t eva))
3487 if ((cpu_feature & CPUID_SS) != 0)
3488 return (pmap_invalidate_cache_range_selfsnoop);
3489 if ((cpu_feature & CPUID_CLFSH) != 0)
3490 return (pmap_force_invalidate_cache_range);
3491 return (pmap_invalidate_cache_range_all);
3494 #define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
3497 pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
3500 KASSERT((sva & PAGE_MASK) == 0,
3501 ("pmap_invalidate_cache_range: sva not page-aligned"));
3502 KASSERT((eva & PAGE_MASK) == 0,
3503 ("pmap_invalidate_cache_range: eva not page-aligned"));
3507 pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
3510 pmap_invalidate_cache_range_check_align(sva, eva);
3514 pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
3517 sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
3520 * XXX: Some CPUs fault, hang, or trash the local APIC
3521 * registers if we use CLFLUSH on the local APIC range. The
3522 * local APIC is always uncached, so we don't need to flush
3523 * for that range anyway.
3525 if (pmap_kextract(sva) == lapic_paddr)
3528 if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
		 * Do per-cache line flush.  Use a locked
		 * instruction to ensure that previous stores are
		 * included in the write-back.  The processor
		 * propagates the flush to other processors in the
		 * cache coherency domain.
3536 atomic_thread_fence_seq_cst();
3537 for (; sva < eva; sva += cpu_clflush_line_size)
3539 atomic_thread_fence_seq_cst();
3542 * Writes are ordered by CLFLUSH on Intel CPUs.
3544 if (cpu_vendor_id != CPU_VENDOR_INTEL)
3546 for (; sva < eva; sva += cpu_clflush_line_size)
3548 if (cpu_vendor_id != CPU_VENDOR_INTEL)
3554 pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
3557 pmap_invalidate_cache_range_check_align(sva, eva);
3558 pmap_invalidate_cache();
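/*
 * Illustrative sketch (not part of the pmap implementation): a userland
 * cache-line write-back loop with the fencing discussed above.  CLFLUSHOPT
 * is only weakly ordered, so a fence is needed before and after the loop;
 * the kernel uses atomic_thread_fence_seq_cst() and the raw instructions
 * rather than the intrinsics assumed here.
 */
#if 0	/* standalone sketch, never compiled here; build with -mclflushopt */
#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>

#define	SK_CL_SIZE	64		/* assumed cache-line size */

static void
sk_flush_range(const volatile void *buf, size_t len)
{
	uintptr_t p, end;

	p = (uintptr_t)buf & ~(uintptr_t)(SK_CL_SIZE - 1);
	end = (uintptr_t)buf + len;
	_mm_mfence();		/* order earlier stores before the flushes */
	for (; p < end; p += SK_CL_SIZE)
		_mm_clflushopt((void *)p);
	_mm_mfence();		/* ensure the flushes complete before return */
}
#endif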
3562 * Remove the specified set of pages from the data and instruction caches.
3564 * In contrast to pmap_invalidate_cache_range(), this function does not
3565 * rely on the CPU's self-snoop feature, because it is intended for use
3566 * when moving pages into a different cache domain.
3569 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
3571 vm_offset_t daddr, eva;
3575 useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
3576 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
3577 ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
3578 pmap_invalidate_cache();
3581 atomic_thread_fence_seq_cst();
3582 else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3584 for (i = 0; i < count; i++) {
3585 daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
3586 eva = daddr + PAGE_SIZE;
3587 for (; daddr < eva; daddr += cpu_clflush_line_size) {
3595 atomic_thread_fence_seq_cst();
3596 else if (cpu_vendor_id != CPU_VENDOR_INTEL)
3602 pmap_flush_cache_range(vm_offset_t sva, vm_offset_t eva)
3605 pmap_invalidate_cache_range_check_align(sva, eva);
3607 if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) == 0) {
3608 pmap_force_invalidate_cache_range(sva, eva);
3612 /* See comment in pmap_force_invalidate_cache_range(). */
3613 if (pmap_kextract(sva) == lapic_paddr)
3616 atomic_thread_fence_seq_cst();
3617 for (; sva < eva; sva += cpu_clflush_line_size)
3619 atomic_thread_fence_seq_cst();
3623 pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t epa, vm_memattr_t mattr)
3627 int error, pte_bits;
3629 KASSERT((spa & PAGE_MASK) == 0,
3630 ("pmap_flush_cache_phys_range: spa not page-aligned"));
3631 KASSERT((epa & PAGE_MASK) == 0,
3632 ("pmap_flush_cache_phys_range: epa not page-aligned"));
3634 if (spa < dmaplimit) {
3635 pmap_flush_cache_range(PHYS_TO_DMAP(spa), PHYS_TO_DMAP(MIN(
3637 if (dmaplimit >= epa)
3642 pte_bits = pmap_cache_bits(kernel_pmap, mattr, 0) | X86_PG_RW |
3644 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3646 KASSERT(error == 0, ("vmem_alloc failed: %d", error));
3647 pte = vtopte(vaddr);
3648 for (; spa < epa; spa += PAGE_SIZE) {
3650 pte_store(pte, spa | pte_bits);
3652 /* XXXKIB atomic inside flush_cache_range are excessive */
3653 pmap_flush_cache_range(vaddr, vaddr + PAGE_SIZE);
3656 vmem_free(kernel_arena, vaddr, PAGE_SIZE);
3660 * Routine: pmap_extract
3662 * Extract the physical page address associated
3663 * with the given map/virtual_address pair.
3666 pmap_extract(pmap_t pmap, vm_offset_t va)
3670 pt_entry_t *pte, PG_V;
3674 PG_V = pmap_valid_bit(pmap);
3676 pdpe = pmap_pdpe(pmap, va);
3677 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
3678 if ((*pdpe & PG_PS) != 0)
3679 pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
3681 pde = pmap_pdpe_to_pde(pdpe, va);
3682 if ((*pde & PG_V) != 0) {
3683 if ((*pde & PG_PS) != 0) {
3684 pa = (*pde & PG_PS_FRAME) |
3687 pte = pmap_pde_to_pte(pde, va);
3688 pa = (*pte & PG_FRAME) |
3699 * Routine: pmap_extract_and_hold
3701 * Atomically extract and hold the physical page
3702 * with the given pmap and virtual address pair
3703 * if that mapping permits the given protection.
3706 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3708 pdp_entry_t pdpe, *pdpep;
3709 pd_entry_t pde, *pdep;
3710 pt_entry_t pte, PG_RW, PG_V;
3714 PG_RW = pmap_rw_bit(pmap);
3715 PG_V = pmap_valid_bit(pmap);
3718 pdpep = pmap_pdpe(pmap, va);
3719 if (pdpep == NULL || ((pdpe = *pdpep) & PG_V) == 0)
3721 if ((pdpe & PG_PS) != 0) {
3722 if ((pdpe & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0)
3724 m = PHYS_TO_VM_PAGE((pdpe & PG_PS_FRAME) | (va & PDPMASK));
3728 pdep = pmap_pdpe_to_pde(pdpep, va);
3729 if (pdep == NULL || ((pde = *pdep) & PG_V) == 0)
3731 if ((pde & PG_PS) != 0) {
3732 if ((pde & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0)
3734 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | (va & PDRMASK));
3738 pte = *pmap_pde_to_pte(pdep, va);
3739 if ((pte & PG_V) == 0 ||
3740 ((pte & PG_RW) == 0 && (prot & VM_PROT_WRITE) != 0))
3742 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3745 if (m != NULL && !vm_page_wire_mapped(m))
3753 pmap_kextract(vm_offset_t va)
3758 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
3759 pa = DMAP_TO_PHYS(va);
3760 } else if (PMAP_ADDRESS_IN_LARGEMAP(va)) {
3761 pa = pmap_large_map_kextract(va);
3765 pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
3768 * Beware of a concurrent promotion that changes the
3769 * PDE at this point! For example, vtopte() must not
3770 * be used to access the PTE because it would use the
3771 * new PDE. It is, however, safe to use the old PDE
3772 * because the page table page is preserved by the
3775 pa = *pmap_pde_to_pte(&pde, va);
3776 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
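/*
 * Illustrative sketch (not part of the pmap implementation): a software
 * walk of 4-level x86-64 page tables, the operation that pmap_extract()
 * and pmap_kextract() above perform with the help of the recursive and
 * direct mappings.  sk_read_entry() and the sk_* constants are assumptions
 * for illustration; 1GB and 2MB leaves terminate the walk early, just as
 * the PG_PS checks above do.
 */
#if 0	/* standalone sketch, never compiled here */
#include <stdbool.h>
#include <stdint.h>

#define	SK_PG_V		0x001ULL
#define	SK_PG_PS	0x080ULL
#define	SK_PG_FRAME	0x000ffffffffff000ULL

/* Fetch the entry at index 'idx' of the table at physical address 'table_pa'. */
extern uint64_t sk_read_entry(uint64_t table_pa, int idx);

static bool
sk_va_to_pa(uint64_t pml4_pa, uint64_t va, uint64_t *pa)
{
	uint64_t e = pml4_pa;
	int shift;

	for (shift = 39; shift >= 12; shift -= 9) {
		e = sk_read_entry(e & SK_PG_FRAME, (va >> shift) & 0x1ff);
		if ((e & SK_PG_V) == 0)
			return (false);
		if (shift > 12 && (e & SK_PG_PS) != 0) {
			/* 1GB (shift 30) or 2MB (shift 21) leaf mapping. */
			*pa = (e & SK_PG_FRAME & ~((1ULL << shift) - 1)) |
			    (va & ((1ULL << shift) - 1));
			return (true);
		}
	}
	*pa = (e & SK_PG_FRAME) | (va & 0xfff);
	return (true);
}
#endif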
3782 /***************************************************
3783 * Low level mapping routines.....
3784 ***************************************************/
3787 * Add a wired page to the kva.
3788 * Note: not SMP coherent.
3791 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
3796 pte_store(pte, pa | X86_PG_RW | X86_PG_V | pg_g | pg_nx);
3799 static __inline void
3800 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
3806 cache_bits = pmap_cache_bits(kernel_pmap, mode, 0);
3807 pte_store(pte, pa | X86_PG_RW | X86_PG_V | pg_g | pg_nx | cache_bits);
3811 * Remove a page from the kernel pagetables.
3812 * Note: not SMP coherent.
3815 pmap_kremove(vm_offset_t va)
3824 * Used to map a range of physical addresses into kernel
3825 * virtual address space.
3827 * The value passed in '*virt' is a suggested virtual address for
3828 * the mapping. Architectures which can support a direct-mapped
3829 * physical to virtual region can return the appropriate address
3830 * within that region, leaving '*virt' unchanged. Other
3831 * architectures should map the pages starting at '*virt' and
3832 * update '*virt' with the first usable address after the mapped
3836 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
3838 return PHYS_TO_DMAP(start);
3842 * Add a list of wired pages to the kva
3843 * this routine is only used for temporary
3844 * kernel mappings that do not need to have
3845 * page modification or references recorded.
3846 * Note that old mappings are simply written
3847 * over. The page *must* be wired.
3848 * Note: SMP coherent. Uses a ranged shootdown IPI.
3851 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
3853 pt_entry_t *endpte, oldpte, pa, *pte;
3859 endpte = pte + count;
3860 while (pte < endpte) {
3862 cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
3863 pa = VM_PAGE_TO_PHYS(m) | cache_bits;
3864 if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) {
3866 pte_store(pte, pa | pg_g | pg_nx | X86_PG_RW | X86_PG_V);
3870 if (__predict_false((oldpte & X86_PG_V) != 0))
3871 pmap_invalidate_range(kernel_pmap, sva, sva + count *
3876 * This routine tears out page mappings from the
3877 * kernel -- it is meant only for temporary mappings.
3878 * Note: SMP coherent. Uses a ranged shootdown IPI.
3881 pmap_qremove(vm_offset_t sva, int count)
3886 while (count-- > 0) {
3887 KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
3891 pmap_invalidate_range(kernel_pmap, sva, va);
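/*
 * Hypothetical usage sketch (assumed caller, not taken from this file):
 * a driver that owns a KVA window 'va' of 'npages' pages and an array
 * 'ma' of wired pages could map and unmap them temporarily as follows.
 */
#if 0	/* standalone sketch, never compiled here */
	pmap_qenter(va, ma, npages);	/* overwrite any old mappings */
	/* ... access the pages through 'va' ... */
	pmap_qremove(va, npages);	/* tear the temporary mappings down */
#endif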
3894 /***************************************************
3895 * Page table page management routines.....
3896 ***************************************************/
3898 * Schedule the specified unused page table page to be freed. Specifically,
3899 * add the page to the specified list of pages that will be released to the
3900 * physical memory manager after the TLB has been updated.
3902 static __inline void
3903 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
3904 boolean_t set_PG_ZERO)
3908 m->flags |= PG_ZERO;
3910 m->flags &= ~PG_ZERO;
3911 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
3915 * Inserts the specified page table page into the specified pmap's collection
3916 * of idle page table pages. Each of a pmap's page table pages is responsible
3917 * for mapping a distinct range of virtual addresses. The pmap's collection is
3918 * ordered by this virtual address range.
3920 * If "promoted" is false, then the page table page "mpte" must be zero filled.
3923 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
3926 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3927 mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
3928 return (vm_radix_insert(&pmap->pm_root, mpte));
3932 * Removes the page table page mapping the specified virtual address from the
3933 * specified pmap's collection of idle page table pages, and returns it.
3934 * Otherwise, returns NULL if there is no page table page corresponding to the
3935 * specified virtual address.
3937 static __inline vm_page_t
3938 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
3941 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3942 return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
3946 * Decrements a page table page's reference count, which is used to record the
3947 * number of valid page table entries within the page. If the reference count
3948 * drops to zero, then the page table page is unmapped. Returns TRUE if the
3949 * page table page was unmapped and FALSE otherwise.
3951 static inline boolean_t
3952 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
3956 if (m->ref_count == 0) {
3957 _pmap_unwire_ptp(pmap, va, m, free);
3964 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
3970 vm_page_t pdpg, pdppg, pml4pg;
3972 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3975 * unmap the page table page
3977 if (m->pindex >= NUPDE + NUPDPE + NUPML4E) {
3979 MPASS(pmap_is_la57(pmap));
3980 pml5 = pmap_pml5e(pmap, va);
3982 if (pmap->pm_pmltopu != NULL && va <= VM_MAXUSER_ADDRESS) {
3983 pml5 = pmap_pml5e_u(pmap, va);
3986 } else if (m->pindex >= NUPDE + NUPDPE) {
3988 pml4 = pmap_pml4e(pmap, va);
3990 if (!pmap_is_la57(pmap) && pmap->pm_pmltopu != NULL &&
3991 va <= VM_MAXUSER_ADDRESS) {
3992 pml4 = pmap_pml4e_u(pmap, va);
3995 } else if (m->pindex >= NUPDE) {
3997 pdp = pmap_pdpe(pmap, va);
4001 pd = pmap_pde(pmap, va);
4004 pmap_resident_count_dec(pmap, 1);
4005 if (m->pindex < NUPDE) {
4006 /* We just released a PT, unhold the matching PD */
4007 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
4008 pmap_unwire_ptp(pmap, va, pdpg, free);
4009 } else if (m->pindex < NUPDE + NUPDPE) {
4010 /* We just released a PD, unhold the matching PDP */
4011 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
4012 pmap_unwire_ptp(pmap, va, pdppg, free);
4013 } else if (m->pindex < NUPDE + NUPDPE + NUPML4E && pmap_is_la57(pmap)) {
4014 /* We just released a PDP, unhold the matching PML4 */
4015 pml4pg = PHYS_TO_VM_PAGE(*pmap_pml5e(pmap, va) & PG_FRAME);
4016 pmap_unwire_ptp(pmap, va, pml4pg, free);
4020 * Put page on a list so that it is released after
4021 * *ALL* TLB shootdown is done
4023 pmap_add_delayed_free_list(m, free, TRUE);
4027 * After removing a page table entry, this routine is used to
4028 * conditionally free the page, and manage the reference count.
4031 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
4032 struct spglist *free)
4036 if (va >= VM_MAXUSER_ADDRESS)
4038 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4039 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4040 return (pmap_unwire_ptp(pmap, va, mpte, free));
4044 * Release a page table page reference after a failed attempt to create a
4048 pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
4050 struct spglist free;
4053 if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
4055 * Although "va" was never mapped, paging-structure caches
4056 * could nonetheless have entries that refer to the freed
4057 * page table pages. Invalidate those entries.
4059 pmap_invalidate_page(pmap, va);
4060 vm_page_free_pages_toq(&free, true);
4065 pmap_pinit0(pmap_t pmap)
4071 PMAP_LOCK_INIT(pmap);
4072 pmap->pm_pmltop = kernel_pmap->pm_pmltop;
4073 pmap->pm_pmltopu = NULL;
4074 pmap->pm_cr3 = kernel_pmap->pm_cr3;
4075 /* hack to keep pmap_pti_pcid_invalidate() alive */
4076 pmap->pm_ucr3 = PMAP_NO_CR3;
4077 pmap->pm_root.rt_root = 0;
4078 CPU_ZERO(&pmap->pm_active);
4079 TAILQ_INIT(&pmap->pm_pvchunk);
4080 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4081 pmap->pm_flags = pmap_flags;
4083 pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN + 1;
4084 pmap->pm_pcids[i].pm_gen = 1;
4086 pmap_activate_boot(pmap);
4091 p->p_md.md_flags |= P_MD_KPTI;
4094 pmap_thread_init_invl_gen(td);
4096 if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
4097 pmap_pkru_ranges_zone = uma_zcreate("pkru ranges",
4098 sizeof(struct pmap_pkru_range), NULL, NULL, NULL, NULL,
4104 pmap_pinit_pml4(vm_page_t pml4pg)
4106 pml4_entry_t *pm_pml4;
4109 pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
4111 /* Wire in kernel global address entries. */
4112 for (i = 0; i < NKPML4E; i++) {
4113 pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
4116 for (i = 0; i < ndmpdpphys; i++) {
4117 pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
4121 /* install self-referential address mapping entry(s) */
4122 pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
4123 X86_PG_A | X86_PG_M;
4125 /* install large map entries if configured */
4126 for (i = 0; i < lm_ents; i++)
4127 pm_pml4[LMSPML4I + i] = kernel_pmap->pm_pmltop[LMSPML4I + i];
4131 pmap_pinit_pml5(vm_page_t pml5pg)
4133 pml5_entry_t *pm_pml5;
4135 pm_pml5 = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pg));
4138 * Add pml5 entry at top of KVA pointing to existing pml4 table,
4139 * entering all existing kernel mappings into level 5 table.
4141 pm_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
4142 X86_PG_RW | X86_PG_A | X86_PG_M | pg_g |
4143 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
4146 * Install self-referential address mapping entry.
4148 pm_pml5[PML5PML5I] = VM_PAGE_TO_PHYS(pml5pg) |
4149 X86_PG_RW | X86_PG_V | X86_PG_M | X86_PG_A |
4150 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
4154 pmap_pinit_pml4_pti(vm_page_t pml4pgu)
4156 pml4_entry_t *pm_pml4u;
4159 pm_pml4u = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pgu));
4160 for (i = 0; i < NPML4EPG; i++)
4161 pm_pml4u[i] = pti_pml4[i];
4165 pmap_pinit_pml5_pti(vm_page_t pml5pgu)
4167 pml5_entry_t *pm_pml5u;
4169 pm_pml5u = (pml5_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml5pgu));
4172 * Add pml5 entry at top of KVA pointing to existing pml4 pti
4173 * table, entering all kernel mappings needed for usermode
4174 * into level 5 table.
4176 pm_pml5u[pmap_pml5e_index(UPT_MAX_ADDRESS)] =
4177 pmap_kextract((vm_offset_t)pti_pml4) |
4178 X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M | pg_g |
4179 pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
4183 * Initialize a preallocated and zeroed pmap structure,
4184 * such as one in a vmspace structure.
4187 pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
4189 vm_page_t pmltop_pg, pmltop_pgu;
4190 vm_paddr_t pmltop_phys;
4194 * allocate the page directory page
4196 pmltop_pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
4197 VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
4199 pmltop_phys = VM_PAGE_TO_PHYS(pmltop_pg);
4200 pmap->pm_pmltop = (pml5_entry_t *)PHYS_TO_DMAP(pmltop_phys);
4203 pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE;
4204 pmap->pm_pcids[i].pm_gen = 0;
4206 pmap->pm_cr3 = PMAP_NO_CR3; /* initialize to an invalid value */
4207 pmap->pm_ucr3 = PMAP_NO_CR3;
4208 pmap->pm_pmltopu = NULL;
4210 pmap->pm_type = pm_type;
4211 if ((pmltop_pg->flags & PG_ZERO) == 0)
4212 pagezero(pmap->pm_pmltop);
	 * Do not install the host kernel mappings in the nested page
	 * tables.  These mappings are meaningless in the guest physical
	 * address space.  Install minimal kernel mappings in the PTI case.
4222 pmap->pm_cr3 = pmltop_phys;
4223 if (pmap_is_la57(pmap))
4224 pmap_pinit_pml5(pmltop_pg);
4226 pmap_pinit_pml4(pmltop_pg);
4227 if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
4228 pmltop_pgu = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
4229 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
4230 pmap->pm_pmltopu = (pml4_entry_t *)PHYS_TO_DMAP(
4231 VM_PAGE_TO_PHYS(pmltop_pgu));
4232 if (pmap_is_la57(pmap))
4233 pmap_pinit_pml5_pti(pmltop_pgu);
4235 pmap_pinit_pml4_pti(pmltop_pgu);
4236 pmap->pm_ucr3 = VM_PAGE_TO_PHYS(pmltop_pgu);
4238 if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
4239 rangeset_init(&pmap->pm_pkru, pkru_dup_range,
4240 pkru_free_range, pmap, M_NOWAIT);
4245 pmap->pm_eptsmr = smr_create("pmap", 0, 0);
4249 pmap->pm_root.rt_root = 0;
4250 CPU_ZERO(&pmap->pm_active);
4251 TAILQ_INIT(&pmap->pm_pvchunk);
4252 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4253 pmap->pm_flags = flags;
4254 pmap->pm_eptgen = 0;
4260 pmap_pinit(pmap_t pmap)
4263 return (pmap_pinit_type(pmap, PT_X86, pmap_flags));
4267 pmap_allocpte_free_unref(pmap_t pmap, vm_offset_t va, pt_entry_t *pte)
4270 struct spglist free;
4272 mpg = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
4273 if (mpg->ref_count != 0)
4276 _pmap_unwire_ptp(pmap, va, mpg, &free);
4277 pmap_invalidate_page(pmap, va);
4278 vm_page_free_pages_toq(&free, true);
4281 static pml4_entry_t *
4282 pmap_allocpte_getpml4(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
4285 vm_pindex_t pml5index;
4292 if (!pmap_is_la57(pmap))
4293 return (&pmap->pm_pmltop[pmap_pml4e_index(va)]);
4295 PG_V = pmap_valid_bit(pmap);
4296 pml5index = pmap_pml5e_index(va);
4297 pml5 = &pmap->pm_pmltop[pml5index];
4298 if ((*pml5 & PG_V) == 0) {
4299 if (pmap_allocpte_nosleep(pmap, pmap_pml5e_pindex(va), lockp,
4306 pml4 = (pml4_entry_t *)PHYS_TO_DMAP(*pml5 & PG_FRAME);
4307 pml4 = &pml4[pmap_pml4e_index(va)];
4308 if ((*pml4 & PG_V) == 0) {
4309 pml4pg = PHYS_TO_VM_PAGE(*pml5 & PG_FRAME);
4310 if (allocated && !addref)
4311 pml4pg->ref_count--;
4312 else if (!allocated && addref)
4313 pml4pg->ref_count++;
4318 static pdp_entry_t *
4319 pmap_allocpte_getpdp(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
4328 PG_V = pmap_valid_bit(pmap);
4330 pml4 = pmap_allocpte_getpml4(pmap, lockp, va, false);
4334 if ((*pml4 & PG_V) == 0) {
4335 /* Have to allocate a new pdp, recurse */
4336 if (pmap_allocpte_nosleep(pmap, pmap_pml4e_pindex(va), lockp,
4338 if (pmap_is_la57(pmap))
4339 pmap_allocpte_free_unref(pmap, va,
4340 pmap_pml5e(pmap, va));
4347 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
4348 pdp = &pdp[pmap_pdpe_index(va)];
4349 if ((*pdp & PG_V) == 0) {
4350 pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
if (allocated && !addref)
	pdppg->ref_count--;
else if (!allocated && addref)
	pdppg->ref_count++;
/*
 * The ptepindexes, i.e. page indices, of the page table pages encountered
 * while translating virtual address va are defined as follows:
 * - for the page table page (last level),
 *      ptepindex = pmap_pde_pindex(va) = va >> PDRSHIFT,
 *      in other words, it is just the index of the PDE that maps the page
 *      table page.
 * - for the page directory page,
 *      ptepindex = NUPDE (number of userland PD entries) +
 *          (pmap_pde_index(va) >> NPDEPGSHIFT)
 *      i.e. index of PDPE is put after the last index of PDE,
 * - for the page directory pointer page,
 *      ptepindex = NUPDE + NUPDPE + (pmap_pde_index(va) >> (NPDEPGSHIFT +
 *          NPML4EPGSHIFT)),
 *      i.e. index of pml4e is put after the last index of PDPE,
 * - for the PML4 page (if LA57 mode is enabled),
 *      ptepindex = NUPDE + NUPDPE + NUPML4E + (pmap_pde_index(va) >>
 *          (NPDEPGSHIFT + NPML4EPGSHIFT + NPML5EPGSHIFT)),
 *      i.e. index of pml5e is put after the last index of PML4E.
 *
 * Define an order on the paging entries, where all entries of the
 * same height are put together, then heights are put from deepest to
 * root.  Then ptepindex is the sequential number of the
 * corresponding paging entry in this order.
 *
 * The values of NUPDE, NUPDPE, and NUPML4E are determined by the size of
 * LA57 paging structures even in LA48 paging mode.  Moreover, the
 * ptepindexes are calculated as if the paging structures were 5-level
 * regardless of the actual mode of operation.
 *
 * The root page at PML4/PML5 does not participate in this indexing scheme,
 * since it is statically allocated by pmap_pinit() and not by pmap_allocpte().
 */
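/*
 * Illustrative sketch, not part of the original source: the pindex of the
 * page table page backing each level for a given va, written out as plain
 * arithmetic.  The helper below is hypothetical and left disabled; it only
 * restates the scheme described above in terms of pmap_pde_pindex() and the
 * NUPDE/NUPDPE/NUPML4E constants.
 */
#if 0
static __inline vm_pindex_t
pmap_ptp_pindex_sketch(vm_offset_t va, int height)
{
	vm_pindex_t pdeidx;

	pdeidx = pmap_pde_pindex(va);		/* va >> PDRSHIFT */
	switch (height) {
	case 1:			/* page table page */
		return (pdeidx);
	case 2:			/* page directory page */
		return (NUPDE + (pdeidx >> NPDEPGSHIFT));
	case 3:			/* page directory pointer page */
		return (NUPDE + NUPDPE +
		    (pdeidx >> (NPDEPGSHIFT + NPML4EPGSHIFT)));
	default:		/* PML4 page, LA57 mode only */
		return (NUPDE + NUPDPE + NUPML4E +
		    (pdeidx >> (NPDEPGSHIFT + NPML4EPGSHIFT + NPML5EPGSHIFT)));
	}
}
#endif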
4393 pmap_allocpte_nosleep(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
4396 vm_pindex_t pml5index, pml4index;
4397 pml5_entry_t *pml5, *pml5u;
4398 pml4_entry_t *pml4, *pml4u;
4402 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
4404 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4406 PG_A = pmap_accessed_bit(pmap);
4407 PG_M = pmap_modified_bit(pmap);
4408 PG_V = pmap_valid_bit(pmap);
4409 PG_RW = pmap_rw_bit(pmap);
4412 * Allocate a page table page.
4414 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
4415 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
4417 if ((m->flags & PG_ZERO) == 0)
4421 * Map the pagetable page into the process address space, if
4422 * it isn't already there.
4424 if (ptepindex >= NUPDE + NUPDPE + NUPML4E) {
4425 MPASS(pmap_is_la57(pmap));
4427 pml5index = pmap_pml5e_index(va);
4428 pml5 = &pmap->pm_pmltop[pml5index];
4429 KASSERT((*pml5 & PG_V) == 0,
4430 ("pmap %p va %#lx pml5 %#lx", pmap, va, *pml5));
4431 *pml5 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4433 if (pmap->pm_pmltopu != NULL && pml5index < NUPML5E) {
if (pmap->pm_ucr3 != PMAP_NO_CR3)
	*pml5 |= pg_nx;
4437 pml5u = &pmap->pm_pmltopu[pml5index];
4438 *pml5u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
4441 } else if (ptepindex >= NUPDE + NUPDPE) {
4442 pml4index = pmap_pml4e_index(va);
4443 /* Wire up a new PDPE page */
4444 pml4 = pmap_allocpte_getpml4(pmap, lockp, va, true);
4446 vm_page_unwire_noq(m);
4447 vm_page_free_zero(m);
4450 KASSERT((*pml4 & PG_V) == 0,
4451 ("pmap %p va %#lx pml4 %#lx", pmap, va, *pml4));
4452 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4454 if (!pmap_is_la57(pmap) && pmap->pm_pmltopu != NULL &&
4455 pml4index < NUPML4E) {
/*
 * PTI: Make all user-space mappings in the
 * kernel-mode page table no-execute so that
 * we detect any programming errors that leave
 * the kernel-mode page table active on return
 * to user space.
 */
if (pmap->pm_ucr3 != PMAP_NO_CR3)
	*pml4 |= pg_nx;
4466 pml4u = &pmap->pm_pmltopu[pml4index];
4467 *pml4u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
4470 } else if (ptepindex >= NUPDE) {
4471 /* Wire up a new PDE page */
4472 pdp = pmap_allocpte_getpdp(pmap, lockp, va, true);
4474 vm_page_unwire_noq(m);
4475 vm_page_free_zero(m);
4478 KASSERT((*pdp & PG_V) == 0,
4479 ("pmap %p va %#lx pdp %#lx", pmap, va, *pdp));
4480 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4482 /* Wire up a new PTE page */
4483 pdp = pmap_allocpte_getpdp(pmap, lockp, va, false);
4485 vm_page_unwire_noq(m);
4486 vm_page_free_zero(m);
4489 if ((*pdp & PG_V) == 0) {
4490 /* Have to allocate a new pd, recurse */
4491 if (pmap_allocpte_nosleep(pmap, pmap_pdpe_pindex(va),
4492 lockp, va) == NULL) {
4493 pmap_allocpte_free_unref(pmap, va,
4494 pmap_pml4e(pmap, va));
4495 vm_page_unwire_noq(m);
4496 vm_page_free_zero(m);
4500 /* Add reference to the pd page */
4501 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
4504 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
4506 /* Now we know where the page directory page is */
4507 pd = &pd[pmap_pde_index(va)];
4508 KASSERT((*pd & PG_V) == 0,
4509 ("pmap %p va %#lx pd %#lx", pmap, va, *pd));
4510 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
4513 pmap_resident_count_inc(pmap, 1);
/*
 * This routine is called if the desired page table page does not exist.
 *
 * If page table page allocation fails, this routine may sleep before
 * returning NULL.  It sleeps only if a lock pointer was given.  Sleep
 * occurs right before returning to the caller.  This way, we never
 * drop pmap lock to sleep while a page table page has ref_count == 0,
 * which prevents the page from being freed under us.
 */
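/*
 * Illustrative caller pattern, not part of the original source: when a lock
 * pointer is supplied, a NULL return means the pmap lock was dropped and
 * reacquired, so the caller must re-examine the page table before retrying,
 * as pmap_allocpte() later in this file does.
 */
#if 0
retry:
	pd = pmap_pde(pmap, va);
	if (pd == NULL || (*pd & PG_V) == 0) {
		m = pmap_allocpte_alloc(pmap, pmap_pde_pindex(va), &lock, va);
		if (m == NULL)
			goto retry;	/* lock was dropped; revalidate */
	}
#endif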
4527 pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
4532 m = pmap_allocpte_nosleep(pmap, ptepindex, lockp, va);
4533 if (m == NULL && lockp != NULL) {
4534 RELEASE_PV_LIST_LOCK(lockp);
4536 PMAP_ASSERT_NOT_IN_DI();
4544 pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
4545 struct rwlock **lockp)
4547 pdp_entry_t *pdpe, PG_V;
4550 vm_pindex_t pdpindex;
4552 PG_V = pmap_valid_bit(pmap);
4555 pdpe = pmap_pdpe(pmap, va);
4556 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
4557 pde = pmap_pdpe_to_pde(pdpe, va);
4558 if (va < VM_MAXUSER_ADDRESS) {
4559 /* Add a reference to the pd page. */
4560 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
4564 } else if (va < VM_MAXUSER_ADDRESS) {
4565 /* Allocate a pd page. */
4566 pdpindex = pmap_pde_pindex(va) >> NPDPEPGSHIFT;
4567 pdpg = pmap_allocpte_alloc(pmap, NUPDE + pdpindex, lockp, va);
4574 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4575 pde = &pde[pmap_pde_index(va)];
4577 panic("pmap_alloc_pde: missing page table page for va %#lx",
4584 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4586 vm_pindex_t ptepindex;
4587 pd_entry_t *pd, PG_V;
4590 PG_V = pmap_valid_bit(pmap);
4593 * Calculate pagetable page index
4595 ptepindex = pmap_pde_pindex(va);
4598 * Get the page directory entry
4600 pd = pmap_pde(pmap, va);
/*
 * This supports switching from a 2MB page to a
 * normal 4K page.
 */
4606 if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
4607 if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) {
4609 * Invalidation of the 2MB page mapping may have caused
4610 * the deallocation of the underlying PD page.
4617 * If the page table page is mapped, we just increment the
4618 * hold count, and activate it.
4620 if (pd != NULL && (*pd & PG_V) != 0) {
4621 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
/*
 * Here if the pte page isn't mapped, or if it has been
 * deallocated.
 */
4628 m = pmap_allocpte_alloc(pmap, ptepindex, lockp, va);
4629 if (m == NULL && lockp != NULL)
4635 /***************************************************
4636 * Pmap allocation/deallocation routines.
4637 ***************************************************/
4640 * Release any resources held by the given physical map.
4641 * Called when a pmap initialized by pmap_pinit is being released.
4642 * Should only be called if the map contains no valid mappings.
4645 pmap_release(pmap_t pmap)
4650 KASSERT(pmap->pm_stats.resident_count == 0,
4651 ("pmap_release: pmap %p resident count %ld != 0",
4652 pmap, pmap->pm_stats.resident_count));
4653 KASSERT(vm_radix_is_empty(&pmap->pm_root),
4654 ("pmap_release: pmap %p has reserved page table page(s)",
4656 KASSERT(CPU_EMPTY(&pmap->pm_active),
4657 ("releasing active pmap %p", pmap));
4659 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pmltop));
4661 if (pmap_is_la57(pmap)) {
4662 pmap->pm_pmltop[pmap_pml5e_index(UPT_MAX_ADDRESS)] = 0;
4663 pmap->pm_pmltop[PML5PML5I] = 0;
4665 for (i = 0; i < NKPML4E; i++) /* KVA */
4666 pmap->pm_pmltop[KPML4BASE + i] = 0;
4667 for (i = 0; i < ndmpdpphys; i++)/* Direct Map */
4668 pmap->pm_pmltop[DMPML4I + i] = 0;
4669 pmap->pm_pmltop[PML4PML4I] = 0; /* Recursive Mapping */
4670 for (i = 0; i < lm_ents; i++) /* Large Map */
4671 pmap->pm_pmltop[LMSPML4I + i] = 0;
4674 vm_page_unwire_noq(m);
4675 vm_page_free_zero(m);
4677 if (pmap->pm_pmltopu != NULL) {
4678 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->
4680 vm_page_unwire_noq(m);
4683 if (pmap->pm_type == PT_X86 &&
4684 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
4685 rangeset_fini(&pmap->pm_pkru);
4689 kvm_size(SYSCTL_HANDLER_ARGS)
4691 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
4693 return sysctl_handle_long(oidp, &ksize, 0, req);
4695 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4696 0, 0, kvm_size, "LU",
4700 kvm_free(SYSCTL_HANDLER_ARGS)
4702 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
4704 return sysctl_handle_long(oidp, &kfree, 0, req);
4706 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
4707 0, 0, kvm_free, "LU",
4708 "Amount of KVM free");
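/*
 * Editorial note, not part of the original source: both values are exported
 * as read-only sysctls and can be inspected from userland, e.g.:
 *
 *	$ sysctl vm.kvm_size vm.kvm_free
 */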
4711 * Allocate physical memory for the vm_page array and map it into KVA,
4712 * attempting to back the vm_pages with domain-local memory.
4715 pmap_page_array_startup(long pages)
4718 pd_entry_t *pde, newpdir;
4719 vm_offset_t va, start, end;
4724 vm_page_array_size = pages;
4726 start = VM_MIN_KERNEL_ADDRESS;
4727 end = start + pages * sizeof(struct vm_page);
4728 for (va = start; va < end; va += NBPDR) {
4729 pfn = first_page + (va - start) / sizeof(struct vm_page);
4730 domain = vm_phys_domain(ptoa(pfn));
4731 pdpe = pmap_pdpe(kernel_pmap, va);
4732 if ((*pdpe & X86_PG_V) == 0) {
4733 pa = vm_phys_early_alloc(domain, PAGE_SIZE);
4735 pagezero((void *)PHYS_TO_DMAP(pa));
4736 *pdpe = (pdp_entry_t)(pa | X86_PG_V | X86_PG_RW |
4737 X86_PG_A | X86_PG_M);
4739 pde = pmap_pdpe_to_pde(pdpe, va);
4740 if ((*pde & X86_PG_V) != 0)
4741 panic("Unexpected pde");
4742 pa = vm_phys_early_alloc(domain, NBPDR);
4743 for (i = 0; i < NPDEPG; i++)
4744 dump_add_page(pa + i * PAGE_SIZE);
4745 newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A |
4746 X86_PG_M | PG_PS | pg_g | pg_nx);
4747 pde_store(pde, newpdir);
4749 vm_page_array = (vm_page_t)start;
4753 * grow the number of kernel page table entries, if needed
4756 pmap_growkernel(vm_offset_t addr)
4760 pd_entry_t *pde, newpdir;
4763 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
4766 * Return if "addr" is within the range of kernel page table pages
4767 * that were preallocated during pmap bootstrap. Moreover, leave
4768 * "kernel_vm_end" and the kernel page table as they were.
4770 * The correctness of this action is based on the following
4771 * argument: vm_map_insert() allocates contiguous ranges of the
4772 * kernel virtual address space. It calls this function if a range
4773 * ends after "kernel_vm_end". If the kernel is mapped between
4774 * "kernel_vm_end" and "addr", then the range cannot begin at
4775 * "kernel_vm_end". In fact, its beginning address cannot be less
4776 * than the kernel. Thus, there is no immediate need to allocate
 * any new kernel page table pages between "kernel_vm_end" and
 * "addr".
 */
if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR)
	return;
4783 addr = roundup2(addr, NBPDR);
4784 if (addr - 1 >= vm_map_max(kernel_map))
4785 addr = vm_map_max(kernel_map);
4786 while (kernel_vm_end < addr) {
4787 pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
4788 if ((*pdpe & X86_PG_V) == 0) {
4789 /* We need a new PDP entry */
4790 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
4791 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
4792 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
4794 panic("pmap_growkernel: no memory to grow kernel");
4795 if ((nkpg->flags & PG_ZERO) == 0)
4796 pmap_zero_page(nkpg);
4797 paddr = VM_PAGE_TO_PHYS(nkpg);
4798 *pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
4799 X86_PG_A | X86_PG_M);
4800 continue; /* try again */
4802 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
4803 if ((*pde & X86_PG_V) != 0) {
4804 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
4805 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
4806 kernel_vm_end = vm_map_max(kernel_map);
4812 nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
4813 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
4816 panic("pmap_growkernel: no memory to grow kernel");
4817 if ((nkpg->flags & PG_ZERO) == 0)
4818 pmap_zero_page(nkpg);
4819 paddr = VM_PAGE_TO_PHYS(nkpg);
4820 newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
4821 pde_store(pde, newpdir);
4823 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
4824 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
4825 kernel_vm_end = vm_map_max(kernel_map);
4831 /***************************************************
4832 * page management routines.
4833 ***************************************************/
4835 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
4836 CTASSERT(_NPCM == 3);
4837 CTASSERT(_NPCPV == 168);
4839 static __inline struct pv_chunk *
4840 pv_to_chunk(pv_entry_t pv)
4843 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
4846 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
4848 #define PC_FREE0 0xfffffffffffffffful
4849 #define PC_FREE1 0xfffffffffffffffful
4850 #define PC_FREE2 0x000000fffffffffful
4852 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
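/*
 * Editorial note, not part of the original source: each pv_chunk occupies
 * exactly one page (see the CTASSERTs above) and carries _NPCPV pv entries,
 * tracked by the three-word free mask.  PC_FREE0 and PC_FREE1 contribute 64
 * set bits each and PC_FREE2 contributes 40 (0x000000ffffffffff), so
 * 64 + 64 + 40 = 168 = _NPCPV.
 */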
4855 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
4857 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
4858 "Current number of pv entry chunks");
4859 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
4860 "Current number of pv entry chunks allocated");
4861 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
4862 "Current number of pv entry chunks frees");
4863 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
4864 "Number of times tried to get a chunk page but failed.");
4866 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
4867 static int pv_entry_spare;
4869 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
4870 "Current number of pv entry frees");
4871 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
4872 "Current number of pv entry allocs");
4873 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
4874 "Current number of pv entries");
4875 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
4876 "Current number of spare pv entries");
4880 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di)
4885 pmap_invalidate_all(pmap);
4886 if (pmap != locked_pmap)
4889 pmap_delayed_invl_finish();
4893 * We are in a serious low memory condition. Resort to
4894 * drastic measures to free some pages so we can allocate
4895 * another pv entry chunk.
4897 * Returns NULL if PV entries were reclaimed from the specified pmap.
4899 * We do not, however, unmap 2mpages because subsequent accesses will
4900 * allocate per-page pv entries until repromotion occurs, thereby
4901 * exacerbating the shortage of free pv entries.
4904 reclaim_pv_chunk_domain(pmap_t locked_pmap, struct rwlock **lockp, int domain)
4906 struct pv_chunks_list *pvc;
4907 struct pv_chunk *pc, *pc_marker, *pc_marker_end;
4908 struct pv_chunk_header pc_marker_b, pc_marker_end_b;
4909 struct md_page *pvh;
4911 pmap_t next_pmap, pmap;
4912 pt_entry_t *pte, tpte;
4913 pt_entry_t PG_G, PG_A, PG_M, PG_RW;
4917 struct spglist free;
4919 int bit, field, freed;
4920 bool start_di, restart;
4922 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
4923 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
4926 PG_G = PG_A = PG_M = PG_RW = 0;
4928 bzero(&pc_marker_b, sizeof(pc_marker_b));
4929 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
4930 pc_marker = (struct pv_chunk *)&pc_marker_b;
4931 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
4934 * A delayed invalidation block should already be active if
4935 * pmap_advise() or pmap_remove() called this function by way
4936 * of pmap_demote_pde_locked().
4938 start_di = pmap_not_in_di();
4940 pvc = &pv_chunks[domain];
4941 mtx_lock(&pvc->pvc_lock);
4942 pvc->active_reclaims++;
4943 TAILQ_INSERT_HEAD(&pvc->pvc_list, pc_marker, pc_lru);
4944 TAILQ_INSERT_TAIL(&pvc->pvc_list, pc_marker_end, pc_lru);
4945 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
4946 SLIST_EMPTY(&free)) {
4947 next_pmap = pc->pc_pmap;
4948 if (next_pmap == NULL) {
4950 * The next chunk is a marker. However, it is
4951 * not our marker, so active_reclaims must be
4952 * > 1. Consequently, the next_chunk code
4953 * will not rotate the pv_chunks list.
4957 mtx_unlock(&pvc->pvc_lock);
4960 * A pv_chunk can only be removed from the pc_lru list
4961 * when both pc_chunks_mutex is owned and the
4962 * corresponding pmap is locked.
4964 if (pmap != next_pmap) {
4966 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
4969 /* Avoid deadlock and lock recursion. */
4970 if (pmap > locked_pmap) {
4971 RELEASE_PV_LIST_LOCK(lockp);
4974 pmap_delayed_invl_start();
4975 mtx_lock(&pvc->pvc_lock);
4977 } else if (pmap != locked_pmap) {
4978 if (PMAP_TRYLOCK(pmap)) {
4980 pmap_delayed_invl_start();
4981 mtx_lock(&pvc->pvc_lock);
4984 pmap = NULL; /* pmap is not locked */
4985 mtx_lock(&pvc->pvc_lock);
4986 pc = TAILQ_NEXT(pc_marker, pc_lru);
4988 pc->pc_pmap != next_pmap)
4992 } else if (start_di)
4993 pmap_delayed_invl_start();
4994 PG_G = pmap_global_bit(pmap);
4995 PG_A = pmap_accessed_bit(pmap);
4996 PG_M = pmap_modified_bit(pmap);
4997 PG_RW = pmap_rw_bit(pmap);
5003 * Destroy every non-wired, 4 KB page mapping in the chunk.
5006 for (field = 0; field < _NPCM; field++) {
5007 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
5008 inuse != 0; inuse &= ~(1UL << bit)) {
5010 pv = &pc->pc_pventry[field * 64 + bit];
5012 pde = pmap_pde(pmap, va);
5013 if ((*pde & PG_PS) != 0)
5015 pte = pmap_pde_to_pte(pde, va);
5016 if ((*pte & PG_W) != 0)
5018 tpte = pte_load_clear(pte);
5019 if ((tpte & PG_G) != 0)
5020 pmap_invalidate_page(pmap, va);
5021 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
5022 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5024 if ((tpte & PG_A) != 0)
5025 vm_page_aflag_set(m, PGA_REFERENCED);
5026 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5027 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5029 if (TAILQ_EMPTY(&m->md.pv_list) &&
5030 (m->flags & PG_FICTITIOUS) == 0) {
5031 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5032 if (TAILQ_EMPTY(&pvh->pv_list)) {
5033 vm_page_aflag_clear(m,
5037 pmap_delayed_invl_page(m);
5038 pc->pc_map[field] |= 1UL << bit;
5039 pmap_unuse_pt(pmap, va, *pde, &free);
5044 mtx_lock(&pvc->pvc_lock);
5047 /* Every freed mapping is for a 4 KB page. */
5048 pmap_resident_count_dec(pmap, freed);
5049 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5050 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5051 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5052 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5053 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
5054 pc->pc_map[2] == PC_FREE2) {
5055 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
5056 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
5057 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
5058 /* Entire chunk is free; return it. */
5059 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
5060 dump_drop_page(m_pc->phys_addr);
5061 mtx_lock(&pvc->pvc_lock);
5062 TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5065 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5066 mtx_lock(&pvc->pvc_lock);
5067 /* One freed pv entry in locked_pmap is sufficient. */
5068 if (pmap == locked_pmap)
5071 TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
5072 TAILQ_INSERT_AFTER(&pvc->pvc_list, pc, pc_marker, pc_lru);
5073 if (pvc->active_reclaims == 1 && pmap != NULL) {
5075 * Rotate the pv chunks list so that we do not
5076 * scan the same pv chunks that could not be
5077 * freed (because they contained a wired
5078 * and/or superpage mapping) on every
5079 * invocation of reclaim_pv_chunk().
5081 while ((pc = TAILQ_FIRST(&pvc->pvc_list)) != pc_marker) {
5082 MPASS(pc->pc_pmap != NULL);
5083 TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5084 TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
5088 TAILQ_REMOVE(&pvc->pvc_list, pc_marker, pc_lru);
5089 TAILQ_REMOVE(&pvc->pvc_list, pc_marker_end, pc_lru);
5090 pvc->active_reclaims--;
5091 mtx_unlock(&pvc->pvc_lock);
5092 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
5093 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
5094 m_pc = SLIST_FIRST(&free);
5095 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
5096 /* Recycle a freed page table page. */
5097 m_pc->ref_count = 1;
5099 vm_page_free_pages_toq(&free, true);
5104 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
5109 domain = PCPU_GET(domain);
5110 for (i = 0; i < vm_ndomains; i++) {
5111 m = reclaim_pv_chunk_domain(locked_pmap, lockp, domain);
5114 domain = (domain + 1) % vm_ndomains;
5121 * free the pv_entry back to the free list
5124 free_pv_entry(pmap_t pmap, pv_entry_t pv)
5126 struct pv_chunk *pc;
5127 int idx, field, bit;
5129 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5130 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
5131 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
5132 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
5133 pc = pv_to_chunk(pv);
idx = pv - &pc->pc_pventry[0];
field = idx / 64;
bit = idx % 64;
pc->pc_map[field] |= 1ul << bit;
5138 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
5139 pc->pc_map[2] != PC_FREE2) {
5140 /* 98% of the time, pc is already at the head of the list. */
5141 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
5142 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5143 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5147 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5152 free_pv_chunk_dequeued(struct pv_chunk *pc)
5156 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
5157 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
5158 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
5159 /* entire chunk is free, return it */
5160 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
5161 dump_drop_page(m->phys_addr);
5162 vm_page_unwire_noq(m);
5167 free_pv_chunk(struct pv_chunk *pc)
5169 struct pv_chunks_list *pvc;
5171 pvc = &pv_chunks[pc_to_domain(pc)];
5172 mtx_lock(&pvc->pvc_lock);
5173 TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5174 mtx_unlock(&pvc->pvc_lock);
5175 free_pv_chunk_dequeued(pc);
5179 free_pv_chunk_batch(struct pv_chunklist *batch)
5181 struct pv_chunks_list *pvc;
5182 struct pv_chunk *pc, *npc;
5185 for (i = 0; i < vm_ndomains; i++) {
5186 if (TAILQ_EMPTY(&batch[i]))
5188 pvc = &pv_chunks[i];
5189 mtx_lock(&pvc->pvc_lock);
5190 TAILQ_FOREACH(pc, &batch[i], pc_list) {
5191 TAILQ_REMOVE(&pvc->pvc_list, pc, pc_lru);
5193 mtx_unlock(&pvc->pvc_lock);
5196 for (i = 0; i < vm_ndomains; i++) {
5197 TAILQ_FOREACH_SAFE(pc, &batch[i], pc_list, npc) {
5198 free_pv_chunk_dequeued(pc);
/*
 * Returns a new PV entry, allocating a new PV chunk from the system when
 * needed.  If this PV chunk allocation fails and a PV list lock pointer was
 * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
 * returned.
 *
 * The given PV list lock may be released.
 */
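/*
 * Illustrative use, not part of the original source: a typical caller
 * allocates the entry, records the virtual address, and links the entry onto
 * the page's pv list while holding that page's pv list lock, much as
 * pmap_try_insert_pv_entry() below does with reclamation disabled.
 */
#if 0
	pv = get_pv_entry(pmap, &lock);
	pv->pv_va = va;
	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
	m->md.pv_gen++;
#endif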
5212 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
5214 struct pv_chunks_list *pvc;
5217 struct pv_chunk *pc;
5220 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5221 PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
5223 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
5225 for (field = 0; field < _NPCM; field++) {
5226 if (pc->pc_map[field]) {
5227 bit = bsfq(pc->pc_map[field]);
5231 if (field < _NPCM) {
5232 pv = &pc->pc_pventry[field * 64 + bit];
5233 pc->pc_map[field] &= ~(1ul << bit);
5234 /* If this was the last item, move it to tail */
5235 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
5236 pc->pc_map[2] == 0) {
5237 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5238 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
5241 PV_STAT(atomic_add_long(&pv_entry_count, 1));
5242 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
5246 /* No free items, allocate another chunk */
5247 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
5250 if (lockp == NULL) {
5251 PV_STAT(pc_chunk_tryfail++);
5254 m = reclaim_pv_chunk(pmap, lockp);
5258 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
5259 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
5260 dump_add_page(m->phys_addr);
5261 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
5263 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
5264 pc->pc_map[1] = PC_FREE1;
5265 pc->pc_map[2] = PC_FREE2;
5266 pvc = &pv_chunks[vm_page_domain(m)];
5267 mtx_lock(&pvc->pvc_lock);
5268 TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
5269 mtx_unlock(&pvc->pvc_lock);
5270 pv = &pc->pc_pventry[0];
5271 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5272 PV_STAT(atomic_add_long(&pv_entry_count, 1));
5273 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
/*
 * Returns the number of one bits within the given PV chunk map.
 *
 * The errata for Intel processors state that "POPCNT Instruction May
 * Take Longer to Execute Than Expected".  It is believed that the
 * issue is the spurious dependency on the destination register.
 * Provide a hint to the register rename logic that the destination
 * value is overwritten, by clearing it, as suggested in the
 * optimization manual.  It should be cheap for unaffected processors
 * as well.
 *
 * Reference numbers for the errata are
 * 4th Gen Core: HSD146
 * 5th Gen Core: BDM85
 * 6th Gen Core: SKL029
 */
5294 popcnt_pc_map_pq(uint64_t *map)
5298 __asm __volatile("xorl %k0,%k0;popcntq %2,%0;"
5299 "xorl %k1,%k1;popcntq %3,%1;addl %k1,%k0;"
5300 "xorl %k1,%k1;popcntq %4,%1;addl %k1,%k0"
5301 : "=&r" (result), "=&r" (tmp)
5302 : "m" (map[0]), "m" (map[1]), "m" (map[2]));
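/*
 * Illustrative equivalent, not part of the original source: without the
 * erratum workaround, popcnt_pc_map_pq() above is simply three population
 * counts.  __builtin_popcountl() is used here purely for exposition.
 */
#if 0
static int
popcnt_pc_map_plain(uint64_t *map)
{

	return (__builtin_popcountl(map[0]) + __builtin_popcountl(map[1]) +
	    __builtin_popcountl(map[2]));
}
#endif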
/*
 * Ensure that the number of spare PV entries in the specified pmap meets or
 * exceeds the given count, "needed".
 *
 * The given PV list lock may be released.
 */
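/*
 * Editorial note, not part of the original source: e.g., the 2MB demotion
 * path below reserves NPTEPG - 1 = 511 entries; starting with no spares that
 * is satisfied by at most four newly allocated chunks, since
 * 4 * _NPCPV = 672 >= 511.
 */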
5313 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
5315 struct pv_chunks_list *pvc;
5316 struct pch new_tail[PMAP_MEMDOM];
5317 struct pv_chunk *pc;
5322 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5323 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
5326 * Newly allocated PV chunks must be stored in a private list until
5327 * the required number of PV chunks have been allocated. Otherwise,
5328 * reclaim_pv_chunk() could recycle one of these chunks. In
5329 * contrast, these chunks must be added to the pmap upon allocation.
5331 for (i = 0; i < PMAP_MEMDOM; i++)
5332 TAILQ_INIT(&new_tail[i]);
5335 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
5337 if ((cpu_feature2 & CPUID2_POPCNT) == 0)
5338 bit_count((bitstr_t *)pc->pc_map, 0,
5339 sizeof(pc->pc_map) * NBBY, &free);
5342 free = popcnt_pc_map_pq(pc->pc_map);
5346 if (avail >= needed)
5349 for (reclaimed = false; avail < needed; avail += _NPCPV) {
5350 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
5353 m = reclaim_pv_chunk(pmap, lockp);
5358 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
5359 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
5360 dump_add_page(m->phys_addr);
5361 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
5363 pc->pc_map[0] = PC_FREE0;
5364 pc->pc_map[1] = PC_FREE1;
5365 pc->pc_map[2] = PC_FREE2;
5366 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
5367 TAILQ_INSERT_TAIL(&new_tail[vm_page_domain(m)], pc, pc_lru);
5368 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
5371 * The reclaim might have freed a chunk from the current pmap.
5372 * If that chunk contained available entries, we need to
5373 * re-count the number of available entries.
5378 for (i = 0; i < vm_ndomains; i++) {
5379 if (TAILQ_EMPTY(&new_tail[i]))
5381 pvc = &pv_chunks[i];
5382 mtx_lock(&pvc->pvc_lock);
5383 TAILQ_CONCAT(&pvc->pvc_list, &new_tail[i], pc_lru);
5384 mtx_unlock(&pvc->pvc_lock);
5389 * First find and then remove the pv entry for the specified pmap and virtual
5390 * address from the specified pv list. Returns the pv entry if found and NULL
5391 * otherwise. This operation can be performed on pv lists for either 4KB or
5392 * 2MB page mappings.
5394 static __inline pv_entry_t
5395 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
5399 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5400 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
5401 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5410 * After demotion from a 2MB page mapping to 512 4KB page mappings,
5411 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
5412 * entries for each of the 4KB page mappings.
5415 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
5416 struct rwlock **lockp)
5418 struct md_page *pvh;
5419 struct pv_chunk *pc;
5421 vm_offset_t va_last;
5425 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5426 KASSERT((pa & PDRMASK) == 0,
5427 ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
5428 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5431 * Transfer the 2mpage's pv entry for this mapping to the first
5432 * page's pv list. Once this transfer begins, the pv list lock
5433 * must not be released until the last pv entry is reinstantiated.
5435 pvh = pa_to_pvh(pa);
5436 va = trunc_2mpage(va);
5437 pv = pmap_pvh_remove(pvh, pmap, va);
5438 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
5439 m = PHYS_TO_VM_PAGE(pa);
5440 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5442 /* Instantiate the remaining NPTEPG - 1 pv entries. */
5443 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
5444 va_last = va + NBPDR - PAGE_SIZE;
5446 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
5447 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
5448 pc->pc_map[2] != 0, ("pmap_pv_demote_pde: missing spare"));
5449 for (field = 0; field < _NPCM; field++) {
5450 while (pc->pc_map[field]) {
5451 bit = bsfq(pc->pc_map[field]);
5452 pc->pc_map[field] &= ~(1ul << bit);
5453 pv = &pc->pc_pventry[field * 64 + bit];
5457 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5458 ("pmap_pv_demote_pde: page %p is not managed", m));
5459 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5465 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5466 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
5469 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
5470 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5471 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
5473 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
5474 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
5477 #if VM_NRESERVLEVEL > 0
5479 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
5480 * replace the many pv entries for the 4KB page mappings by a single pv entry
5481 * for the 2MB page mapping.
5484 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
5485 struct rwlock **lockp)
5487 struct md_page *pvh;
5489 vm_offset_t va_last;
5492 KASSERT((pa & PDRMASK) == 0,
5493 ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
5494 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5497 * Transfer the first page's pv entry for this mapping to the 2mpage's
5498 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
5499 * a transfer avoids the possibility that get_pv_entry() calls
5500 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
5501 * mappings that is being promoted.
5503 m = PHYS_TO_VM_PAGE(pa);
5504 va = trunc_2mpage(va);
5505 pv = pmap_pvh_remove(&m->md, pmap, va);
5506 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
5507 pvh = pa_to_pvh(pa);
5508 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5510 /* Free the remaining NPTEPG - 1 pv entries. */
5511 va_last = va + NBPDR - PAGE_SIZE;
5515 pmap_pvh_free(&m->md, pmap, va);
5516 } while (va < va_last);
5518 #endif /* VM_NRESERVLEVEL > 0 */
/*
 * First find and then destroy the pv entry for the specified pmap and virtual
 * address.  This operation can be performed on pv lists for either 4KB or 2MB
 * page mappings.
 */
5526 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
5530 pv = pmap_pvh_remove(pvh, pmap, va);
5531 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
5532 free_pv_entry(pmap, pv);
5536 * Conditionally create the PV entry for a 4KB page mapping if the required
5537 * memory can be allocated without resorting to reclamation.
5540 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
5541 struct rwlock **lockp)
5545 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5546 /* Pass NULL instead of the lock pointer to disable reclamation. */
5547 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
5549 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5550 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
5558 * Create the PV entry for a 2MB page mapping. Always returns true unless the
5559 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
5560 * false if the PV entry cannot be allocated without resorting to reclamation.
5563 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags,
5564 struct rwlock **lockp)
5566 struct md_page *pvh;
5570 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5571 /* Pass NULL instead of the lock pointer to disable reclamation. */
5572 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
5573 NULL : lockp)) == NULL)
5576 pa = pde & PG_PS_FRAME;
5577 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
5578 pvh = pa_to_pvh(pa);
5579 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
5585 * Fills a page table page with mappings to consecutive physical pages.
5588 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
	*pte = newpte;
	newpte += PAGE_SIZE;
}
5599 * Tries to demote a 2MB page mapping. If demotion fails, the 2MB page
5600 * mapping is invalidated.
5603 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
5605 struct rwlock *lock;
5609 rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
5616 pmap_demote_pde_check(pt_entry_t *firstpte __unused, pt_entry_t newpte __unused)
5620 pt_entry_t *xpte, *ypte;
5622 for (xpte = firstpte; xpte < firstpte + NPTEPG;
5623 xpte++, newpte += PAGE_SIZE) {
5624 if ((*xpte & PG_FRAME) != (newpte & PG_FRAME)) {
5625 printf("pmap_demote_pde: xpte %zd and newpte map "
5626 "different pages: found %#lx, expected %#lx\n",
5627 xpte - firstpte, *xpte, newpte);
5628 printf("page table dump\n");
5629 for (ypte = firstpte; ypte < firstpte + NPTEPG; ypte++)
5630 printf("%zd %#lx\n", ypte - firstpte, *ypte);
5635 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
5636 ("pmap_demote_pde: firstpte and newpte map different physical"
5643 pmap_demote_pde_abort(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
5644 pd_entry_t oldpde, struct rwlock **lockp)
5646 struct spglist free;
5650 sva = trunc_2mpage(va);
5651 pmap_remove_pde(pmap, pde, sva, &free, lockp);
5652 if ((oldpde & pmap_global_bit(pmap)) == 0)
5653 pmap_invalidate_pde_page(pmap, sva, oldpde);
5654 vm_page_free_pages_toq(&free, true);
5655 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx in pmap %p",
5660 pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
5661 struct rwlock **lockp)
5663 pd_entry_t newpde, oldpde;
5664 pt_entry_t *firstpte, newpte;
5665 pt_entry_t PG_A, PG_G, PG_M, PG_PKU_MASK, PG_RW, PG_V;
5671 PG_A = pmap_accessed_bit(pmap);
5672 PG_G = pmap_global_bit(pmap);
5673 PG_M = pmap_modified_bit(pmap);
5674 PG_RW = pmap_rw_bit(pmap);
5675 PG_V = pmap_valid_bit(pmap);
5676 PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
5677 PG_PKU_MASK = pmap_pku_mask_bit(pmap);
5679 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5680 in_kernel = va >= VM_MAXUSER_ADDRESS;
5682 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
5683 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
5686 * Invalidate the 2MB page mapping and return "failure" if the
5687 * mapping was never accessed.
5689 if ((oldpde & PG_A) == 0) {
5690 KASSERT((oldpde & PG_W) == 0,
5691 ("pmap_demote_pde: a wired mapping is missing PG_A"));
5692 pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
5696 mpte = pmap_remove_pt_page(pmap, va);
5698 KASSERT((oldpde & PG_W) == 0,
5699 ("pmap_demote_pde: page table page for a wired mapping"
5703 * If the page table page is missing and the mapping
5704 * is for a kernel address, the mapping must belong to
5705 * the direct map. Page table pages are preallocated
5706 * for every other part of the kernel address space,
5707 * so the direct map region is the only part of the
5708 * kernel address space that must be handled here.
5710 KASSERT(!in_kernel || (va >= DMAP_MIN_ADDRESS &&
5711 va < DMAP_MAX_ADDRESS),
5712 ("pmap_demote_pde: No saved mpte for va %#lx", va));
5715 * If the 2MB page mapping belongs to the direct map
5716 * region of the kernel's address space, then the page
5717 * allocation request specifies the highest possible
5718 * priority (VM_ALLOC_INTERRUPT). Otherwise, the
5719 * priority is normal.
5721 mpte = vm_page_alloc(NULL, pmap_pde_pindex(va),
5722 (in_kernel ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
5723 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
5726 * If the allocation of the new page table page fails,
5727 * invalidate the 2MB page mapping and return "failure".
5730 pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
5735 mpte->ref_count = NPTEPG;
5736 pmap_resident_count_inc(pmap, 1);
5739 mptepa = VM_PAGE_TO_PHYS(mpte);
5740 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
5741 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
5742 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
5743 ("pmap_demote_pde: oldpde is missing PG_M"));
5744 newpte = oldpde & ~PG_PS;
5745 newpte = pmap_swap_pat(pmap, newpte);
/*
 * If the page table page is not leftover from an earlier promotion,
 * initialize it.
 */
5751 if (mpte->valid == 0)
5752 pmap_fill_ptp(firstpte, newpte);
5754 pmap_demote_pde_check(firstpte, newpte);
/*
 * If the mapping has changed attributes, update the page table
 * entries.
 */
5760 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
5761 pmap_fill_ptp(firstpte, newpte);
5764 * The spare PV entries must be reserved prior to demoting the
5765 * mapping, that is, prior to changing the PDE. Otherwise, the state
5766 * of the PDE and the PV lists will be inconsistent, which can result
5767 * in reclaim_pv_chunk() attempting to remove a PV entry from the
5768 * wrong PV list and pmap_pv_demote_pde() failing to find the expected
5769 * PV entry for the 2MB page mapping that is being demoted.
5771 if ((oldpde & PG_MANAGED) != 0)
5772 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
5775 * Demote the mapping. This pmap is locked. The old PDE has
5776 * PG_A set. If the old PDE has PG_RW set, it also has PG_M
5777 * set. Thus, there is no danger of a race with another
5778 * processor changing the setting of PG_A and/or PG_M between
5779 * the read above and the store below.
5781 if (workaround_erratum383)
5782 pmap_update_pde(pmap, va, pde, newpde);
5784 pde_store(pde, newpde);
5787 * Invalidate a stale recursive mapping of the page table page.
5790 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
5793 * Demote the PV entry.
5795 if ((oldpde & PG_MANAGED) != 0)
5796 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp);
5798 atomic_add_long(&pmap_pde_demotions, 1);
5799 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx in pmap %p",
5805 * pmap_remove_kernel_pde: Remove a kernel superpage mapping.
5808 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
5814 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
5815 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5816 mpte = pmap_remove_pt_page(pmap, va);
5818 panic("pmap_remove_kernel_pde: Missing pt page.");
5820 mptepa = VM_PAGE_TO_PHYS(mpte);
5821 newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
5824 * If this page table page was unmapped by a promotion, then it
5825 * contains valid mappings. Zero it to invalidate those mappings.
5827 if (mpte->valid != 0)
5828 pagezero((void *)PHYS_TO_DMAP(mptepa));
5831 * Demote the mapping.
5833 if (workaround_erratum383)
5834 pmap_update_pde(pmap, va, pde, newpde);
5836 pde_store(pde, newpde);
5839 * Invalidate a stale recursive mapping of the page table page.
5841 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
5845 * pmap_remove_pde: do the things to unmap a superpage in a process
5848 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
5849 struct spglist *free, struct rwlock **lockp)
5851 struct md_page *pvh;
5853 vm_offset_t eva, va;
5855 pt_entry_t PG_G, PG_A, PG_M, PG_RW;
5857 PG_G = pmap_global_bit(pmap);
5858 PG_A = pmap_accessed_bit(pmap);
5859 PG_M = pmap_modified_bit(pmap);
5860 PG_RW = pmap_rw_bit(pmap);
5862 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5863 KASSERT((sva & PDRMASK) == 0,
5864 ("pmap_remove_pde: sva is not 2mpage aligned"));
5865 oldpde = pte_load_clear(pdq);
5867 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
5868 if ((oldpde & PG_G) != 0)
5869 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
5870 pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
5871 if (oldpde & PG_MANAGED) {
5872 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
5873 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
5874 pmap_pvh_free(pvh, pmap, sva);
5876 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
5877 va < eva; va += PAGE_SIZE, m++) {
5878 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
5881 vm_page_aflag_set(m, PGA_REFERENCED);
5882 if (TAILQ_EMPTY(&m->md.pv_list) &&
5883 TAILQ_EMPTY(&pvh->pv_list))
5884 vm_page_aflag_clear(m, PGA_WRITEABLE);
5885 pmap_delayed_invl_page(m);
5888 if (pmap == kernel_pmap) {
5889 pmap_remove_kernel_pde(pmap, pdq, sva);
5891 mpte = pmap_remove_pt_page(pmap, sva);
5893 KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
5894 ("pmap_remove_pde: pte page not promoted"));
5895 pmap_resident_count_dec(pmap, 1);
5896 KASSERT(mpte->ref_count == NPTEPG,
5897 ("pmap_remove_pde: pte page ref count error"));
5898 mpte->ref_count = 0;
5899 pmap_add_delayed_free_list(mpte, free, FALSE);
5902 return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
5906 * pmap_remove_pte: do the things to unmap a page in a process
5909 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
5910 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
5912 struct md_page *pvh;
5913 pt_entry_t oldpte, PG_A, PG_M, PG_RW;
5916 PG_A = pmap_accessed_bit(pmap);
5917 PG_M = pmap_modified_bit(pmap);
5918 PG_RW = pmap_rw_bit(pmap);
5920 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5921 oldpte = pte_load_clear(ptq);
5923 pmap->pm_stats.wired_count -= 1;
5924 pmap_resident_count_dec(pmap, 1);
5925 if (oldpte & PG_MANAGED) {
5926 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
5927 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5930 vm_page_aflag_set(m, PGA_REFERENCED);
5931 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5932 pmap_pvh_free(&m->md, pmap, va);
5933 if (TAILQ_EMPTY(&m->md.pv_list) &&
5934 (m->flags & PG_FICTITIOUS) == 0) {
5935 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5936 if (TAILQ_EMPTY(&pvh->pv_list))
5937 vm_page_aflag_clear(m, PGA_WRITEABLE);
5939 pmap_delayed_invl_page(m);
5941 return (pmap_unuse_pt(pmap, va, ptepde, free));
5945 * Remove a single page from a process address space
5948 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
5949 struct spglist *free)
5951 struct rwlock *lock;
5952 pt_entry_t *pte, PG_V;
5954 PG_V = pmap_valid_bit(pmap);
5955 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5956 if ((*pde & PG_V) == 0)
5958 pte = pmap_pde_to_pte(pde, va);
5959 if ((*pte & PG_V) == 0)
5962 pmap_remove_pte(pmap, pte, va, *pde, free, &lock);
5965 pmap_invalidate_page(pmap, va);
5969 * Removes the specified range of addresses from the page table page.
5972 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
5973 pd_entry_t *pde, struct spglist *free, struct rwlock **lockp)
5975 pt_entry_t PG_G, *pte;
5979 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5980 PG_G = pmap_global_bit(pmap);
5983 for (pte = pmap_pde_to_pte(pde, sva); sva != eva; pte++,
5987 pmap_invalidate_range(pmap, va, sva);
5992 if ((*pte & PG_G) == 0)
5996 if (pmap_remove_pte(pmap, pte, sva, *pde, free, lockp)) {
6002 pmap_invalidate_range(pmap, va, sva);
6007 * Remove the given range of addresses from the specified map.
6009 * It is assumed that the start and end are properly
6010 * rounded to the page size.
6013 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
6015 struct rwlock *lock;
6017 vm_offset_t va_next;
6018 pml5_entry_t *pml5e;
6019 pml4_entry_t *pml4e;
6021 pd_entry_t ptpaddr, *pde;
6022 pt_entry_t PG_G, PG_V;
6023 struct spglist free;
6026 PG_G = pmap_global_bit(pmap);
6027 PG_V = pmap_valid_bit(pmap);
6030 * Perform an unsynchronized read. This is, however, safe.
6032 if (pmap->pm_stats.resident_count == 0)
6038 pmap_delayed_invl_start();
6040 pmap_pkru_on_remove(pmap, sva, eva);
/*
 * special handling of removing one page.  a very
 * common operation and easy to short circuit some
 * special cases.
 */
6047 if (sva + PAGE_SIZE == eva) {
6048 pde = pmap_pde(pmap, sva);
6049 if (pde && (*pde & PG_PS) == 0) {
6050 pmap_remove_page(pmap, sva, pde, &free);
6056 for (; sva < eva; sva = va_next) {
6057 if (pmap->pm_stats.resident_count == 0)
6060 if (pmap_is_la57(pmap)) {
6061 pml5e = pmap_pml5e(pmap, sva);
6062 if ((*pml5e & PG_V) == 0) {
6063 va_next = (sva + NBPML5) & ~PML5MASK;
6068 pml4e = pmap_pml5e_to_pml4e(pml5e, sva);
6070 pml4e = pmap_pml4e(pmap, sva);
6072 if ((*pml4e & PG_V) == 0) {
6073 va_next = (sva + NBPML4) & ~PML4MASK;
6079 va_next = (sva + NBPDP) & ~PDPMASK;
6082 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6083 if ((*pdpe & PG_V) == 0)
6085 if ((*pdpe & PG_PS) != 0) {
6086 KASSERT(va_next <= eva,
6087 ("partial update of non-transparent 1G mapping "
6088 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
6089 *pdpe, sva, eva, va_next));
6090 MPASS(pmap != kernel_pmap); /* XXXKIB */
6091 MPASS((*pdpe & (PG_MANAGED | PG_G)) == 0);
6094 pmap_resident_count_dec(pmap, NBPDP / PAGE_SIZE);
6095 mt = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, sva) & PG_FRAME);
6096 pmap_unwire_ptp(pmap, sva, mt, &free);
6101 * Calculate index for next page table.
6103 va_next = (sva + NBPDR) & ~PDRMASK;
6107 pde = pmap_pdpe_to_pde(pdpe, sva);
6111 * Weed out invalid mappings.
6117 * Check for large page.
6119 if ((ptpaddr & PG_PS) != 0) {
6121 * Are we removing the entire large page? If not,
6122 * demote the mapping and fall through.
6124 if (sva + NBPDR == va_next && eva >= va_next) {
6126 * The TLB entry for a PG_G mapping is
6127 * invalidated by pmap_remove_pde().
6129 if ((ptpaddr & PG_G) == 0)
6131 pmap_remove_pde(pmap, pde, sva, &free, &lock);
6133 } else if (!pmap_demote_pde_locked(pmap, pde, sva,
6135 /* The large page mapping was destroyed. */
6142 * Limit our scan to either the end of the va represented
6143 * by the current page table page, or to the end of the
6144 * range being removed.
6149 if (pmap_remove_ptes(pmap, sva, va_next, pde, &free, &lock))
6156 pmap_invalidate_all(pmap);
6158 pmap_delayed_invl_finish();
6159 vm_page_free_pages_toq(&free, true);
/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 *	Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */
6176 pmap_remove_all(vm_page_t m)
6178 struct md_page *pvh;
6181 struct rwlock *lock;
6182 pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW;
6185 struct spglist free;
6186 int pvh_gen, md_gen;
6188 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6189 ("pmap_remove_all: page %p is not managed", m));
6191 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6192 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
6193 pa_to_pvh(VM_PAGE_TO_PHYS(m));
6196 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
6198 if (!PMAP_TRYLOCK(pmap)) {
6199 pvh_gen = pvh->pv_gen;
6203 if (pvh_gen != pvh->pv_gen) {
6210 pde = pmap_pde(pmap, va);
6211 (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
6214 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
6216 if (!PMAP_TRYLOCK(pmap)) {
6217 pvh_gen = pvh->pv_gen;
6218 md_gen = m->md.pv_gen;
6222 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
6228 PG_A = pmap_accessed_bit(pmap);
6229 PG_M = pmap_modified_bit(pmap);
6230 PG_RW = pmap_rw_bit(pmap);
6231 pmap_resident_count_dec(pmap, 1);
6232 pde = pmap_pde(pmap, pv->pv_va);
6233 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
6234 " a 2mpage in page %p's pv list", m));
6235 pte = pmap_pde_to_pte(pde, pv->pv_va);
6236 tpte = pte_load_clear(pte);
6238 pmap->pm_stats.wired_count--;
6240 vm_page_aflag_set(m, PGA_REFERENCED);
6243 * Update the vm_page_t clean and reference bits.
6245 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6247 pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
6248 pmap_invalidate_page(pmap, pv->pv_va);
6249 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
6251 free_pv_entry(pmap, pv);
6254 vm_page_aflag_clear(m, PGA_WRITEABLE);
6256 pmap_delayed_invl_wait(m);
6257 vm_page_free_pages_toq(&free, true);
6261 * pmap_protect_pde: do the things to protect a 2mpage in a process
6264 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
6266 pd_entry_t newpde, oldpde;
6268 boolean_t anychanged;
6269 pt_entry_t PG_G, PG_M, PG_RW;
6271 PG_G = pmap_global_bit(pmap);
6272 PG_M = pmap_modified_bit(pmap);
6273 PG_RW = pmap_rw_bit(pmap);
6275 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6276 KASSERT((sva & PDRMASK) == 0,
6277 ("pmap_protect_pde: sva is not 2mpage aligned"));
6280 oldpde = newpde = *pde;
6281 if ((prot & VM_PROT_WRITE) == 0) {
6282 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
6283 (PG_MANAGED | PG_M | PG_RW)) {
6284 m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
6285 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
6288 newpde &= ~(PG_RW | PG_M);
6290 if ((prot & VM_PROT_EXECUTE) == 0)
6292 if (newpde != oldpde) {
6294 * As an optimization to future operations on this PDE, clear
6295 * PG_PROMOTED. The impending invalidation will remove any
6296 * lingering 4KB page mappings from the TLB.
6298 if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
6300 if ((oldpde & PG_G) != 0)
6301 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
6305 return (anychanged);
6309 * Set the physical protection on the
6310 * specified range of this map as requested.
6313 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
6316 vm_offset_t va_next;
6317 pml4_entry_t *pml4e;
6319 pd_entry_t ptpaddr, *pde;
6320 pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V;
6321 pt_entry_t obits, pbits;
6322 boolean_t anychanged;
6324 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
6325 if (prot == VM_PROT_NONE) {
6326 pmap_remove(pmap, sva, eva);
6330 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
6331 (VM_PROT_WRITE|VM_PROT_EXECUTE))
6334 PG_G = pmap_global_bit(pmap);
6335 PG_M = pmap_modified_bit(pmap);
6336 PG_V = pmap_valid_bit(pmap);
6337 PG_RW = pmap_rw_bit(pmap);
6341 * Although this function delays and batches the invalidation
6342 * of stale TLB entries, it does not need to call
6343 * pmap_delayed_invl_start() and
6344 * pmap_delayed_invl_finish(), because it does not
6345 * ordinarily destroy mappings. Stale TLB entries from
6346 * protection-only changes need only be invalidated before the
6347 * pmap lock is released, because protection-only changes do
6348 * not destroy PV entries. Even operations that iterate over
6349 * a physical page's PV list of mappings, like
6350 * pmap_remove_write(), acquire the pmap lock for each
6351 * mapping. Consequently, for protection-only changes, the
6352 * pmap lock suffices to synchronize both page table and TLB
 * This function only destroys a mapping if pmap_demote_pde()
 * fails.  In that case, stale TLB entries are immediately
 * invalidated.
 */
6361 for (; sva < eva; sva = va_next) {
6362 pml4e = pmap_pml4e(pmap, sva);
6363 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
6364 va_next = (sva + NBPML4) & ~PML4MASK;
6370 va_next = (sva + NBPDP) & ~PDPMASK;
6373 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6374 if ((*pdpe & PG_V) == 0)
6376 if ((*pdpe & PG_PS) != 0) {
6377 KASSERT(va_next <= eva,
6378 ("partial update of non-transparent 1G mapping "
6379 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
6380 *pdpe, sva, eva, va_next));
6382 obits = pbits = *pdpe;
6383 MPASS((pbits & (PG_MANAGED | PG_G)) == 0);
6384 MPASS(pmap != kernel_pmap); /* XXXKIB */
6385 if ((prot & VM_PROT_WRITE) == 0)
6386 pbits &= ~(PG_RW | PG_M);
6387 if ((prot & VM_PROT_EXECUTE) == 0)
6390 if (pbits != obits) {
6391 if (!atomic_cmpset_long(pdpe, obits, pbits))
6392 /* PG_PS cannot be cleared under us, */
6399 va_next = (sva + NBPDR) & ~PDRMASK;
6403 pde = pmap_pdpe_to_pde(pdpe, sva);
6407 * Weed out invalid mappings.
6413 * Check for large page.
6415 if ((ptpaddr & PG_PS) != 0) {
6417 * Are we protecting the entire large page? If not,
6418 * demote the mapping and fall through.
6420 if (sva + NBPDR == va_next && eva >= va_next) {
6422 * The TLB entry for a PG_G mapping is
6423 * invalidated by pmap_protect_pde().
6425 if (pmap_protect_pde(pmap, pde, sva, prot))
6428 } else if (!pmap_demote_pde(pmap, pde, sva)) {
6430 * The large page mapping was destroyed.
6439 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
6442 obits = pbits = *pte;
6443 if ((pbits & PG_V) == 0)
6446 if ((prot & VM_PROT_WRITE) == 0) {
6447 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
6448 (PG_MANAGED | PG_M | PG_RW)) {
6449 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
6452 pbits &= ~(PG_RW | PG_M);
6454 if ((prot & VM_PROT_EXECUTE) == 0)
6457 if (pbits != obits) {
6458 if (!atomic_cmpset_long(pte, obits, pbits))
6461 pmap_invalidate_page(pmap, sva);
6468 pmap_invalidate_all(pmap);
6472 #if VM_NRESERVLEVEL > 0
6474 pmap_pde_ept_executable(pmap_t pmap, pd_entry_t pde)
6477 if (pmap->pm_type != PT_EPT)
6479 return ((pde & EPT_PG_EXECUTE) != 0);
/*
 * Tries to promote the 512, contiguous 4KB page mappings that are within a
 * single page table page (PTP) to a single 2MB page mapping.  For promotion
 * to occur, two conditions must be met: (1) the 4KB page mappings must map
 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
 * identical characteristics.
 */
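/*
 * Illustrative sketch, not part of the original source: the two conditions
 * above, checked naively over one PTP.  The hypothetical helper below is
 * disabled; the real routine folds this scan together with clearing PG_RW on
 * clean pages and comparing attributes under the PG_PTE_PROMOTE mask.
 */
#if 0
static bool
pmap_pde_promotable_sketch(pt_entry_t *firstpte)
{
	pt_entry_t first, pa;
	int i;

	first = *firstpte;
	if ((first & PG_FRAME & PDRMASK) != 0)
		return (false);			/* not 2MB-aligned */
	pa = first & PG_FRAME;
	for (i = 1; i < NPTEPG; i++) {
		pa += PAGE_SIZE;
		if ((firstpte[i] & PG_FRAME) != pa)
			return (false);		/* not physically contiguous */
		if ((firstpte[i] & PG_PTE_PROMOTE) != (first & PG_PTE_PROMOTE))
			return (false);		/* differing attributes */
	}
	return (true);
}
#endif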
6490 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
6491 struct rwlock **lockp)
6494 pt_entry_t *firstpte, oldpte, pa, *pte;
6495 pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V, PG_PKU_MASK;
6499 PG_A = pmap_accessed_bit(pmap);
6500 PG_G = pmap_global_bit(pmap);
6501 PG_M = pmap_modified_bit(pmap);
6502 PG_V = pmap_valid_bit(pmap);
6503 PG_RW = pmap_rw_bit(pmap);
6504 PG_PKU_MASK = pmap_pku_mask_bit(pmap);
6505 PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
6507 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6510 * Examine the first PTE in the specified PTP. Abort if this PTE is
6511 * either invalid, unused, or does not map the first 4KB physical page
6512 * within a 2MB page.
6514 firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
6517 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V) ||
6518 !pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap,
6520 atomic_add_long(&pmap_pde_p_failures, 1);
6521 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6522 " in pmap %p", va, pmap);
6525 if ((newpde & (PG_M | PG_RW)) == PG_RW) {
6527 * When PG_M is already clear, PG_RW can be cleared without
6528 * a TLB invalidation.
6530 if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW))
6536 * Examine each of the other PTEs in the specified PTP. Abort if this
6537 * PTE maps an unexpected 4KB physical page or does not have identical
6538 * characteristics to the first PTE.
6540 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
6541 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
6544 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
6545 atomic_add_long(&pmap_pde_p_failures, 1);
6546 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6547 " in pmap %p", va, pmap);
6550 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
6552 * When PG_M is already clear, PG_RW can be cleared
6553 * without a TLB invalidation.
6555 if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
6558 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
6559 " in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
6560 (va & ~PDRMASK), pmap);
6562 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
6563 atomic_add_long(&pmap_pde_p_failures, 1);
6564 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
6565 " in pmap %p", va, pmap);
6572 * Save the page table page in its current state until the PDE
6573 * mapping the superpage is demoted by pmap_demote_pde() or
6574 * destroyed by pmap_remove_pde().
6576 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
6577 KASSERT(mpte >= vm_page_array &&
6578 mpte < &vm_page_array[vm_page_array_size],
6579 ("pmap_promote_pde: page table page is out of range"));
6580 KASSERT(mpte->pindex == pmap_pde_pindex(va),
6581 ("pmap_promote_pde: page table page's pindex is wrong"));
6582 if (pmap_insert_pt_page(pmap, mpte, true)) {
6583 atomic_add_long(&pmap_pde_p_failures, 1);
6585 "pmap_promote_pde: failure for va %#lx in pmap %p", va,
6591 * Promote the pv entries.
6593 if ((newpde & PG_MANAGED) != 0)
6594 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp);
6597 * Propagate the PAT index to its proper position.
6599 newpde = pmap_swap_pat(pmap, newpde);
6602 * Map the superpage.
6604 if (workaround_erratum383)
6605 pmap_update_pde(pmap, va, pde, PG_PS | newpde);
6607 pde_store(pde, PG_PROMOTED | PG_PS | newpde);
6609 atomic_add_long(&pmap_pde_promotions, 1);
6610 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
6611 " in pmap %p", va, pmap);
6613 #endif /* VM_NRESERVLEVEL > 0 */
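/*
 * Editor's sketch (not part of the original source): the alignment rules
 * that pmap_enter_largepage() below asserts, modeled in userland.  The
 * page sizes are the usual amd64 values; "psind" 1 selects 2MB and 2
 * selects 1GB, mirroring pagesizes[].
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <stdbool.h>
#include <stdint.h>

static const uint64_t example_pagesizes[] = {
	1UL << 12,	/* psind 0: 4KB */
	1UL << 21,	/* psind 1: 2MB */
	1UL << 30,	/* psind 2: 1GB */
};

static bool
example_largepage_aligned(uint64_t va, uint64_t pa, int psind)
{
	uint64_t mask;

	if (psind < 1 || psind > 2)
		return (false);
	mask = example_pagesizes[psind] - 1;
	/* Both the virtual and the physical address must be size-aligned. */
	return ((va & mask) == 0 && (pa & mask) == 0);
}
#endif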
6616 pmap_enter_largepage(pmap_t pmap, vm_offset_t va, pt_entry_t newpte, int flags,
6620 pt_entry_t origpte, *pml4e, *pdpe, *pde, pten, PG_V;
6622 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6623 KASSERT(psind > 0 && psind < MAXPAGESIZES && pagesizes[psind] != 0,
6624 ("psind %d unexpected", psind));
6625 KASSERT(((newpte & PG_FRAME) & (pagesizes[psind] - 1)) == 0,
6626 ("unaligned phys address %#lx newpte %#lx psind %d",
6627 newpte & PG_FRAME, newpte, psind));
6628 KASSERT((va & (pagesizes[psind] - 1)) == 0,
6629 ("unaligned va %#lx psind %d", va, psind));
6630 KASSERT(va < VM_MAXUSER_ADDRESS,
6631 ("kernel mode non-transparent superpage")); /* XXXKIB */
6632 KASSERT(va + pagesizes[psind] < VM_MAXUSER_ADDRESS,
6633 ("overflowing user map va %#lx psind %d", va, psind)); /* XXXKIB */
6635 PG_V = pmap_valid_bit(pmap);
6638 if (!pmap_pkru_same(pmap, va, va + pagesizes[psind]))
6639 return (KERN_PROTECTION_FAILURE);
6641 if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86)
6642 pten |= pmap_pkru_get(pmap, va);
6644 if (psind == 2) { /* 1G */
6645 pml4e = pmap_pml4e(pmap, va);
6646 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
6647 mp = pmap_allocpte_alloc(pmap, pmap_pml4e_pindex(va),
6651 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
6652 pdpe = &pdpe[pmap_pdpe_index(va)];
6654 MPASS(origpte == 0);
6656 pdpe = pmap_pml4e_to_pdpe(pml4e, va);
6657 KASSERT(pdpe != NULL, ("va %#lx lost pdpe", va));
6659 if ((origpte & PG_V) == 0) {
6660 mp = PHYS_TO_VM_PAGE(*pml4e & PG_FRAME);
6665 } else /* (psind == 1) */ { /* 2M */
6666 pde = pmap_pde(pmap, va);
6668 mp = pmap_allocpte_alloc(pmap, pmap_pdpe_pindex(va),
6672 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mp));
6673 pde = &pde[pmap_pde_index(va)];
6675 MPASS(origpte == 0);
6678 if ((origpte & PG_V) == 0) {
6679 pdpe = pmap_pdpe(pmap, va);
6680 MPASS(pdpe != NULL && (*pdpe & PG_V) != 0);
6681 mp = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
6687 KASSERT((origpte & PG_V) == 0 || ((origpte & PG_PS) != 0 &&
6688 (origpte & PG_PS_FRAME) == (pten & PG_PS_FRAME)),
6689 ("va %#lx changing %s phys page origpte %#lx pten %#lx",
6690 va, psind == 2 ? "1G" : "2M", origpte, pten));
6691 if ((pten & PG_W) != 0 && (origpte & PG_W) == 0)
6692 pmap->pm_stats.wired_count += pagesizes[psind] / PAGE_SIZE;
6693 else if ((pten & PG_W) == 0 && (origpte & PG_W) != 0)
6694 pmap->pm_stats.wired_count -= pagesizes[psind] / PAGE_SIZE;
6695 if ((origpte & PG_V) == 0)
6696 pmap_resident_count_inc(pmap, pagesizes[psind] / PAGE_SIZE);
6698 return (KERN_SUCCESS);
6701 if ((flags & PMAP_ENTER_NOSLEEP) != 0)
6702 return (KERN_RESOURCE_SHORTAGE);
6710 * Insert the given physical page (p) at
6711 * the specified virtual address (v) in the
6712 * target physical map with the protection requested.
6714 * If specified, the page will be wired down, meaning
6715 * that the related pte can not be reclaimed.
6717 * NB: This is the only routine which MAY NOT lazy-evaluate
6718 * or lose information. That is, this routine must actually
6719 * insert this page into the given map NOW.
6721 * When destroying both a page table and PV entry, this function
6722 * performs the TLB invalidation before releasing the PV list
6723 * lock, so we do not need pmap_delayed_invl_page() calls here.
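/*
 * Editor's sketch (not part of the original source): a userland model of
 * how pmap_enter() below assembles the protection and bookkeeping bits of
 * a new 4KB PTE.  The EX_PG_* values follow the hardware x86 PTE layout
 * except EX_PG_W, which stands in for the software "wired" bit kept in one
 * of the bits the MMU ignores.
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <stdint.h>

#define	EX_PG_V		0x001ULL	/* valid */
#define	EX_PG_RW	0x002ULL	/* writeable */
#define	EX_PG_U		0x004ULL	/* user accessible */
#define	EX_PG_A		0x020ULL	/* accessed */
#define	EX_PG_M		0x040ULL	/* modified */
#define	EX_PG_W		0x200ULL	/* software: wired */
#define	EX_PG_NX	(1ULL << 63)	/* no-execute */

static uint64_t
example_newpte(uint64_t pa, int writeable, int written_now, int executable,
    int wired, int user)
{
	uint64_t pte;

	pte = pa | EX_PG_V | EX_PG_A;	/* prevalidate the accessed bit */
	if (writeable)
		pte |= EX_PG_RW;	/* dirty bit is set lazily on write */
	if (written_now)
		pte |= EX_PG_RW | EX_PG_M; /* avoid a dirty-bit fault later */
	if (!executable)
		pte |= EX_PG_NX;
	if (wired)
		pte |= EX_PG_W;
	if (user)
		pte |= EX_PG_U;
	return (pte);
}
#endif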
6726 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
6727 u_int flags, int8_t psind)
6729 struct rwlock *lock;
6731 pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V;
6732 pt_entry_t newpte, origpte;
6739 PG_A = pmap_accessed_bit(pmap);
6740 PG_G = pmap_global_bit(pmap);
6741 PG_M = pmap_modified_bit(pmap);
6742 PG_V = pmap_valid_bit(pmap);
6743 PG_RW = pmap_rw_bit(pmap);
6745 va = trunc_page(va);
6746 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
6747 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
6748 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
6750 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
6751 ("pmap_enter: managed mapping within the clean submap"));
6752 if ((m->oflags & VPO_UNMANAGED) == 0)
6753 VM_PAGE_OBJECT_BUSY_ASSERT(m);
6754 KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
6755 ("pmap_enter: flags %u has reserved bits set", flags));
6756 pa = VM_PAGE_TO_PHYS(m);
6757 newpte = (pt_entry_t)(pa | PG_A | PG_V);
6758 if ((flags & VM_PROT_WRITE) != 0)
6760 if ((prot & VM_PROT_WRITE) != 0)
6762 KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
6763 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
6764 if ((prot & VM_PROT_EXECUTE) == 0)
6766 if ((flags & PMAP_ENTER_WIRED) != 0)
6768 if (va < VM_MAXUSER_ADDRESS)
6770 if (pmap == kernel_pmap)
6772 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
6775 * Set modified bit gratuitously for writeable mappings if
6776 * the page is unmanaged. We do not want to take a fault
6777 * to do the dirty bit accounting for these mappings.
6779 if ((m->oflags & VPO_UNMANAGED) != 0) {
6780 if ((newpte & PG_RW) != 0)
6783 newpte |= PG_MANAGED;
6787 if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
6788 KASSERT((m->oflags & VPO_UNMANAGED) != 0,
6789 ("managed largepage va %#lx flags %#x", va, flags));
6790 rv = pmap_enter_largepage(pmap, va, newpte | PG_PS, flags,
6795 /* Assert the required virtual and physical alignment. */
6796 KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
6797 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
6798 rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
6804 * In the case that a page table page is not
6805 * resident, we are creating it here.
6808 pde = pmap_pde(pmap, va);
6809 if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
6810 pmap_demote_pde_locked(pmap, pde, va, &lock))) {
6811 pte = pmap_pde_to_pte(pde, va);
6812 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
6813 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
6816 } else if (va < VM_MAXUSER_ADDRESS) {
6818 * Here if the pte page isn't mapped, or if it has been deallocated.
6821 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
6822 mpte = pmap_allocpte_alloc(pmap, pmap_pde_pindex(va),
6823 nosleep ? NULL : &lock, va);
6824 if (mpte == NULL && nosleep) {
6825 rv = KERN_RESOURCE_SHORTAGE;
6830 panic("pmap_enter: invalid page directory va=%#lx", va);
6834 if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86)
6835 newpte |= pmap_pkru_get(pmap, va);
6838 * Is the specified virtual address already mapped?
6840 if ((origpte & PG_V) != 0) {
6842 * Wiring change, just update stats. We don't worry about
6843 * wiring PT pages as they remain resident as long as there
6844 * are valid mappings in them. Hence, if a user page is wired,
6845 * the PT page will be also.
6847 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
6848 pmap->pm_stats.wired_count++;
6849 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
6850 pmap->pm_stats.wired_count--;
6853 * Remove the extra PT page reference.
6857 KASSERT(mpte->ref_count > 0,
6858 ("pmap_enter: missing reference to page table page,"
6863 * Has the physical page changed?
6865 opa = origpte & PG_FRAME;
6868 * No, might be a protection or wiring change.
6870 if ((origpte & PG_MANAGED) != 0 &&
6871 (newpte & PG_RW) != 0)
6872 vm_page_aflag_set(m, PGA_WRITEABLE);
6873 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
6879 * The physical page has changed. Temporarily invalidate
6880 * the mapping. This ensures that all threads sharing the
6881 * pmap keep a consistent view of the mapping, which is
6882 * necessary for the correct handling of COW faults. It
6883 * also permits reuse of the old mapping's PV entry,
6884 * avoiding an allocation.
6886 * For consistency, handle unmanaged mappings the same way.
6888 origpte = pte_load_clear(pte);
6889 KASSERT((origpte & PG_FRAME) == opa,
6890 ("pmap_enter: unexpected pa update for %#lx", va));
6891 if ((origpte & PG_MANAGED) != 0) {
6892 om = PHYS_TO_VM_PAGE(opa);
6895 * The pmap lock is sufficient to synchronize with
6896 * concurrent calls to pmap_page_test_mappings() and
6897 * pmap_ts_referenced().
6899 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6901 if ((origpte & PG_A) != 0) {
6902 pmap_invalidate_page(pmap, va);
6903 vm_page_aflag_set(om, PGA_REFERENCED);
6905 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
6906 pv = pmap_pvh_remove(&om->md, pmap, va);
6908 ("pmap_enter: no PV entry for %#lx", va));
6909 if ((newpte & PG_MANAGED) == 0)
6910 free_pv_entry(pmap, pv);
6911 if ((om->a.flags & PGA_WRITEABLE) != 0 &&
6912 TAILQ_EMPTY(&om->md.pv_list) &&
6913 ((om->flags & PG_FICTITIOUS) != 0 ||
6914 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
6915 vm_page_aflag_clear(om, PGA_WRITEABLE);
6918 * Since this mapping is unmanaged, assume that PG_A is set.
6921 pmap_invalidate_page(pmap, va);
6926 * Increment the counters.
6928 if ((newpte & PG_W) != 0)
6929 pmap->pm_stats.wired_count++;
6930 pmap_resident_count_inc(pmap, 1);
6934 * Enter on the PV list if part of our managed memory.
6936 if ((newpte & PG_MANAGED) != 0) {
6938 pv = get_pv_entry(pmap, &lock);
6941 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
6942 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
6944 if ((newpte & PG_RW) != 0)
6945 vm_page_aflag_set(m, PGA_WRITEABLE);
6951 if ((origpte & PG_V) != 0) {
6953 origpte = pte_load_store(pte, newpte);
6954 KASSERT((origpte & PG_FRAME) == pa,
6955 ("pmap_enter: unexpected pa update for %#lx", va));
6956 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
6958 if ((origpte & PG_MANAGED) != 0)
6962 * Although the PTE may still have PG_RW set, TLB
6963 * invalidation may nonetheless be required because
6964 * the PTE no longer has PG_M set.
6966 } else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
6968 * This PTE change does not require TLB invalidation.
6972 if ((origpte & PG_A) != 0)
6973 pmap_invalidate_page(pmap, va);
6975 pte_store(pte, newpte);
6979 #if VM_NRESERVLEVEL > 0
6981 * If both the page table page and the reservation are fully
6982 * populated, then attempt promotion.
6984 if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
6985 pmap_ps_enabled(pmap) &&
6986 (m->flags & PG_FICTITIOUS) == 0 &&
6987 vm_reserv_level_iffullpop(m) == 0)
6988 pmap_promote_pde(pmap, pde, va, &lock);
7000 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
7001 * if successful. Returns false if (1) a page table page cannot be allocated
7002 * without sleeping, (2) a mapping already exists at the specified virtual
7003 * address, or (3) a PV entry cannot be allocated without reclaiming another PV entry.
7007 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
7008 struct rwlock **lockp)
7013 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7014 PG_V = pmap_valid_bit(pmap);
7015 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
7017 if ((m->oflags & VPO_UNMANAGED) == 0)
7018 newpde |= PG_MANAGED;
7019 if ((prot & VM_PROT_EXECUTE) == 0)
7021 if (va < VM_MAXUSER_ADDRESS)
7023 return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
7024 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
7029 * Returns true if every page table entry in the specified page table page is zero.
7033 pmap_every_pte_zero(vm_paddr_t pa)
7035 pt_entry_t *pt_end, *pte;
7037 KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
7038 pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
7039 for (pt_end = pte + NPTEPG; pte < pt_end; pte++) {
7047 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
7048 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
7049 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
7050 * a mapping already exists at the specified virtual address. Returns
7051 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
7052 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
7053 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
7055 * The parameter "m" is only used when creating a managed, writeable mapping.
7058 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
7059 vm_page_t m, struct rwlock **lockp)
7061 struct spglist free;
7062 pd_entry_t oldpde, *pde;
7063 pt_entry_t PG_G, PG_RW, PG_V;
7066 KASSERT(pmap == kernel_pmap || (newpde & PG_W) == 0,
7067 ("pmap_enter_pde: cannot create wired user mapping"));
7068 PG_G = pmap_global_bit(pmap);
7069 PG_RW = pmap_rw_bit(pmap);
7070 KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
7071 ("pmap_enter_pde: newpde is missing PG_M"));
7072 PG_V = pmap_valid_bit(pmap);
7073 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7075 if (!pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap,
7077 CTR2(KTR_PMAP, "pmap_enter_pde: 2m x blocked for va %#lx"
7078 " in pmap %p", va, pmap);
7079 return (KERN_FAILURE);
7081 if ((pde = pmap_alloc_pde(pmap, va, &pdpg, (flags &
7082 PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
7083 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7084 " in pmap %p", va, pmap);
7085 return (KERN_RESOURCE_SHORTAGE);
7089 * If the pkru attribute is not the same for the whole pde range, return
7090 * failure and let vm_fault() cope. Check after pde allocation, since it could sleep.
7093 if (!pmap_pkru_same(pmap, va, va + NBPDR)) {
7094 pmap_abort_ptp(pmap, va, pdpg);
7095 return (KERN_FAILURE);
7097 if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86) {
7098 newpde &= ~X86_PG_PKU_MASK;
7099 newpde |= pmap_pkru_get(pmap, va);
7103 * If there are existing mappings, either abort or remove them.
7106 if ((oldpde & PG_V) != 0) {
7107 KASSERT(pdpg == NULL || pdpg->ref_count > 1,
7108 ("pmap_enter_pde: pdpg's reference count is too low"));
7109 if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
7110 VM_MAXUSER_ADDRESS || (oldpde & PG_PS) != 0 ||
7111 !pmap_every_pte_zero(oldpde & PG_FRAME))) {
7114 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7115 " in pmap %p", va, pmap);
7116 return (KERN_FAILURE);
7118 /* Break the existing mapping(s). */
7120 if ((oldpde & PG_PS) != 0) {
7122 * The reference to the PD page that was acquired by
7123 * pmap_alloc_pde() ensures that it won't be freed.
7124 * However, if the PDE resulted from a promotion, then
7125 * a reserved PT page could be freed.
7127 (void)pmap_remove_pde(pmap, pde, va, &free, lockp);
7128 if ((oldpde & PG_G) == 0)
7129 pmap_invalidate_pde_page(pmap, va, oldpde);
7131 pmap_delayed_invl_start();
7132 if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
7134 pmap_invalidate_all(pmap);
7135 pmap_delayed_invl_finish();
7137 if (va < VM_MAXUSER_ADDRESS) {
7138 vm_page_free_pages_toq(&free, true);
7139 KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
7142 KASSERT(SLIST_EMPTY(&free),
7143 ("pmap_enter_pde: freed kernel page table page"));
7146 * Both pmap_remove_pde() and pmap_remove_ptes() will
7147 * leave the kernel page table page zero filled.
7149 mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
7150 if (pmap_insert_pt_page(pmap, mt, false))
7151 panic("pmap_enter_pde: trie insert failed");
7155 if ((newpde & PG_MANAGED) != 0) {
7157 * Abort this mapping if its PV entry could not be created.
7159 if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
7161 pmap_abort_ptp(pmap, va, pdpg);
7162 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
7163 " in pmap %p", va, pmap);
7164 return (KERN_RESOURCE_SHORTAGE);
7166 if ((newpde & PG_RW) != 0) {
7167 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
7168 vm_page_aflag_set(mt, PGA_WRITEABLE);
7173 * Increment counters.
7175 if ((newpde & PG_W) != 0)
7176 pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
7177 pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
7180 * Map the superpage. (This is not a promoted mapping; there will not
7181 * be any lingering 4KB page mappings in the TLB.)
7183 pde_store(pde, newpde);
7185 atomic_add_long(&pmap_pde_mappings, 1);
7186 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p",
7188 return (KERN_SUCCESS);
7192 * Maps a sequence of resident pages belonging to the same object.
7193 * The sequence begins with the given page m_start. This page is
7194 * mapped at the given virtual address start. Each subsequent page is
7195 * mapped at a virtual address that is offset from start by the same
7196 * amount as the page is offset from m_start within the object. The
7197 * last page in the sequence is the page with the largest offset from
7198 * m_start that can be mapped at a virtual address less than the given
7199 * virtual address end. Not every virtual page between start and end
7200 * is mapped; only those for which a resident page exists with the
7201 * corresponding offset from m_start are mapped.
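/*
 * Editor's sketch (not part of the original source): how the candidate
 * virtual address for each resident page is derived from its offset within
 * the object, and when a 2MB mapping may be attempted, as pmap_enter_object()
 * does below.  EX_NBPDR and EX_PDRMASK are stand-ins for NBPDR/PDRMASK (2MB).
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <stdbool.h>
#include <stdint.h>

#define	EX_PAGE_SHIFT	12
#define	EX_NBPDR	(1UL << 21)
#define	EX_PDRMASK	(EX_NBPDR - 1)

static uint64_t
example_map_va(uint64_t start, uint64_t pindex, uint64_t pindex_start)
{
	/* Each page maps at the same offset from "start" as from m_start. */
	return (start + ((pindex - pindex_start) << EX_PAGE_SHIFT));
}

static bool
example_try_2m(uint64_t va, uint64_t end, int psind, bool ps_enabled)
{
	/*
	 * A 2MB attempt requires a 2MB-aligned va, a range that still covers
	 * the whole superpage, a page that belongs to a fully populated 2MB
	 * run (psind == 1), and superpages enabled for the pmap.
	 */
	return ((va & EX_PDRMASK) == 0 && va + EX_NBPDR <= end &&
	    psind == 1 && ps_enabled);
}
#endif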
7204 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
7205 vm_page_t m_start, vm_prot_t prot)
7207 struct rwlock *lock;
7210 vm_pindex_t diff, psize;
7212 VM_OBJECT_ASSERT_LOCKED(m_start->object);
7214 psize = atop(end - start);
7219 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
7220 va = start + ptoa(diff);
7221 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
7222 m->psind == 1 && pmap_ps_enabled(pmap) &&
7223 pmap_allow_2m_x_page(pmap, (prot & VM_PROT_EXECUTE) != 0) &&
7224 pmap_enter_2mpage(pmap, va, m, prot, &lock))
7225 m = &m[NBPDR / PAGE_SIZE - 1];
7227 mpte = pmap_enter_quick_locked(pmap, va, m, prot,
7229 m = TAILQ_NEXT(m, listq);
7237 * this code makes some *MAJOR* assumptions:
7238 * 1. Current pmap & pmap exists.  2. Not wired.  3. Read access.
7241 * 4. No page table pages.
7242 * but is *MUCH* faster than pmap_enter...
7246 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
7248 struct rwlock *lock;
7252 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
7259 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
7260 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
7262 pt_entry_t newpte, *pte, PG_V;
7264 KASSERT(!VA_IS_CLEANMAP(va) ||
7265 (m->oflags & VPO_UNMANAGED) != 0,
7266 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
7267 PG_V = pmap_valid_bit(pmap);
7268 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
7271 * In the case that a page table page is not
7272 * resident, we are creating it here.
7274 if (va < VM_MAXUSER_ADDRESS) {
7275 vm_pindex_t ptepindex;
7279 * Calculate pagetable page index
7281 ptepindex = pmap_pde_pindex(va);
7282 if (mpte && (mpte->pindex == ptepindex)) {
7286 * Get the page directory entry
7288 ptepa = pmap_pde(pmap, va);
7291 * If the page table page is mapped, we just increment
7292 * the hold count, and activate it. Otherwise, we
7293 * attempt to allocate a page table page. If this
7294 * attempt fails, we don't retry. Instead, we give up.
7296 if (ptepa && (*ptepa & PG_V) != 0) {
7299 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
7303 * Pass NULL instead of the PV list lock
7304 * pointer, because we don't intend to sleep.
7306 mpte = pmap_allocpte_alloc(pmap, ptepindex,
7312 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
7313 pte = &pte[pmap_pte_index(va)];
7325 * Enter on the PV list if part of our managed memory.
7327 if ((m->oflags & VPO_UNMANAGED) == 0 &&
7328 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
7330 pmap_abort_ptp(pmap, va, mpte);
7335 * Increment counters
7337 pmap_resident_count_inc(pmap, 1);
7339 newpte = VM_PAGE_TO_PHYS(m) | PG_V |
7340 pmap_cache_bits(pmap, m->md.pat_mode, 0);
7341 if ((m->oflags & VPO_UNMANAGED) == 0)
7342 newpte |= PG_MANAGED;
7343 if ((prot & VM_PROT_EXECUTE) == 0)
7345 if (va < VM_MAXUSER_ADDRESS)
7346 newpte |= PG_U | pmap_pkru_get(pmap, va);
7347 pte_store(pte, newpte);
7352 * Make a temporary mapping for a physical address. This is only intended
7353 * to be used for panic dumps.
7356 pmap_kenter_temporary(vm_paddr_t pa, int i)
7360 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
7361 pmap_kenter(va, pa);
7363 return ((void *)crashdumpmap);
7367 * This code maps large physical mmap regions into the
7368 * processor address space. Note that some shortcuts
7369 * are taken, but the code works.
7372 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
7373 vm_pindex_t pindex, vm_size_t size)
7376 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
7377 vm_paddr_t pa, ptepa;
7381 PG_A = pmap_accessed_bit(pmap);
7382 PG_M = pmap_modified_bit(pmap);
7383 PG_V = pmap_valid_bit(pmap);
7384 PG_RW = pmap_rw_bit(pmap);
7386 VM_OBJECT_ASSERT_WLOCKED(object);
7387 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
7388 ("pmap_object_init_pt: non-device object"));
7389 if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
7390 if (!pmap_ps_enabled(pmap))
7392 if (!vm_object_populate(object, pindex, pindex + atop(size)))
7394 p = vm_page_lookup(object, pindex);
7395 KASSERT(p->valid == VM_PAGE_BITS_ALL,
7396 ("pmap_object_init_pt: invalid page %p", p));
7397 pat_mode = p->md.pat_mode;
7400 * Abort the mapping if the first page is not physically
7401 * aligned to a 2MB page boundary.
7403 ptepa = VM_PAGE_TO_PHYS(p);
7404 if (ptepa & (NBPDR - 1))
7408 * Skip the first page. Abort the mapping if the rest of
7409 * the pages are not physically contiguous or have differing
7410 * memory attributes.
7412 p = TAILQ_NEXT(p, listq);
7413 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
7415 KASSERT(p->valid == VM_PAGE_BITS_ALL,
7416 ("pmap_object_init_pt: invalid page %p", p));
7417 if (pa != VM_PAGE_TO_PHYS(p) ||
7418 pat_mode != p->md.pat_mode)
7420 p = TAILQ_NEXT(p, listq);
7424 * Map using 2MB pages. Since "ptepa" is 2M aligned and
7425 * "size" is a multiple of 2M, adding the PAT setting to "pa"
7426 * will not affect the termination of this loop.
7429 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
7430 pa < ptepa + size; pa += NBPDR) {
7431 pde = pmap_alloc_pde(pmap, addr, &pdpg, NULL);
7434 * The creation of mappings below is only an
7435 * optimization. If a page directory page
7436 * cannot be allocated without blocking,
7437 * continue on to the next mapping rather than blocking.
7443 if ((*pde & PG_V) == 0) {
7444 pde_store(pde, pa | PG_PS | PG_M | PG_A |
7445 PG_U | PG_RW | PG_V);
7446 pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
7447 atomic_add_long(&pmap_pde_mappings, 1);
7449 /* Continue on if the PDE is already valid. */
7451 KASSERT(pdpg->ref_count > 0,
7452 ("pmap_object_init_pt: missing reference "
7453 "to page directory page, va: 0x%lx", addr));
7462 * Clear the wired attribute from the mappings for the specified range of
7463 * addresses in the given pmap. Every valid mapping within that range
7464 * must have the wired attribute set. In contrast, invalid mappings
7465 * cannot have the wired attribute set, so they are ignored.
7467 * The wired attribute of the page table entry is not a hardware
7468 * feature, so there is no need to invalidate any TLB entries.
7469 * Since pmap_demote_pde() for the wired entry must never fail,
7470 * pmap_delayed_invl_start()/finish() calls around the
7471 * function are not needed.
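/*
 * Editor's sketch (not part of the original source): the "va_next" stepping
 * used by pmap_unwire() below and by the other range walkers in this file.
 * Each level advances sva to the next boundary of the structure it is
 * walking (512GB, 1GB or 2MB) and the loop clamps to eva; the 2MB case is
 * shown here with the standard amd64 constant.
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <stdio.h>

#define	EX_NBPDR	(1UL << 21)	/* bytes mapped by one PD entry */

int
main(void)
{
	unsigned long sva = 0x00007f00001ff000UL, eva = 0x00007f0000601000UL;
	unsigned long va_next;

	for (; sva < eva; sva = va_next) {
		/* Step to the next 2MB boundary, guarding against wraparound. */
		va_next = (sva + EX_NBPDR) & ~(EX_NBPDR - 1);
		if (va_next < sva)
			va_next = eva;
		printf("chunk [%#lx, %#lx)\n", sva,
		    va_next < eva ? va_next : eva);
	}
	return (0);
}
#endif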
7474 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
7476 vm_offset_t va_next;
7477 pml4_entry_t *pml4e;
7480 pt_entry_t *pte, PG_V, PG_G;
7482 PG_V = pmap_valid_bit(pmap);
7483 PG_G = pmap_global_bit(pmap);
7485 for (; sva < eva; sva = va_next) {
7486 pml4e = pmap_pml4e(pmap, sva);
7487 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
7488 va_next = (sva + NBPML4) & ~PML4MASK;
7494 va_next = (sva + NBPDP) & ~PDPMASK;
7497 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
7498 if ((*pdpe & PG_V) == 0)
7500 if ((*pdpe & PG_PS) != 0) {
7501 KASSERT(va_next <= eva,
7502 ("partial update of non-transparent 1G mapping "
7503 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
7504 *pdpe, sva, eva, va_next));
7505 MPASS(pmap != kernel_pmap); /* XXXKIB */
7506 MPASS((*pdpe & (PG_MANAGED | PG_G)) == 0);
7507 atomic_clear_long(pdpe, PG_W);
7508 pmap->pm_stats.wired_count -= NBPDP / PAGE_SIZE;
7512 va_next = (sva + NBPDR) & ~PDRMASK;
7515 pde = pmap_pdpe_to_pde(pdpe, sva);
7516 if ((*pde & PG_V) == 0)
7518 if ((*pde & PG_PS) != 0) {
7519 if ((*pde & PG_W) == 0)
7520 panic("pmap_unwire: pde %#jx is missing PG_W",
7524 * Are we unwiring the entire large page? If not,
7525 * demote the mapping and fall through.
7527 if (sva + NBPDR == va_next && eva >= va_next) {
7528 atomic_clear_long(pde, PG_W);
7529 pmap->pm_stats.wired_count -= NBPDR /
7532 } else if (!pmap_demote_pde(pmap, pde, sva))
7533 panic("pmap_unwire: demotion failed");
7537 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
7539 if ((*pte & PG_V) == 0)
7541 if ((*pte & PG_W) == 0)
7542 panic("pmap_unwire: pte %#jx is missing PG_W",
7546 * PG_W must be cleared atomically. Although the pmap
7547 * lock synchronizes access to PG_W, another processor
7548 * could be setting PG_M and/or PG_A concurrently.
7550 atomic_clear_long(pte, PG_W);
7551 pmap->pm_stats.wired_count--;
7558 * Copy the range specified by src_addr/len
7559 * from the source map to the range dst_addr/len
7560 * in the destination map.
7562 * This routine is only advisory and need not do anything.
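/*
 * Editor's sketch (not part of the original source): pmap_copy() below
 * locks both pmaps; taking them in a globally consistent order (here, by
 * ascending pointer value) is what prevents an AB/BA deadlock when two
 * threads copy between the same pair of pmaps in opposite directions.
 * The pthread mutex is a userland stand-in for the kernel's PMAP_LOCK().
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <pthread.h>

struct example_pmap {
	pthread_mutex_t	lock;
};

static void
example_lock_pair(struct example_pmap *dst, struct example_pmap *src)
{
	/* Always acquire the lower-addressed pmap first. */
	if (dst < src) {
		pthread_mutex_lock(&dst->lock);
		pthread_mutex_lock(&src->lock);
	} else {
		pthread_mutex_lock(&src->lock);
		pthread_mutex_lock(&dst->lock);
	}
}

static void
example_unlock_pair(struct example_pmap *dst, struct example_pmap *src)
{
	pthread_mutex_unlock(&src->lock);
	pthread_mutex_unlock(&dst->lock);
}
#endif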
7565 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
7566 vm_offset_t src_addr)
7568 struct rwlock *lock;
7569 pml4_entry_t *pml4e;
7571 pd_entry_t *pde, srcptepaddr;
7572 pt_entry_t *dst_pte, PG_A, PG_M, PG_V, ptetemp, *src_pte;
7573 vm_offset_t addr, end_addr, va_next;
7574 vm_page_t dst_pdpg, dstmpte, srcmpte;
7576 if (dst_addr != src_addr)
7579 if (dst_pmap->pm_type != src_pmap->pm_type)
7583 * EPT page table entries that require emulation of A/D bits are
7584 * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although
7585 * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit
7586 * (aka EPT_PG_EXECUTE) could still be set. Since some EPT
7587 * implementations flag an EPT misconfiguration for exec-only
7588 * mappings we skip this function entirely for emulated pmaps.
7590 if (pmap_emulate_ad_bits(dst_pmap))
7593 end_addr = src_addr + len;
7595 if (dst_pmap < src_pmap) {
7596 PMAP_LOCK(dst_pmap);
7597 PMAP_LOCK(src_pmap);
7599 PMAP_LOCK(src_pmap);
7600 PMAP_LOCK(dst_pmap);
7603 PG_A = pmap_accessed_bit(dst_pmap);
7604 PG_M = pmap_modified_bit(dst_pmap);
7605 PG_V = pmap_valid_bit(dst_pmap);
7607 for (addr = src_addr; addr < end_addr; addr = va_next) {
7608 KASSERT(addr < UPT_MIN_ADDRESS,
7609 ("pmap_copy: invalid to pmap_copy page tables"));
7611 pml4e = pmap_pml4e(src_pmap, addr);
7612 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
7613 va_next = (addr + NBPML4) & ~PML4MASK;
7619 va_next = (addr + NBPDP) & ~PDPMASK;
7622 pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
7623 if ((*pdpe & PG_V) == 0)
7625 if ((*pdpe & PG_PS) != 0) {
7626 KASSERT(va_next <= end_addr,
7627 ("partial update of non-transparent 1G mapping "
7628 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
7629 *pdpe, addr, end_addr, va_next));
7630 MPASS((addr & PDPMASK) == 0);
7631 MPASS((*pdpe & PG_MANAGED) == 0);
7632 srcptepaddr = *pdpe;
7633 pdpe = pmap_pdpe(dst_pmap, addr);
7635 if (pmap_allocpte_alloc(dst_pmap,
7636 pmap_pml4e_pindex(addr), NULL, addr) ==
7639 pdpe = pmap_pdpe(dst_pmap, addr);
7641 pml4e = pmap_pml4e(dst_pmap, addr);
7642 dst_pdpg = PHYS_TO_VM_PAGE(*pml4e & PG_FRAME);
7643 dst_pdpg->ref_count++;
7646 ("1G mapping present in dst pmap "
7647 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
7648 *pdpe, addr, end_addr, va_next));
7649 *pdpe = srcptepaddr & ~PG_W;
7650 pmap_resident_count_inc(dst_pmap, NBPDP / PAGE_SIZE);
7654 va_next = (addr + NBPDR) & ~PDRMASK;
7658 pde = pmap_pdpe_to_pde(pdpe, addr);
7660 if (srcptepaddr == 0)
7663 if (srcptepaddr & PG_PS) {
7664 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
7666 pde = pmap_alloc_pde(dst_pmap, addr, &dst_pdpg, NULL);
7669 if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
7670 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
7671 PMAP_ENTER_NORECLAIM, &lock))) {
7672 *pde = srcptepaddr & ~PG_W;
7673 pmap_resident_count_inc(dst_pmap, NBPDR /
7675 atomic_add_long(&pmap_pde_mappings, 1);
7677 pmap_abort_ptp(dst_pmap, addr, dst_pdpg);
7681 srcptepaddr &= PG_FRAME;
7682 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
7683 KASSERT(srcmpte->ref_count > 0,
7684 ("pmap_copy: source page table page is unused"));
7686 if (va_next > end_addr)
7689 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
7690 src_pte = &src_pte[pmap_pte_index(addr)];
7692 for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
7696 * We only virtually copy managed pages.
7698 if ((ptetemp & PG_MANAGED) == 0)
7701 if (dstmpte != NULL) {
7702 KASSERT(dstmpte->pindex ==
7703 pmap_pde_pindex(addr),
7704 ("dstmpte pindex/addr mismatch"));
7705 dstmpte->ref_count++;
7706 } else if ((dstmpte = pmap_allocpte(dst_pmap, addr,
7709 dst_pte = (pt_entry_t *)
7710 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
7711 dst_pte = &dst_pte[pmap_pte_index(addr)];
7712 if (*dst_pte == 0 &&
7713 pmap_try_insert_pv_entry(dst_pmap, addr,
7714 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), &lock)) {
7716 * Clear the wired, modified, and accessed
7717 * (referenced) bits during the copy.
7719 *dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
7720 pmap_resident_count_inc(dst_pmap, 1);
7722 pmap_abort_ptp(dst_pmap, addr, dstmpte);
7725 /* Have we copied all of the valid mappings? */
7726 if (dstmpte->ref_count >= srcmpte->ref_count)
7733 PMAP_UNLOCK(src_pmap);
7734 PMAP_UNLOCK(dst_pmap);
7738 pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap)
7742 if (dst_pmap->pm_type != src_pmap->pm_type ||
7743 dst_pmap->pm_type != PT_X86 ||
7744 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
7747 if (dst_pmap < src_pmap) {
7748 PMAP_LOCK(dst_pmap);
7749 PMAP_LOCK(src_pmap);
7751 PMAP_LOCK(src_pmap);
7752 PMAP_LOCK(dst_pmap);
7754 error = pmap_pkru_copy(dst_pmap, src_pmap);
7755 /* Clean up partial copy on failure due to no memory. */
7756 if (error == ENOMEM)
7757 pmap_pkru_deassign_all(dst_pmap);
7758 PMAP_UNLOCK(src_pmap);
7759 PMAP_UNLOCK(dst_pmap);
7760 if (error != ENOMEM)
7768 * Zero the specified hardware page.
7771 pmap_zero_page(vm_page_t m)
7773 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
7775 pagezero((void *)va);
7779 * Zero an area within a single hardware page. off and size must not
7780 * cover an area beyond a single hardware page.
7783 pmap_zero_page_area(vm_page_t m, int off, int size)
7785 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
7787 if (off == 0 && size == PAGE_SIZE)
7788 pagezero((void *)va);
7790 bzero((char *)va + off, size);
7794 * Copy 1 specified hardware page to another.
7797 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
7799 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
7800 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
7802 pagecopy((void *)src, (void *)dst);
7805 int unmapped_buf_allowed = 1;
7808 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
7809 vm_offset_t b_offset, int xfersize)
7813 vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
7817 while (xfersize > 0) {
7818 a_pg_offset = a_offset & PAGE_MASK;
7819 pages[0] = ma[a_offset >> PAGE_SHIFT];
7820 b_pg_offset = b_offset & PAGE_MASK;
7821 pages[1] = mb[b_offset >> PAGE_SHIFT];
7822 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
7823 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
7824 mapped = pmap_map_io_transient(pages, vaddr, 2, FALSE);
7825 a_cp = (char *)vaddr[0] + a_pg_offset;
7826 b_cp = (char *)vaddr[1] + b_pg_offset;
7827 bcopy(a_cp, b_cp, cnt);
7828 if (__predict_false(mapped))
7829 pmap_unmap_io_transient(pages, vaddr, 2, FALSE);
7837 * Returns true if the pmap's pv is one of the first
7838 * 16 pvs linked to from this page. This count may
7839 * be changed upwards or downwards in the future; it
7840 * is only necessary that true be returned for a small
7841 * subset of pmaps for proper page aging.
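/*
 * Editor's sketch (not part of the original source): the bounded scan that
 * pmap_page_exists_quick() performs below.  Only the first 16 PV entries
 * are examined, so the answer may be a false negative; that is cheap and
 * sufficient for page aging.  The singly linked list here is a stand-in
 * for the kernel's TAILQ of pv_entry structures.
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <stdbool.h>
#include <stddef.h>

struct example_pv {
	const void		*pmap;
	struct example_pv	*next;
};

static bool
example_page_exists_quick(const void *pmap, const struct example_pv *head)
{
	int loops;

	for (loops = 0; head != NULL && loops < 16; head = head->next, loops++)
		if (head->pmap == pmap)
			return (true);
	return (false);	/* possibly a false negative beyond 16 entries */
}
#endif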
7844 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
7846 struct md_page *pvh;
7847 struct rwlock *lock;
7852 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
7853 ("pmap_page_exists_quick: page %p is not managed", m));
7855 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7857 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7858 if (PV_PMAP(pv) == pmap) {
7866 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
7867 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
7868 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
7869 if (PV_PMAP(pv) == pmap) {
7883 * pmap_page_wired_mappings:
7885 * Return the number of managed mappings to the given physical page that are wired.
7889 pmap_page_wired_mappings(vm_page_t m)
7891 struct rwlock *lock;
7892 struct md_page *pvh;
7896 int count, md_gen, pvh_gen;
7898 if ((m->oflags & VPO_UNMANAGED) != 0)
7900 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7904 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
7906 if (!PMAP_TRYLOCK(pmap)) {
7907 md_gen = m->md.pv_gen;
7911 if (md_gen != m->md.pv_gen) {
7916 pte = pmap_pte(pmap, pv->pv_va);
7917 if ((*pte & PG_W) != 0)
7921 if ((m->flags & PG_FICTITIOUS) == 0) {
7922 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
7923 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
7925 if (!PMAP_TRYLOCK(pmap)) {
7926 md_gen = m->md.pv_gen;
7927 pvh_gen = pvh->pv_gen;
7931 if (md_gen != m->md.pv_gen ||
7932 pvh_gen != pvh->pv_gen) {
7937 pte = pmap_pde(pmap, pv->pv_va);
7938 if ((*pte & PG_W) != 0)
7948 * Returns TRUE if the given page is mapped individually or as part of
7949 * a 2mpage. Otherwise, returns FALSE.
7952 pmap_page_is_mapped(vm_page_t m)
7954 struct rwlock *lock;
7957 if ((m->oflags & VPO_UNMANAGED) != 0)
7959 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
7961 rv = !TAILQ_EMPTY(&m->md.pv_list) ||
7962 ((m->flags & PG_FICTITIOUS) == 0 &&
7963 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
7969 * Destroy all managed, non-wired mappings in the given user-space
7970 * pmap. This pmap cannot be active on any processor besides the current one.
7973 * This function cannot be applied to the kernel pmap. Moreover, it
7974 * is not intended for general use. It is only to be used during
7975 * process termination. Consequently, it can be implemented in ways
7976 * that make it faster than pmap_remove(). First, it can more quickly
7977 * destroy mappings by iterating over the pmap's collection of PV
7978 * entries, rather than searching the page table. Second, it doesn't
7979 * have to test and clear the page table entries atomically, because
7980 * no processor is currently accessing the user address space. In
7981 * particular, a page table entry's dirty bit won't change state once
7982 * this function starts.
7984 * Although this function destroys all of the pmap's managed,
7985 * non-wired mappings, it can delay and batch the invalidation of TLB
7986 * entries without calling pmap_delayed_invl_start() and
7987 * pmap_delayed_invl_finish(). Because the pmap is not active on
7988 * any other processor, none of these TLB entries will ever be used
7989 * before their eventual invalidation. Consequently, there is no need
7990 * for either pmap_remove_all() or pmap_remove_write() to wait for
7991 * that eventual TLB invalidation.
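/*
 * Editor's sketch (not part of the original source): the bitmap walk that
 * pmap_remove_pages() below uses to visit every allocated PV entry in a
 * chunk.  Each chunk keeps a free bitmap ("pc_map"); inverting it against
 * the full mask yields the in-use entries, which are then visited one set
 * bit at a time with a count-trailing-zeros step.  Word count and width
 * mirror the kernel's 64-bit pc_map fields.
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <stdint.h>
#include <stdio.h>

#define	EX_NPCM	3	/* number of 64-bit map words per chunk */

static void
example_visit_inuse(const uint64_t pc_map[EX_NPCM],
    const uint64_t freemask[EX_NPCM])
{
	uint64_t inuse, bitmask;
	int bit, field, idx;

	for (field = 0; field < EX_NPCM; field++) {
		inuse = ~pc_map[field] & freemask[field];
		while (inuse != 0) {
			bit = __builtin_ctzll(inuse);	/* lowest set bit */
			bitmask = (uint64_t)1 << bit;
			idx = field * 64 + bit;
			printf("PV entry %d is in use\n", idx);
			inuse &= ~bitmask;	/* advance to the next one */
		}
	}
}
#endif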
7994 pmap_remove_pages(pmap_t pmap)
7997 pt_entry_t *pte, tpte;
7998 pt_entry_t PG_M, PG_RW, PG_V;
7999 struct spglist free;
8000 struct pv_chunklist free_chunks[PMAP_MEMDOM];
8001 vm_page_t m, mpte, mt;
8003 struct md_page *pvh;
8004 struct pv_chunk *pc, *npc;
8005 struct rwlock *lock;
8007 uint64_t inuse, bitmask;
8008 int allfree, field, freed, i, idx;
8009 boolean_t superpage;
8013 * Assert that the given pmap is only active on the current
8014 * CPU. Unfortunately, we cannot block another CPU from
8015 * activating the pmap while this function is executing.
8017 KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
8020 cpuset_t other_cpus;
8022 other_cpus = all_cpus;
8024 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
8025 CPU_AND(&other_cpus, &pmap->pm_active);
8027 KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
8032 PG_M = pmap_modified_bit(pmap);
8033 PG_V = pmap_valid_bit(pmap);
8034 PG_RW = pmap_rw_bit(pmap);
8036 for (i = 0; i < PMAP_MEMDOM; i++)
8037 TAILQ_INIT(&free_chunks[i]);
8040 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
8043 for (field = 0; field < _NPCM; field++) {
8044 inuse = ~pc->pc_map[field] & pc_freemask[field];
8045 while (inuse != 0) {
8047 bitmask = 1UL << bit;
8048 idx = field * 64 + bit;
8049 pv = &pc->pc_pventry[idx];
8052 pte = pmap_pdpe(pmap, pv->pv_va);
8054 pte = pmap_pdpe_to_pde(pte, pv->pv_va);
8056 if ((tpte & (PG_PS | PG_V)) == PG_V) {
8059 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
8061 pte = &pte[pmap_pte_index(pv->pv_va)];
8065 * Keep track whether 'tpte' is a
8066 * superpage explicitly instead of
8067 * relying on PG_PS being set.
8069 * This is because PG_PS is numerically
8070 * identical to PG_PTE_PAT and thus a
8071 * regular page could be mistaken for a superpage.
8077 if ((tpte & PG_V) == 0) {
8078 panic("bad pte va %lx pte %lx",
8083 * We cannot remove wired pages from a process' mapping at this time
8091 pa = tpte & PG_PS_FRAME;
8093 pa = tpte & PG_FRAME;
8095 m = PHYS_TO_VM_PAGE(pa);
8096 KASSERT(m->phys_addr == pa,
8097 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
8098 m, (uintmax_t)m->phys_addr,
8101 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
8102 m < &vm_page_array[vm_page_array_size],
8103 ("pmap_remove_pages: bad tpte %#jx",
8109 * Update the vm_page_t clean/reference bits.
8111 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8113 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
8119 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
8122 pc->pc_map[field] |= bitmask;
8124 pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
8125 pvh = pa_to_pvh(tpte & PG_PS_FRAME);
8126 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
8128 if (TAILQ_EMPTY(&pvh->pv_list)) {
8129 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
8130 if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
8131 TAILQ_EMPTY(&mt->md.pv_list))
8132 vm_page_aflag_clear(mt, PGA_WRITEABLE);
8134 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
8136 KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
8137 ("pmap_remove_pages: pte page not promoted"));
8138 pmap_resident_count_dec(pmap, 1);
8139 KASSERT(mpte->ref_count == NPTEPG,
8140 ("pmap_remove_pages: pte page reference count error"));
8141 mpte->ref_count = 0;
8142 pmap_add_delayed_free_list(mpte, &free, FALSE);
8145 pmap_resident_count_dec(pmap, 1);
8146 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
8148 if ((m->a.flags & PGA_WRITEABLE) != 0 &&
8149 TAILQ_EMPTY(&m->md.pv_list) &&
8150 (m->flags & PG_FICTITIOUS) == 0) {
8151 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8152 if (TAILQ_EMPTY(&pvh->pv_list))
8153 vm_page_aflag_clear(m, PGA_WRITEABLE);
8156 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
8160 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
8161 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
8162 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
8164 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
8165 TAILQ_INSERT_TAIL(&free_chunks[pc_to_domain(pc)], pc, pc_list);
8170 pmap_invalidate_all(pmap);
8171 pmap_pkru_deassign_all(pmap);
8172 free_pv_chunk_batch((struct pv_chunklist *)&free_chunks);
8174 vm_page_free_pages_toq(&free, true);
8178 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
8180 struct rwlock *lock;
8182 struct md_page *pvh;
8183 pt_entry_t *pte, mask;
8184 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
8186 int md_gen, pvh_gen;
8190 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8193 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8195 if (!PMAP_TRYLOCK(pmap)) {
8196 md_gen = m->md.pv_gen;
8200 if (md_gen != m->md.pv_gen) {
8205 pte = pmap_pte(pmap, pv->pv_va);
8208 PG_M = pmap_modified_bit(pmap);
8209 PG_RW = pmap_rw_bit(pmap);
8210 mask |= PG_RW | PG_M;
8213 PG_A = pmap_accessed_bit(pmap);
8214 PG_V = pmap_valid_bit(pmap);
8215 mask |= PG_V | PG_A;
8217 rv = (*pte & mask) == mask;
8222 if ((m->flags & PG_FICTITIOUS) == 0) {
8223 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
8224 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
8226 if (!PMAP_TRYLOCK(pmap)) {
8227 md_gen = m->md.pv_gen;
8228 pvh_gen = pvh->pv_gen;
8232 if (md_gen != m->md.pv_gen ||
8233 pvh_gen != pvh->pv_gen) {
8238 pte = pmap_pde(pmap, pv->pv_va);
8241 PG_M = pmap_modified_bit(pmap);
8242 PG_RW = pmap_rw_bit(pmap);
8243 mask |= PG_RW | PG_M;
8246 PG_A = pmap_accessed_bit(pmap);
8247 PG_V = pmap_valid_bit(pmap);
8248 mask |= PG_V | PG_A;
8250 rv = (*pte & mask) == mask;
8264 * Return whether or not the specified physical page was modified
8265 * in any physical maps.
8268 pmap_is_modified(vm_page_t m)
8271 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8272 ("pmap_is_modified: page %p is not managed", m));
8275 * If the page is not busied then this check is racy.
8277 if (!pmap_page_is_write_mapped(m))
8279 return (pmap_page_test_mappings(m, FALSE, TRUE));
8283 * pmap_is_prefaultable:
8285 * Return whether or not the specified virtual address is eligible for prefault.
8289 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
8292 pt_entry_t *pte, PG_V;
8295 PG_V = pmap_valid_bit(pmap);
8298 pde = pmap_pde(pmap, addr);
8299 if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
8300 pte = pmap_pde_to_pte(pde, addr);
8301 rv = (*pte & PG_V) == 0;
8308 * pmap_is_referenced:
8310 * Return whether or not the specified physical page was referenced
8311 * in any physical maps.
8314 pmap_is_referenced(vm_page_t m)
8317 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8318 ("pmap_is_referenced: page %p is not managed", m));
8319 return (pmap_page_test_mappings(m, TRUE, FALSE));
8323 * Clear the write and modified bits in each of the given page's mappings.
8326 pmap_remove_write(vm_page_t m)
8328 struct md_page *pvh;
8330 struct rwlock *lock;
8331 pv_entry_t next_pv, pv;
8333 pt_entry_t oldpte, *pte, PG_M, PG_RW;
8335 int pvh_gen, md_gen;
8337 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8338 ("pmap_remove_write: page %p is not managed", m));
8340 vm_page_assert_busied(m);
8341 if (!pmap_page_is_write_mapped(m))
8344 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8345 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
8346 pa_to_pvh(VM_PAGE_TO_PHYS(m));
8349 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
8351 if (!PMAP_TRYLOCK(pmap)) {
8352 pvh_gen = pvh->pv_gen;
8356 if (pvh_gen != pvh->pv_gen) {
8362 PG_RW = pmap_rw_bit(pmap);
8364 pde = pmap_pde(pmap, va);
8365 if ((*pde & PG_RW) != 0)
8366 (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
8367 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
8368 ("inconsistent pv lock %p %p for page %p",
8369 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
8372 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8374 if (!PMAP_TRYLOCK(pmap)) {
8375 pvh_gen = pvh->pv_gen;
8376 md_gen = m->md.pv_gen;
8380 if (pvh_gen != pvh->pv_gen ||
8381 md_gen != m->md.pv_gen) {
8387 PG_M = pmap_modified_bit(pmap);
8388 PG_RW = pmap_rw_bit(pmap);
8389 pde = pmap_pde(pmap, pv->pv_va);
8390 KASSERT((*pde & PG_PS) == 0,
8391 ("pmap_remove_write: found a 2mpage in page %p's pv list",
8393 pte = pmap_pde_to_pte(pde, pv->pv_va);
8396 if (oldpte & PG_RW) {
8397 if (!atomic_cmpset_long(pte, oldpte, oldpte &
8400 if ((oldpte & PG_M) != 0)
8402 pmap_invalidate_page(pmap, pv->pv_va);
8407 vm_page_aflag_clear(m, PGA_WRITEABLE);
8408 pmap_delayed_invl_wait(m);
8411 static __inline boolean_t
8412 safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
8415 if (!pmap_emulate_ad_bits(pmap))
8418 KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));
8421 * XWR = 010 or 110 will cause an unconditional EPT misconfiguration
8422 * so we don't allow the referenced (aka EPT_PG_READ) bit to be cleared
8423 * if the EPT_PG_WRITE bit is set.
8425 if ((pte & EPT_PG_WRITE) != 0)
8429 * XWR = 100 is allowed only if the PMAP_SUPPORTS_EXEC_ONLY capability is set.
8431 if ((pte & EPT_PG_EXECUTE) == 0 ||
8432 ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
8439 * pmap_ts_referenced:
8441 * Return a count of reference bits for a page, clearing those bits.
8442 * It is not necessary for every reference bit to be cleared, but it
8443 * is necessary that 0 only be returned when there are truly no
8444 * reference bits set.
8446 * As an optimization, update the page's dirty field if a modified bit is
8447 * found while counting reference bits. This opportunistic update can be
8448 * performed at low cost and can eliminate the need for some future calls
8449 * to pmap_is_modified(). However, since this function stops after
8450 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
8451 * dirty pages. Those dirty pages will only be detected by a future call
8452 * to pmap_is_modified().
8454 * A DI block is not needed within this function, because
8455 * invalidations are performed before the PV list lock is released.
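/*
 * Editor's sketch (not part of the original source): the reference-bit
 * sampling used further down in pmap_ts_referenced() for 2MB mappings.
 * The single PG_A bit of a superpage is shared by 512 4KB pages, so it is
 * only cleared when a simple hash of the physical page number, the virtual
 * superpage number, and the pmap address selects the page under test.
 * Constants are the usual amd64 values.
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <stdbool.h>
#include <stdint.h>

#define	EX_PAGE_SHIFT	12
#define	EX_PDRSHIFT	21
#define	EX_NPTEPG	512UL

static bool
example_should_clear_ref(uint64_t pa, uint64_t va, uintptr_t pmap_addr)
{
	/*
	 * Exactly one of the 512 4KB pages inside a given 2MB mapping makes
	 * this expression zero, and different mappings tend to select
	 * different pages.
	 */
	return ((((pa >> EX_PAGE_SHIFT) ^ (va >> EX_PDRSHIFT) ^ pmap_addr) &
	    (EX_NPTEPG - 1)) == 0);
}
#endif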
8459 pmap_ts_referenced(vm_page_t m)
8461 struct md_page *pvh;
8464 struct rwlock *lock;
8465 pd_entry_t oldpde, *pde;
8466 pt_entry_t *pte, PG_A, PG_M, PG_RW;
8469 int cleared, md_gen, not_cleared, pvh_gen;
8470 struct spglist free;
8473 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8474 ("pmap_ts_referenced: page %p is not managed", m));
8477 pa = VM_PAGE_TO_PHYS(m);
8478 lock = PHYS_TO_PV_LIST_LOCK(pa);
8479 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
8483 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
8484 goto small_mappings;
8490 if (!PMAP_TRYLOCK(pmap)) {
8491 pvh_gen = pvh->pv_gen;
8495 if (pvh_gen != pvh->pv_gen) {
8500 PG_A = pmap_accessed_bit(pmap);
8501 PG_M = pmap_modified_bit(pmap);
8502 PG_RW = pmap_rw_bit(pmap);
8504 pde = pmap_pde(pmap, pv->pv_va);
8506 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8508 * Although "oldpde" is mapping a 2MB page, because
8509 * this function is called at a 4KB page granularity,
8510 * we only update the 4KB page under test.
8514 if ((oldpde & PG_A) != 0) {
8516 * Since this reference bit is shared by 512 4KB
8517 * pages, it should not be cleared every time it is
8518 * tested. Apply a simple "hash" function on the
8519 * physical page number, the virtual superpage number,
8520 * and the pmap address to select one 4KB page out of
8521 * the 512 on which testing the reference bit will
8522 * result in clearing that reference bit. This
8523 * function is designed to avoid the selection of the
8524 * same 4KB page for every 2MB page mapping.
8526 * On demotion, a mapping that hasn't been referenced
8527 * is simply destroyed. To avoid the possibility of a
8528 * subsequent page fault on a demoted wired mapping,
8529 * always leave its reference bit set. Moreover,
8530 * since the superpage is wired, the current state of
8531 * its reference bit won't affect page replacement.
8533 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
8534 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
8535 (oldpde & PG_W) == 0) {
8536 if (safe_to_clear_referenced(pmap, oldpde)) {
8537 atomic_clear_long(pde, PG_A);
8538 pmap_invalidate_page(pmap, pv->pv_va);
8540 } else if (pmap_demote_pde_locked(pmap, pde,
8541 pv->pv_va, &lock)) {
8543 * Remove the mapping to a single page
8544 * so that a subsequent access may
8545 * repromote. Since the underlying
8546 * page table page is fully populated,
8547 * this removal never frees a page table page.
8551 va += VM_PAGE_TO_PHYS(m) - (oldpde &
8553 pte = pmap_pde_to_pte(pde, va);
8554 pmap_remove_pte(pmap, pte, va, *pde,
8556 pmap_invalidate_page(pmap, va);
8562 * The superpage mapping was removed
8563 * entirely and therefore 'pv' is no longer valid.
8571 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
8572 ("inconsistent pv lock %p %p for page %p",
8573 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
8578 /* Rotate the PV list if it has more than one entry. */
8579 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
8580 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
8581 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
8584 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
8586 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
8588 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
8595 if (!PMAP_TRYLOCK(pmap)) {
8596 pvh_gen = pvh->pv_gen;
8597 md_gen = m->md.pv_gen;
8601 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
8606 PG_A = pmap_accessed_bit(pmap);
8607 PG_M = pmap_modified_bit(pmap);
8608 PG_RW = pmap_rw_bit(pmap);
8609 pde = pmap_pde(pmap, pv->pv_va);
8610 KASSERT((*pde & PG_PS) == 0,
8611 ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
8613 pte = pmap_pde_to_pte(pde, pv->pv_va);
8614 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
8616 if ((*pte & PG_A) != 0) {
8617 if (safe_to_clear_referenced(pmap, *pte)) {
8618 atomic_clear_long(pte, PG_A);
8619 pmap_invalidate_page(pmap, pv->pv_va);
8621 } else if ((*pte & PG_W) == 0) {
8623 * Wired pages cannot be paged out so
8624 * doing accessed bit emulation for
8625 * them is wasted effort. We do the
8626 * hard work for unwired pages only.
8628 pmap_remove_pte(pmap, pte, pv->pv_va,
8629 *pde, &free, &lock);
8630 pmap_invalidate_page(pmap, pv->pv_va);
8635 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
8636 ("inconsistent pv lock %p %p for page %p",
8637 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
8642 /* Rotate the PV list if it has more than one entry. */
8643 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
8644 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
8645 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
8648 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
8649 not_cleared < PMAP_TS_REFERENCED_MAX);
8652 vm_page_free_pages_toq(&free, true);
8653 return (cleared + not_cleared);
8657 * Apply the given advice to the specified range of addresses within the
8658 * given pmap. Depending on the advice, clear the referenced and/or
8659 * modified flags in each mapping and set the mapped page's dirty field.
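/*
 * Editor's sketch (not part of the original source): the per-PTE decision
 * that pmap_advise() below applies to managed, valid 4KB mappings.  For
 * MADV_DONTNEED a dirty PTE first transfers its modified state to the page
 * (so a later pmap_is_modified() is unnecessary) and then loses PG_M and
 * PG_A; for MADV_FREE the dirty state is simply discarded, and otherwise
 * only the referenced bit is cleared.  Bit values follow the x86 PTE
 * layout; "mark_page_dirty" is a hypothetical callback.
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <stdint.h>

#define	EX_PG_A			0x020ULL
#define	EX_PG_M			0x040ULL
#define	EX_MADV_DONTNEED	4
#define	EX_MADV_FREE		5

static uint64_t
example_advise_pte(uint64_t pte, int advice, void (*mark_page_dirty)(uint64_t))
{
	if ((pte & EX_PG_M) != 0) {
		if (advice == EX_MADV_DONTNEED)
			mark_page_dirty(pte);	/* remember the dirty state */
		pte &= ~(EX_PG_M | EX_PG_A);
	} else if ((pte & EX_PG_A) != 0) {
		pte &= ~EX_PG_A;
	}
	return (pte);
}
#endif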
8662 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
8664 struct rwlock *lock;
8665 pml4_entry_t *pml4e;
8667 pd_entry_t oldpde, *pde;
8668 pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
8669 vm_offset_t va, va_next;
8673 if (advice != MADV_DONTNEED && advice != MADV_FREE)
8677 * A/D bit emulation requires an alternate code path when clearing
8678 * the modified and accessed bits below. Since this function is
8679 * advisory in nature we skip it entirely for pmaps that require
8680 * A/D bit emulation.
8682 if (pmap_emulate_ad_bits(pmap))
8685 PG_A = pmap_accessed_bit(pmap);
8686 PG_G = pmap_global_bit(pmap);
8687 PG_M = pmap_modified_bit(pmap);
8688 PG_V = pmap_valid_bit(pmap);
8689 PG_RW = pmap_rw_bit(pmap);
8691 pmap_delayed_invl_start();
8693 for (; sva < eva; sva = va_next) {
8694 pml4e = pmap_pml4e(pmap, sva);
8695 if (pml4e == NULL || (*pml4e & PG_V) == 0) {
8696 va_next = (sva + NBPML4) & ~PML4MASK;
8702 va_next = (sva + NBPDP) & ~PDPMASK;
8705 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
8706 if ((*pdpe & PG_V) == 0)
8708 if ((*pdpe & PG_PS) != 0) {
8709 KASSERT(va_next <= eva,
8710 ("partial update of non-transparent 1G mapping "
8711 "pdpe %#lx sva %#lx eva %#lx va_next %#lx",
8712 *pdpe, sva, eva, va_next));
8716 va_next = (sva + NBPDR) & ~PDRMASK;
8719 pde = pmap_pdpe_to_pde(pdpe, sva);
8721 if ((oldpde & PG_V) == 0)
8723 else if ((oldpde & PG_PS) != 0) {
8724 if ((oldpde & PG_MANAGED) == 0)
8727 if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) {
8732 * The large page mapping was destroyed.
8738 * Unless the page mappings are wired, remove the
8739 * mapping to a single page so that a subsequent
8740 * access may repromote. Choosing the last page
8741 * within the address range [sva, min(va_next, eva))
8742 * generally results in more repromotions. Since the
8743 * underlying page table page is fully populated, this
8744 * removal never frees a page table page.
8746 if ((oldpde & PG_W) == 0) {
8752 ("pmap_advise: no address gap"));
8753 pte = pmap_pde_to_pte(pde, va);
8754 KASSERT((*pte & PG_V) != 0,
8755 ("pmap_advise: invalid PTE"));
8756 pmap_remove_pte(pmap, pte, va, *pde, NULL,
8766 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
8768 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
8770 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8771 if (advice == MADV_DONTNEED) {
8773 * Future calls to pmap_is_modified()
8774 * can be avoided by making the page dirty now.
8777 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
8780 atomic_clear_long(pte, PG_M | PG_A);
8781 } else if ((*pte & PG_A) != 0)
8782 atomic_clear_long(pte, PG_A);
8786 if ((*pte & PG_G) != 0) {
8793 if (va != va_next) {
8794 pmap_invalidate_range(pmap, va, sva);
8799 pmap_invalidate_range(pmap, va, sva);
8802 pmap_invalidate_all(pmap);
8804 pmap_delayed_invl_finish();
8808 * Clear the modify bits on the specified physical page.
8811 pmap_clear_modify(vm_page_t m)
8813 struct md_page *pvh;
8815 pv_entry_t next_pv, pv;
8816 pd_entry_t oldpde, *pde;
8817 pt_entry_t *pte, PG_M, PG_RW;
8818 struct rwlock *lock;
8820 int md_gen, pvh_gen;
8822 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
8823 ("pmap_clear_modify: page %p is not managed", m));
8824 vm_page_assert_busied(m);
8826 if (!pmap_page_is_write_mapped(m))
8828 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
8829 pa_to_pvh(VM_PAGE_TO_PHYS(m));
8830 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
8833 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
8835 if (!PMAP_TRYLOCK(pmap)) {
8836 pvh_gen = pvh->pv_gen;
8840 if (pvh_gen != pvh->pv_gen) {
8845 PG_M = pmap_modified_bit(pmap);
8846 PG_RW = pmap_rw_bit(pmap);
8848 pde = pmap_pde(pmap, va);
8850 /* If oldpde has PG_RW set, then it also has PG_M set. */
8851 if ((oldpde & PG_RW) != 0 &&
8852 pmap_demote_pde_locked(pmap, pde, va, &lock) &&
8853 (oldpde & PG_W) == 0) {
8855 * Write protect the mapping to a single page so that
8856 * a subsequent write access may repromote.
8858 va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME);
8859 pte = pmap_pde_to_pte(pde, va);
8860 atomic_clear_long(pte, PG_M | PG_RW);
8862 pmap_invalidate_page(pmap, va);
8866 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
8868 if (!PMAP_TRYLOCK(pmap)) {
8869 md_gen = m->md.pv_gen;
8870 pvh_gen = pvh->pv_gen;
8874 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
8879 PG_M = pmap_modified_bit(pmap);
8880 PG_RW = pmap_rw_bit(pmap);
8881 pde = pmap_pde(pmap, pv->pv_va);
8882 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
8883 " a 2mpage in page %p's pv list", m));
8884 pte = pmap_pde_to_pte(pde, pv->pv_va);
8885 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
8886 atomic_clear_long(pte, PG_M);
8887 pmap_invalidate_page(pmap, pv->pv_va);
8895 * Miscellaneous support routines follow
8898 /* Adjust the properties for a leaf page table entry. */
8899 static __inline void
8900 pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask)
8904 opte = *(u_long *)pte;
8906 npte = opte & ~mask;
8908 } while (npte != opte && !atomic_fcmpset_long((u_long *)pte, &opte,
8913 * Map a set of physical memory pages into the kernel virtual
8914 * address space. Return a pointer to where it is mapped. This
8915 * routine is intended to be used for mapping device memory, NOT real memory.
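/*
 * Editor's sketch (not part of the original source): the page-rounding
 * arithmetic pmap_mapdev_internal() performs below.  The request is
 * expanded to whole pages, the mapping is created for the truncated
 * physical base, and the caller gets back a pointer adjusted by the
 * original sub-page offset.
 */
#if 0	/* standalone illustration; not part of the kernel build */
#include <stdint.h>

#define	EX_PAGE_SIZE	4096UL
#define	EX_PAGE_MASK	(EX_PAGE_SIZE - 1)

static void
example_round_mapping(uint64_t pa, uint64_t size,
    uint64_t *base, uint64_t *len, uint64_t *off)
{
	*off = pa & EX_PAGE_MASK;				/* sub-page offset */
	*len = (*off + size + EX_PAGE_MASK) & ~EX_PAGE_MASK;	/* round_page */
	*base = pa & ~EX_PAGE_MASK;				/* trunc_page */
	/* The pointer returned to the caller is (mapped_va + *off). */
}
#endif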
8919 pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, int flags)
8921 struct pmap_preinit_mapping *ppim;
8922 vm_offset_t va, offset;
8926 offset = pa & PAGE_MASK;
8927 size = round_page(offset + size);
8928 pa = trunc_page(pa);
8930 if (!pmap_initialized) {
8932 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
8933 ppim = pmap_preinit_mapping + i;
8934 if (ppim->va == 0) {
8938 ppim->va = virtual_avail;
8939 virtual_avail += size;
8945 panic("%s: too many preinit mappings", __func__);
8948 * If we have a preinit mapping, re-use it.
8950 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
8951 ppim = pmap_preinit_mapping + i;
8952 if (ppim->pa == pa && ppim->sz == size &&
8953 (ppim->mode == mode ||
8954 (flags & MAPDEV_SETATTR) == 0))
8955 return ((void *)(ppim->va + offset));
8958 * If the specified range of physical addresses fits within
8959 * the direct map window, use the direct map.
8961 if (pa < dmaplimit && pa + size <= dmaplimit) {
8962 va = PHYS_TO_DMAP(pa);
8963 if ((flags & MAPDEV_SETATTR) != 0) {
8964 PMAP_LOCK(kernel_pmap);
8965 i = pmap_change_props_locked(va, size,
8966 PROT_NONE, mode, flags);
8967 PMAP_UNLOCK(kernel_pmap);
8971 return ((void *)(va + offset));
8973 va = kva_alloc(size);
8975 panic("%s: Couldn't allocate KVA", __func__);
8977 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
8978 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
8979 pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
8980 if ((flags & MAPDEV_FLUSHCACHE) != 0)
8981 pmap_invalidate_cache_range(va, va + tmpsize);
8982 return ((void *)(va + offset));
8986 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
8989 return (pmap_mapdev_internal(pa, size, mode, MAPDEV_FLUSHCACHE |
8994 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
8997 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
9001 pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size)
9004 return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE,
9009 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
9012 return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK,
9013 MAPDEV_FLUSHCACHE));
9017 pmap_unmapdev(vm_offset_t va, vm_size_t size)
9019 struct pmap_preinit_mapping *ppim;
9023 /* If we handed out a direct map region in pmap_mapdev(), do nothing. */
9024 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
9026 offset = va & PAGE_MASK;
9027 size = round_page(offset + size);
9028 va = trunc_page(va);
9029 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
9030 ppim = pmap_preinit_mapping + i;
9031 if (ppim->va == va && ppim->sz == size) {
9032 if (pmap_initialized)
9038 if (va + size == virtual_avail)
9043 if (pmap_initialized) {
9044 pmap_qremove(va, atop(size));
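/*
 * Illustrative sketch only (not part of the original code): how a caller
 * might pair pmap_mapdev() with pmap_unmapdev() as described above.  The
 * device physical address and size are hypothetical.
 */
#if 0
static void
pmap_mapdev_usage_sketch(void)
{
	vm_paddr_t bar_pa = 0xfebc0000;		/* hypothetical device BAR */
	vm_size_t bar_sz = 2 * PAGE_SIZE;
	void *regs;

	/* Map the registers uncacheable; a KVA (or DMAP) pointer is returned. */
	regs = pmap_mapdev(bar_pa, bar_sz);
	/* ... access the device through "regs" ... */
	/* Release the mapping; a direct map address is left untouched. */
	pmap_unmapdev((vm_offset_t)regs, bar_sz);
}
#endif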
9050 * Tries to demote a 1GB page mapping.
9053 pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
9055 pdp_entry_t newpdpe, oldpdpe;
9056 pd_entry_t *firstpde, newpde, *pde;
9057 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
9061 PG_A = pmap_accessed_bit(pmap);
9062 PG_M = pmap_modified_bit(pmap);
9063 PG_V = pmap_valid_bit(pmap);
9064 PG_RW = pmap_rw_bit(pmap);
9066 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
9068 KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
9069 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
9070 if ((pdpg = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT |
9071 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
9072 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
9073 " in pmap %p", va, pmap);
9076 pdpgpa = VM_PAGE_TO_PHYS(pdpg);
9077 firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
9078 newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
9079 KASSERT((oldpdpe & PG_A) != 0,
9080 ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
9081 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
9082 ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
9086 * Initialize the page directory page.
9088 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
9094 * Demote the mapping.
9099 * Invalidate a stale recursive mapping of the page directory page.
9101 pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
9103 pmap_pdpe_demotions++;
9104 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
9105 " in pmap %p", va, pmap);
9110 * Sets the memory attribute for the specified page.
9113 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
9116 m->md.pat_mode = ma;
9119 * If "m" is a normal page, update its direct mapping. This update
9120 * can be relied upon to perform any cache operations that are
9121 * required for data coherence.
9123 if ((m->flags & PG_FICTITIOUS) == 0 &&
9124 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
9126 panic("memory attribute change on the direct map failed");
9130 * Changes the specified virtual address range's memory type to that given by
9131 * the parameter "mode". The specified virtual address range must be
9132 * completely contained within either the direct map or the kernel map. If
9133 * the virtual address range is contained within the kernel map, then the
9134 * memory type for each of the corresponding ranges of the direct map is also
9135 * changed. (The corresponding ranges of the direct map are those ranges that
9136 * map the same physical pages as the specified virtual address range.) These
9137 * changes to the direct map are necessary because Intel describes the
9138 * behavior of their processors as "undefined" if two or more mappings to the
9139 * same physical page have different memory types.
9141 * Returns zero if the change completed successfully, and either EINVAL or
9142 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
9143 * of the virtual address range was not mapped, and ENOMEM is returned if
9144 * there was insufficient memory available to complete the change. In the
9145 * latter case, the memory type may have been changed on some part of the
9146 * virtual address range or the direct map.
9149 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
9153 PMAP_LOCK(kernel_pmap);
9154 error = pmap_change_props_locked(va, size, PROT_NONE, mode,
9156 PMAP_UNLOCK(kernel_pmap);
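/*
 * Illustrative sketch only: the calling convention documented above, using
 * a hypothetical framebuffer range.  A zero return means that both the
 * kernel map range and its direct map aliases are now write-combining.
 */
#if 0
static int
pmap_change_attr_sketch(vm_offset_t fb_va, vm_size_t fb_sz)
{
	int error;

	error = pmap_change_attr(fb_va, fb_sz, PAT_WRITE_COMBINING);
	if (error == EINVAL) {
		/* Some part of the range was not mapped. */
	} else if (error == ENOMEM) {
		/* Demotion failed; attributes may have changed partially. */
	}
	return (error);
}
#endif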
9161 * Changes the specified virtual address range's protections to those
9162 * specified by "prot". Like pmap_change_attr(), protections for aliases
9163 * in the direct map are updated as well. Protections on aliasing mappings may
9164 * be a subset of the requested protections; for example, mappings in the direct
9165 * map are never executable.
9168 pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
9172 /* Only supported within the kernel map. */
9173 if (va < VM_MIN_KERNEL_ADDRESS)
9176 PMAP_LOCK(kernel_pmap);
9177 error = pmap_change_props_locked(va, size, prot, -1,
9178 MAPDEV_ASSERTVALID);
9179 PMAP_UNLOCK(kernel_pmap);
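/*
 * Illustrative sketch only: downgrading a kernel map range to read-only
 * per the description above.  "va" and "sz" are assumed to lie within the
 * kernel map; aliases in the direct map are downgraded as well.
 */
#if 0
static int
pmap_change_prot_sketch(vm_offset_t va, vm_size_t sz)
{

	return (pmap_change_prot(va, sz, VM_PROT_READ));
}
#endif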
9184 pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
9185 int mode, int flags)
9187 vm_offset_t base, offset, tmpva;
9188 vm_paddr_t pa_start, pa_end, pa_end1;
9190 pd_entry_t *pde, pde_bits, pde_mask;
9191 pt_entry_t *pte, pte_bits, pte_mask;
9195 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
9196 base = trunc_page(va);
9197 offset = va & PAGE_MASK;
9198 size = round_page(offset + size);
9201 * Only supported on kernel virtual addresses, including the direct
9202 * map but excluding the recursive map.
9204 if (base < DMAP_MIN_ADDRESS)
9208 * Construct our flag sets and masks. "bits" is the subset of
9209 * "mask" that will be set in each modified PTE.
9211 * Mappings in the direct map are never allowed to be executable.
9213 pde_bits = pte_bits = 0;
9214 pde_mask = pte_mask = 0;
9216 pde_bits |= pmap_cache_bits(kernel_pmap, mode, true);
9217 pde_mask |= X86_PG_PDE_CACHE;
9218 pte_bits |= pmap_cache_bits(kernel_pmap, mode, false);
9219 pte_mask |= X86_PG_PTE_CACHE;
9221 if (prot != VM_PROT_NONE) {
9222 if ((prot & VM_PROT_WRITE) != 0) {
9223 pde_bits |= X86_PG_RW;
9224 pte_bits |= X86_PG_RW;
9226 if ((prot & VM_PROT_EXECUTE) == 0 ||
9227 va < VM_MIN_KERNEL_ADDRESS) {
9231 pde_mask |= X86_PG_RW | pg_nx;
9232 pte_mask |= X86_PG_RW | pg_nx;
9236 * Pages that aren't mapped aren't supported. Also break down 2MB pages
9237 * into 4KB pages if required.
9239 for (tmpva = base; tmpva < base + size; ) {
9240 pdpe = pmap_pdpe(kernel_pmap, tmpva);
9241 if (pdpe == NULL || *pdpe == 0) {
9242 KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9243 ("%s: addr %#lx is not mapped", __func__, tmpva));
9246 if (*pdpe & PG_PS) {
9248 * If the current 1GB page already has the required
9249 * properties, then we need not demote this page. Just
9250 * increment tmpva to the next 1GB page frame.
9252 if ((*pdpe & pde_mask) == pde_bits) {
9253 tmpva = trunc_1gpage(tmpva) + NBPDP;
9258 * If the current offset aligns with a 1GB page frame
9259 * and there is at least 1GB left within the range, then
9260 * we need not break down this page into 2MB pages.
9262 if ((tmpva & PDPMASK) == 0 &&
9263 tmpva + PDPMASK < base + size) {
9267 if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
9270 pde = pmap_pdpe_to_pde(pdpe, tmpva);
9272 KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9273 ("%s: addr %#lx is not mapped", __func__, tmpva));
9278 * If the current 2MB page already has the required
9279 * properties, then we need not demote this page. Just
9280 * increment tmpva to the next 2MB page frame.
9282 if ((*pde & pde_mask) == pde_bits) {
9283 tmpva = trunc_2mpage(tmpva) + NBPDR;
9288 * If the current offset aligns with a 2MB page frame
9289 * and there is at least 2MB left within the range, then
9290 * we need not break down this page into 4KB pages.
9292 if ((tmpva & PDRMASK) == 0 &&
9293 tmpva + PDRMASK < base + size) {
9297 if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
9300 pte = pmap_pde_to_pte(pde, tmpva);
9302 KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
9303 ("%s: addr %#lx is not mapped", __func__, tmpva));
9311 * Ok, all the pages exist, so run through them updating their
9312 * properties if required.
9315 pa_start = pa_end = 0;
9316 for (tmpva = base; tmpva < base + size; ) {
9317 pdpe = pmap_pdpe(kernel_pmap, tmpva);
9318 if (*pdpe & PG_PS) {
9319 if ((*pdpe & pde_mask) != pde_bits) {
9320 pmap_pte_props(pdpe, pde_bits, pde_mask);
9323 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9324 (*pdpe & PG_PS_FRAME) < dmaplimit) {
9325 if (pa_start == pa_end) {
9326 /* Start physical address run. */
9327 pa_start = *pdpe & PG_PS_FRAME;
9328 pa_end = pa_start + NBPDP;
9329 } else if (pa_end == (*pdpe & PG_PS_FRAME))
9332 /* Run ended, update direct map. */
9333 error = pmap_change_props_locked(
9334 PHYS_TO_DMAP(pa_start),
9335 pa_end - pa_start, prot, mode,
9339 /* Start physical address run. */
9340 pa_start = *pdpe & PG_PS_FRAME;
9341 pa_end = pa_start + NBPDP;
9344 tmpva = trunc_1gpage(tmpva) + NBPDP;
9347 pde = pmap_pdpe_to_pde(pdpe, tmpva);
9349 if ((*pde & pde_mask) != pde_bits) {
9350 pmap_pte_props(pde, pde_bits, pde_mask);
9353 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9354 (*pde & PG_PS_FRAME) < dmaplimit) {
9355 if (pa_start == pa_end) {
9356 /* Start physical address run. */
9357 pa_start = *pde & PG_PS_FRAME;
9358 pa_end = pa_start + NBPDR;
9359 } else if (pa_end == (*pde & PG_PS_FRAME))
9362 /* Run ended, update direct map. */
9363 error = pmap_change_props_locked(
9364 PHYS_TO_DMAP(pa_start),
9365 pa_end - pa_start, prot, mode,
9369 /* Start physical address run. */
9370 pa_start = *pde & PG_PS_FRAME;
9371 pa_end = pa_start + NBPDR;
9374 tmpva = trunc_2mpage(tmpva) + NBPDR;
9376 pte = pmap_pde_to_pte(pde, tmpva);
9377 if ((*pte & pte_mask) != pte_bits) {
9378 pmap_pte_props(pte, pte_bits, pte_mask);
9381 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
9382 (*pte & PG_FRAME) < dmaplimit) {
9383 if (pa_start == pa_end) {
9384 /* Start physical address run. */
9385 pa_start = *pte & PG_FRAME;
9386 pa_end = pa_start + PAGE_SIZE;
9387 } else if (pa_end == (*pte & PG_FRAME))
9388 pa_end += PAGE_SIZE;
9390 /* Run ended, update direct map. */
9391 error = pmap_change_props_locked(
9392 PHYS_TO_DMAP(pa_start),
9393 pa_end - pa_start, prot, mode,
9397 /* Start physical address run. */
9398 pa_start = *pte & PG_FRAME;
9399 pa_end = pa_start + PAGE_SIZE;
9405 if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
9406 pa_end1 = MIN(pa_end, dmaplimit);
9407 if (pa_start != pa_end1)
9408 error = pmap_change_props_locked(PHYS_TO_DMAP(pa_start),
9409 pa_end1 - pa_start, prot, mode, flags);
9413 * Flush the CPU caches if required, to ensure that no data that should
9414 * not be cached remains cached.
9417 pmap_invalidate_range(kernel_pmap, base, tmpva);
9418 if ((flags & MAPDEV_FLUSHCACHE) != 0)
9419 pmap_invalidate_cache_range(base, tmpva);
9425 * Demotes any mapping within the direct map region that covers more than the
9426 * specified range of physical addresses. This range's size must be a power
9427 * of two and its starting address must be a multiple of its size. Since the
9428 * demotion does not change any attributes of the mapping, a TLB invalidation
9429 * is not mandatory. The caller may, however, request a TLB invalidation.
9432 pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
9441 KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
9442 KASSERT((base & (len - 1)) == 0,
9443 ("pmap_demote_DMAP: base is not a multiple of len"));
9444 if (len < NBPDP && base < dmaplimit) {
9445 va = PHYS_TO_DMAP(base);
9447 PMAP_LOCK(kernel_pmap);
9448 pdpe = pmap_pdpe(kernel_pmap, va);
9449 if ((*pdpe & X86_PG_V) == 0)
9450 panic("pmap_demote_DMAP: invalid PDPE");
9451 if ((*pdpe & PG_PS) != 0) {
9452 if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
9453 panic("pmap_demote_DMAP: PDPE failed");
9457 pde = pmap_pdpe_to_pde(pdpe, va);
9458 if ((*pde & X86_PG_V) == 0)
9459 panic("pmap_demote_DMAP: invalid PDE");
9460 if ((*pde & PG_PS) != 0) {
9461 if (!pmap_demote_pde(kernel_pmap, pde, va))
9462 panic("pmap_demote_DMAP: PDE failed");
9466 if (changed && invalidate)
9467 pmap_invalidate_page(kernel_pmap, va);
9468 PMAP_UNLOCK(kernel_pmap);
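/*
 * Illustrative sketch only: the contract stated above.  "len" must be a
 * power of two and "base" a multiple of "len"; demoting one 2MB run at a
 * 2MB-aligned physical address satisfies both conditions.
 */
#if 0
static void
pmap_demote_DMAP_sketch(vm_paddr_t base)
{

	MPASS(powerof2(NBPDR) && (base & (NBPDR - 1)) == 0);
	pmap_demote_DMAP(base, NBPDR, FALSE);
}
#endif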
9473 * Perform the pmap work for mincore(2). If the page is not both referenced and
9474 * modified by this pmap, return its physical address so that the caller can
9475 * find other mappings.
9478 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
9482 pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
9486 PG_A = pmap_accessed_bit(pmap);
9487 PG_M = pmap_modified_bit(pmap);
9488 PG_V = pmap_valid_bit(pmap);
9489 PG_RW = pmap_rw_bit(pmap);
9495 pdpe = pmap_pdpe(pmap, addr);
9498 if ((*pdpe & PG_V) != 0) {
9499 if ((*pdpe & PG_PS) != 0) {
9501 pa = ((pte & PG_PS_PDP_FRAME) | (addr & PDPMASK)) &
9503 val = MINCORE_PSIND(2);
9505 pdep = pmap_pde(pmap, addr);
9506 if (pdep != NULL && (*pdep & PG_V) != 0) {
9507 if ((*pdep & PG_PS) != 0) {
9509 /* Compute the physical address of the 4KB page. */
9510 pa = ((pte & PG_PS_FRAME) | (addr &
9511 PDRMASK)) & PG_FRAME;
9512 val = MINCORE_PSIND(1);
9514 pte = *pmap_pde_to_pte(pdep, addr);
9515 pa = pte & PG_FRAME;
9521 if ((pte & PG_V) != 0) {
9522 val |= MINCORE_INCORE;
9523 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
9524 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
9525 if ((pte & PG_A) != 0)
9526 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
9528 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
9529 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
9530 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
9539 pmap_pcid_alloc(pmap_t pmap, u_int cpuid)
9541 uint32_t gen, new_gen, pcid_next;
9543 CRITICAL_ASSERT(curthread);
9544 gen = PCPU_GET(pcid_gen);
9545 if (pmap->pm_pcids[cpuid].pm_pcid == PMAP_PCID_KERN)
9546 return (pti ? 0 : CR3_PCID_SAVE);
9547 if (pmap->pm_pcids[cpuid].pm_gen == gen)
9548 return (CR3_PCID_SAVE);
9549 pcid_next = PCPU_GET(pcid_next);
9550 KASSERT((!pti && pcid_next <= PMAP_PCID_OVERMAX) ||
9551 (pti && pcid_next <= PMAP_PCID_OVERMAX_KERN),
9552 ("cpu %d pcid_next %#x", cpuid, pcid_next));
9553 if ((!pti && pcid_next == PMAP_PCID_OVERMAX) ||
9554 (pti && pcid_next == PMAP_PCID_OVERMAX_KERN)) {
9558 PCPU_SET(pcid_gen, new_gen);
9559 pcid_next = PMAP_PCID_KERN + 1;
9563 pmap->pm_pcids[cpuid].pm_pcid = pcid_next;
9564 pmap->pm_pcids[cpuid].pm_gen = new_gen;
9565 PCPU_SET(pcid_next, pcid_next + 1);
9570 pmap_pcid_alloc_checked(pmap_t pmap, u_int cpuid)
9574 cached = pmap_pcid_alloc(pmap, cpuid);
9575 KASSERT(pmap->pm_pcids[cpuid].pm_pcid < PMAP_PCID_OVERMAX,
9576 ("pmap %p cpu %d pcid %#x", pmap, cpuid,
9577 pmap->pm_pcids[cpuid].pm_pcid));
9578 KASSERT(pmap->pm_pcids[cpuid].pm_pcid != PMAP_PCID_KERN ||
9579 pmap == kernel_pmap,
9580 ("non-kernel pmap pmap %p cpu %d pcid %#x",
9581 pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid));
9586 pmap_activate_sw_pti_post(struct thread *td, pmap_t pmap)
9589 PCPU_GET(tssp)->tss_rsp0 = pmap->pm_ucr3 != PMAP_NO_CR3 ?
9590 PCPU_GET(pti_rsp0) : (uintptr_t)td->td_md.md_stack_base;
9594 pmap_activate_sw_pcid_pti(struct thread *td, pmap_t pmap, u_int cpuid)
9597 uint64_t cached, cr3, kcr3, ucr3;
9599 KASSERT((read_rflags() & PSL_I) == 0,
9600 ("PCID needs interrupts disabled in pmap_activate_sw()"));
9602 /* See the comment in pmap_invalidate_page_pcid(). */
9603 if (PCPU_GET(ucr3_load_mask) != PMAP_UCR3_NOMASK) {
9604 PCPU_SET(ucr3_load_mask, PMAP_UCR3_NOMASK);
9605 old_pmap = PCPU_GET(curpmap);
9606 MPASS(old_pmap->pm_ucr3 != PMAP_NO_CR3);
9607 old_pmap->pm_pcids[cpuid].pm_gen = 0;
9610 cached = pmap_pcid_alloc_checked(pmap, cpuid);
9612 if ((cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
9613 load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid);
9614 PCPU_SET(curpmap, pmap);
9615 kcr3 = pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid;
9616 ucr3 = pmap->pm_ucr3 | pmap->pm_pcids[cpuid].pm_pcid |
9619 if (!cached && pmap->pm_ucr3 != PMAP_NO_CR3)
9620 PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
9622 PCPU_SET(kcr3, kcr3 | CR3_PCID_SAVE);
9623 PCPU_SET(ucr3, ucr3 | CR3_PCID_SAVE);
9625 PCPU_INC(pm_save_cnt);
9627 pmap_activate_sw_pti_post(td, pmap);
9631 pmap_activate_sw_pcid_nopti(struct thread *td __unused, pmap_t pmap,
9634 uint64_t cached, cr3;
9636 KASSERT((read_rflags() & PSL_I) == 0,
9637 ("PCID needs interrupts disabled in pmap_activate_sw()"));
9639 cached = pmap_pcid_alloc_checked(pmap, cpuid);
9641 if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3)
9642 load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
9644 PCPU_SET(curpmap, pmap);
9646 PCPU_INC(pm_save_cnt);
9650 pmap_activate_sw_nopcid_nopti(struct thread *td __unused, pmap_t pmap,
9651 u_int cpuid __unused)
9654 load_cr3(pmap->pm_cr3);
9655 PCPU_SET(curpmap, pmap);
9659 pmap_activate_sw_nopcid_pti(struct thread *td, pmap_t pmap,
9660 u_int cpuid __unused)
9663 pmap_activate_sw_nopcid_nopti(td, pmap, cpuid);
9664 PCPU_SET(kcr3, pmap->pm_cr3);
9665 PCPU_SET(ucr3, pmap->pm_ucr3);
9666 pmap_activate_sw_pti_post(td, pmap);
9669 DEFINE_IFUNC(static, void, pmap_activate_sw_mode, (struct thread *, pmap_t,
9673 if (pmap_pcid_enabled && pti)
9674 return (pmap_activate_sw_pcid_pti);
9675 else if (pmap_pcid_enabled && !pti)
9676 return (pmap_activate_sw_pcid_nopti);
9677 else if (!pmap_pcid_enabled && pti)
9678 return (pmap_activate_sw_nopcid_pti);
9679 else /* if (!pmap_pcid_enabled && !pti) */
9680 return (pmap_activate_sw_nopcid_nopti);
9684 pmap_activate_sw(struct thread *td)
9686 pmap_t oldpmap, pmap;
9689 oldpmap = PCPU_GET(curpmap);
9690 pmap = vmspace_pmap(td->td_proc->p_vmspace);
9691 if (oldpmap == pmap) {
9692 if (cpu_vendor_id != CPU_VENDOR_INTEL)
9696 cpuid = PCPU_GET(cpuid);
9698 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
9700 CPU_SET(cpuid, &pmap->pm_active);
9702 pmap_activate_sw_mode(td, pmap, cpuid);
9704 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
9706 CPU_CLR(cpuid, &oldpmap->pm_active);
9711 pmap_activate(struct thread *td)
9714 * invltlb_{invpcid,}_pcid_handler() is used to handle an
9715 * invalidate_all IPI, which checks for curpmap ==
9716 * smp_tlb_pmap. The below sequence of operations has a
9717 * window where %CR3 is loaded with the new pmap's PML4
9718 * address, but the curpmap value has not yet been updated.
9719 * This causes the invltlb IPI handler, which is called
9720 * between the updates, to execute as a NOP, which leaves
9721 * stale TLB entries.
9723 * Note that the most common use of pmap_activate_sw(), from
9724 * a context switch, is immune to this race, because
9725 * interrupts are disabled (while the thread lock is owned),
9726 * so the IPI is delayed until after curpmap is updated. Protect
9727 * other callers in a similar way, by disabling interrupts
9728 * around the %cr3 register reload and curpmap assignment.
9731 pmap_activate_sw(td);
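	/*
	 * A minimal sketch of the bracket described above; whether the
	 * surrounding calls are spinlock_enter()/spinlock_exit() or a raw
	 * interrupt disable is an assumption here, not taken from this
	 * file:
	 *
	 *	spinlock_enter();
	 *	pmap_activate_sw(td);
	 *	spinlock_exit();
	 */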
9736 pmap_activate_boot(pmap_t pmap)
9742 * The kernel_pmap must never be deactivated; we ensure that
9743 * by never activating it at all.
9745 MPASS(pmap != kernel_pmap);
9747 cpuid = PCPU_GET(cpuid);
9749 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
9751 CPU_SET(cpuid, &pmap->pm_active);
9753 PCPU_SET(curpmap, pmap);
9755 kcr3 = pmap->pm_cr3;
9756 if (pmap_pcid_enabled)
9757 kcr3 |= pmap->pm_pcids[cpuid].pm_pcid | CR3_PCID_SAVE;
9761 PCPU_SET(kcr3, kcr3);
9762 PCPU_SET(ucr3, PMAP_NO_CR3);
9766 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
9771 * Increase the starting virtual address of the given mapping if a
9772 * different alignment might result in more superpage mappings.
9775 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
9776 vm_offset_t *addr, vm_size_t size)
9778 vm_offset_t superpage_offset;
9782 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
9783 offset += ptoa(object->pg_color);
9784 superpage_offset = offset & PDRMASK;
9785 if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
9786 (*addr & PDRMASK) == superpage_offset)
9788 if ((*addr & PDRMASK) < superpage_offset)
9789 *addr = (*addr & ~PDRMASK) + superpage_offset;
9791 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
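/*
 * Worked example (illustrative only): with 2MB superpages (NBPDR) and a
 * colour that puts superpage_offset at 0x1000, a requested *addr of
 * 0x200000 is adjusted to 0x201000, so that object pages and virtual
 * addresses share the same 2MB-relative offset, a precondition for later
 * promotion.  This assumes the mapping is large enough (here at least
 * 2 * NBPDR - 0x1000 bytes) that the early size check above does not
 * return first.
 */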
9795 static unsigned long num_dirty_emulations;
9796 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW,
9797 &num_dirty_emulations, 0, NULL);
9799 static unsigned long num_accessed_emulations;
9800 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW,
9801 &num_accessed_emulations, 0, NULL);
9803 static unsigned long num_superpage_accessed_emulations;
9804 SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW,
9805 &num_superpage_accessed_emulations, 0, NULL);
9807 static unsigned long ad_emulation_superpage_promotions;
9808 SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW,
9809 &ad_emulation_superpage_promotions, 0, NULL);
9810 #endif /* INVARIANTS */
9813 pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
9816 struct rwlock *lock;
9817 #if VM_NRESERVLEVEL > 0
9821 pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;
9823 KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE,
9824 ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype));
9826 if (!pmap_emulate_ad_bits(pmap))
9829 PG_A = pmap_accessed_bit(pmap);
9830 PG_M = pmap_modified_bit(pmap);
9831 PG_V = pmap_valid_bit(pmap);
9832 PG_RW = pmap_rw_bit(pmap);
9838 pde = pmap_pde(pmap, va);
9839 if (pde == NULL || (*pde & PG_V) == 0)
9842 if ((*pde & PG_PS) != 0) {
9843 if (ftype == VM_PROT_READ) {
9845 atomic_add_long(&num_superpage_accessed_emulations, 1);
9853 pte = pmap_pde_to_pte(pde, va);
9854 if ((*pte & PG_V) == 0)
9857 if (ftype == VM_PROT_WRITE) {
9858 if ((*pte & PG_RW) == 0)
9861 * Set the modified and accessed bits simultaneously.
9863 * Intel EPT PTEs that do software emulation of A/D bits map
9864 * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively.
9865 * An EPT misconfiguration is triggered if the PTE is writable
9866 * but not readable (WR=10). This is avoided by setting PG_A
9867 * and PG_M simultaneously.
9869 *pte |= PG_M | PG_A;
9874 #if VM_NRESERVLEVEL > 0
9875 /* try to promote the mapping */
9876 if (va < VM_MAXUSER_ADDRESS)
9877 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
9881 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
9883 if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
9884 pmap_ps_enabled(pmap) &&
9885 (m->flags & PG_FICTITIOUS) == 0 &&
9886 vm_reserv_level_iffullpop(m) == 0) {
9887 pmap_promote_pde(pmap, pde, va, &lock);
9889 atomic_add_long(&ad_emulation_superpage_promotions, 1);
9895 if (ftype == VM_PROT_WRITE)
9896 atomic_add_long(&num_dirty_emulations, 1);
9898 atomic_add_long(&num_accessed_emulations, 1);
9900 rv = 0; /* success */
9909 pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
9914 pt_entry_t *pte, PG_V;
9918 PG_V = pmap_valid_bit(pmap);
9921 pml4 = pmap_pml4e(pmap, va);
9925 if ((*pml4 & PG_V) == 0)
9928 pdp = pmap_pml4e_to_pdpe(pml4, va);
9930 if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0)
9933 pde = pmap_pdpe_to_pde(pdp, va);
9935 if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0)
9938 pte = pmap_pde_to_pte(pde, va);
9947 * Get the kernel virtual address of a set of physical pages. If there are
9948 * physical addresses not covered by the DMAP perform a transient mapping
9949 * that will be removed when calling pmap_unmap_io_transient.
9951 * \param page The pages the caller wishes to obtain the virtual
9952 * address on the kernel memory map.
9953 * \param vaddr On return contains the kernel virtual memory address
9954 * of the pages passed in the page parameter.
9955 * \param count Number of pages passed in.
9956 * \param can_fault TRUE if the thread using the mapped pages can take
9957 * page faults, FALSE otherwise.
9959 * \returns TRUE if the caller must call pmap_unmap_io_transient when
9960 * finished or FALSE otherwise.
9964 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
9965 boolean_t can_fault)
9968 boolean_t needs_mapping;
9970 int cache_bits, error __unused, i;
9973 * Allocate any KVA space that we need, this is done in a separate
9974 * loop to prevent calling vmem_alloc while pinned.
9976 needs_mapping = FALSE;
9977 for (i = 0; i < count; i++) {
9978 paddr = VM_PAGE_TO_PHYS(page[i]);
9979 if (__predict_false(paddr >= dmaplimit)) {
9980 error = vmem_alloc(kernel_arena, PAGE_SIZE,
9981 M_BESTFIT | M_WAITOK, &vaddr[i]);
9982 KASSERT(error == 0, ("vmem_alloc failed: %d", error));
9983 needs_mapping = TRUE;
9985 vaddr[i] = PHYS_TO_DMAP(paddr);
9989 /* Exit early if everything is covered by the DMAP */
9994 * NB: The sequence of updating a page table followed by accesses
9995 * to the corresponding pages used in the !DMAP case is subject to
9996 * the situation described in the "AMD64 Architecture Programmer's
9997 * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special
9998 * Coherency Considerations". Therefore, issuing the INVLPG right
9999 * after modifying the PTE bits is crucial.
10003 for (i = 0; i < count; i++) {
10004 paddr = VM_PAGE_TO_PHYS(page[i]);
10005 if (paddr >= dmaplimit) {
10008 * Slow path: since we can take page faults
10009 * while these mappings are active, don't pin
10010 * the thread to the CPU; instead add a global
10011 * mapping visible to all CPUs.
10013 pmap_qenter(vaddr[i], &page[i], 1);
10015 pte = vtopte(vaddr[i]);
10016 cache_bits = pmap_cache_bits(kernel_pmap,
10017 page[i]->md.pat_mode, 0);
10018 pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
10025 return (needs_mapping);
10029 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
10030 boolean_t can_fault)
10037 for (i = 0; i < count; i++) {
10038 paddr = VM_PAGE_TO_PHYS(page[i]);
10039 if (paddr >= dmaplimit) {
10041 pmap_qremove(vaddr[i], 1);
10042 vmem_free(kernel_arena, vaddr[i], PAGE_SIZE);
10048 pmap_quick_enter_page(vm_page_t m)
10052 paddr = VM_PAGE_TO_PHYS(m);
10053 if (paddr < dmaplimit)
10054 return (PHYS_TO_DMAP(paddr));
10055 mtx_lock_spin(&qframe_mtx);
10056 KASSERT(*vtopte(qframe) == 0, ("qframe busy"));
10057 pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
10058 X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0));
10063 pmap_quick_remove_page(vm_offset_t addr)
10066 if (addr != qframe)
10068 pte_store(vtopte(qframe), 0);
10070 mtx_unlock_spin(&qframe_mtx);
10074 * Pdp pages from the large map are managed differently from either
10075 * kernel or user page table pages. They are permanently allocated at
10076 * initialization time, and their reference count is permanently set to
10077 * zero. The pml4 entries pointing to those pages are copied into
10078 * each allocated pmap.
10080 * In contrast, pd and pt pages are managed like user page table
10081 * pages. They are dynamically allocated, and their reference count
10082 * represents the number of valid entries within the page.
10085 pmap_large_map_getptp_unlocked(void)
10089 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
10091 if (m != NULL && (m->flags & PG_ZERO) == 0)
10097 pmap_large_map_getptp(void)
10101 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
10102 m = pmap_large_map_getptp_unlocked();
10104 PMAP_UNLOCK(kernel_pmap);
10106 PMAP_LOCK(kernel_pmap);
10107 /* Callers retry. */
10112 static pdp_entry_t *
10113 pmap_large_map_pdpe(vm_offset_t va)
10115 vm_pindex_t pml4_idx;
10118 pml4_idx = pmap_pml4e_index(va);
10119 KASSERT(LMSPML4I <= pml4_idx && pml4_idx < LMSPML4I + lm_ents,
10120 ("pmap_large_map_pdpe: va %#jx out of range idx %#jx LMSPML4I "
10122 (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
10123 KASSERT((kernel_pml4[pml4_idx] & X86_PG_V) != 0,
10124 ("pmap_large_map_pdpe: invalid pml4 for va %#jx idx %#jx "
10125 "LMSPML4I %#jx lm_ents %d",
10126 (uintmax_t)va, (uintmax_t)pml4_idx, LMSPML4I, lm_ents));
10127 mphys = kernel_pml4[pml4_idx] & PG_FRAME;
10128 return ((pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va));
10131 static pd_entry_t *
10132 pmap_large_map_pde(vm_offset_t va)
10139 pdpe = pmap_large_map_pdpe(va);
10141 m = pmap_large_map_getptp();
10144 mphys = VM_PAGE_TO_PHYS(m);
10145 *pdpe = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
10147 MPASS((*pdpe & X86_PG_PS) == 0);
10148 mphys = *pdpe & PG_FRAME;
10150 return ((pd_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pde_index(va));
10153 static pt_entry_t *
10154 pmap_large_map_pte(vm_offset_t va)
10161 pde = pmap_large_map_pde(va);
10163 m = pmap_large_map_getptp();
10166 mphys = VM_PAGE_TO_PHYS(m);
10167 *pde = mphys | X86_PG_A | X86_PG_RW | X86_PG_V | pg_nx;
10168 PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->ref_count++;
10170 MPASS((*pde & X86_PG_PS) == 0);
10171 mphys = *pde & PG_FRAME;
10173 return ((pt_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pte_index(va));
10177 pmap_large_map_kextract(vm_offset_t va)
10179 pdp_entry_t *pdpe, pdp;
10180 pd_entry_t *pde, pd;
10181 pt_entry_t *pte, pt;
10183 KASSERT(PMAP_ADDRESS_IN_LARGEMAP(va),
10184 ("not largemap range %#lx", (u_long)va));
10185 pdpe = pmap_large_map_pdpe(va);
10187 KASSERT((pdp & X86_PG_V) != 0,
10188 ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
10189 (u_long)pdpe, pdp));
10190 if ((pdp & X86_PG_PS) != 0) {
10191 KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
10192 ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
10193 (u_long)pdpe, pdp));
10194 return ((pdp & PG_PS_PDP_FRAME) | (va & PDPMASK));
10196 pde = pmap_pdpe_to_pde(pdpe, va);
10198 KASSERT((pd & X86_PG_V) != 0,
10199 ("invalid pd va %#lx pde %#lx pd %#lx", va, (u_long)pde, pd));
10200 if ((pd & X86_PG_PS) != 0)
10201 return ((pd & PG_PS_FRAME) | (va & PDRMASK));
10202 pte = pmap_pde_to_pte(pde, va);
10204 KASSERT((pt & X86_PG_V) != 0,
10205 ("invalid pte va %#lx pte %#lx pt %#lx", va, (u_long)pte, pt));
10206 return ((pt & PG_FRAME) | (va & PAGE_MASK));
10210 pmap_large_map_getva(vm_size_t len, vm_offset_t align, vm_offset_t phase,
10211 vmem_addr_t *vmem_res)
10215 * Large mappings are all but static. Consequently, there
10216 * is no point in waiting for an earlier allocation to be freed.
10219 return (vmem_xalloc(large_vmem, len, align, phase, 0, VMEM_ADDR_MIN,
10220 VMEM_ADDR_MAX, M_NOWAIT | M_BESTFIT, vmem_res));
10224 pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
10225 vm_memattr_t mattr)
10230 vm_offset_t va, inc;
10231 vmem_addr_t vmem_res;
10235 if (len == 0 || spa + len < spa)
10238 /* See if DMAP can serve. */
10239 if (spa + len <= dmaplimit) {
10240 va = PHYS_TO_DMAP(spa);
10241 *addr = (void *)va;
10242 return (pmap_change_attr(va, len, mattr));
10246 * No, allocate KVA. Fit the address with best possible
10247 * alignment for superpages. Fall back to a coarser alignment if that fails.
10251 if ((amd_feature & AMDID_PAGE1GB) != 0 && rounddown2(spa + len,
10252 NBPDP) >= roundup2(spa, NBPDP) + NBPDP)
10253 error = pmap_large_map_getva(len, NBPDP, spa & PDPMASK,
10255 if (error != 0 && rounddown2(spa + len, NBPDR) >= roundup2(spa,
10257 error = pmap_large_map_getva(len, NBPDR, spa & PDRMASK,
10260 error = pmap_large_map_getva(len, PAGE_SIZE, 0, &vmem_res);
10265 * Fill the pagetable. PG_M is not pre-set; we scan modified bits
10266 * in the pagetable to minimize flushing. No need to
10267 * invalidate TLB, since we only update invalid entries.
10269 PMAP_LOCK(kernel_pmap);
10270 for (pa = spa, va = vmem_res; len > 0; pa += inc, va += inc,
10272 if ((amd_feature & AMDID_PAGE1GB) != 0 && len >= NBPDP &&
10273 (pa & PDPMASK) == 0 && (va & PDPMASK) == 0) {
10274 pdpe = pmap_large_map_pdpe(va);
10276 *pdpe = pa | pg_g | X86_PG_PS | X86_PG_RW |
10277 X86_PG_V | X86_PG_A | pg_nx |
10278 pmap_cache_bits(kernel_pmap, mattr, TRUE);
10280 } else if (len >= NBPDR && (pa & PDRMASK) == 0 &&
10281 (va & PDRMASK) == 0) {
10282 pde = pmap_large_map_pde(va);
10284 *pde = pa | pg_g | X86_PG_PS | X86_PG_RW |
10285 X86_PG_V | X86_PG_A | pg_nx |
10286 pmap_cache_bits(kernel_pmap, mattr, TRUE);
10287 PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->
10291 pte = pmap_large_map_pte(va);
10293 *pte = pa | pg_g | X86_PG_RW | X86_PG_V |
10294 X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap,
10296 PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))->
10301 PMAP_UNLOCK(kernel_pmap);
10304 *addr = (void *)vmem_res;
10309 pmap_large_unmap(void *svaa, vm_size_t len)
10311 vm_offset_t sva, va;
10313 pdp_entry_t *pdpe, pdp;
10314 pd_entry_t *pde, pd;
10317 struct spglist spgf;
10319 sva = (vm_offset_t)svaa;
10320 if (len == 0 || sva + len < sva || (sva >= DMAP_MIN_ADDRESS &&
10321 sva + len <= DMAP_MIN_ADDRESS + dmaplimit))
10325 KASSERT(PMAP_ADDRESS_IN_LARGEMAP(sva) &&
10326 PMAP_ADDRESS_IN_LARGEMAP(sva + len - 1),
10327 ("not largemap range %#lx %#lx", (u_long)svaa, (u_long)svaa + len));
10328 PMAP_LOCK(kernel_pmap);
10329 for (va = sva; va < sva + len; va += inc) {
10330 pdpe = pmap_large_map_pdpe(va);
10332 KASSERT((pdp & X86_PG_V) != 0,
10333 ("invalid pdp va %#lx pdpe %#lx pdp %#lx", va,
10334 (u_long)pdpe, pdp));
10335 if ((pdp & X86_PG_PS) != 0) {
10336 KASSERT((amd_feature & AMDID_PAGE1GB) != 0,
10337 ("no 1G pages, va %#lx pdpe %#lx pdp %#lx", va,
10338 (u_long)pdpe, pdp));
10339 KASSERT((va & PDPMASK) == 0,
10340 ("PDPMASK bit set, va %#lx pdpe %#lx pdp %#lx", va,
10341 (u_long)pdpe, pdp));
10342 KASSERT(va + NBPDP <= sva + len,
10343 ("unmap covers partial 1GB page, sva %#lx va %#lx "
10344 "pdpe %#lx pdp %#lx len %#lx", sva, va,
10345 (u_long)pdpe, pdp, len));
10350 pde = pmap_pdpe_to_pde(pdpe, va);
10352 KASSERT((pd & X86_PG_V) != 0,
10353 ("invalid pd va %#lx pde %#lx pd %#lx", va,
10355 if ((pd & X86_PG_PS) != 0) {
10356 KASSERT((va & PDRMASK) == 0,
10357 ("PDRMASK bit set, va %#lx pde %#lx pd %#lx", va,
10359 KASSERT(va + NBPDR <= sva + len,
10360 ("unmap covers partial 2MB page, sva %#lx va %#lx "
10361 "pde %#lx pd %#lx len %#lx", sva, va, (u_long)pde,
10365 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
10367 if (m->ref_count == 0) {
10369 SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10373 pte = pmap_pde_to_pte(pde, va);
10374 KASSERT((*pte & X86_PG_V) != 0,
10375 ("invalid pte va %#lx pte %#lx pt %#lx", va,
10376 (u_long)pte, *pte));
10379 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pte));
10381 if (m->ref_count == 0) {
10383 SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10384 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pde));
10386 if (m->ref_count == 0) {
10388 SLIST_INSERT_HEAD(&spgf, m, plinks.s.ss);
10392 pmap_invalidate_range(kernel_pmap, sva, sva + len);
10393 PMAP_UNLOCK(kernel_pmap);
10394 vm_page_free_pages_toq(&spgf, false);
10395 vmem_free(large_vmem, sva, len);
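/*
 * Illustrative sketch only: mapping a hypothetical persistent-memory range
 * through the large map and releasing it again.  When the range fits below
 * dmaplimit, pmap_large_map() hands back a direct map address and
 * pmap_large_unmap() leaves it alone, as coded above.
 */
#if 0
static int
pmap_large_map_sketch(vm_paddr_t pm_pa, vm_size_t pm_len)
{
	void *kva;
	int error;

	error = pmap_large_map(pm_pa, pm_len, &kva, VM_MEMATTR_DEFAULT);
	if (error != 0)
		return (error);
	/* ... use the mapping at "kva" ... */
	pmap_large_unmap(kva, pm_len);
	return (0);
}
#endif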
10399 pmap_large_map_wb_fence_mfence(void)
10406 pmap_large_map_wb_fence_atomic(void)
10409 atomic_thread_fence_seq_cst();
10413 pmap_large_map_wb_fence_nop(void)
10417 DEFINE_IFUNC(static, void, pmap_large_map_wb_fence, (void))
10420 if (cpu_vendor_id != CPU_VENDOR_INTEL)
10421 return (pmap_large_map_wb_fence_mfence);
10422 else if ((cpu_stdext_feature & (CPUID_STDEXT_CLWB |
10423 CPUID_STDEXT_CLFLUSHOPT)) == 0)
10424 return (pmap_large_map_wb_fence_atomic);
10426 /* clflush is sufficiently strongly ordered */
10427 return (pmap_large_map_wb_fence_nop);
10431 pmap_large_map_flush_range_clwb(vm_offset_t va, vm_size_t len)
10434 for (; len > 0; len -= cpu_clflush_line_size,
10435 va += cpu_clflush_line_size)
10440 pmap_large_map_flush_range_clflushopt(vm_offset_t va, vm_size_t len)
10443 for (; len > 0; len -= cpu_clflush_line_size,
10444 va += cpu_clflush_line_size)
10449 pmap_large_map_flush_range_clflush(vm_offset_t va, vm_size_t len)
10452 for (; len > 0; len -= cpu_clflush_line_size,
10453 va += cpu_clflush_line_size)
10458 pmap_large_map_flush_range_nop(vm_offset_t sva __unused, vm_size_t len __unused)
10462 DEFINE_IFUNC(static, void, pmap_large_map_flush_range, (vm_offset_t, vm_size_t))
10465 if ((cpu_stdext_feature & CPUID_STDEXT_CLWB) != 0)
10466 return (pmap_large_map_flush_range_clwb);
10467 else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0)
10468 return (pmap_large_map_flush_range_clflushopt);
10469 else if ((cpu_feature & CPUID_CLFSH) != 0)
10470 return (pmap_large_map_flush_range_clflush);
10472 return (pmap_large_map_flush_range_nop);
10476 pmap_large_map_wb_large(vm_offset_t sva, vm_offset_t eva)
10478 volatile u_long *pe;
10484 for (va = sva; va < eva; va += inc) {
10486 if ((amd_feature & AMDID_PAGE1GB) != 0) {
10487 pe = (volatile u_long *)pmap_large_map_pdpe(va);
10489 if ((p & X86_PG_PS) != 0)
10493 pe = (volatile u_long *)pmap_large_map_pde(va);
10495 if ((p & X86_PG_PS) != 0)
10499 pe = (volatile u_long *)pmap_large_map_pte(va);
10503 seen_other = false;
10505 if ((p & X86_PG_AVAIL1) != 0) {
10507 * Spin-wait for the end of a parallel
10514 * If we saw other write-back
10515 * occurring, we cannot rely on PG_M to
10516 * indicate state of the cache. The
10517 * PG_M bit is cleared before the
10518 * flush to avoid ignoring new writes,
10519 * and writes which are relevant for
10520 * us might happen after.
10526 if ((p & X86_PG_M) != 0 || seen_other) {
10527 if (!atomic_fcmpset_long(pe, &p,
10528 (p & ~X86_PG_M) | X86_PG_AVAIL1))
10530 * If we saw PG_M without
10531 * PG_AVAIL1, and then on the
10532 * next attempt we do not
10533 * observe either PG_M or
10534 * PG_AVAIL1, the other
10535 * write-back started after us
10536 * and finished before us. We
10537 * can rely on it doing our work for us.
10541 pmap_large_map_flush_range(va, inc);
10542 atomic_clear_long(pe, X86_PG_AVAIL1);
10551 * Write-back cache lines for the given address range.
10553 * Must be called only on the range or sub-range returned from
10554 * pmap_large_map(). Must not be called on the coalesced ranges.
10556 * Does nothing on CPUs without support for the CLWB, CLFLUSHOPT, or
10557 * CLFLUSH instructions.
10560 pmap_large_map_wb(void *svap, vm_size_t len)
10562 vm_offset_t eva, sva;
10564 sva = (vm_offset_t)svap;
10566 pmap_large_map_wb_fence();
10567 if (sva >= DMAP_MIN_ADDRESS && eva <= DMAP_MIN_ADDRESS + dmaplimit) {
10568 pmap_large_map_flush_range(sva, len);
10570 KASSERT(sva >= LARGEMAP_MIN_ADDRESS &&
10571 eva <= LARGEMAP_MIN_ADDRESS + lm_ents * NBPML4,
10572 ("pmap_large_map_wb: not largemap %#lx %#lx", sva, len));
10573 pmap_large_map_wb_large(sva, eva);
10575 pmap_large_map_wb_fence();
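/*
 * Illustrative sketch only: after storing to a range obtained from
 * pmap_large_map(), push the dirty cache lines out with
 * pmap_large_map_wb(), as the comment above requires.
 */
#if 0
static void
pmap_large_map_wb_sketch(void *kva, vm_size_t len)
{

	memset(kva, 0, len);		/* example stores */
	pmap_large_map_wb(kva, len);	/* write back the touched lines */
}
#endif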
10579 pmap_pti_alloc_page(void)
10583 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10584 m = vm_page_grab(pti_obj, pti_pg_idx++, VM_ALLOC_NOBUSY |
10585 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
10590 pmap_pti_free_page(vm_page_t m)
10593 KASSERT(m->ref_count > 0, ("page %p not referenced", m));
10594 if (!vm_page_unwire_noq(m))
10596 vm_page_free_zero(m);
10601 pmap_pti_init(void)
10610 pti_obj = vm_pager_allocate(OBJT_PHYS, NULL, 0, VM_PROT_ALL, 0, NULL);
10611 VM_OBJECT_WLOCK(pti_obj);
10612 pml4_pg = pmap_pti_alloc_page();
10613 pti_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4_pg));
10614 for (va = VM_MIN_KERNEL_ADDRESS; va <= VM_MAX_KERNEL_ADDRESS &&
10615 va >= VM_MIN_KERNEL_ADDRESS && va > NBPML4; va += NBPML4) {
10616 pdpe = pmap_pti_pdpe(va);
10617 pmap_pti_wire_pte(pdpe);
10619 pmap_pti_add_kva_locked((vm_offset_t)&__pcpu[0],
10620 (vm_offset_t)&__pcpu[0] + sizeof(__pcpu[0]) * MAXCPU, false);
10621 pmap_pti_add_kva_locked((vm_offset_t)idt, (vm_offset_t)idt +
10622 sizeof(struct gate_descriptor) * NIDT, false);
10624 /* Doublefault stack IST 1 */
10625 va = __pcpu[i].pc_common_tss.tss_ist1 + sizeof(struct nmi_pcpu);
10626 pmap_pti_add_kva_locked(va - DBLFAULT_STACK_SIZE, va, false);
10627 /* NMI stack IST 2 */
10628 va = __pcpu[i].pc_common_tss.tss_ist2 + sizeof(struct nmi_pcpu);
10629 pmap_pti_add_kva_locked(va - NMI_STACK_SIZE, va, false);
10630 /* MC# stack IST 3 */
10631 va = __pcpu[i].pc_common_tss.tss_ist3 +
10632 sizeof(struct nmi_pcpu);
10633 pmap_pti_add_kva_locked(va - MCE_STACK_SIZE, va, false);
10634 /* DB# stack IST 4 */
10635 va = __pcpu[i].pc_common_tss.tss_ist4 + sizeof(struct nmi_pcpu);
10636 pmap_pti_add_kva_locked(va - DBG_STACK_SIZE, va, false);
10638 pmap_pti_add_kva_locked((vm_offset_t)kernphys + KERNBASE,
10639 (vm_offset_t)etext, true);
10640 pti_finalized = true;
10641 VM_OBJECT_WUNLOCK(pti_obj);
10643 SYSINIT(pmap_pti, SI_SUB_CPU + 1, SI_ORDER_ANY, pmap_pti_init, NULL);
10645 static pdp_entry_t *
10646 pmap_pti_pdpe(vm_offset_t va)
10648 pml4_entry_t *pml4e;
10651 vm_pindex_t pml4_idx;
10654 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10656 pml4_idx = pmap_pml4e_index(va);
10657 pml4e = &pti_pml4[pml4_idx];
10661 panic("pml4 alloc after finalization\n");
10662 m = pmap_pti_alloc_page();
10664 pmap_pti_free_page(m);
10665 mphys = *pml4e & ~PAGE_MASK;
10667 mphys = VM_PAGE_TO_PHYS(m);
10668 *pml4e = mphys | X86_PG_RW | X86_PG_V;
10671 mphys = *pml4e & ~PAGE_MASK;
10673 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys) + pmap_pdpe_index(va);
10678 pmap_pti_wire_pte(void *pte)
10682 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10683 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
10688 pmap_pti_unwire_pde(void *pde, bool only_ref)
10692 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10693 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde));
10694 MPASS(m->ref_count > 0);
10695 MPASS(only_ref || m->ref_count > 1);
10696 pmap_pti_free_page(m);
10700 pmap_pti_unwire_pte(void *pte, vm_offset_t va)
10705 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10706 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte));
10707 MPASS(m->ref_count > 0);
10708 if (pmap_pti_free_page(m)) {
10709 pde = pmap_pti_pde(va);
10710 MPASS((*pde & (X86_PG_PS | X86_PG_V)) == X86_PG_V);
10712 pmap_pti_unwire_pde(pde, false);
10716 static pd_entry_t *
10717 pmap_pti_pde(vm_offset_t va)
10722 vm_pindex_t pd_idx;
10725 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10727 pdpe = pmap_pti_pdpe(va);
10729 m = pmap_pti_alloc_page();
10731 pmap_pti_free_page(m);
10732 MPASS((*pdpe & X86_PG_PS) == 0);
10733 mphys = *pdpe & ~PAGE_MASK;
10735 mphys = VM_PAGE_TO_PHYS(m);
10736 *pdpe = mphys | X86_PG_RW | X86_PG_V;
10739 MPASS((*pdpe & X86_PG_PS) == 0);
10740 mphys = *pdpe & ~PAGE_MASK;
10743 pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
10744 pd_idx = pmap_pde_index(va);
10749 static pt_entry_t *
10750 pmap_pti_pte(vm_offset_t va, bool *unwire_pde)
10757 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10759 pde = pmap_pti_pde(va);
10760 if (unwire_pde != NULL) {
10761 *unwire_pde = true;
10762 pmap_pti_wire_pte(pde);
10765 m = pmap_pti_alloc_page();
10767 pmap_pti_free_page(m);
10768 MPASS((*pde & X86_PG_PS) == 0);
10769 mphys = *pde & ~(PAGE_MASK | pg_nx);
10771 mphys = VM_PAGE_TO_PHYS(m);
10772 *pde = mphys | X86_PG_RW | X86_PG_V;
10773 if (unwire_pde != NULL)
10774 *unwire_pde = false;
10777 MPASS((*pde & X86_PG_PS) == 0);
10778 mphys = *pde & ~(PAGE_MASK | pg_nx);
10781 pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
10782 pte += pmap_pte_index(va);
10788 pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva, bool exec)
10792 pt_entry_t *pte, ptev;
10795 VM_OBJECT_ASSERT_WLOCKED(pti_obj);
10797 sva = trunc_page(sva);
10798 MPASS(sva > VM_MAXUSER_ADDRESS);
10799 eva = round_page(eva);
10801 for (; sva < eva; sva += PAGE_SIZE) {
10802 pte = pmap_pti_pte(sva, &unwire_pde);
10803 pa = pmap_kextract(sva);
10804 ptev = pa | X86_PG_RW | X86_PG_V | X86_PG_A | X86_PG_G |
10805 (exec ? 0 : pg_nx) | pmap_cache_bits(kernel_pmap,
10806 VM_MEMATTR_DEFAULT, FALSE);
10808 pte_store(pte, ptev);
10809 pmap_pti_wire_pte(pte);
10811 KASSERT(!pti_finalized,
10812 ("pti overlap after fin %#lx %#lx %#lx",
10814 KASSERT(*pte == ptev,
10815 ("pti non-identical pte after fin %#lx %#lx %#lx",
10819 pde = pmap_pti_pde(sva);
10820 pmap_pti_unwire_pde(pde, true);
10826 pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec)
10831 VM_OBJECT_WLOCK(pti_obj);
10832 pmap_pti_add_kva_locked(sva, eva, exec);
10833 VM_OBJECT_WUNLOCK(pti_obj);
10837 pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva)
10844 sva = rounddown2(sva, PAGE_SIZE);
10845 MPASS(sva > VM_MAXUSER_ADDRESS);
10846 eva = roundup2(eva, PAGE_SIZE);
10848 VM_OBJECT_WLOCK(pti_obj);
10849 for (va = sva; va < eva; va += PAGE_SIZE) {
10850 pte = pmap_pti_pte(va, NULL);
10851 KASSERT((*pte & X86_PG_V) != 0,
10852 ("invalid pte va %#lx pte %#lx pt %#lx", va,
10853 (u_long)pte, *pte));
10855 pmap_pti_unwire_pte(pte, va);
10857 pmap_invalidate_range(kernel_pmap, sva, eva);
10858 VM_OBJECT_WUNLOCK(pti_obj);
10862 pkru_dup_range(void *ctx __unused, void *data)
10864 struct pmap_pkru_range *node, *new_node;
10866 new_node = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
10867 if (new_node == NULL)
10870 memcpy(new_node, node, sizeof(*node));
10875 pkru_free_range(void *ctx __unused, void *node)
10878 uma_zfree(pmap_pkru_ranges_zone, node);
10882 pmap_pkru_assign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
10885 struct pmap_pkru_range *ppr;
10888 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10889 MPASS(pmap->pm_type == PT_X86);
10890 MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
10891 if ((flags & AMD64_PKRU_EXCL) != 0 &&
10892 !rangeset_check_empty(&pmap->pm_pkru, sva, eva))
10894 ppr = uma_zalloc(pmap_pkru_ranges_zone, M_NOWAIT);
10897 ppr->pkru_keyidx = keyidx;
10898 ppr->pkru_flags = flags & AMD64_PKRU_PERSIST;
10899 error = rangeset_insert(&pmap->pm_pkru, sva, eva, ppr);
10901 uma_zfree(pmap_pkru_ranges_zone, ppr);
10906 pmap_pkru_deassign(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
10909 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10910 MPASS(pmap->pm_type == PT_X86);
10911 MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
10912 return (rangeset_remove(&pmap->pm_pkru, sva, eva));
10916 pmap_pkru_deassign_all(pmap_t pmap)
10919 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10920 if (pmap->pm_type == PT_X86 &&
10921 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0)
10922 rangeset_remove_all(&pmap->pm_pkru);
10926 pmap_pkru_same(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
10928 struct pmap_pkru_range *ppr, *prev_ppr;
10931 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10932 if (pmap->pm_type != PT_X86 ||
10933 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
10934 sva >= VM_MAXUSER_ADDRESS)
10936 MPASS(eva <= VM_MAXUSER_ADDRESS);
10937 for (va = sva; va < eva; prev_ppr = ppr) {
10938 ppr = rangeset_lookup(&pmap->pm_pkru, va);
10941 else if ((ppr == NULL) ^ (prev_ppr == NULL))
10947 if (prev_ppr->pkru_keyidx != ppr->pkru_keyidx)
10949 va = ppr->pkru_rs_el.re_end;
10955 pmap_pkru_get(pmap_t pmap, vm_offset_t va)
10957 struct pmap_pkru_range *ppr;
10959 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10960 if (pmap->pm_type != PT_X86 ||
10961 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0 ||
10962 va >= VM_MAXUSER_ADDRESS)
10964 ppr = rangeset_lookup(&pmap->pm_pkru, va);
10966 return (X86_PG_PKU(ppr->pkru_keyidx));
10971 pred_pkru_on_remove(void *ctx __unused, void *r)
10973 struct pmap_pkru_range *ppr;
10976 return ((ppr->pkru_flags & AMD64_PKRU_PERSIST) == 0);
10980 pmap_pkru_on_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
10983 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
10984 if (pmap->pm_type == PT_X86 &&
10985 (cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
10986 rangeset_remove_pred(&pmap->pm_pkru, sva, eva,
10987 pred_pkru_on_remove);
10992 pmap_pkru_copy(pmap_t dst_pmap, pmap_t src_pmap)
10995 PMAP_LOCK_ASSERT(dst_pmap, MA_OWNED);
10996 PMAP_LOCK_ASSERT(src_pmap, MA_OWNED);
10997 MPASS(dst_pmap->pm_type == PT_X86);
10998 MPASS(src_pmap->pm_type == PT_X86);
10999 MPASS((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0);
11000 if (src_pmap->pm_pkru.rs_data_ctx == NULL)
11002 return (rangeset_copy(&dst_pmap->pm_pkru, &src_pmap->pm_pkru));
11006 pmap_pkru_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
11009 pml4_entry_t *pml4e;
11011 pd_entry_t newpde, ptpaddr, *pde;
11012 pt_entry_t newpte, *ptep, pte;
11013 vm_offset_t va, va_next;
11016 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
11017 MPASS(pmap->pm_type == PT_X86);
11018 MPASS(keyidx <= PMAP_MAX_PKRU_IDX);
11020 for (changed = false, va = sva; va < eva; va = va_next) {
11021 pml4e = pmap_pml4e(pmap, va);
11022 if (pml4e == NULL || (*pml4e & X86_PG_V) == 0) {
11023 va_next = (va + NBPML4) & ~PML4MASK;
11029 pdpe = pmap_pml4e_to_pdpe(pml4e, va);
11030 if ((*pdpe & X86_PG_V) == 0) {
11031 va_next = (va + NBPDP) & ~PDPMASK;
11037 va_next = (va + NBPDR) & ~PDRMASK;
11041 pde = pmap_pdpe_to_pde(pdpe, va);
11046 MPASS((ptpaddr & X86_PG_V) != 0);
11047 if ((ptpaddr & PG_PS) != 0) {
11048 if (va + NBPDR == va_next && eva >= va_next) {
11049 newpde = (ptpaddr & ~X86_PG_PKU_MASK) |
11050 X86_PG_PKU(keyidx);
11051 if (newpde != ptpaddr) {
11056 } else if (!pmap_demote_pde(pmap, pde, va)) {
11064 for (ptep = pmap_pde_to_pte(pde, va); va != va_next;
11065 ptep++, va += PAGE_SIZE) {
11067 if ((pte & X86_PG_V) == 0)
11069 newpte = (pte & ~X86_PG_PKU_MASK) | X86_PG_PKU(keyidx);
11070 if (newpte != pte) {
11077 pmap_invalidate_range(pmap, sva, eva);
11081 pmap_pkru_check_uargs(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
11082 u_int keyidx, int flags)
11085 if (pmap->pm_type != PT_X86 || keyidx > PMAP_MAX_PKRU_IDX ||
11086 (flags & ~(AMD64_PKRU_PERSIST | AMD64_PKRU_EXCL)) != 0)
11088 if (eva <= sva || eva > VM_MAXUSER_ADDRESS)
11090 if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) == 0)
11096 pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
11101 sva = trunc_page(sva);
11102 eva = round_page(eva);
11103 error = pmap_pkru_check_uargs(pmap, sva, eva, keyidx, flags);
11108 error = pmap_pkru_assign(pmap, sva, eva, keyidx, flags);
11110 pmap_pkru_update_range(pmap, sva, eva, keyidx);
11112 if (error != ENOMEM)
11120 pmap_pkru_clear(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
11124 sva = trunc_page(sva);
11125 eva = round_page(eva);
11126 error = pmap_pkru_check_uargs(pmap, sva, eva, 0, 0);
11131 error = pmap_pkru_deassign(pmap, sva, eva);
11133 pmap_pkru_update_range(pmap, sva, eva, 0);
11135 if (error != ENOMEM)
11143 * Track a range of the kernel's virtual address space that is contiguous
11144 * in various mapping attributes.
11146 struct pmap_kernel_map_range {
11155 sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
11161 if (eva <= range->sva)
11164 pat_idx = pmap_pat_index(kernel_pmap, range->attrs, true);
11165 for (i = 0; i < PAT_INDEX_SIZE; i++)
11166 if (pat_index[i] == pat_idx)
11170 case PAT_WRITE_BACK:
11173 case PAT_WRITE_THROUGH:
11176 case PAT_UNCACHEABLE:
11182 case PAT_WRITE_PROTECTED:
11185 case PAT_WRITE_COMBINING:
11189 printf("%s: unknown PAT mode %#x for range 0x%016lx-0x%016lx\n",
11190 __func__, pat_idx, range->sva, eva);
11195 sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %s %d %d %d\n",
11197 (range->attrs & X86_PG_RW) != 0 ? 'w' : '-',
11198 (range->attrs & pg_nx) != 0 ? '-' : 'x',
11199 (range->attrs & X86_PG_U) != 0 ? 'u' : 's',
11200 (range->attrs & X86_PG_G) != 0 ? 'g' : '-',
11201 mode, range->pdpes, range->pdes, range->ptes);
11203 /* Reset to sentinel value. */
11204 range->sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
11205 NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
11206 NPDEPG - 1, NPTEPG - 1);
11210 * Determine whether the attributes specified by a page table entry match those
11211 * being tracked by the current range. This is not quite as simple as a direct
11212 * flag comparison since some PAT modes have multiple representations.
11215 sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
11217 pt_entry_t diff, mask;
11219 mask = X86_PG_G | X86_PG_RW | X86_PG_U | X86_PG_PDE_CACHE | pg_nx;
11220 diff = (range->attrs ^ attrs) & mask;
11223 if ((diff & ~X86_PG_PDE_PAT) == 0 &&
11224 pmap_pat_index(kernel_pmap, range->attrs, true) ==
11225 pmap_pat_index(kernel_pmap, attrs, true))
11231 sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
11235 memset(range, 0, sizeof(*range));
11237 range->attrs = attrs;
11241 * Given a leaf PTE, derive the mapping's attributes. If they do not match
11242 * those of the current run, dump the address range and its attributes, and
11246 sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
11247 vm_offset_t va, pml4_entry_t pml4e, pdp_entry_t pdpe, pd_entry_t pde,
11252 attrs = pml4e & (X86_PG_RW | X86_PG_U | pg_nx);
11254 attrs |= pdpe & pg_nx;
11255 attrs &= pg_nx | (pdpe & (X86_PG_RW | X86_PG_U));
11256 if ((pdpe & PG_PS) != 0) {
11257 attrs |= pdpe & (X86_PG_G | X86_PG_PDE_CACHE);
11258 } else if (pde != 0) {
11259 attrs |= pde & pg_nx;
11260 attrs &= pg_nx | (pde & (X86_PG_RW | X86_PG_U));
11262 if ((pde & PG_PS) != 0) {
11263 attrs |= pde & (X86_PG_G | X86_PG_PDE_CACHE);
11264 } else if (pte != 0) {
11265 attrs |= pte & pg_nx;
11266 attrs &= pg_nx | (pte & (X86_PG_RW | X86_PG_U));
11267 attrs |= pte & (X86_PG_G | X86_PG_PTE_CACHE);
11269 /* Canonicalize by always using the PDE PAT bit. */
11270 if ((attrs & X86_PG_PTE_PAT) != 0)
11271 attrs ^= X86_PG_PDE_PAT | X86_PG_PTE_PAT;
11274 if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
11275 sysctl_kmaps_dump(sb, range, va);
11276 sysctl_kmaps_reinit(range, va, attrs);
11281 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
11283 struct pmap_kernel_map_range range;
11284 struct sbuf sbuf, *sb;
11285 pml4_entry_t pml4e;
11286 pdp_entry_t *pdp, pdpe;
11287 pd_entry_t *pd, pde;
11288 pt_entry_t *pt, pte;
11291 int error, i, j, k, l;
11293 error = sysctl_wire_old_buffer(req, 0);
11297 sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
11299 /* Sentinel value. */
11300 range.sva = la57 ? KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
11301 NPDEPG - 1, NPTEPG - 1) : KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
11302 NPDEPG - 1, NPTEPG - 1);
11305 * Iterate over the kernel page tables without holding the kernel pmap
11306 * lock. Outside of the large map, kernel page table pages are never
11307 * freed, so at worst we will observe inconsistencies in the output.
11308 * Within the large map, ensure that PDP and PD page addresses are
11309 * valid before descending.
11311 for (sva = 0, i = pmap_pml4e_index(sva); i < NPML4EPG; i++) {
11314 sbuf_printf(sb, "\nRecursive map:\n");
11317 sbuf_printf(sb, "\nDirect map:\n");
11320 sbuf_printf(sb, "\nKernel map:\n");
11323 sbuf_printf(sb, "\nLarge map:\n");
11327 /* Convert to canonical form. */
11328 if (sva == 1ul << 47)
11332 pml4e = kernel_pml4[i];
11333 if ((pml4e & X86_PG_V) == 0) {
11334 sva = rounddown2(sva, NBPML4);
11335 sysctl_kmaps_dump(sb, &range, sva);
11339 pa = pml4e & PG_FRAME;
11340 pdp = (pdp_entry_t *)PHYS_TO_DMAP(pa);
11342 for (j = pmap_pdpe_index(sva); j < NPDPEPG; j++) {
11344 if ((pdpe & X86_PG_V) == 0) {
11345 sva = rounddown2(sva, NBPDP);
11346 sysctl_kmaps_dump(sb, &range, sva);
11350 pa = pdpe & PG_FRAME;
11351 if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
11352 vm_phys_paddr_to_vm_page(pa) == NULL)
11354 if ((pdpe & PG_PS) != 0) {
11355 sva = rounddown2(sva, NBPDP);
11356 sysctl_kmaps_check(sb, &range, sva, pml4e, pdpe,
11362 pd = (pd_entry_t *)PHYS_TO_DMAP(pa);
11364 for (k = pmap_pde_index(sva); k < NPDEPG; k++) {
11366 if ((pde & X86_PG_V) == 0) {
11367 sva = rounddown2(sva, NBPDR);
11368 sysctl_kmaps_dump(sb, &range, sva);
11372 pa = pde & PG_FRAME;
11373 if (PMAP_ADDRESS_IN_LARGEMAP(sva) &&
11374 vm_phys_paddr_to_vm_page(pa) == NULL)
11376 if ((pde & PG_PS) != 0) {
11377 sva = rounddown2(sva, NBPDR);
11378 sysctl_kmaps_check(sb, &range, sva,
11379 pml4e, pdpe, pde, 0);
11384 pt = (pt_entry_t *)PHYS_TO_DMAP(pa);
11386 for (l = pmap_pte_index(sva); l < NPTEPG; l++,
11387 sva += PAGE_SIZE) {
11389 if ((pte & X86_PG_V) == 0) {
11390 sysctl_kmaps_dump(sb, &range,
11394 sysctl_kmaps_check(sb, &range, sva,
11395 pml4e, pdpe, pde, pte);
11402 error = sbuf_finish(sb);
11406 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
11407 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
11408 NULL, 0, sysctl_kmaps, "A",
11409 "Dump kernel address layout");
11412 DB_SHOW_COMMAND(pte, pmap_print_pte)
11415 pml5_entry_t *pml5;
11416 pml4_entry_t *pml4;
11419 pt_entry_t *pte, PG_V;
11423 db_printf("show pte addr\n");
11426 va = (vm_offset_t)addr;
11428 if (kdb_thread != NULL)
11429 pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
11431 pmap = PCPU_GET(curpmap);
11433 PG_V = pmap_valid_bit(pmap);
11434 db_printf("VA 0x%016lx", va);
11436 if (pmap_is_la57(pmap)) {
11437 pml5 = pmap_pml5e(pmap, va);
11438 db_printf(" pml5e 0x%016lx", *pml5);
11439 if ((*pml5 & PG_V) == 0) {
11443 pml4 = pmap_pml5e_to_pml4e(pml5, va);
11445 pml4 = pmap_pml4e(pmap, va);
11447 db_printf(" pml4e 0x%016lx", *pml4);
11448 if ((*pml4 & PG_V) == 0) {
11452 pdp = pmap_pml4e_to_pdpe(pml4, va);
11453 db_printf(" pdpe 0x%016lx", *pdp);
11454 if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) {
11458 pde = pmap_pdpe_to_pde(pdp, va);
11459 db_printf(" pde 0x%016lx", *pde);
11460 if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) {
11464 pte = pmap_pde_to_pte(pde, va);
11465 db_printf(" pte 0x%016lx\n", *pte);
11468 DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap)
11473 a = (vm_paddr_t)addr;
11474 db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a));
11476 db_printf("show phys2dmap addr\n");
11481 ptpages_show_page(int level, int idx, vm_page_t pg)
11483 db_printf("l %d i %d pg %p phys %#lx ref %x\n",
11484 level, idx, pg, VM_PAGE_TO_PHYS(pg), pg->ref_count);
11488 ptpages_show_complain(int level, int idx, uint64_t pte)
11490 db_printf("l %d i %d pte %#lx\n", level, idx, pte);
11494 ptpages_show_pml4(vm_page_t pg4, int num_entries, uint64_t PG_V)
11496 vm_page_t pg3, pg2, pg1;
11497 pml4_entry_t *pml4;
11502 pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg4));
11503 for (i4 = 0; i4 < num_entries; i4++) {
11504 if ((pml4[i4] & PG_V) == 0)
11506 pg3 = PHYS_TO_VM_PAGE(pml4[i4] & PG_FRAME);
11508 ptpages_show_complain(3, i4, pml4[i4]);
11511 ptpages_show_page(3, i4, pg3);
11512 pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg3));
11513 for (i3 = 0; i3 < NPDPEPG; i3++) {
11514 if ((pdp[i3] & PG_V) == 0)
11516 pg2 = PHYS_TO_VM_PAGE(pdp[i3] & PG_FRAME);
11518 ptpages_show_complain(2, i3, pdp[i3]);
11521 ptpages_show_page(2, i3, pg2);
11522 pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pg2));
11523 for (i2 = 0; i2 < NPDEPG; i2++) {
11524 if ((pd[i2] & PG_V) == 0)
11526 pg1 = PHYS_TO_VM_PAGE(pd[i2] & PG_FRAME);
11528 ptpages_show_complain(1, i2, pd[i2]);
11531 ptpages_show_page(1, i2, pg1);
11537 DB_SHOW_COMMAND(ptpages, pmap_ptpages)
11541 pml5_entry_t *pml5;
11546 pmap = (pmap_t)addr;
11548 pmap = PCPU_GET(curpmap);
11550 PG_V = pmap_valid_bit(pmap);
11552 if (pmap_is_la57(pmap)) {
11553 pml5 = pmap->pm_pmltop;
11554 for (i5 = 0; i5 < NUPML5E; i5++) {
11555 if ((pml5[i5] & PG_V) == 0)
11557 pg = PHYS_TO_VM_PAGE(pml5[i5] & PG_FRAME);
11559 ptpages_show_complain(4, i5, pml5[i5]);
11562 ptpages_show_page(4, i5, pg);
11563 ptpages_show_pml4(pg, NPML4EPG, PG_V);
11566 ptpages_show_pml4(PHYS_TO_VM_PAGE(DMAP_TO_PHYS(
11567 (vm_offset_t)pmap->pm_pmltop)), NUP4ML4E, PG_V);