2 * Copyright (c) 1991 Regents of the University of California.
4 * Copyright (c) 1994 John S. Dyson
6 * Copyright (c) 1994 David Greenman
8 * Copyright (c) 2003 Peter Wemm
10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11 * All rights reserved.
13 * This code is derived from software contributed to Berkeley by
14 * the Systems Programming Group of the University of Utah Computer
15 * Science Department and William Jolitz of UUNET Technologies Inc.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. All advertising materials mentioning features or use of this software
26 * must display the following acknowledgement:
27 * This product includes software developed by the University of
28 * California, Berkeley and its contributors.
29 * 4. Neither the name of the University nor the names of its contributors
30 * may be used to endorse or promote products derived from this software
31 * without specific prior written permission.
33 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
34 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
37 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
41 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
42 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
48 * Copyright (c) 2003 Networks Associates Technology, Inc.
49 * All rights reserved.
51 * This software was developed for the FreeBSD Project by Jake Burkholder,
52 * Safeport Network Services, and Network Associates Laboratories, the
53 * Security Research Division of Network Associates, Inc. under
54 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
55 * CHATS research program.
57 * Redistribution and use in source and binary forms, with or without
58 * modification, are permitted provided that the following conditions
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
66 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
68 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
69 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
70 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
71 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
72 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
73 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
74 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
75 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
79 #define AMD64_NPT_AWARE
81 #include <sys/cdefs.h>
82 __FBSDID("$FreeBSD$");
85 * Manages physical address maps.
87 * Since the information managed by this module is
88 * also stored by the logical address mapping module,
89 * this module may throw away valid virtual-to-physical
90 * mappings at almost any time. However, invalidations
91 * of virtual-to-physical mappings must be done as requested.
94 * In order to cope with hardware architectures which
95 * make virtual-to-physical map invalidates expensive,
96 * this module may delay invalidation or protection-reduction
97 * operations until such time as they are actually
98 * necessary. This module is given full information as
99 * to which processors are currently using which maps,
100 * and to when physical maps must be made correct.
103 #include "opt_pmap.h"
106 #include <sys/param.h>
107 #include <sys/bitstring.h>
109 #include <sys/systm.h>
110 #include <sys/kernel.h>
112 #include <sys/lock.h>
113 #include <sys/malloc.h>
114 #include <sys/mman.h>
115 #include <sys/mutex.h>
116 #include <sys/proc.h>
117 #include <sys/rwlock.h>
119 #include <sys/turnstile.h>
120 #include <sys/vmem.h>
121 #include <sys/vmmeter.h>
122 #include <sys/sched.h>
123 #include <sys/sysctl.h>
127 #include <vm/vm_param.h>
128 #include <vm/vm_kern.h>
129 #include <vm/vm_page.h>
130 #include <vm/vm_map.h>
131 #include <vm/vm_object.h>
132 #include <vm/vm_extern.h>
133 #include <vm/vm_pageout.h>
134 #include <vm/vm_pager.h>
135 #include <vm/vm_phys.h>
136 #include <vm/vm_radix.h>
137 #include <vm/vm_reserv.h>
140 #include <machine/intr_machdep.h>
141 #include <x86/apicvar.h>
142 #include <machine/cpu.h>
143 #include <machine/cputypes.h>
144 #include <machine/md_var.h>
145 #include <machine/pcb.h>
146 #include <machine/specialreg.h>
148 #include <machine/smp.h>
151 static __inline boolean_t
152 pmap_type_guest(pmap_t pmap)
155 return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
158 static __inline boolean_t
159 pmap_emulate_ad_bits(pmap_t pmap)
162 return ((pmap->pm_flags & PMAP_EMULATE_AD_BITS) != 0);
165 static __inline pt_entry_t
166 pmap_valid_bit(pmap_t pmap)
170 switch (pmap->pm_type) {
176 if (pmap_emulate_ad_bits(pmap))
177 mask = EPT_PG_EMUL_V;
182 panic("pmap_valid_bit: invalid pm_type %d", pmap->pm_type);
188 static __inline pt_entry_t
189 pmap_rw_bit(pmap_t pmap)
193 switch (pmap->pm_type) {
199 if (pmap_emulate_ad_bits(pmap))
200 mask = EPT_PG_EMUL_RW;
205 panic("pmap_rw_bit: invalid pm_type %d", pmap->pm_type);
211 static __inline pt_entry_t
212 pmap_global_bit(pmap_t pmap)
216 switch (pmap->pm_type) {
225 panic("pmap_global_bit: invalid pm_type %d", pmap->pm_type);
231 static __inline pt_entry_t
232 pmap_accessed_bit(pmap_t pmap)
236 switch (pmap->pm_type) {
242 if (pmap_emulate_ad_bits(pmap))
248 panic("pmap_accessed_bit: invalid pm_type %d", pmap->pm_type);
254 static __inline pt_entry_t
255 pmap_modified_bit(pmap_t pmap)
259 switch (pmap->pm_type) {
265 if (pmap_emulate_ad_bits(pmap))
271 panic("pmap_modified_bit: invalid pm_type %d", pmap->pm_type);
277 extern struct pcpu __pcpu[];
279 #if !defined(DIAGNOSTIC)
280 #ifdef __GNUC_GNU_INLINE__
281 #define PMAP_INLINE __attribute__((__gnu_inline__)) inline
283 #define PMAP_INLINE extern inline
290 #define PV_STAT(x) do { x ; } while (0)
292 #define PV_STAT(x) do { } while (0)
295 #define pa_index(pa) ((pa) >> PDRSHIFT)
296 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
298 #define NPV_LIST_LOCKS MAXCPU
300 #define PHYS_TO_PV_LIST_LOCK(pa) \
301 (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
303 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
304 struct rwlock **_lockp = (lockp); \
305 struct rwlock *_new_lock; \
307 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
308 if (_new_lock != *_lockp) { \
309 if (*_lockp != NULL) \
310 rw_wunlock(*_lockp); \
311 *_lockp = _new_lock; \
316 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
317 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
319 #define RELEASE_PV_LIST_LOCK(lockp) do { \
320 struct rwlock **_lockp = (lockp); \
322 if (*_lockp != NULL) { \
323 rw_wunlock(*_lockp); \
328 #define VM_PAGE_TO_PV_LIST_LOCK(m) \
329 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
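/*
 * Illustrative sketch (not compiled, guarded by #if 0): the lock macros
 * above are meant to be used from PV-list manipulation loops.  A caller
 * starts with *lockp == NULL, lets CHANGE_PV_LIST_LOCK_TO_VM_PAGE()
 * acquire (or switch to) the rwlock covering the current page's PV list,
 * and finally drops whatever is held with RELEASE_PV_LIST_LOCK().  The
 * function name below is hypothetical.
 */
#if 0
static void
pv_list_lock_usage_sketch(vm_page_t m)
{
	struct rwlock *lock;

	lock = NULL;
	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
	/* ... add or remove pv entries for m while write-locked ... */
	RELEASE_PV_LIST_LOCK(&lock);
}
#endif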
331 struct pmap kernel_pmap_store;
333 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
334 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
337 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
338 "Number of kernel page table pages allocated on bootup");
341 vm_paddr_t dmaplimit;
342 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
345 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
347 static int pat_works = 1;
348 SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
349 "Is page attribute table fully functional?");
351 static int pg_ps_enabled = 1;
352 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
353 &pg_ps_enabled, 0, "Are large page mappings enabled?");
355 #define PAT_INDEX_SIZE 8
356 static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
358 static u_int64_t KPTphys; /* phys addr of kernel level 1 */
359 static u_int64_t KPDphys; /* phys addr of kernel level 2 */
360 u_int64_t KPDPphys; /* phys addr of kernel level 3 */
361 u_int64_t KPML4phys; /* phys addr of kernel level 4 */
363 static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
364 static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
365 static int ndmpdpphys; /* number of DMPDPphys pages */
368 * pmap_mapdev() support before pmap initialization (e.g., the early console)
370 #define PMAP_PREINIT_MAPPING_COUNT 8
371 static struct pmap_preinit_mapping {
376 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
377 static int pmap_initialized;
380 * Data for the pv entry allocation mechanism.
381 * Updates to pv_invl_gen are protected by the pv_list_locks[]
382 * elements, but reads are not.
384 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
385 static struct mtx pv_chunks_mutex;
386 static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
387 static u_long pv_invl_gen[NPV_LIST_LOCKS];
388 static struct md_page *pv_table;
389 static struct md_page pv_dummy;
392 * All those kernel PT submaps that BSD is so fond of
394 pt_entry_t *CMAP1 = NULL;
396 static vm_offset_t qframe = 0;
397 static struct mtx qframe_mtx;
399 static int pmap_flags = PMAP_PDE_SUPERPAGE; /* flags for x86 pmaps */
401 int pmap_pcid_enabled = 1;
402 SYSCTL_INT(_vm_pmap, OID_AUTO, pcid_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
403 &pmap_pcid_enabled, 0, "Is TLB Context ID enabled?");
404 int invpcid_works = 0;
405 SYSCTL_INT(_vm_pmap, OID_AUTO, invpcid_works, CTLFLAG_RD, &invpcid_works, 0,
406 "Is the invpcid instruction available ?");
409 pmap_pcid_save_cnt_proc(SYSCTL_HANDLER_ARGS)
416 res += cpuid_to_pcpu[i]->pc_pm_save_cnt;
418 return (sysctl_handle_64(oidp, &res, 0, req));
420 SYSCTL_PROC(_vm_pmap, OID_AUTO, pcid_save_cnt, CTLTYPE_U64 | CTLFLAG_RW |
421 CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU",
422 "Count of saved TLB context on switch");
424 static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
425 LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
426 static struct mtx invl_gen_mtx;
427 static u_long pmap_invl_gen = 0;
428 /* Fake lock object to satisfy turnstiles interface. */
429 static struct lock_object invl_gen_ts = {
437 return (curthread->td_md.md_invl_gen.gen == 0);
440 #define PMAP_ASSERT_NOT_IN_DI() \
441 KASSERT(pmap_not_in_di(), ("DI already started"))
444 * Start a new Delayed Invalidation (DI) block of code, executed by
445 * the current thread. Within a DI block, the current thread may
446 * destroy both the page table and PV list entries for a mapping and
447 * then release the corresponding PV list lock before ensuring that
448 * the mapping is flushed from the TLBs of any processors with the pmap active on them.
452 pmap_delayed_invl_started(void)
454 struct pmap_invl_gen *invl_gen;
457 invl_gen = &curthread->td_md.md_invl_gen;
458 PMAP_ASSERT_NOT_IN_DI();
459 mtx_lock(&invl_gen_mtx);
460 if (LIST_EMPTY(&pmap_invl_gen_tracker))
461 currgen = pmap_invl_gen;
463 currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen;
464 invl_gen->gen = currgen + 1;
465 LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link);
466 mtx_unlock(&invl_gen_mtx);
470 * Finish the DI block, previously started by the current thread. All
471 * required TLB flushes for the pages marked by
472 * pmap_delayed_invl_page() must be finished before this function is
475 * This function works by bumping the global DI generation number to
476 * the generation number of the current thread's DI, unless there is a
477 * pending DI that started earlier. In the latter case, bumping the
478 * global DI generation number would incorrectly signal that the
479 * earlier DI had finished. Instead, this function bumps the earlier
480 * DI's generation number to match the generation number of the
481 * current thread's DI.
484 pmap_delayed_invl_finished(void)
486 struct pmap_invl_gen *invl_gen, *next;
487 struct turnstile *ts;
489 invl_gen = &curthread->td_md.md_invl_gen;
490 KASSERT(invl_gen->gen != 0, ("missed invl_started"));
491 mtx_lock(&invl_gen_mtx);
492 next = LIST_NEXT(invl_gen, link);
494 turnstile_chain_lock(&invl_gen_ts);
495 ts = turnstile_lookup(&invl_gen_ts);
496 pmap_invl_gen = invl_gen->gen;
498 turnstile_broadcast(ts, TS_SHARED_QUEUE);
499 turnstile_unpend(ts, TS_SHARED_LOCK);
501 turnstile_chain_unlock(&invl_gen_ts);
503 next->gen = invl_gen->gen;
505 LIST_REMOVE(invl_gen, link);
506 mtx_unlock(&invl_gen_mtx);
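/*
 * Illustrative sketch (not compiled, guarded by #if 0) of how a removal
 * path is expected to bracket its work with a DI block; 'pmap', 'sva',
 * 'eva' and 'm' are placeholders, while the pmap_delayed_invl_*() and
 * pmap_invalidate_*() calls are the functions defined in this file.
 */
#if 0
	pmap_delayed_invl_started();
	PMAP_LOCK(pmap);
	/*
	 * Destroy PTEs and PV entries for [sva, eva), calling
	 * pmap_delayed_invl_page(m) for each page whose mappings are
	 * removed while its PV list lock is still held.
	 */
	pmap_invalidate_range(pmap, sva, eva);	/* required TLB flushes */
	PMAP_UNLOCK(pmap);
	pmap_delayed_invl_finished();		/* only after the flushes */
#endif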
511 static long invl_wait;
512 SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait, CTLFLAG_RD, &invl_wait, 0,
513 "Number of times DI invalidation blocked pmap_remove_all/write");
517 pmap_delayed_invl_genp(vm_page_t m)
520 return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]);
524 * Ensure that all currently executing DI blocks, that need to flush
525 * TLB for the given page m, actually flushed the TLB at the time the
526 * function returned. If the page m has an empty PV list and we call
527 * pmap_delayed_invl_wait(), upon its return we know that no CPU has a
528 * valid mapping for the page m in either its page table or TLB.
530 * This function works by blocking until the global DI generation
531 * number catches up with the generation number associated with the
532 * given page m and its PV list. Since this function's callers
533 * typically own an object lock and sometimes own a page lock, it
534 * cannot sleep. Instead, it blocks on a turnstile to relinquish the processor.
538 pmap_delayed_invl_wait(vm_page_t m)
541 struct turnstile *ts;
544 bool accounted = false;
548 m_gen = pmap_delayed_invl_genp(m);
549 while (*m_gen > pmap_invl_gen) {
552 atomic_add_long(&invl_wait, 1);
556 ts = turnstile_trywait(&invl_gen_ts);
557 if (*m_gen > pmap_invl_gen)
558 turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
560 turnstile_cancel(ts);
565 * Mark the page m's PV list as participating in the current thread's
566 * DI block. Any threads concurrently using m's PV list to remove or
567 * restrict all mappings to m will wait for the current thread's DI
568 * block to complete before proceeding.
570 * The function works by setting the DI generation number for m's PV
571 * list to at least the DI generation number of the current thread.
572 * This forces a caller of pmap_delayed_invl_wait() to block until
573 * the current thread calls pmap_delayed_invl_finished().
576 pmap_delayed_invl_page(vm_page_t m)
580 rw_assert(VM_PAGE_TO_PV_LIST_LOCK(m), RA_WLOCKED);
581 gen = curthread->td_md.md_invl_gen.gen;
584 m_gen = pmap_delayed_invl_genp(m);
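/*
 * Worked example of the generation handshake (numbers invented for
 * illustration): with the global pmap_invl_gen at 7, a thread entering
 * pmap_delayed_invl_started() records gen 8 in its md_invl_gen.  A call
 * to pmap_delayed_invl_page(m) raises m's pv_invl_gen slot to 8, so a
 * reader in pmap_delayed_invl_wait(m) blocks while *m_gen (8) is greater
 * than pmap_invl_gen (7), and is released once
 * pmap_delayed_invl_finished() advances the global generation to 8.
 */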
592 static caddr_t crashdumpmap;
595 * Internal flags for pmap_enter()'s helper functions.
597 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
598 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
600 static void free_pv_chunk(struct pv_chunk *pc);
601 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
602 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
603 static int popcnt_pc_map_pq(uint64_t *map);
604 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
605 static void reserve_pv_entries(pmap_t pmap, int needed,
606 struct rwlock **lockp);
607 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
608 struct rwlock **lockp);
609 static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
610 u_int flags, struct rwlock **lockp);
611 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
612 struct rwlock **lockp);
613 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
614 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
617 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
618 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
619 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
620 vm_offset_t va, struct rwlock **lockp);
621 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
623 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
624 vm_prot_t prot, struct rwlock **lockp);
625 static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
626 u_int flags, vm_page_t m, struct rwlock **lockp);
627 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
628 vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
629 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
630 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
631 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
633 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
634 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
635 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
636 struct rwlock **lockp);
637 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
639 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask);
640 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
641 struct spglist *free, struct rwlock **lockp);
642 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
643 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
644 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
645 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
646 struct spglist *free);
647 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
648 pd_entry_t *pde, struct spglist *free,
649 struct rwlock **lockp);
650 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
651 vm_page_t m, struct rwlock **lockp);
652 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
654 static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);
656 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
657 struct rwlock **lockp);
658 static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va,
659 struct rwlock **lockp);
660 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
661 struct rwlock **lockp);
663 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
664 struct spglist *free);
665 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
666 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
669 * Move the kernel virtual free pointer to the next
670 * 2MB. This is used to help improve performance
671 * by using a large (2MB) page for much of the kernel
672 * (.text, .data, .bss)
675 pmap_kmem_choose(vm_offset_t addr)
677 vm_offset_t newaddr = addr;
679 newaddr = roundup2(addr, NBPDR);
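/*
 * For example, with NBPDR == 2MB an addr of 0xffffffff80912345 rounds up
 * to 0xffffffff80a00000, so virtual_avail starts on a 2MB boundary and the
 * early kernel KVA remains eligible for superpage promotion.
 */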
683 /********************/
684 /* Inline functions */
685 /********************/
687 /* Return a non-clipped PD index for a given VA */
688 static __inline vm_pindex_t
689 pmap_pde_pindex(vm_offset_t va)
691 return (va >> PDRSHIFT);
695 /* Return a pointer to the PML4 slot that corresponds to a VA */
696 static __inline pml4_entry_t *
697 pmap_pml4e(pmap_t pmap, vm_offset_t va)
700 return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
703 /* Return a pointer to the PDP slot that corresponds to a VA */
704 static __inline pdp_entry_t *
705 pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
709 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
710 return (&pdpe[pmap_pdpe_index(va)]);
713 /* Return a pointer to the PDP slot that corresponds to a VA */
714 static __inline pdp_entry_t *
715 pmap_pdpe(pmap_t pmap, vm_offset_t va)
720 PG_V = pmap_valid_bit(pmap);
721 pml4e = pmap_pml4e(pmap, va);
722 if ((*pml4e & PG_V) == 0)
724 return (pmap_pml4e_to_pdpe(pml4e, va));
727 /* Return a pointer to the PD slot that corresponds to a VA */
728 static __inline pd_entry_t *
729 pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
733 pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
734 return (&pde[pmap_pde_index(va)]);
737 /* Return a pointer to the PD slot that corresponds to a VA */
738 static __inline pd_entry_t *
739 pmap_pde(pmap_t pmap, vm_offset_t va)
744 PG_V = pmap_valid_bit(pmap);
745 pdpe = pmap_pdpe(pmap, va);
746 if (pdpe == NULL || (*pdpe & PG_V) == 0)
748 return (pmap_pdpe_to_pde(pdpe, va));
751 /* Return a pointer to the PT slot that corresponds to a VA */
752 static __inline pt_entry_t *
753 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
757 pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
758 return (&pte[pmap_pte_index(va)]);
761 /* Return a pointer to the PT slot that corresponds to a VA */
762 static __inline pt_entry_t *
763 pmap_pte(pmap_t pmap, vm_offset_t va)
768 PG_V = pmap_valid_bit(pmap);
769 pde = pmap_pde(pmap, va);
770 if (pde == NULL || (*pde & PG_V) == 0)
772 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
773 return ((pt_entry_t *)pde);
774 return (pmap_pde_to_pte(pde, va));
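/*
 * Illustrative sketch (not compiled, guarded by #if 0): the helpers above
 * compose into a full four-level walk.  This mirrors what pmap_pte() does
 * internally and only makes the level-to-level hand-off explicit; unlike
 * pmap_pte(), the sketch simply gives up on a 2MB mapping.  The function
 * name is hypothetical.
 */
#if 0
static pt_entry_t *
pmap_walk_sketch(pmap_t pmap, vm_offset_t va)
{
	pml4_entry_t *pml4e;
	pdp_entry_t *pdpe;
	pd_entry_t *pde;
	pt_entry_t PG_V;

	PG_V = pmap_valid_bit(pmap);
	pml4e = pmap_pml4e(pmap, va);		/* level 4 slot */
	if ((*pml4e & PG_V) == 0)
		return (NULL);
	pdpe = pmap_pml4e_to_pdpe(pml4e, va);	/* level 3 slot */
	if ((*pdpe & PG_V) == 0)
		return (NULL);
	pde = pmap_pdpe_to_pde(pdpe, va);	/* level 2 slot */
	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0)
		return (NULL);			/* not present, or 2MB page */
	return (pmap_pde_to_pte(pde, va));	/* level 1 slot */
}
#endif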
778 pmap_resident_count_inc(pmap_t pmap, int count)
781 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
782 pmap->pm_stats.resident_count += count;
786 pmap_resident_count_dec(pmap_t pmap, int count)
789 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
790 KASSERT(pmap->pm_stats.resident_count >= count,
791 ("pmap %p resident count underflow %ld %d", pmap,
792 pmap->pm_stats.resident_count, count));
793 pmap->pm_stats.resident_count -= count;
796 PMAP_INLINE pt_entry_t *
797 vtopte(vm_offset_t va)
799 u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
801 KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopte on a uva/gpa 0x%0lx", va));
803 return (PTmap + ((va >> PAGE_SHIFT) & mask));
806 static __inline pd_entry_t *
807 vtopde(vm_offset_t va)
809 u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
811 KASSERT(va >= VM_MAXUSER_ADDRESS, ("vtopde on a uva/gpa 0x%0lx", va));
813 return (PDmap + ((va >> PDRSHIFT) & mask));
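/*
 * Worked example for the recursive mapping behind vtopte() and vtopde():
 * the vtopte() mask keeps the low 36 bits of the virtual page number
 * (9 bits for each of the four levels), so the PTE for a kernel va is
 * found at PTmap + ((va >> PAGE_SHIFT) & mask); vtopde() keeps 27 bits
 * and indexes PDmap the same way.  Both windows exist only because
 * create_pagetables() installs KPML4phys into its own PML4PML4I slot.
 */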
817 allocpages(vm_paddr_t *firstaddr, int n)
822 bzero((void *)ret, n * PAGE_SIZE);
823 *firstaddr += n * PAGE_SIZE;
827 CTASSERT(powerof2(NDMPML4E));
829 /* number of kernel PDP slots */
830 #define NKPDPE(ptpgs) howmany(ptpgs, NPDEPG)
833 nkpt_init(vm_paddr_t addr)
840 pt_pages = howmany(addr, 1 << PDRSHIFT);
841 pt_pages += NKPDPE(pt_pages);
844 * Add some slop beyond the bare minimum required for bootstrapping the kernel.
847 * This is quite important when allocating KVA for kernel modules.
848 * The modules are required to be linked in the negative 2GB of
849 * the address space. If we run out of KVA in this region then
850 * pmap_growkernel() will need to allocate page table pages to map
851 * the entire 512GB of KVA space which is an unnecessary tax on physical memory.
854 * Secondly, device memory mapped as part of setting up the low-
855 * level console(s) is taken from KVA, starting at virtual_avail.
856 * This is because cninit() is called after pmap_bootstrap() but
857 * before vm_init() and pmap_init(). 20MB for a frame buffer is common.
860 pt_pages += 32; /* 64MB additional slop. */
866 create_pagetables(vm_paddr_t *firstaddr)
868 int i, j, ndm1g, nkpdpe;
874 /* Allocate page table pages for the direct map */
875 ndmpdp = howmany(ptoa(Maxmem), NBPDP);
876 if (ndmpdp < 4) /* Minimum 4GB of dirmap */
878 ndmpdpphys = howmany(ndmpdp, NPDPEPG);
879 if (ndmpdpphys > NDMPML4E) {
881 * Each NDMPML4E allows 512 GB, so limit to that,
882 * and then readjust ndmpdp and ndmpdpphys.
884 printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
885 Maxmem = atop(NDMPML4E * NBPML4);
886 ndmpdpphys = NDMPML4E;
887 ndmpdp = NDMPML4E * NPDEPG;
889 DMPDPphys = allocpages(firstaddr, ndmpdpphys);
891 if ((amd_feature & AMDID_PAGE1GB) != 0)
892 ndm1g = ptoa(Maxmem) >> PDPSHIFT;
894 DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
895 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
898 KPML4phys = allocpages(firstaddr, 1);
899 KPDPphys = allocpages(firstaddr, NKPML4E);
902 * Allocate the initial number of kernel page table pages required to
903 * bootstrap. We defer this until after all memory-size dependent
904 * allocations are done (e.g. direct map), so that we don't have to
905 * build in too much slop in our estimate.
907 * Note that when NKPML4E > 1, we have an empty page underneath
908 * all but the KPML4I'th one, so we need NKPML4E-1 extra (zeroed)
909 * pages. (pmap_enter requires a PD page to exist for each KPML4E.)
911 nkpt_init(*firstaddr);
912 nkpdpe = NKPDPE(nkpt);
914 KPTphys = allocpages(firstaddr, nkpt);
915 KPDphys = allocpages(firstaddr, nkpdpe);
917 /* Fill in the underlying page table pages */
918 /* Nominally read-only (but really R/W) from zero to physfree */
919 /* XXX not fully used, underneath 2M pages */
920 pt_p = (pt_entry_t *)KPTphys;
921 for (i = 0; ptoa(i) < *firstaddr; i++)
922 pt_p[i] = ptoa(i) | X86_PG_RW | X86_PG_V | X86_PG_G;
924 /* Now map the page tables at their location within PTmap */
925 pd_p = (pd_entry_t *)KPDphys;
926 for (i = 0; i < nkpt; i++)
927 pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
929 /* Map from zero to end of allocations under 2M pages */
930 /* This replaces some of the KPTphys entries above */
931 for (i = 0; (i << PDRSHIFT) < *firstaddr; i++)
932 pd_p[i] = (i << PDRSHIFT) | X86_PG_RW | X86_PG_V | PG_PS |
935 /* And connect up the PD to the PDP (leaving room for L4 pages) */
936 pdp_p = (pdp_entry_t *)(KPDPphys + ptoa(KPML4I - KPML4BASE));
937 for (i = 0; i < nkpdpe; i++)
938 pdp_p[i + KPDPI] = (KPDphys + ptoa(i)) | X86_PG_RW | X86_PG_V |
942 * Now, set up the direct map region using 2MB and/or 1GB pages. If
943 * the end of physical memory is not aligned to a 1GB page boundary,
944 * then the residual physical memory is mapped with 2MB pages. Later,
945 * if pmap_mapdev{_attr}() uses the direct map for non-write-back
946 * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
947 * that are partially used.
949 pd_p = (pd_entry_t *)DMPDphys;
950 for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
951 pd_p[j] = (vm_paddr_t)i << PDRSHIFT;
952 /* Preset PG_M and PG_A because demotion expects it. */
953 pd_p[j] |= X86_PG_RW | X86_PG_V | PG_PS | X86_PG_G |
956 pdp_p = (pdp_entry_t *)DMPDPphys;
957 for (i = 0; i < ndm1g; i++) {
958 pdp_p[i] = (vm_paddr_t)i << PDPSHIFT;
959 /* Preset PG_M and PG_A because demotion expects it. */
960 pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_PS | X86_PG_G |
963 for (j = 0; i < ndmpdp; i++, j++) {
964 pdp_p[i] = DMPDphys + ptoa(j);
965 pdp_p[i] |= X86_PG_RW | X86_PG_V | PG_U;
968 /* And recursively map PML4 to itself in order to get PTmap */
969 p4_p = (pml4_entry_t *)KPML4phys;
970 p4_p[PML4PML4I] = KPML4phys;
971 p4_p[PML4PML4I] |= X86_PG_RW | X86_PG_V | PG_U;
973 /* Connect the Direct Map slot(s) up to the PML4. */
974 for (i = 0; i < ndmpdpphys; i++) {
975 p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
976 p4_p[DMPML4I + i] |= X86_PG_RW | X86_PG_V | PG_U;
979 /* Connect the KVA slots up to the PML4 */
980 for (i = 0; i < NKPML4E; i++) {
981 p4_p[KPML4BASE + i] = KPDPphys + ptoa(i);
982 p4_p[KPML4BASE + i] |= X86_PG_RW | X86_PG_V | PG_U;
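/*
 * Worked example of the direct-map sizing above (figures are only an
 * illustration): with 16GB of Maxmem, ndmpdp = howmany(ptoa(Maxmem),
 * NBPDP) = 16 one-gigabyte slots and ndmpdpphys = howmany(16, NPDPEPG) =
 * 1 PDP page.  On a CPU with AMDID_PAGE1GB, ndm1g is also 16, so every
 * slot becomes a 1GB mapping and no 2MB DMPD pages are allocated; without
 * 1GB page support, ndm1g stays 0 and 16 PD pages of 2MB entries are
 * created instead.
 */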
987 * Bootstrap the system enough to run with virtual memory.
989 * On amd64 this is called after mapping has already been enabled
990 * and just syncs the pmap module with what has already been done.
991 * [We can't call it easily with mapping off since the kernel is not
992 * mapped with PA == VA, hence we would have to relocate every address
993 * from the linked base (virtual) address "KERNBASE" to the actual
994 * (physical) address starting relative to 0]
997 pmap_bootstrap(vm_paddr_t *firstaddr)
1004 * Create an initial set of page tables to run the kernel in.
1006 create_pagetables(firstaddr);
1009 * Add a physical memory segment (vm_phys_seg) corresponding to the
1010 * preallocated kernel page table pages so that vm_page structures
1011 * representing these pages will be created. The vm_page structures
1012 * are required for promotion of the corresponding kernel virtual
1013 * addresses to superpage mappings.
1015 vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1017 virtual_avail = (vm_offset_t) KERNBASE + *firstaddr;
1018 virtual_avail = pmap_kmem_choose(virtual_avail);
1020 virtual_end = VM_MAX_KERNEL_ADDRESS;
1023 /* XXX do %cr0 as well */
1024 load_cr4(rcr4() | CR4_PGE);
1025 load_cr3(KPML4phys);
1026 if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
1027 load_cr4(rcr4() | CR4_SMEP);
1030 * Initialize the kernel pmap (which is statically allocated).
1032 PMAP_LOCK_INIT(kernel_pmap);
1033 kernel_pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
1034 kernel_pmap->pm_cr3 = KPML4phys;
1035 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
1036 TAILQ_INIT(&kernel_pmap->pm_pvchunk);
1037 kernel_pmap->pm_flags = pmap_flags;
1040 * Initialize the TLB invalidations generation number lock.
1042 mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF);
1045 * Reserve some special page table entries/VA space for temporary
1048 #define SYSMAP(c, p, v, n) \
1049 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
1055 * Crashdump maps. The first page is reused as CMAP1 for the
1058 SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
1059 CADDR1 = crashdumpmap;
1064 * Initialize the PAT MSR.
1065 * pmap_init_pat() clears and sets CR4_PGE, which, as a
1066 * side-effect, invalidates stale PG_G TLB entries that might
1067 * have been created in our pre-boot environment.
1071 /* Initialize TLB Context Id. */
1072 TUNABLE_INT_FETCH("vm.pmap.pcid_enabled", &pmap_pcid_enabled);
1073 if ((cpu_feature2 & CPUID2_PCID) != 0 && pmap_pcid_enabled) {
1074 /* Check for INVPCID support */
1075 invpcid_works = (cpu_stdext_feature & CPUID_STDEXT_INVPCID)
1077 for (i = 0; i < MAXCPU; i++) {
1078 kernel_pmap->pm_pcids[i].pm_pcid = PMAP_PCID_KERN;
1079 kernel_pmap->pm_pcids[i].pm_gen = 1;
1081 __pcpu[0].pc_pcid_next = PMAP_PCID_KERN + 1;
1082 __pcpu[0].pc_pcid_gen = 1;
1084 * pcpu area for APs is zeroed during AP startup.
1085 * pc_pcid_next and pc_pcid_gen are initialized by AP
1086 * during pcpu setup.
1088 load_cr4(rcr4() | CR4_PCIDE);
1090 pmap_pcid_enabled = 0;
1095 * Setup the PAT MSR.
1100 int pat_table[PAT_INDEX_SIZE];
1105 /* Bail if this CPU doesn't implement PAT. */
1106 if ((cpu_feature & CPUID_PAT) == 0)
1109 /* Set default PAT index table. */
1110 for (i = 0; i < PAT_INDEX_SIZE; i++)
1112 pat_table[PAT_WRITE_BACK] = 0;
1113 pat_table[PAT_WRITE_THROUGH] = 1;
1114 pat_table[PAT_UNCACHEABLE] = 3;
1115 pat_table[PAT_WRITE_COMBINING] = 3;
1116 pat_table[PAT_WRITE_PROTECTED] = 3;
1117 pat_table[PAT_UNCACHED] = 3;
1119 /* Initialize default PAT entries. */
1120 pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
1121 PAT_VALUE(1, PAT_WRITE_THROUGH) |
1122 PAT_VALUE(2, PAT_UNCACHED) |
1123 PAT_VALUE(3, PAT_UNCACHEABLE) |
1124 PAT_VALUE(4, PAT_WRITE_BACK) |
1125 PAT_VALUE(5, PAT_WRITE_THROUGH) |
1126 PAT_VALUE(6, PAT_UNCACHED) |
1127 PAT_VALUE(7, PAT_UNCACHEABLE);
1131 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
1132 * Program 5 and 6 as WP and WC.
1133 * Leave 4 and 7 as WB and UC.
1135 pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
1136 pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
1137 PAT_VALUE(6, PAT_WRITE_COMBINING);
1138 pat_table[PAT_UNCACHED] = 2;
1139 pat_table[PAT_WRITE_PROTECTED] = 5;
1140 pat_table[PAT_WRITE_COMBINING] = 6;
1143 * Just replace PAT Index 2 with WC instead of UC-.
1145 pat_msr &= ~PAT_MASK(2);
1146 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
1147 pat_table[PAT_WRITE_COMBINING] = 2;
1152 load_cr4(cr4 & ~CR4_PGE);
1154 /* Disable caches (CD = 1, NW = 0). */
1156 load_cr0((cr0 & ~CR0_NW) | CR0_CD);
1158 /* Flushes caches and TLBs. */
1162 /* Update PAT and index table. */
1163 wrmsr(MSR_PAT, pat_msr);
1164 for (i = 0; i < PAT_INDEX_SIZE; i++)
1165 pat_index[i] = pat_table[i];
1167 /* Flush caches and TLBs again. */
1171 /* Restore caches and PGE. */
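/*
 * Worked summary of the layout programmed above when the first branch is
 * taken (fully functional PAT): entries 0-3 keep the WB, WT, UC- and UC
 * values set in pat_msr, entry 5 is reprogrammed to WP and entry 6 to WC,
 * and pat_index[] then maps PAT_WRITE_BACK->0, PAT_WRITE_THROUGH->1,
 * PAT_UNCACHED->2, PAT_UNCACHEABLE->3, PAT_WRITE_PROTECTED->5 and
 * PAT_WRITE_COMBINING->6.  In the fallback branch only entry 2 changes,
 * from UC- to WC.
 */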
1177 * Initialize a vm_page's machine-dependent fields.
1180 pmap_page_init(vm_page_t m)
1183 TAILQ_INIT(&m->md.pv_list);
1184 m->md.pat_mode = PAT_WRITE_BACK;
1188 * Initialize the pmap module.
1189 * Called by vm_init, to initialize any structures that the pmap
1190 * system needs to map virtual memory.
1195 struct pmap_preinit_mapping *ppim;
1198 int error, i, pv_npg;
1201 * Initialize the vm page array entries for the kernel pmap's
1204 for (i = 0; i < nkpt; i++) {
1205 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
1206 KASSERT(mpte >= vm_page_array &&
1207 mpte < &vm_page_array[vm_page_array_size],
1208 ("pmap_init: page table page is out of range"));
1209 mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
1210 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
1214 * If the kernel is running on a virtual machine, then it must assume
1215 * that MCA is enabled by the hypervisor. Moreover, the kernel must
1216 * be prepared for the hypervisor changing the vendor and family that
1217 * are reported by CPUID. Consequently, the workaround for AMD Family
1218 * 10h Erratum 383 is enabled if the processor's feature set does not
1219 * include at least one feature that is only supported by older Intel
1220 * or newer AMD processors.
1222 if (vm_guest != VM_GUEST_NO && (cpu_feature & CPUID_SS) == 0 &&
1223 (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
1224 CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
1226 workaround_erratum383 = 1;
1229 * Are large page mappings enabled?
1231 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
1232 if (pg_ps_enabled) {
1233 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1234 ("pmap_init: can't assign to pagesizes[1]"));
1235 pagesizes[1] = NBPDR;
1239 * Initialize the pv chunk list mutex.
1241 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
1244 * Initialize the pool of pv list locks.
1246 for (i = 0; i < NPV_LIST_LOCKS; i++)
1247 rw_init(&pv_list_locks[i], "pmap pv list");
1250 * Calculate the size of the pv head table for superpages.
1252 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
1255 * Allocate memory for the pv head table for superpages.
1257 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1259 pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
1261 for (i = 0; i < pv_npg; i++)
1262 TAILQ_INIT(&pv_table[i].pv_list);
1263 TAILQ_INIT(&pv_dummy.pv_list);
1265 pmap_initialized = 1;
1266 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
1267 ppim = pmap_preinit_mapping + i;
1270 /* Make the direct map consistent */
1271 if (ppim->pa < dmaplimit && ppim->pa + ppim->sz < dmaplimit) {
1272 (void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
1273 ppim->sz, ppim->mode);
1277 printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
1278 ppim->pa, ppim->va, ppim->sz, ppim->mode);
1281 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
1282 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
1283 (vmem_addr_t *)&qframe);
1285 panic("qframe allocation failed");
1288 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
1289 "2MB page mapping counters");
1291 static u_long pmap_pde_demotions;
1292 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
1293 &pmap_pde_demotions, 0, "2MB page demotions");
1295 static u_long pmap_pde_mappings;
1296 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
1297 &pmap_pde_mappings, 0, "2MB page mappings");
1299 static u_long pmap_pde_p_failures;
1300 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
1301 &pmap_pde_p_failures, 0, "2MB page promotion failures");
1303 static u_long pmap_pde_promotions;
1304 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
1305 &pmap_pde_promotions, 0, "2MB page promotions");
1307 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD, 0,
1308 "1GB page mapping counters");
1310 static u_long pmap_pdpe_demotions;
1311 SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
1312 &pmap_pdpe_demotions, 0, "1GB page demotions");
1314 /***************************************************
1315 * Low level helper routines.....
1316 ***************************************************/
1319 pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
1321 int x86_pat_bits = X86_PG_PTE_PAT | X86_PG_PDE_PAT;
1323 switch (pmap->pm_type) {
1326 /* Verify that both PAT bits are not set at the same time */
1327 KASSERT((entry & x86_pat_bits) != x86_pat_bits,
1328 ("Invalid PAT bits in entry %#lx", entry));
1330 /* Swap the PAT bits if one of them is set */
1331 if ((entry & x86_pat_bits) != 0)
1332 entry ^= x86_pat_bits;
1336 * Nothing to do - the memory attributes are represented
1337 * the same way for regular pages and superpages.
1341 panic("pmap_switch_pat_bits: bad pm_type %d", pmap->pm_type);
1348 * Determine the appropriate bits to set in a PTE or PDE for a specified
1352 pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
1354 int cache_bits, pat_flag, pat_idx;
1356 if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
1357 panic("Unknown caching mode %d\n", mode);
1359 switch (pmap->pm_type) {
1362 /* The PAT bit is different for PTE's and PDE's. */
1363 pat_flag = is_pde ? X86_PG_PDE_PAT : X86_PG_PTE_PAT;
1365 /* Map the caching mode to a PAT index. */
1366 pat_idx = pat_index[mode];
1368 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
1371 cache_bits |= pat_flag;
1373 cache_bits |= PG_NC_PCD;
1375 cache_bits |= PG_NC_PWT;
1379 cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
1383 panic("unsupported pmap type %d", pmap->pm_type);
1386 return (cache_bits);
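/*
 * Worked example for a PT_X86 pmap: PAT_WRITE_COMBINING maps to
 * pat_index[] entry 6 (binary 110) once pmap_init_pat() has taken its
 * first branch, so pmap_cache_bits() sets the PAT flag (X86_PG_PTE_PAT
 * for a 4KB PTE, X86_PG_PDE_PAT for a 2MB PDE) and PG_NC_PCD, leaves
 * PG_NC_PWT clear, and the hardware then selects PAT entry 6, i.e.
 * write-combining.
 */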
1390 pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
1394 switch (pmap->pm_type) {
1397 mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
1400 mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
1403 panic("pmap_cache_mask: invalid pm_type %d", pmap->pm_type);
1410 pmap_ps_enabled(pmap_t pmap)
1413 return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
1417 pmap_update_pde_store(pmap_t pmap, pd_entry_t *pde, pd_entry_t newpde)
1420 switch (pmap->pm_type) {
1427 * This is a little bogus since the generation number is
1428 * supposed to be bumped up when a region of the address
1429 * space is invalidated in the page tables.
1431 * In this case the old PDE entry is valid but yet we want
1432 * to make sure that any mappings using the old entry are
1433 * invalidated in the TLB.
1435 * The reason this works as expected is because we rendezvous
1436 * "all" host cpus and force any vcpu context to exit as a
1439 atomic_add_acq_long(&pmap->pm_eptgen, 1);
1442 panic("pmap_update_pde_store: bad pm_type %d", pmap->pm_type);
1444 pde_store(pde, newpde);
1448 * After changing the page size for the specified virtual address in the page
1449 * table, flush the corresponding entries from the processor's TLB. Only the
1450 * calling processor's TLB is affected.
1452 * The calling thread must be pinned to a processor.
1455 pmap_update_pde_invalidate(pmap_t pmap, vm_offset_t va, pd_entry_t newpde)
1459 if (pmap_type_guest(pmap))
1462 KASSERT(pmap->pm_type == PT_X86,
1463 ("pmap_update_pde_invalidate: invalid type %d", pmap->pm_type));
1465 PG_G = pmap_global_bit(pmap);
1467 if ((newpde & PG_PS) == 0)
1468 /* Demotion: flush a specific 2MB page mapping. */
1470 else if ((newpde & PG_G) == 0)
1472 * Promotion: flush every 4KB page mapping from the TLB
1473 * because there are too many to flush individually.
1478 * Promotion: flush every 4KB page mapping from the TLB,
1479 * including any global (PG_G) mappings.
1487 * For SMP, these functions have to use the IPI mechanism for coherence.
1489 * N.B.: Before calling any of the following TLB invalidation functions,
1490 * the calling processor must ensure that all stores updating a non-
1491 * kernel page table are globally performed. Otherwise, another
1492 * processor could cache an old, pre-update entry without being
1493 * invalidated. This can happen one of two ways: (1) The pmap becomes
1494 * active on another processor after its pm_active field is checked by
1495 * one of the following functions but before a store updating the page
1496 * table is globally performed. (2) The pmap becomes active on another
1497 * processor before its pm_active field is checked but due to
1498 * speculative loads one of the following functions still reads the
1499 * pmap as inactive on the other processor.
1501 * The kernel page table is exempt because its pm_active field is
1502 * immutable. The kernel page table is always active on every processor.
1507 * Interrupt the cpus that are executing in the guest context.
1508 * This will force the vcpu to exit and the cached EPT mappings
1509 * will be invalidated by the host before the next vmresume.
1511 static __inline void
1512 pmap_invalidate_ept(pmap_t pmap)
1517 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
1518 ("pmap_invalidate_ept: absurd pm_active"));
1521 * The TLB mappings associated with a vcpu context are not
1522 * flushed each time a different vcpu is chosen to execute.
1524 * This is in contrast with a process's vtop mappings that
1525 * are flushed from the TLB on each context switch.
1527 * Therefore we need to do more than just a TLB shootdown on
1528 * the active cpus in 'pmap->pm_active'. To do this we keep
1529 * track of the number of invalidations performed on this pmap.
1531 * Each vcpu keeps a cache of this counter and compares it
1532 * just before a vmresume. If the counter is out-of-date an
1533 * invept will be done to flush stale mappings from the TLB.
1535 atomic_add_acq_long(&pmap->pm_eptgen, 1);
1538 * Force the vcpu to exit and trap back into the hypervisor.
1540 ipinum = pmap->pm_flags & PMAP_NESTED_IPIMASK;
1541 ipi_selected(pmap->pm_active, ipinum);
1546 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1551 if (pmap_type_guest(pmap)) {
1552 pmap_invalidate_ept(pmap);
1556 KASSERT(pmap->pm_type == PT_X86,
1557 ("pmap_invalidate_page: invalid type %d", pmap->pm_type));
1560 if (pmap == kernel_pmap) {
1564 cpuid = PCPU_GET(cpuid);
1565 if (pmap == PCPU_GET(curpmap))
1567 else if (pmap_pcid_enabled)
1568 pmap->pm_pcids[cpuid].pm_gen = 0;
1569 if (pmap_pcid_enabled) {
1572 pmap->pm_pcids[i].pm_gen = 0;
1575 mask = &pmap->pm_active;
1577 smp_masked_invlpg(*mask, va);
1581 /* 4k PTEs -- Chosen to exceed the total size of Broadwell L2 TLB */
1582 #define PMAP_INVLPG_THRESHOLD (4 * 1024 * PAGE_SIZE)
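/*
 * The threshold above corresponds to 4096 4KB pages, i.e. 16MB of virtual
 * address space; pmap_invalidate_range() handles ranges at least that
 * large with a full TLB invalidation instead of per-page INVLPGs.
 */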
1585 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1591 if (eva - sva >= PMAP_INVLPG_THRESHOLD) {
1592 pmap_invalidate_all(pmap);
1596 if (pmap_type_guest(pmap)) {
1597 pmap_invalidate_ept(pmap);
1601 KASSERT(pmap->pm_type == PT_X86,
1602 ("pmap_invalidate_range: invalid type %d", pmap->pm_type));
1605 cpuid = PCPU_GET(cpuid);
1606 if (pmap == kernel_pmap) {
1607 for (addr = sva; addr < eva; addr += PAGE_SIZE)
1611 if (pmap == PCPU_GET(curpmap)) {
1612 for (addr = sva; addr < eva; addr += PAGE_SIZE)
1614 } else if (pmap_pcid_enabled) {
1615 pmap->pm_pcids[cpuid].pm_gen = 0;
1617 if (pmap_pcid_enabled) {
1620 pmap->pm_pcids[i].pm_gen = 0;
1623 mask = &pmap->pm_active;
1625 smp_masked_invlpg_range(*mask, sva, eva);
1630 pmap_invalidate_all(pmap_t pmap)
1633 struct invpcid_descr d;
1636 if (pmap_type_guest(pmap)) {
1637 pmap_invalidate_ept(pmap);
1641 KASSERT(pmap->pm_type == PT_X86,
1642 ("pmap_invalidate_all: invalid type %d", pmap->pm_type));
1645 if (pmap == kernel_pmap) {
1646 if (pmap_pcid_enabled && invpcid_works) {
1647 bzero(&d, sizeof(d));
1648 invpcid(&d, INVPCID_CTXGLOB);
1654 cpuid = PCPU_GET(cpuid);
1655 if (pmap == PCPU_GET(curpmap)) {
1656 if (pmap_pcid_enabled) {
1657 if (invpcid_works) {
1658 d.pcid = pmap->pm_pcids[cpuid].pm_pcid;
1661 invpcid(&d, INVPCID_CTX);
1663 load_cr3(pmap->pm_cr3 | pmap->pm_pcids
1664 [PCPU_GET(cpuid)].pm_pcid);
1669 } else if (pmap_pcid_enabled) {
1670 pmap->pm_pcids[cpuid].pm_gen = 0;
1672 if (pmap_pcid_enabled) {
1675 pmap->pm_pcids[i].pm_gen = 0;
1678 mask = &pmap->pm_active;
1680 smp_masked_invltlb(*mask, pmap);
1685 pmap_invalidate_cache(void)
1695 cpuset_t invalidate; /* processors that invalidate their TLB */
1700 u_int store; /* processor that updates the PDE */
1704 pmap_update_pde_action(void *arg)
1706 struct pde_action *act = arg;
1708 if (act->store == PCPU_GET(cpuid))
1709 pmap_update_pde_store(act->pmap, act->pde, act->newpde);
1713 pmap_update_pde_teardown(void *arg)
1715 struct pde_action *act = arg;
1717 if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
1718 pmap_update_pde_invalidate(act->pmap, act->va, act->newpde);
1722 * Change the page size for the specified virtual address in a way that
1723 * prevents any possibility of the TLB ever having two entries that map the
1724 * same virtual address using different page sizes. This is the recommended
1725 * workaround for Erratum 383 on AMD Family 10h processors. It prevents a
1726 * machine check exception for a TLB state that is improperly diagnosed as a hardware error.
1730 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1732 struct pde_action act;
1733 cpuset_t active, other_cpus;
1737 cpuid = PCPU_GET(cpuid);
1738 other_cpus = all_cpus;
1739 CPU_CLR(cpuid, &other_cpus);
1740 if (pmap == kernel_pmap || pmap_type_guest(pmap))
1743 active = pmap->pm_active;
1745 if (CPU_OVERLAP(&active, &other_cpus)) {
1747 act.invalidate = active;
1751 act.newpde = newpde;
1752 CPU_SET(cpuid, &active);
1753 smp_rendezvous_cpus(active,
1754 smp_no_rendevous_barrier, pmap_update_pde_action,
1755 pmap_update_pde_teardown, &act);
1757 pmap_update_pde_store(pmap, pde, newpde);
1758 if (CPU_ISSET(cpuid, &active))
1759 pmap_update_pde_invalidate(pmap, va, newpde);
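/*
 * Note on the rendezvous above: every CPU in 'active' is held in
 * smp_rendezvous_cpus() while pmap_update_pde_action() performs the PDE
 * store (only on the initiating CPU) and pmap_update_pde_teardown() then
 * flushes the TLB on each CPU in 'invalidate', so no participating CPU
 * can run other code between the page-size change and its local flush.
 */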
1765 * Normal, non-SMP, invalidation functions.
1768 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1771 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
1775 KASSERT(pmap->pm_type == PT_X86,
1776 ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
1778 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
1780 else if (pmap_pcid_enabled)
1781 pmap->pm_pcids[0].pm_gen = 0;
1785 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1789 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
1793 KASSERT(pmap->pm_type == PT_X86,
1794 ("pmap_invalidate_range: unknown type %d", pmap->pm_type));
1796 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap)) {
1797 for (addr = sva; addr < eva; addr += PAGE_SIZE)
1799 } else if (pmap_pcid_enabled) {
1800 pmap->pm_pcids[0].pm_gen = 0;
1805 pmap_invalidate_all(pmap_t pmap)
1807 struct invpcid_descr d;
1809 if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
1813 KASSERT(pmap->pm_type == PT_X86,
1814 ("pmap_invalidate_all: unknown type %d", pmap->pm_type));
1816 if (pmap == kernel_pmap) {
1817 if (pmap_pcid_enabled && invpcid_works) {
1818 bzero(&d, sizeof(d));
1819 invpcid(&d, INVPCID_CTXGLOB);
1823 } else if (pmap == PCPU_GET(curpmap)) {
1824 if (pmap_pcid_enabled) {
1825 if (invpcid_works) {
1826 d.pcid = pmap->pm_pcids[0].pm_pcid;
1829 invpcid(&d, INVPCID_CTX);
1831 load_cr3(pmap->pm_cr3 | pmap->pm_pcids[0].
1837 } else if (pmap_pcid_enabled) {
1838 pmap->pm_pcids[0].pm_gen = 0;
1843 pmap_invalidate_cache(void)
1850 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1853 pmap_update_pde_store(pmap, pde, newpde);
1854 if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
1855 pmap_update_pde_invalidate(pmap, va, newpde);
1857 pmap->pm_pcids[0].pm_gen = 0;
1862 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
1866 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
1867 * by a promotion that did not invalidate the 512 4KB page mappings
1868 * that might exist in the TLB. Consequently, at this point, the TLB
1869 * may hold both 4KB and 2MB page mappings for the address range [va,
1870 * va + NBPDR). Therefore, the entire range must be invalidated here.
1871 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
1872 * 4KB page mappings for the address range [va, va + NBPDR), and so a
1873 * single INVLPG suffices to invalidate the 2MB page mapping from the TLB.
1876 if ((pde & PG_PROMOTED) != 0)
1877 pmap_invalidate_range(pmap, va, va + NBPDR - 1);
1879 pmap_invalidate_page(pmap, va);
1882 #define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
1885 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
1889 sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
1891 KASSERT((sva & PAGE_MASK) == 0,
1892 ("pmap_invalidate_cache_range: sva not page-aligned"));
1893 KASSERT((eva & PAGE_MASK) == 0,
1894 ("pmap_invalidate_cache_range: eva not page-aligned"));
1897 if ((cpu_feature & CPUID_SS) != 0 && !force)
1898 ; /* If "Self Snoop" is supported and allowed, do nothing. */
1899 else if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0 &&
1900 eva - sva < PMAP_CLFLUSH_THRESHOLD) {
1902 * XXX: Some CPUs fault, hang, or trash the local APIC
1903 * registers if we use CLFLUSH on the local APIC
1904 * range. The local APIC is always uncached, so we
1905 * don't need to flush for that range anyway.
1907 if (pmap_kextract(sva) == lapic_paddr)
1911 * Otherwise, do per-cache line flush. Use the sfence
1912 * instruction to ensure that previous stores are
1913 * included in the write-back. The processor
1914 * propagates the flush to other processors in the cache coherence domain.
1918 for (; sva < eva; sva += cpu_clflush_line_size)
1921 } else if ((cpu_feature & CPUID_CLFSH) != 0 &&
1922 eva - sva < PMAP_CLFLUSH_THRESHOLD) {
1923 if (pmap_kextract(sva) == lapic_paddr)
1926 * Writes are ordered by CLFLUSH on Intel CPUs.
1928 if (cpu_vendor_id != CPU_VENDOR_INTEL)
1930 for (; sva < eva; sva += cpu_clflush_line_size)
1932 if (cpu_vendor_id != CPU_VENDOR_INTEL)
1937 * No targeted cache flush methods are supported by CPU,
1938 * or the supplied range is bigger than 2MB.
1939 * Globally invalidate cache.
1941 pmap_invalidate_cache();
1946 * Remove the specified set of pages from the data and instruction caches.
1948 * In contrast to pmap_invalidate_cache_range(), this function does not
1949 * rely on the CPU's self-snoop feature, because it is intended for use
1950 * when moving pages into a different cache domain.
1953 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
1955 vm_offset_t daddr, eva;
1959 useclflushopt = (cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0;
1960 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
1961 ((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
1962 pmap_invalidate_cache();
1966 else if (cpu_vendor_id != CPU_VENDOR_INTEL)
1968 for (i = 0; i < count; i++) {
1969 daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
1970 eva = daddr + PAGE_SIZE;
1971 for (; daddr < eva; daddr += cpu_clflush_line_size) {
1980 else if (cpu_vendor_id != CPU_VENDOR_INTEL)
1986 * Routine: pmap_extract
1988 * Extract the physical page address associated
1989 * with the given map/virtual_address pair.
1992 pmap_extract(pmap_t pmap, vm_offset_t va)
1996 pt_entry_t *pte, PG_V;
2000 PG_V = pmap_valid_bit(pmap);
2002 pdpe = pmap_pdpe(pmap, va);
2003 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
2004 if ((*pdpe & PG_PS) != 0)
2005 pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
2007 pde = pmap_pdpe_to_pde(pdpe, va);
2008 if ((*pde & PG_V) != 0) {
2009 if ((*pde & PG_PS) != 0) {
2010 pa = (*pde & PG_PS_FRAME) |
2013 pte = pmap_pde_to_pte(pde, va);
2014 pa = (*pte & PG_FRAME) |
2025 * Routine: pmap_extract_and_hold
2027 * Atomically extract and hold the physical page
2028 * with the given pmap and virtual address pair
2029 * if that mapping permits the given protection.
2032 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
2034 pd_entry_t pde, *pdep;
2035 pt_entry_t pte, PG_RW, PG_V;
2041 PG_RW = pmap_rw_bit(pmap);
2042 PG_V = pmap_valid_bit(pmap);
2045 pdep = pmap_pde(pmap, va);
2046 if (pdep != NULL && (pde = *pdep)) {
2048 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
2049 if (vm_page_pa_tryrelock(pmap, (pde &
2050 PG_PS_FRAME) | (va & PDRMASK), &pa))
2052 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
2057 pte = *pmap_pde_to_pte(pdep, va);
2059 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
2060 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
2063 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
2074 pmap_kextract(vm_offset_t va)
2079 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
2080 pa = DMAP_TO_PHYS(va);
2084 pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
2087 * Beware of a concurrent promotion that changes the
2088 * PDE at this point! For example, vtopte() must not
2089 * be used to access the PTE because it would use the
2090 * new PDE. It is, however, safe to use the old PDE
2091 * because the page table page is preserved by the
2094 pa = *pmap_pde_to_pte(&pde, va);
2095 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
2101 /***************************************************
2102 * Low level mapping routines.....
2103 ***************************************************/
2106 * Add a wired page to the kva.
2107 * Note: not SMP coherent.
2110 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
2115 pte_store(pte, pa | X86_PG_RW | X86_PG_V | X86_PG_G);
2118 static __inline void
2119 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
2125 cache_bits = pmap_cache_bits(kernel_pmap, mode, 0);
2126 pte_store(pte, pa | X86_PG_RW | X86_PG_V | X86_PG_G | cache_bits);
2130 * Remove a page from the kernel pagetables.
2131 * Note: not SMP coherent.
2134 pmap_kremove(vm_offset_t va)
2143 * Used to map a range of physical addresses into kernel
2144 * virtual address space.
2146 * The value passed in '*virt' is a suggested virtual address for
2147 * the mapping. Architectures which can support a direct-mapped
2148 * physical to virtual region can return the appropriate address
2149 * within that region, leaving '*virt' unchanged. Other
2150 * architectures should map the pages starting at '*virt' and
2151 * update '*virt' with the first usable address after the mapped
2155 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
2157 return PHYS_TO_DMAP(start);
2162 * Add a list of wired pages to the kva.
2163 * This routine is only used for temporary
2164 * kernel mappings that do not need to have
2165 * page modification or references recorded.
2166 * Note that old mappings are simply written
2167 * over. The page *must* be wired.
2168 * Note: SMP coherent. Uses a ranged shootdown IPI.
2171 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
2173 pt_entry_t *endpte, oldpte, pa, *pte;
2179 endpte = pte + count;
2180 while (pte < endpte) {
2182 cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
2183 pa = VM_PAGE_TO_PHYS(m) | cache_bits;
2184 if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) {
2186 pte_store(pte, pa | X86_PG_G | X86_PG_RW | X86_PG_V);
2190 if (__predict_false((oldpte & X86_PG_V) != 0))
2191 pmap_invalidate_range(kernel_pmap, sva, sva + count *
2196 * This routine tears out page mappings from the
2197 * kernel -- it is meant only for temporary mappings.
2198 * Note: SMP coherent. Uses a ranged shootdown IPI.
2201 pmap_qremove(vm_offset_t sva, int count)
2206 while (count-- > 0) {
2207 KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
2211 pmap_invalidate_range(kernel_pmap, sva, va);
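/*
 * Illustrative sketch (not compiled, guarded by #if 0): pmap_qenter() and
 * pmap_qremove() are used in pairs for short-lived kernel mappings of
 * wired pages; 'kva', 'pages' and 'npages' below are placeholders for a
 * previously reserved KVA range and its backing pages.
 */
#if 0
	/* 'kva' is a reserved KVA range covering 'npages' pages. */
	pmap_qenter(kva, pages, npages);
	/* ... access the wired pages through kva ... */
	pmap_qremove(kva, npages);
#endif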
2214 /***************************************************
2215 * Page table page management routines.....
2216 ***************************************************/
2217 static __inline void
2218 pmap_free_zero_pages(struct spglist *free)
2223 for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
2224 SLIST_REMOVE_HEAD(free, plinks.s.ss);
2225 /* Preserve the page's PG_ZERO setting. */
2226 vm_page_free_toq(m);
2228 atomic_subtract_int(&vm_cnt.v_wire_count, count);
2232 * Schedule the specified unused page table page to be freed. Specifically,
2233 * add the page to the specified list of pages that will be released to the
2234 * physical memory manager after the TLB has been updated.
2236 static __inline void
2237 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
2238 boolean_t set_PG_ZERO)
2242 m->flags |= PG_ZERO;
2244 m->flags &= ~PG_ZERO;
2245 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
2249 * Inserts the specified page table page into the specified pmap's collection
2250 * of idle page table pages. Each of a pmap's page table pages is responsible
2251 * for mapping a distinct range of virtual addresses. The pmap's collection is
2252 * ordered by this virtual address range.
2255 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
2258 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2259 return (vm_radix_insert(&pmap->pm_root, mpte));
2263 * Removes the page table page mapping the specified virtual address from the
2264 * specified pmap's collection of idle page table pages, and returns it.
2265 * Otherwise, returns NULL if there is no page table page corresponding to the
2266 * specified virtual address.
2268 static __inline vm_page_t
2269 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
2272 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2273 return (vm_radix_remove(&pmap->pm_root, pmap_pde_pindex(va)));
2277 * Decrements a page table page's wire count, which is used to record the
2278 * number of valid page table entries within the page. If the wire count
2279 * drops to zero, then the page table page is unmapped. Returns TRUE if the
2280 * page table page was unmapped and FALSE otherwise.
2282 static inline boolean_t
2283 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2287 if (m->wire_count == 0) {
2288 _pmap_unwire_ptp(pmap, va, m, free);
2295 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
2298 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2300 * unmap the page table page
2302 if (m->pindex >= (NUPDE + NUPDPE)) {
2305 pml4 = pmap_pml4e(pmap, va);
2307 } else if (m->pindex >= NUPDE) {
2310 pdp = pmap_pdpe(pmap, va);
2315 pd = pmap_pde(pmap, va);
2318 pmap_resident_count_dec(pmap, 1);
2319 if (m->pindex < NUPDE) {
2320 /* We just released a PT, unhold the matching PD */
2323 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
2324 pmap_unwire_ptp(pmap, va, pdpg, free);
2326 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
2327 /* We just released a PD, unhold the matching PDP */
2330 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
2331 pmap_unwire_ptp(pmap, va, pdppg, free);
2335 * Put page on a list so that it is released after
2336 * *ALL* TLB shootdown is done
2338 pmap_add_delayed_free_list(m, free, TRUE);
2342 * After removing a page table entry, this routine is used to
2343 * conditionally free the page, and manage the hold/wire counts.
2346 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
2347 struct spglist *free)
2351 if (va >= VM_MAXUSER_ADDRESS)
2353 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
2354 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
2355 return (pmap_unwire_ptp(pmap, va, mpte, free));
2359 pmap_pinit0(pmap_t pmap)
2363 PMAP_LOCK_INIT(pmap);
2364 pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
2365 pmap->pm_cr3 = KPML4phys;
2366 pmap->pm_root.rt_root = 0;
2367 CPU_ZERO(&pmap->pm_active);
2368 TAILQ_INIT(&pmap->pm_pvchunk);
2369 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2370 pmap->pm_flags = pmap_flags;
2372 pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE;
2373 pmap->pm_pcids[i].pm_gen = 0;
2375 PCPU_SET(curpmap, kernel_pmap);
2376 pmap_activate(curthread);
2377 CPU_FILL(&kernel_pmap->pm_active);
2381 pmap_pinit_pml4(vm_page_t pml4pg)
2383 pml4_entry_t *pm_pml4;
2386 pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
2388 /* Wire in kernel global address entries. */
2389 for (i = 0; i < NKPML4E; i++) {
2390 pm_pml4[KPML4BASE + i] = (KPDPphys + ptoa(i)) | X86_PG_RW |
2393 for (i = 0; i < ndmpdpphys; i++) {
2394 pm_pml4[DMPML4I + i] = (DMPDPphys + ptoa(i)) | X86_PG_RW |
2398 /* Install the self-referential address mapping entry. */
2399 pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | X86_PG_V | X86_PG_RW |
2400 X86_PG_A | X86_PG_M;
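/*
 * The PML4PML4I slot points the PML4 page back at itself.  Through this
 * recursive entry every page table page of the pmap becomes visible in a
 * fixed virtual window, which is what the vtopte()-style lookups used
 * elsewhere in this file rely on.  Note that the entry is installed
 * without X86_PG_U, so the window is supervisor-only.
 */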
2404 * Initialize a preallocated and zeroed pmap structure,
2405 * such as one in a vmspace structure.
2408 pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
2411 vm_paddr_t pml4phys;
2415 * allocate the page directory page
2417 while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2418 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
2421 pml4phys = VM_PAGE_TO_PHYS(pml4pg);
2422 pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(pml4phys);
2424 pmap->pm_pcids[i].pm_pcid = PMAP_PCID_NONE;
2425 pmap->pm_pcids[i].pm_gen = 0;
2427 pmap->pm_cr3 = ~0; /* initialize to an invalid value */
2429 if ((pml4pg->flags & PG_ZERO) == 0)
2430 pagezero(pmap->pm_pml4);
2433 * Do not install the host kernel mappings in the nested page
2434 * tables. These mappings are meaningless in the guest physical address space.
2437 if ((pmap->pm_type = pm_type) == PT_X86) {
2438 pmap->pm_cr3 = pml4phys;
2439 pmap_pinit_pml4(pml4pg);
2442 pmap->pm_root.rt_root = 0;
2443 CPU_ZERO(&pmap->pm_active);
2444 TAILQ_INIT(&pmap->pm_pvchunk);
2445 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
2446 pmap->pm_flags = flags;
2447 pmap->pm_eptgen = 0;
2453 pmap_pinit(pmap_t pmap)
2456 return (pmap_pinit_type(pmap, PT_X86, pmap_flags));
2460 * This routine is called if the desired page table page does not exist.
2462 * If page table page allocation fails, this routine may sleep before
2463 * returning NULL. It sleeps only if a lock pointer was given.
2465 * Note: If a page allocation fails at page table level two or three,
2466 * one or two pages may be held during the wait, only to be released
2467 * afterwards. This conservative approach is easily argued to avoid race conditions.
2471 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
2473 vm_page_t m, pdppg, pdpg;
2474 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
2476 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2478 PG_A = pmap_accessed_bit(pmap);
2479 PG_M = pmap_modified_bit(pmap);
2480 PG_V = pmap_valid_bit(pmap);
2481 PG_RW = pmap_rw_bit(pmap);
2484 * Allocate a page table page.
2486 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
2487 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
2488 if (lockp != NULL) {
2489 RELEASE_PV_LIST_LOCK(lockp);
2491 PMAP_ASSERT_NOT_IN_DI();
2497 * Indicate the need to retry. While waiting, the page table
2498 * page may have been allocated.
2502 if ((m->flags & PG_ZERO) == 0)
2506 * Map the page table page into the process address space, if
2507 * it isn't already there.
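*
* As a sketch of the pindex space used below (assuming the standard amd64
* layout of 512 entries, i.e. 9 bits, per page table level): indices
* [0, NUPDE) name page table (PT) pages, [NUPDE, NUPDE + NUPDPE) name
* page directory (PD) pages, and [NUPDE + NUPDPE, ...) name PDP pages.
* Hence the first branch below wires a new PDP page into the PML4, the
* second wires a PD page into a PDP, and the last wires a PT page into a
* PD, recursing one level up whenever the parent entry is not yet valid.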
2510 if (ptepindex >= (NUPDE + NUPDPE)) {
2512 vm_pindex_t pml4index;
2514 /* Wire up a new PDPE page */
2515 pml4index = ptepindex - (NUPDE + NUPDPE);
2516 pml4 = &pmap->pm_pml4[pml4index];
2517 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
2519 } else if (ptepindex >= NUPDE) {
2520 vm_pindex_t pml4index;
2521 vm_pindex_t pdpindex;
2525 /* Wire up a new PDE page */
2526 pdpindex = ptepindex - NUPDE;
2527 pml4index = pdpindex >> NPML4EPGSHIFT;
2529 pml4 = &pmap->pm_pml4[pml4index];
2530 if ((*pml4 & PG_V) == 0) {
2531 /* Have to allocate a new pdp, recurse */
2532 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
2535 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
2536 vm_page_free_zero(m);
2540 /* Add reference to pdp page */
2541 pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
2542 pdppg->wire_count++;
2544 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
2546 /* Now find the pdp page */
2547 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
2548 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
2551 vm_pindex_t pml4index;
2552 vm_pindex_t pdpindex;
2557 /* Wire up a new PTE page */
2558 pdpindex = ptepindex >> NPDPEPGSHIFT;
2559 pml4index = pdpindex >> NPML4EPGSHIFT;
2561 /* First, find the pdp and check that it is valid. */
2562 pml4 = &pmap->pm_pml4[pml4index];
2563 if ((*pml4 & PG_V) == 0) {
2564 /* Have to allocate a new pd, recurse */
2565 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
2568 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
2569 vm_page_free_zero(m);
2572 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
2573 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
2575 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
2576 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
2577 if ((*pdp & PG_V) == 0) {
2578 /* Have to allocate a new pd, recurse */
2579 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
2582 atomic_subtract_int(&vm_cnt.v_wire_count,
2584 vm_page_free_zero(m);
2588 /* Add reference to the pd page */
2589 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
2593 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
2595 /* Now we know where the page directory page is */
2596 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
2597 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
2600 pmap_resident_count_inc(pmap, 1);
2606 pmap_allocpde(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
2608 vm_pindex_t pdpindex, ptepindex;
2609 pdp_entry_t *pdpe, PG_V;
2612 PG_V = pmap_valid_bit(pmap);
2615 pdpe = pmap_pdpe(pmap, va);
2616 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
2617 /* Add a reference to the pd page. */
2618 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
2621 /* Allocate a pd page. */
2622 ptepindex = pmap_pde_pindex(va);
2623 pdpindex = ptepindex >> NPDPEPGSHIFT;
2624 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
2625 if (pdpg == NULL && lockp != NULL)
2632 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
2634 vm_pindex_t ptepindex;
2635 pd_entry_t *pd, PG_V;
2638 PG_V = pmap_valid_bit(pmap);
2641 * Calculate the page table page index
2643 ptepindex = pmap_pde_pindex(va);
2646 * Get the page directory entry
2648 pd = pmap_pde(pmap, va);
2651 * This supports switching from a 2MB page to a 4KB page.
2654 if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
2655 if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) {
2657 * Invalidation of the 2MB page mapping may have caused
2658 * the deallocation of the underlying PD page.
2665 * If the page table page is mapped, we just increment the
2666 * hold count, and activate it.
2668 if (pd != NULL && (*pd & PG_V) != 0) {
2669 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
2673 * Here if the pte page isn't mapped, or if it has been deallocated.
2676 m = _pmap_allocpte(pmap, ptepindex, lockp);
2677 if (m == NULL && lockp != NULL)
2684 /***************************************************
2685 * Pmap allocation/deallocation routines.
2686 ***************************************************/
2689 * Release any resources held by the given physical map.
2690 * Called when a pmap initialized by pmap_pinit is being released.
2691 * Should only be called if the map contains no valid mappings.
2694 pmap_release(pmap_t pmap)
2699 KASSERT(pmap->pm_stats.resident_count == 0,
2700 ("pmap_release: pmap resident count %ld != 0",
2701 pmap->pm_stats.resident_count));
2702 KASSERT(vm_radix_is_empty(&pmap->pm_root),
2703 ("pmap_release: pmap has reserved page table page(s)"));
2704 KASSERT(CPU_EMPTY(&pmap->pm_active),
2705 ("releasing active pmap %p", pmap));
2707 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4));
2709 for (i = 0; i < NKPML4E; i++) /* KVA */
2710 pmap->pm_pml4[KPML4BASE + i] = 0;
2711 for (i = 0; i < ndmpdpphys; i++)/* Direct Map */
2712 pmap->pm_pml4[DMPML4I + i] = 0;
2713 pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */
2716 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
2717 vm_page_free_zero(m);
2721 kvm_size(SYSCTL_HANDLER_ARGS)
2723 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
2725 return sysctl_handle_long(oidp, &ksize, 0, req);
2727 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
2728 0, 0, kvm_size, "LU", "Size of KVM");
2731 kvm_free(SYSCTL_HANDLER_ARGS)
2733 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
2735 return sysctl_handle_long(oidp, &kfree, 0, req);
2737 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
2738 0, 0, kvm_free, "LU", "Amount of KVM free");
2741 * grow the number of kernel page table entries, if needed
2744 pmap_growkernel(vm_offset_t addr)
2748 pd_entry_t *pde, newpdir;
2751 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
2754 * Return if "addr" is within the range of kernel page table pages
2755 * that were preallocated during pmap bootstrap. Moreover, leave
2756 * "kernel_vm_end" and the kernel page table as they were.
2758 * The correctness of this action is based on the following
2759 * argument: vm_map_insert() allocates contiguous ranges of the
2760 * kernel virtual address space. It calls this function if a range
2761 * ends after "kernel_vm_end". If the kernel is mapped between
2762 * "kernel_vm_end" and "addr", then the range cannot begin at
2763 * "kernel_vm_end". In fact, its beginning address cannot be less
2764 * than the kernel. Thus, there is no immediate need to allocate
2765 * any new kernel page table pages between "kernel_vm_end" and
2768 if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR)
2771 addr = roundup2(addr, NBPDR);
2772 if (addr - 1 >= kernel_map->max_offset)
2773 addr = kernel_map->max_offset;
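/*
 * Kernel VA is grown in NBPDR (2MB) steps: each pass of the loop below
 * installs one page directory entry, i.e. one new page table page
 * covering 2MB of KVA, first allocating a PDP page when the containing
 * PDP entry does not yet exist.  Hence the roundup2() of "addr" to an
 * NBPDR boundary above.
 */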
2774 while (kernel_vm_end < addr) {
2775 pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
2776 if ((*pdpe & X86_PG_V) == 0) {
2777 /* We need a new PDP entry */
2778 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
2779 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
2780 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
2782 panic("pmap_growkernel: no memory to grow kernel");
2783 if ((nkpg->flags & PG_ZERO) == 0)
2784 pmap_zero_page(nkpg);
2785 paddr = VM_PAGE_TO_PHYS(nkpg);
2786 *pdpe = (pdp_entry_t)(paddr | X86_PG_V | X86_PG_RW |
2787 X86_PG_A | X86_PG_M);
2788 continue; /* try again */
2790 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
2791 if ((*pde & X86_PG_V) != 0) {
2792 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
2793 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
2794 kernel_vm_end = kernel_map->max_offset;
2800 nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
2801 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2804 panic("pmap_growkernel: no memory to grow kernel");
2805 if ((nkpg->flags & PG_ZERO) == 0)
2806 pmap_zero_page(nkpg);
2807 paddr = VM_PAGE_TO_PHYS(nkpg);
2808 newpdir = paddr | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
2809 pde_store(pde, newpdir);
2811 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
2812 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
2813 kernel_vm_end = kernel_map->max_offset;
2820 /***************************************************
2821 * Page management routines.
2822 ***************************************************/
2824 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
2825 CTASSERT(_NPCM == 3);
2826 CTASSERT(_NPCPV == 168);
2828 static __inline struct pv_chunk *
2829 pv_to_chunk(pv_entry_t pv)
2832 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
2835 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
2837 #define PC_FREE0 0xfffffffffffffffful
2838 #define PC_FREE1 0xfffffffffffffffful
2839 #define PC_FREE2 0x000000fffffffffful
2841 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
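/*
 * The free masks encode how a pv_chunk's 4KB page is carved up: PC_FREE0
 * and PC_FREE1 cover 64 pv entries each, and PC_FREE2 covers the
 * remaining 40 (only its low 40 bits are set), for 64 + 64 + 40 = 168 =
 * _NPCPV entries per chunk; the rest of the page holds the chunk header,
 * as the CTASSERTs above require.
 */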
2844 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
2846 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
2847 "Current number of pv entry chunks");
2848 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
2849 "Current number of pv entry chunks allocated");
2850 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
2851 "Total number of pv entry chunk frees");
2852 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
2853 "Number of failed attempts to allocate a pv entry chunk page");
2855 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
2856 static int pv_entry_spare;
2858 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
2859 "Total number of pv entry frees");
2860 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
2861 "Total number of pv entry allocs");
2862 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
2863 "Current number of pv entries");
2864 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
2865 "Current number of spare pv entries");
2869 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di)
2874 pmap_invalidate_all(pmap);
2875 if (pmap != locked_pmap)
2878 pmap_delayed_invl_finished();
2882 * We are in a serious low memory condition. Resort to
2883 * drastic measures to free some pages so we can allocate
2884 * another pv entry chunk.
2886 * Returns NULL if PV entries were reclaimed from the specified pmap.
2888 * We do not, however, unmap 2mpages because subsequent accesses will
2889 * allocate per-page pv entries until repromotion occurs, thereby
2890 * exacerbating the shortage of free pv entries.
2893 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
2895 struct pv_chunk *pc, *pc_marker;
2896 struct pv_chunk_header pc_marker_b;
2897 struct md_page *pvh;
2899 pmap_t next_pmap, pmap;
2900 pt_entry_t *pte, tpte;
2901 pt_entry_t PG_G, PG_A, PG_M, PG_RW;
2905 struct spglist free;
2907 int bit, field, freed;
2910 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
2911 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
2914 PG_G = PG_A = PG_M = PG_RW = 0;
2916 bzero(&pc_marker_b, sizeof(pc_marker_b));
2917 pc_marker = (struct pv_chunk *)&pc_marker_b;
2920 * A delayed invalidation block should already be active if
2921 * pmap_advise() or pmap_remove() called this function by way
2922 * of pmap_demote_pde_locked().
2924 start_di = pmap_not_in_di();
2926 mtx_lock(&pv_chunks_mutex);
2927 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
2928 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != NULL &&
2929 SLIST_EMPTY(&free)) {
2930 next_pmap = pc->pc_pmap;
2931 if (next_pmap == NULL) /* marker */
2933 mtx_unlock(&pv_chunks_mutex);
2936 * A pv_chunk can only be removed from the pc_lru list
2937 * when both pv_chunks_mutex is owned and the
2938 * corresponding pmap is locked.
2940 if (pmap != next_pmap) {
2941 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
2944 /* Avoid deadlock and lock recursion. */
2945 if (pmap > locked_pmap) {
2946 RELEASE_PV_LIST_LOCK(lockp);
2949 pmap_delayed_invl_started();
2950 mtx_lock(&pv_chunks_mutex);
2952 } else if (pmap != locked_pmap) {
2953 if (PMAP_TRYLOCK(pmap)) {
2955 pmap_delayed_invl_started();
2956 mtx_lock(&pv_chunks_mutex);
2959 pmap = NULL; /* pmap is not locked */
2960 mtx_lock(&pv_chunks_mutex);
2961 pc = TAILQ_NEXT(pc_marker, pc_lru);
2963 pc->pc_pmap != next_pmap)
2967 } else if (start_di)
2968 pmap_delayed_invl_started();
2969 PG_G = pmap_global_bit(pmap);
2970 PG_A = pmap_accessed_bit(pmap);
2971 PG_M = pmap_modified_bit(pmap);
2972 PG_RW = pmap_rw_bit(pmap);
2976 * Destroy every non-wired, 4 KB page mapping in the chunk.
2979 for (field = 0; field < _NPCM; field++) {
2980 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2981 inuse != 0; inuse &= ~(1UL << bit)) {
2983 pv = &pc->pc_pventry[field * 64 + bit];
2985 pde = pmap_pde(pmap, va);
2986 if ((*pde & PG_PS) != 0)
2988 pte = pmap_pde_to_pte(pde, va);
2989 if ((*pte & PG_W) != 0)
2991 tpte = pte_load_clear(pte);
2992 if ((tpte & PG_G) != 0)
2993 pmap_invalidate_page(pmap, va);
2994 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
2995 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2997 if ((tpte & PG_A) != 0)
2998 vm_page_aflag_set(m, PGA_REFERENCED);
2999 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3000 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3002 if (TAILQ_EMPTY(&m->md.pv_list) &&
3003 (m->flags & PG_FICTITIOUS) == 0) {
3004 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3005 if (TAILQ_EMPTY(&pvh->pv_list)) {
3006 vm_page_aflag_clear(m,
3010 pmap_delayed_invl_page(m);
3011 pc->pc_map[field] |= 1UL << bit;
3012 pmap_unuse_pt(pmap, va, *pde, &free);
3017 mtx_lock(&pv_chunks_mutex);
3020 /* Every freed mapping is for a 4 KB page. */
3021 pmap_resident_count_dec(pmap, freed);
3022 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
3023 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
3024 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
3025 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3026 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
3027 pc->pc_map[2] == PC_FREE2) {
3028 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
3029 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
3030 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
3031 /* Entire chunk is free; return it. */
3032 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
3033 dump_drop_page(m_pc->phys_addr);
3034 mtx_lock(&pv_chunks_mutex);
3035 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
3038 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3039 mtx_lock(&pv_chunks_mutex);
3040 /* One freed pv entry in locked_pmap is sufficient. */
3041 if (pmap == locked_pmap)
3044 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
3045 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
3047 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
3048 mtx_unlock(&pv_chunks_mutex);
3049 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di);
3050 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
3051 m_pc = SLIST_FIRST(&free);
3052 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
3053 /* Recycle a freed page table page. */
3054 m_pc->wire_count = 1;
3056 pmap_free_zero_pages(&free);
3061 * free the pv_entry back to the free list
3064 free_pv_entry(pmap_t pmap, pv_entry_t pv)
3066 struct pv_chunk *pc;
3067 int idx, field, bit;
3069 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3070 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
3071 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
3072 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
3073 pc = pv_to_chunk(pv);
3074 idx = pv - &pc->pc_pventry[0];
3077 pc->pc_map[field] |= 1ul << bit;
3078 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
3079 pc->pc_map[2] != PC_FREE2) {
3080 /* 98% of the time, pc is already at the head of the list. */
3081 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
3082 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3083 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3087 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3092 free_pv_chunk(struct pv_chunk *pc)
3096 mtx_lock(&pv_chunks_mutex);
3097 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
3098 mtx_unlock(&pv_chunks_mutex);
3099 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
3100 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
3101 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
3102 /* entire chunk is free, return it */
3103 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
3104 dump_drop_page(m->phys_addr);
3105 vm_page_unwire(m, PQ_NONE);
3110 * Returns a new PV entry, allocating a new PV chunk from the system when
3111 * needed. If this PV chunk allocation fails and a PV list lock pointer was
3112 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
3115 * The given PV list lock may be released.
3118 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
3122 struct pv_chunk *pc;
3125 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3126 PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
3128 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3130 for (field = 0; field < _NPCM; field++) {
3131 if (pc->pc_map[field]) {
3132 bit = bsfq(pc->pc_map[field]);
3136 if (field < _NPCM) {
3137 pv = &pc->pc_pventry[field * 64 + bit];
3138 pc->pc_map[field] &= ~(1ul << bit);
3139 /* If this was the last free entry, move the chunk to the tail. */
3140 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
3141 pc->pc_map[2] == 0) {
3142 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3143 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
3146 PV_STAT(atomic_add_long(&pv_entry_count, 1));
3147 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
3151 /* No free items, allocate another chunk */
3152 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
3155 if (lockp == NULL) {
3156 PV_STAT(pc_chunk_tryfail++);
3159 m = reclaim_pv_chunk(pmap, lockp);
3163 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
3164 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
3165 dump_add_page(m->phys_addr);
3166 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
3168 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
3169 pc->pc_map[1] = PC_FREE1;
3170 pc->pc_map[2] = PC_FREE2;
3171 mtx_lock(&pv_chunks_mutex);
3172 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
3173 mtx_unlock(&pv_chunks_mutex);
3174 pv = &pc->pc_pventry[0];
3175 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3176 PV_STAT(atomic_add_long(&pv_entry_count, 1));
3177 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
3182 * Returns the number of one bits within the given PV chunk map.
3184 * The errata for Intel processors state that "POPCNT Instruction May
3185 * Take Longer to Execute Than Expected". It is believed that the
3186 * issue is the spurious dependency on the destination register.
3187 * Provide a hint to the register rename logic that the destination
3188 * value is overwritten, by clearing it, as suggested in the
3189 * optimization manual. It should be cheap for unaffected processors
3192 * Reference numbers for the errata are
3193 * 4th Gen Core: HSD146
3194 * 5th Gen Core: BDM85
3195 * 6th Gen Core: SKL029
3198 popcnt_pc_map_pq(uint64_t *map)
3202 __asm __volatile("xorl %k0,%k0;popcntq %2,%0;"
3203 "xorl %k1,%k1;popcntq %3,%1;addl %k1,%k0;"
3204 "xorl %k1,%k1;popcntq %4,%1;addl %k1,%k0"
3205 : "=&r" (result), "=&r" (tmp)
3206 : "m" (map[0]), "m" (map[1]), "m" (map[2]));
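/*
 * Functionally the sequence above is just
 *
 *	result = popcount(map[0]) + popcount(map[1]) + popcount(map[2]);
 *
 * the explicit "xorl" of each destination before "popcntq" exists only as
 * the dependency-breaking hint described above.
 */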
3211 * Ensure that the number of spare PV entries in the specified pmap meets or
3212 * exceeds the given count, "needed".
3214 * The given PV list lock may be released.
3217 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
3219 struct pch new_tail;
3220 struct pv_chunk *pc;
3224 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3225 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
3228 * Newly allocated PV chunks must be stored in a private list until
3229 * the required number of PV chunks have been allocated. Otherwise,
3230 * reclaim_pv_chunk() could recycle one of these chunks. In
3231 * contrast, these chunks must be added to the pmap upon allocation.
3233 TAILQ_INIT(&new_tail);
3236 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
3238 if ((cpu_feature2 & CPUID2_POPCNT) == 0)
3239 bit_count((bitstr_t *)pc->pc_map, 0,
3240 sizeof(pc->pc_map) * NBBY, &free);
3243 free = popcnt_pc_map_pq(pc->pc_map);
3247 if (avail >= needed)
3250 for (; avail < needed; avail += _NPCPV) {
3251 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
3254 m = reclaim_pv_chunk(pmap, lockp);
3258 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
3259 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
3260 dump_add_page(m->phys_addr);
3261 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
3263 pc->pc_map[0] = PC_FREE0;
3264 pc->pc_map[1] = PC_FREE1;
3265 pc->pc_map[2] = PC_FREE2;
3266 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
3267 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
3268 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
3270 if (!TAILQ_EMPTY(&new_tail)) {
3271 mtx_lock(&pv_chunks_mutex);
3272 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
3273 mtx_unlock(&pv_chunks_mutex);
3278 * First find and then remove the pv entry for the specified pmap and virtual
3279 * address from the specified pv list. Returns the pv entry if found and NULL
3280 * otherwise. This operation can be performed on pv lists for either 4KB or
3281 * 2MB page mappings.
3283 static __inline pv_entry_t
3284 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3288 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3289 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
3290 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
3299 * After demotion from a 2MB page mapping to 512 4KB page mappings,
3300 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
3301 * entries for each of the 4KB page mappings.
3304 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3305 struct rwlock **lockp)
3307 struct md_page *pvh;
3308 struct pv_chunk *pc;
3310 vm_offset_t va_last;
3314 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3315 KASSERT((pa & PDRMASK) == 0,
3316 ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
3317 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3320 * Transfer the 2mpage's pv entry for this mapping to the first
3321 * page's pv list. Once this transfer begins, the pv list lock
3322 * must not be released until the last pv entry is reinstantiated.
3324 pvh = pa_to_pvh(pa);
3325 va = trunc_2mpage(va);
3326 pv = pmap_pvh_remove(pvh, pmap, va);
3327 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
3328 m = PHYS_TO_VM_PAGE(pa);
3329 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3331 /* Instantiate the remaining NPTEPG - 1 pv entries. */
3332 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
3333 va_last = va + NBPDR - PAGE_SIZE;
3335 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
3336 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
3337 pc->pc_map[2] != 0, ("pmap_pv_demote_pde: missing spare"));
3338 for (field = 0; field < _NPCM; field++) {
3339 while (pc->pc_map[field]) {
3340 bit = bsfq(pc->pc_map[field]);
3341 pc->pc_map[field] &= ~(1ul << bit);
3342 pv = &pc->pc_pventry[field * 64 + bit];
3346 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3347 ("pmap_pv_demote_pde: page %p is not managed", m));
3348 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3354 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3355 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3358 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
3359 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3360 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
3362 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
3363 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
3367 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
3368 * replace the many pv entries for the 4KB page mappings by a single pv entry
3369 * for the 2MB page mapping.
3372 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3373 struct rwlock **lockp)
3375 struct md_page *pvh;
3377 vm_offset_t va_last;
3380 KASSERT((pa & PDRMASK) == 0,
3381 ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
3382 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3385 * Transfer the first page's pv entry for this mapping to the 2mpage's
3386 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
3387 * a transfer avoids the possibility that get_pv_entry() calls
3388 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
3389 * mappings that is being promoted.
3391 m = PHYS_TO_VM_PAGE(pa);
3392 va = trunc_2mpage(va);
3393 pv = pmap_pvh_remove(&m->md, pmap, va);
3394 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
3395 pvh = pa_to_pvh(pa);
3396 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3398 /* Free the remaining NPTEPG - 1 pv entries. */
3399 va_last = va + NBPDR - PAGE_SIZE;
3403 pmap_pvh_free(&m->md, pmap, va);
3404 } while (va < va_last);
3408 * First find and then destroy the pv entry for the specified pmap and virtual
3409 * address. This operation can be performed on pv lists for either 4KB or 2MB page mappings.
3413 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
3417 pv = pmap_pvh_remove(pvh, pmap, va);
3418 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
3419 free_pv_entry(pmap, pv);
3423 * Conditionally create the PV entry for a 4KB page mapping if the required
3424 * memory can be allocated without resorting to reclamation.
3427 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
3428 struct rwlock **lockp)
3432 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3433 /* Pass NULL instead of the lock pointer to disable reclamation. */
3434 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
3436 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3437 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3445 * Create the PV entry for a 2MB page mapping. Always returns true unless the
3446 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
3447 * false if the PV entry cannot be allocated without resorting to reclamation.
3450 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags,
3451 struct rwlock **lockp)
3453 struct md_page *pvh;
3457 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3458 /* Pass NULL instead of the lock pointer to disable reclamation. */
3459 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
3460 NULL : lockp)) == NULL)
3463 pa = pde & PG_PS_FRAME;
3464 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3465 pvh = pa_to_pvh(pa);
3466 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3472 * Fills a page table page with mappings to consecutive physical pages.
3475 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
3479 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
3481 newpte += PAGE_SIZE;
3486 * Tries to demote a 2MB page mapping. If demotion fails, the 2MB page
3487 * mapping is invalidated.
3490 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
3492 struct rwlock *lock;
3496 rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
3503 pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
3504 struct rwlock **lockp)
3506 pd_entry_t newpde, oldpde;
3507 pt_entry_t *firstpte, newpte;
3508 pt_entry_t PG_A, PG_G, PG_M, PG_RW, PG_V;
3511 struct spglist free;
3515 PG_G = pmap_global_bit(pmap);
3516 PG_A = pmap_accessed_bit(pmap);
3517 PG_M = pmap_modified_bit(pmap);
3518 PG_RW = pmap_rw_bit(pmap);
3519 PG_V = pmap_valid_bit(pmap);
3520 PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
3522 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3524 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
3525 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
3526 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
3528 KASSERT((oldpde & PG_W) == 0,
3529 ("pmap_demote_pde: page table page for a wired mapping"
3533 * Invalidate the 2MB page mapping and return "failure" if the
3534 * mapping was never accessed or the allocation of the new
3535 * page table page fails. If the 2MB page mapping belongs to
3536 * the direct map region of the kernel's address space, then
3537 * the page allocation request specifies the highest possible
3538 * priority (VM_ALLOC_INTERRUPT). Otherwise, the priority is
3539 * normal. Page table pages are preallocated for every other
3540 * part of the kernel address space, so the direct map region
3541 * is the only part of the kernel address space that must be
3544 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
3545 pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
3546 DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
3547 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
3549 sva = trunc_2mpage(va);
3550 pmap_remove_pde(pmap, pde, sva, &free, lockp);
3551 if ((oldpde & PG_G) == 0)
3552 pmap_invalidate_pde_page(pmap, sva, oldpde);
3553 pmap_free_zero_pages(&free);
3554 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
3555 " in pmap %p", va, pmap);
3558 if (va < VM_MAXUSER_ADDRESS)
3559 pmap_resident_count_inc(pmap, 1);
3561 mptepa = VM_PAGE_TO_PHYS(mpte);
3562 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
3563 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
3564 KASSERT((oldpde & PG_A) != 0,
3565 ("pmap_demote_pde: oldpde is missing PG_A"));
3566 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
3567 ("pmap_demote_pde: oldpde is missing PG_M"));
3568 newpte = oldpde & ~PG_PS;
3569 newpte = pmap_swap_pat(pmap, newpte);
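/*
 * The PAT-select bit sits at a different position in a 2MB PDE than in a
 * 4KB PTE (bit 12 versus bit 7, the latter being PG_PS in a PDE), so
 * pmap_swap_pat() moves it; without this the demoted 4KB mappings could
 * end up with a different memory attribute than the superpage had.
 */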
3572 * If the page table page is new, initialize it.
3574 if (mpte->wire_count == 1) {
3575 mpte->wire_count = NPTEPG;
3576 pmap_fill_ptp(firstpte, newpte);
3578 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
3579 ("pmap_demote_pde: firstpte and newpte map different physical"
3583 * If the mapping has changed attributes, update the page table
3586 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
3587 pmap_fill_ptp(firstpte, newpte);
3590 * The spare PV entries must be reserved prior to demoting the
3591 * mapping, that is, prior to changing the PDE. Otherwise, the state
3592 * of the PDE and the PV lists will be inconsistent, which can result
3593 * in reclaim_pv_chunk() attempting to remove a PV entry from the
3594 * wrong PV list and pmap_pv_demote_pde() failing to find the expected
3595 * PV entry for the 2MB page mapping that is being demoted.
3597 if ((oldpde & PG_MANAGED) != 0)
3598 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
3601 * Demote the mapping. This pmap is locked. The old PDE has
3602 * PG_A set. If the old PDE has PG_RW set, it also has PG_M
3603 * set. Thus, there is no danger of a race with another
3604 * processor changing the setting of PG_A and/or PG_M between
3605 * the read above and the store below.
3607 if (workaround_erratum383)
3608 pmap_update_pde(pmap, va, pde, newpde);
3610 pde_store(pde, newpde);
3613 * Invalidate a stale recursive mapping of the page table page.
3615 if (va >= VM_MAXUSER_ADDRESS)
3616 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
3619 * Demote the PV entry.
3621 if ((oldpde & PG_MANAGED) != 0)
3622 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp);
3624 atomic_add_long(&pmap_pde_demotions, 1);
3625 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx"
3626 " in pmap %p", va, pmap);
3631 * pmap_remove_kernel_pde: Remove a kernel superpage mapping.
3634 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
3640 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
3641 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3642 mpte = pmap_remove_pt_page(pmap, va);
3644 panic("pmap_remove_kernel_pde: Missing pt page.");
3646 mptepa = VM_PAGE_TO_PHYS(mpte);
3647 newpde = mptepa | X86_PG_M | X86_PG_A | X86_PG_RW | X86_PG_V;
3650 * Initialize the page table page.
3652 pagezero((void *)PHYS_TO_DMAP(mptepa));
3655 * Demote the mapping.
3657 if (workaround_erratum383)
3658 pmap_update_pde(pmap, va, pde, newpde);
3660 pde_store(pde, newpde);
3663 * Invalidate a stale recursive mapping of the page table page.
3665 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
3669 * pmap_remove_pde: do the things to unmap a superpage in a process
3672 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
3673 struct spglist *free, struct rwlock **lockp)
3675 struct md_page *pvh;
3677 vm_offset_t eva, va;
3679 pt_entry_t PG_G, PG_A, PG_M, PG_RW;
3681 PG_G = pmap_global_bit(pmap);
3682 PG_A = pmap_accessed_bit(pmap);
3683 PG_M = pmap_modified_bit(pmap);
3684 PG_RW = pmap_rw_bit(pmap);
3686 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3687 KASSERT((sva & PDRMASK) == 0,
3688 ("pmap_remove_pde: sva is not 2mpage aligned"));
3689 oldpde = pte_load_clear(pdq);
3691 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
3692 if ((oldpde & PG_G) != 0)
3693 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
3694 pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
3695 if (oldpde & PG_MANAGED) {
3696 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
3697 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
3698 pmap_pvh_free(pvh, pmap, sva);
3700 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
3701 va < eva; va += PAGE_SIZE, m++) {
3702 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
3705 vm_page_aflag_set(m, PGA_REFERENCED);
3706 if (TAILQ_EMPTY(&m->md.pv_list) &&
3707 TAILQ_EMPTY(&pvh->pv_list))
3708 vm_page_aflag_clear(m, PGA_WRITEABLE);
3709 pmap_delayed_invl_page(m);
3712 if (pmap == kernel_pmap) {
3713 pmap_remove_kernel_pde(pmap, pdq, sva);
3715 mpte = pmap_remove_pt_page(pmap, sva);
3717 pmap_resident_count_dec(pmap, 1);
3718 KASSERT(mpte->wire_count == NPTEPG,
3719 ("pmap_remove_pde: pte page wire count error"));
3720 mpte->wire_count = 0;
3721 pmap_add_delayed_free_list(mpte, free, FALSE);
3724 return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
3728 * pmap_remove_pte: do the things to unmap a page in a process
3731 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
3732 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
3734 struct md_page *pvh;
3735 pt_entry_t oldpte, PG_A, PG_M, PG_RW;
3738 PG_A = pmap_accessed_bit(pmap);
3739 PG_M = pmap_modified_bit(pmap);
3740 PG_RW = pmap_rw_bit(pmap);
3742 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3743 oldpte = pte_load_clear(ptq);
3745 pmap->pm_stats.wired_count -= 1;
3746 pmap_resident_count_dec(pmap, 1);
3747 if (oldpte & PG_MANAGED) {
3748 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
3749 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3752 vm_page_aflag_set(m, PGA_REFERENCED);
3753 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
3754 pmap_pvh_free(&m->md, pmap, va);
3755 if (TAILQ_EMPTY(&m->md.pv_list) &&
3756 (m->flags & PG_FICTITIOUS) == 0) {
3757 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3758 if (TAILQ_EMPTY(&pvh->pv_list))
3759 vm_page_aflag_clear(m, PGA_WRITEABLE);
3761 pmap_delayed_invl_page(m);
3763 return (pmap_unuse_pt(pmap, va, ptepde, free));
3767 * Remove a single page from a process address space
3770 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
3771 struct spglist *free)
3773 struct rwlock *lock;
3774 pt_entry_t *pte, PG_V;
3776 PG_V = pmap_valid_bit(pmap);
3777 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3778 if ((*pde & PG_V) == 0)
3780 pte = pmap_pde_to_pte(pde, va);
3781 if ((*pte & PG_V) == 0)
3784 pmap_remove_pte(pmap, pte, va, *pde, free, &lock);
3787 pmap_invalidate_page(pmap, va);
3791 * Removes the specified range of addresses from the page table page.
3794 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
3795 pd_entry_t *pde, struct spglist *free, struct rwlock **lockp)
3797 pt_entry_t PG_G, *pte;
3801 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3802 PG_G = pmap_global_bit(pmap);
3805 for (pte = pmap_pde_to_pte(pde, sva); sva != eva; pte++,
3809 pmap_invalidate_range(pmap, va, sva);
3814 if ((*pte & PG_G) == 0)
3818 if (pmap_remove_pte(pmap, pte, sva, *pde, free, lockp)) {
3824 pmap_invalidate_range(pmap, va, sva);
3829 * Remove the given range of addresses from the specified map.
3831 * It is assumed that the start and end are properly
3832 * rounded to the page size.
3835 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3837 struct rwlock *lock;
3838 vm_offset_t va_next;
3839 pml4_entry_t *pml4e;
3841 pd_entry_t ptpaddr, *pde;
3842 pt_entry_t PG_G, PG_V;
3843 struct spglist free;
3846 PG_G = pmap_global_bit(pmap);
3847 PG_V = pmap_valid_bit(pmap);
3850 * Perform an unsynchronized read. This is, however, safe.
3852 if (pmap->pm_stats.resident_count == 0)
3858 pmap_delayed_invl_started();
3862 * Special handling for removing a single page: a very
3863 * common operation that is easy to short-circuit.
3866 if (sva + PAGE_SIZE == eva) {
3867 pde = pmap_pde(pmap, sva);
3868 if (pde && (*pde & PG_PS) == 0) {
3869 pmap_remove_page(pmap, sva, pde, &free);
3875 for (; sva < eva; sva = va_next) {
3877 if (pmap->pm_stats.resident_count == 0)
3880 pml4e = pmap_pml4e(pmap, sva);
3881 if ((*pml4e & PG_V) == 0) {
3882 va_next = (sva + NBPML4) & ~PML4MASK;
3888 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
3889 if ((*pdpe & PG_V) == 0) {
3890 va_next = (sva + NBPDP) & ~PDPMASK;
3897 * Calculate index for next page table.
3899 va_next = (sva + NBPDR) & ~PDRMASK;
3903 pde = pmap_pdpe_to_pde(pdpe, sva);
3907 * Weed out invalid mappings.
3913 * Check for large page.
3915 if ((ptpaddr & PG_PS) != 0) {
3917 * Are we removing the entire large page? If not,
3918 * demote the mapping and fall through.
3920 if (sva + NBPDR == va_next && eva >= va_next) {
3922 * The TLB entry for a PG_G mapping is
3923 * invalidated by pmap_remove_pde().
3925 if ((ptpaddr & PG_G) == 0)
3927 pmap_remove_pde(pmap, pde, sva, &free, &lock);
3929 } else if (!pmap_demote_pde_locked(pmap, pde, sva,
3931 /* The large page mapping was destroyed. */
3938 * Limit our scan to either the end of the va represented
3939 * by the current page table page, or to the end of the
3940 * range being removed.
3945 if (pmap_remove_ptes(pmap, sva, va_next, pde, &free, &lock))
3952 pmap_invalidate_all(pmap);
3954 pmap_delayed_invl_finished();
3955 pmap_free_zero_pages(&free);
3959 * Routine: pmap_remove_all
3961 * Removes this physical page from
3962 * all physical maps in which it resides.
3963 * Reflects back modify bits to the pager.
3966 * Original versions of this routine were very
3967 * inefficient because they iteratively called
3968 * pmap_remove (slow...)
3972 pmap_remove_all(vm_page_t m)
3974 struct md_page *pvh;
3977 struct rwlock *lock;
3978 pt_entry_t *pte, tpte, PG_A, PG_M, PG_RW;
3981 struct spglist free;
3982 int pvh_gen, md_gen;
3984 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3985 ("pmap_remove_all: page %p is not managed", m));
3987 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3988 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
3989 pa_to_pvh(VM_PAGE_TO_PHYS(m));
3992 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
3994 if (!PMAP_TRYLOCK(pmap)) {
3995 pvh_gen = pvh->pv_gen;
3999 if (pvh_gen != pvh->pv_gen) {
4006 pde = pmap_pde(pmap, va);
4007 (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
4010 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4012 if (!PMAP_TRYLOCK(pmap)) {
4013 pvh_gen = pvh->pv_gen;
4014 md_gen = m->md.pv_gen;
4018 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4024 PG_A = pmap_accessed_bit(pmap);
4025 PG_M = pmap_modified_bit(pmap);
4026 PG_RW = pmap_rw_bit(pmap);
4027 pmap_resident_count_dec(pmap, 1);
4028 pde = pmap_pde(pmap, pv->pv_va);
4029 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
4030 " a 2mpage in page %p's pv list", m));
4031 pte = pmap_pde_to_pte(pde, pv->pv_va);
4032 tpte = pte_load_clear(pte);
4034 pmap->pm_stats.wired_count--;
4036 vm_page_aflag_set(m, PGA_REFERENCED);
4039 * Update the vm_page_t clean and reference bits.
4041 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
4043 pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
4044 pmap_invalidate_page(pmap, pv->pv_va);
4045 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4047 free_pv_entry(pmap, pv);
4050 vm_page_aflag_clear(m, PGA_WRITEABLE);
4052 pmap_delayed_invl_wait(m);
4053 pmap_free_zero_pages(&free);
4057 * pmap_protect_pde: do the things to protect a 2mpage in a process
4060 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
4062 pd_entry_t newpde, oldpde;
4063 vm_offset_t eva, va;
4065 boolean_t anychanged;
4066 pt_entry_t PG_G, PG_M, PG_RW;
4068 PG_G = pmap_global_bit(pmap);
4069 PG_M = pmap_modified_bit(pmap);
4070 PG_RW = pmap_rw_bit(pmap);
4072 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4073 KASSERT((sva & PDRMASK) == 0,
4074 ("pmap_protect_pde: sva is not 2mpage aligned"));
4077 oldpde = newpde = *pde;
4078 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
4079 (PG_MANAGED | PG_M | PG_RW)) {
4081 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4082 va < eva; va += PAGE_SIZE, m++)
4085 if ((prot & VM_PROT_WRITE) == 0)
4086 newpde &= ~(PG_RW | PG_M);
4087 if ((prot & VM_PROT_EXECUTE) == 0)
4089 if (newpde != oldpde) {
4091 * As an optimization to future operations on this PDE, clear
4092 * PG_PROMOTED. The impending invalidation will remove any
4093 * lingering 4KB page mappings from the TLB.
4095 if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
4097 if ((oldpde & PG_G) != 0)
4098 pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
4102 return (anychanged);
4106 * Set the physical protection on the
4107 * specified range of this map as requested.
4110 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4112 vm_offset_t va_next;
4113 pml4_entry_t *pml4e;
4115 pd_entry_t ptpaddr, *pde;
4116 pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V;
4117 boolean_t anychanged;
4119 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4120 if (prot == VM_PROT_NONE) {
4121 pmap_remove(pmap, sva, eva);
4125 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
4126 (VM_PROT_WRITE|VM_PROT_EXECUTE))
4129 PG_G = pmap_global_bit(pmap);
4130 PG_M = pmap_modified_bit(pmap);
4131 PG_V = pmap_valid_bit(pmap);
4132 PG_RW = pmap_rw_bit(pmap);
4136 * Although this function delays and batches the invalidation
4137 * of stale TLB entries, it does not need to call
4138 * pmap_delayed_invl_started() and
4139 * pmap_delayed_invl_finished(), because it does not
4140 * ordinarily destroy mappings. Stale TLB entries from
4141 * protection-only changes need only be invalidated before the
4142 * pmap lock is released, because protection-only changes do
4143 * not destroy PV entries. Even operations that iterate over
4144 * a physical page's PV list of mappings, like
4145 * pmap_remove_write(), acquire the pmap lock for each
4146 * mapping. Consequently, for protection-only changes, the
4147 * pmap lock suffices to synchronize both page table and TLB
4150 * This function only destroys a mapping if pmap_demote_pde()
4151 * fails. In that case, stale TLB entries are immediately invalidated.
4156 for (; sva < eva; sva = va_next) {
4158 pml4e = pmap_pml4e(pmap, sva);
4159 if ((*pml4e & PG_V) == 0) {
4160 va_next = (sva + NBPML4) & ~PML4MASK;
4166 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
4167 if ((*pdpe & PG_V) == 0) {
4168 va_next = (sva + NBPDP) & ~PDPMASK;
4174 va_next = (sva + NBPDR) & ~PDRMASK;
4178 pde = pmap_pdpe_to_pde(pdpe, sva);
4182 * Weed out invalid mappings.
4188 * Check for large page.
4190 if ((ptpaddr & PG_PS) != 0) {
4192 * Are we protecting the entire large page? If not,
4193 * demote the mapping and fall through.
4195 if (sva + NBPDR == va_next && eva >= va_next) {
4197 * The TLB entry for a PG_G mapping is
4198 * invalidated by pmap_protect_pde().
4200 if (pmap_protect_pde(pmap, pde, sva, prot))
4203 } else if (!pmap_demote_pde(pmap, pde, sva)) {
4205 * The large page mapping was destroyed.
4214 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
4216 pt_entry_t obits, pbits;
4220 obits = pbits = *pte;
4221 if ((pbits & PG_V) == 0)
4224 if ((prot & VM_PROT_WRITE) == 0) {
4225 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
4226 (PG_MANAGED | PG_M | PG_RW)) {
4227 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
4230 pbits &= ~(PG_RW | PG_M);
4232 if ((prot & VM_PROT_EXECUTE) == 0)
4235 if (pbits != obits) {
4236 if (!atomic_cmpset_long(pte, obits, pbits))
4239 pmap_invalidate_page(pmap, sva);
4246 pmap_invalidate_all(pmap);
4251 * Tries to promote the 512, contiguous 4KB page mappings that are within a
4252 * single page table page (PTP) to a single 2MB page mapping. For promotion
4253 * to occur, two conditions must be met: (1) the 4KB page mappings must map
4254 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
4255 * identical characteristics.
4258 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
4259 struct rwlock **lockp)
4262 pt_entry_t *firstpte, oldpte, pa, *pte;
4263 pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V;
4267 PG_A = pmap_accessed_bit(pmap);
4268 PG_G = pmap_global_bit(pmap);
4269 PG_M = pmap_modified_bit(pmap);
4270 PG_V = pmap_valid_bit(pmap);
4271 PG_RW = pmap_rw_bit(pmap);
4272 PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
4274 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4277 * Examine the first PTE in the specified PTP. Abort if this PTE is
4278 * either invalid, unused, or does not map the first 4KB physical page
4279 * within a 2MB page.
4281 firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
4284 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
4285 atomic_add_long(&pmap_pde_p_failures, 1);
4286 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
4287 " in pmap %p", va, pmap);
4290 if ((newpde & (PG_M | PG_RW)) == PG_RW) {
4292 * When PG_M is already clear, PG_RW can be cleared without
4293 * a TLB invalidation.
4295 if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW))
4301 * Examine each of the other PTEs in the specified PTP. Abort if this
4302 * PTE maps an unexpected 4KB physical page or does not have identical
4303 * characteristics to the first PTE.
4305 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
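/*
 * "pa" now holds the PTE bits expected for the last 4KB page of the 2MB
 * run.  The loop below walks the PTP backwards, stepping the expected
 * value down a page at a time, so every PTE must match it exactly; this
 * is what proves that the 512 mappings are physically contiguous and
 * carry identical attributes.
 */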
4306 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
4309 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
4310 atomic_add_long(&pmap_pde_p_failures, 1);
4311 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
4312 " in pmap %p", va, pmap);
4315 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
4317 * When PG_M is already clear, PG_RW can be cleared
4318 * without a TLB invalidation.
4320 if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
4323 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
4324 " in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
4325 (va & ~PDRMASK), pmap);
4327 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
4328 atomic_add_long(&pmap_pde_p_failures, 1);
4329 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
4330 " in pmap %p", va, pmap);
4337 * Save the page table page in its current state until the PDE
4338 * mapping the superpage is demoted by pmap_demote_pde() or
4339 * destroyed by pmap_remove_pde().
4341 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
4342 KASSERT(mpte >= vm_page_array &&
4343 mpte < &vm_page_array[vm_page_array_size],
4344 ("pmap_promote_pde: page table page is out of range"));
4345 KASSERT(mpte->pindex == pmap_pde_pindex(va),
4346 ("pmap_promote_pde: page table page's pindex is wrong"));
4347 if (pmap_insert_pt_page(pmap, mpte)) {
4348 atomic_add_long(&pmap_pde_p_failures, 1);
4350 "pmap_promote_pde: failure for va %#lx in pmap %p", va,
4356 * Promote the pv entries.
4358 if ((newpde & PG_MANAGED) != 0)
4359 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp);
4362 * Propagate the PAT index to its proper position.
4364 newpde = pmap_swap_pat(pmap, newpde);
4367 * Map the superpage.
4369 if (workaround_erratum383)
4370 pmap_update_pde(pmap, va, pde, PG_PS | newpde);
4372 pde_store(pde, PG_PROMOTED | PG_PS | newpde);
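/*
 * PG_PROMOTED marks a 2MB mapping that was created by promotion, i.e.
 * one for which 4KB TLB entries may still be cached; pmap_protect_pde()
 * exploits this by clearing the flag once its invalidation has flushed
 * any such lingering entries.
 */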
4374 atomic_add_long(&pmap_pde_promotions, 1);
4375 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
4376 " in pmap %p", va, pmap);
4380 * Insert the given physical page (p) at
4381 * the specified virtual address (v) in the
4382 * target physical map with the protection requested.
4384 * If specified, the page will be wired down, meaning
4385 * that the related pte can not be reclaimed.
4387 * NB: This is the only routine which MAY NOT lazy-evaluate
4388 * or lose information. That is, this routine must actually
4389 * insert this page into the given map NOW.
4391 * When destroying both a page table and PV entry, this function
4392 * performs the TLB invalidation before releasing the PV list
4393 * lock, so we do not need pmap_delayed_invl_page() calls here.
4396 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
4397 u_int flags, int8_t psind)
4399 struct rwlock *lock;
4401 pt_entry_t *pte, PG_G, PG_A, PG_M, PG_RW, PG_V;
4402 pt_entry_t newpte, origpte;
4409 PG_A = pmap_accessed_bit(pmap);
4410 PG_G = pmap_global_bit(pmap);
4411 PG_M = pmap_modified_bit(pmap);
4412 PG_V = pmap_valid_bit(pmap);
4413 PG_RW = pmap_rw_bit(pmap);
4415 va = trunc_page(va);
4416 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
4417 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
4418 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
4420 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
4421 va >= kmi.clean_eva,
4422 ("pmap_enter: managed mapping within the clean submap"));
4423 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
4424 VM_OBJECT_ASSERT_LOCKED(m->object);
4425 KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
4426 ("pmap_enter: flags %u has reserved bits set", flags));
4427 pa = VM_PAGE_TO_PHYS(m);
4428 newpte = (pt_entry_t)(pa | PG_A | PG_V);
4429 if ((flags & VM_PROT_WRITE) != 0)
4431 if ((prot & VM_PROT_WRITE) != 0)
4433 KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
4434 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
4435 if ((prot & VM_PROT_EXECUTE) == 0)
4437 if ((flags & PMAP_ENTER_WIRED) != 0)
4439 if (va < VM_MAXUSER_ADDRESS)
4441 if (pmap == kernel_pmap)
4443 newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
4446 * Set modified bit gratuitously for writeable mappings if
4447 * the page is unmanaged. We do not want to take a fault
4448 * to do the dirty bit accounting for these mappings.
4450 if ((m->oflags & VPO_UNMANAGED) != 0) {
4451 if ((newpte & PG_RW) != 0)
4454 newpte |= PG_MANAGED;
4459 /* Assert the required virtual and physical alignment. */
4460 KASSERT((va & PDRMASK) == 0, ("pmap_enter: va unaligned"));
4461 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
4462 rv = pmap_enter_pde(pmap, va, newpte | PG_PS, flags, m, &lock);
4468 * In the case that a page table page is not
4469 * resident, we are creating it here.
4472 pde = pmap_pde(pmap, va);
4473 if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
4474 pmap_demote_pde_locked(pmap, pde, va, &lock))) {
4475 pte = pmap_pde_to_pte(pde, va);
4476 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
4477 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
4480 } else if (va < VM_MAXUSER_ADDRESS) {
4482 * Here if the pte page isn't mapped, or if it has been deallocated.
4485 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
4486 mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va),
4487 nosleep ? NULL : &lock);
4488 if (mpte == NULL && nosleep) {
4489 rv = KERN_RESOURCE_SHORTAGE;
4494 panic("pmap_enter: invalid page directory va=%#lx", va);
4499 * Is the specified virtual address already mapped?
4501 if ((origpte & PG_V) != 0) {
4503 * Wiring change, just update stats. We don't worry about
4504 * wiring PT pages as they remain resident as long as there
4505 * are valid mappings in them. Hence, if a user page is wired,
4506 * the PT page will be also.
4508 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
4509 pmap->pm_stats.wired_count++;
4510 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
4511 pmap->pm_stats.wired_count--;
4514 * Remove the extra PT page reference.
4518 KASSERT(mpte->wire_count > 0,
4519 ("pmap_enter: missing reference to page table page,"
4524 * Has the physical page changed?
4526 opa = origpte & PG_FRAME;
4529 * No, might be a protection or wiring change.
4531 if ((origpte & PG_MANAGED) != 0 &&
4532 (newpte & PG_RW) != 0)
4533 vm_page_aflag_set(m, PGA_WRITEABLE);
4534 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
4540 * Increment the counters.
4542 if ((newpte & PG_W) != 0)
4543 pmap->pm_stats.wired_count++;
4544 pmap_resident_count_inc(pmap, 1);
4548 * Enter on the PV list if part of our managed memory.
4550 if ((newpte & PG_MANAGED) != 0) {
4551 pv = get_pv_entry(pmap, &lock);
4553 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
4554 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4556 if ((newpte & PG_RW) != 0)
4557 vm_page_aflag_set(m, PGA_WRITEABLE);
4563 if ((origpte & PG_V) != 0) {
4565 origpte = pte_load_store(pte, newpte);
4566 opa = origpte & PG_FRAME;
4568 if ((origpte & PG_MANAGED) != 0) {
4569 om = PHYS_TO_VM_PAGE(opa);
4570 if ((origpte & (PG_M | PG_RW)) == (PG_M |
4573 if ((origpte & PG_A) != 0)
4574 vm_page_aflag_set(om, PGA_REFERENCED);
4575 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
4576 pmap_pvh_free(&om->md, pmap, va);
4577 if ((om->aflags & PGA_WRITEABLE) != 0 &&
4578 TAILQ_EMPTY(&om->md.pv_list) &&
4579 ((om->flags & PG_FICTITIOUS) != 0 ||
4580 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
4581 vm_page_aflag_clear(om, PGA_WRITEABLE);
4583 } else if ((newpte & PG_M) == 0 && (origpte & (PG_M |
4584 PG_RW)) == (PG_M | PG_RW)) {
4585 if ((origpte & PG_MANAGED) != 0)
4589 * Although the PTE may still have PG_RW set, TLB
4590 * invalidation may nonetheless be required because
4591 * the PTE no longer has PG_M set.
4593 } else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
4595 * This PTE change does not require TLB invalidation.
4599 if ((origpte & PG_A) != 0)
4600 pmap_invalidate_page(pmap, va);
4602 pte_store(pte, newpte);
4607 * If both the page table page and the reservation are fully
4608 * populated, then attempt promotion.
4610 if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
4611 pmap_ps_enabled(pmap) &&
4612 (m->flags & PG_FICTITIOUS) == 0 &&
4613 vm_reserv_level_iffullpop(m) == 0)
4614 pmap_promote_pde(pmap, pde, va, &lock);
4625 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
4626 * if successful. Returns false if (1) a page table page cannot be allocated
4627 * without sleeping, (2) a mapping already exists at the specified virtual
4628 * address, or (3) a PV entry cannot be allocated without reclaiming another PV entry.
4632 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
4633 struct rwlock **lockp)
4638 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4639 PG_V = pmap_valid_bit(pmap);
4640 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
4642 if ((m->oflags & VPO_UNMANAGED) == 0)
4643 newpde |= PG_MANAGED;
4644 if ((prot & VM_PROT_EXECUTE) == 0)
4646 if (va < VM_MAXUSER_ADDRESS)
4648 return (pmap_enter_pde(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
4649 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
4654 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
4655 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
4656 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
4657 * a mapping already exists at the specified virtual address. Returns
4658 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
4659 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
4660 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
4662 * The parameter "m" is only used when creating a managed, writeable mapping.
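 *
 * Note that PMAP_ENTER_NOSLEEP, PMAP_ENTER_NOREPLACE, and
 * PMAP_ENTER_NORECLAIM may be passed together, as pmap_enter_2mpage()
 * does above, to make this function entirely non-blocking.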
4665 pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
4666 vm_page_t m, struct rwlock **lockp)
4668 struct spglist free;
4669 pd_entry_t oldpde, *pde;
4670 pt_entry_t PG_G, PG_RW, PG_V;
4673 PG_G = pmap_global_bit(pmap);
4674 PG_RW = pmap_rw_bit(pmap);
4675 KASSERT((newpde & (pmap_modified_bit(pmap) | PG_RW)) != PG_RW,
4676 ("pmap_enter_pde: newpde is missing PG_M"));
4677 PG_V = pmap_valid_bit(pmap);
4678 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4680 if ((pdpg = pmap_allocpde(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
4681 NULL : lockp)) == NULL) {
4682 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
4683 " in pmap %p", va, pmap);
4684 return (KERN_RESOURCE_SHORTAGE);
4686 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4687 pde = &pde[pmap_pde_index(va)];
4689 if ((oldpde & PG_V) != 0) {
4690 KASSERT(pdpg->wire_count > 1,
4691 ("pmap_enter_pde: pdpg's wire count is too low"));
4692 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
4694 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
4695 " in pmap %p", va, pmap);
4696 return (KERN_FAILURE);
4698 /* Break the existing mapping(s). */
4700 if ((oldpde & PG_PS) != 0) {
4702 * The reference to the PD page that was acquired by
4703 * pmap_allocpde() ensures that it won't be freed.
4704 * However, if the PDE resulted from a promotion, then
4705 * a reserved PT page could be freed.
4707 (void)pmap_remove_pde(pmap, pde, va, &free, lockp);
4708 if ((oldpde & PG_G) == 0)
4709 pmap_invalidate_pde_page(pmap, va, oldpde);
4711 pmap_delayed_invl_started();
4712 if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
4714 pmap_invalidate_all(pmap);
4715 pmap_delayed_invl_finished();
4717 pmap_free_zero_pages(&free);
4718 if (va >= VM_MAXUSER_ADDRESS) {
4719 mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
4720 if (pmap_insert_pt_page(pmap, mt)) {
4722 * XXX Currently, this can't happen because
4723 * we do not perform pmap_enter(psind == 1)
4724 * on the kernel pmap.
4726 panic("pmap_enter_pde: trie insert failed");
4729 KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
4732 if ((newpde & PG_MANAGED) != 0) {
4734 * Abort this mapping if its PV entry could not be created.
4736 if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
4738 if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
4740 * Although "va" is not mapped, paging-
4741 * structure caches could nonetheless have
4742 * entries that refer to the freed page table
4743 * pages. Invalidate those entries.
4745 pmap_invalidate_page(pmap, va);
4746 pmap_free_zero_pages(&free);
4748 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
4749 " in pmap %p", va, pmap);
4750 return (KERN_RESOURCE_SHORTAGE);
4752 if ((newpde & PG_RW) != 0) {
4753 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4754 vm_page_aflag_set(mt, PGA_WRITEABLE);
4759 * Increment counters.
4761 if ((newpde & PG_W) != 0)
4762 pmap->pm_stats.wired_count += NBPDR / PAGE_SIZE;
4763 pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
4766 * Map the superpage. (This is not a promoted mapping; there will not
4767 * be any lingering 4KB page mappings in the TLB.)
4769 pde_store(pde, newpde);
4771 atomic_add_long(&pmap_pde_mappings, 1);
4772 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
4773 " in pmap %p", va, pmap);
4774 return (KERN_SUCCESS);
4778 * Maps a sequence of resident pages belonging to the same object.
4779 * The sequence begins with the given page m_start. This page is
4780 * mapped at the given virtual address start. Each subsequent page is
4781 * mapped at a virtual address that is offset from start by the same
4782 * amount as the page is offset from m_start within the object. The
4783 * last page in the sequence is the page with the largest offset from
4784 * m_start that can be mapped at a virtual address less than the given
4785 * virtual address end. Not every virtual page between start and end
4786 * is mapped; only those for which a resident page exists with the
4787 * corresponding offset from m_start are mapped.
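 *
 * As an optimization, a 2MB page mapping is created whenever the
 * current page begins a 2MB-aligned, fully populated run that fits
 * within the range; see the "(va & PDRMASK) == 0" test below.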
4790 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
4791 vm_page_t m_start, vm_prot_t prot)
4793 struct rwlock *lock;
4796 vm_pindex_t diff, psize;
4798 VM_OBJECT_ASSERT_LOCKED(m_start->object);
4800 psize = atop(end - start);
4805 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
4806 va = start + ptoa(diff);
4807 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
4808 m->psind == 1 && pmap_ps_enabled(pmap) &&
4809 pmap_enter_2mpage(pmap, va, m, prot, &lock))
4810 m = &m[NBPDR / PAGE_SIZE - 1];
4812 mpte = pmap_enter_quick_locked(pmap, va, m, prot,
4814 m = TAILQ_NEXT(m, listq);
4822 * this code makes some *MAJOR* assumptions:
4823 * 1. Current pmap & pmap exists.
4824 * 2. Not wired.
4825 * 3. Read access.
4826 * 4. No page table pages.
4827 * but is *MUCH* faster than pmap_enter...
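 *
 * The mappings created here are read-only and unwired, and no PV
 * entry is forcibly reclaimed on their behalf; allocation failures
 * simply leave the page unmapped.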
4831 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
4833 struct rwlock *lock;
4837 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
4844 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
4845 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
4847 struct spglist free;
4848 pt_entry_t *pte, PG_V;
4851 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
4852 (m->oflags & VPO_UNMANAGED) != 0,
4853 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
4854 PG_V = pmap_valid_bit(pmap);
4855 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4858 * In the case that a page table page is not
4859 * resident, we are creating it here.
4861 if (va < VM_MAXUSER_ADDRESS) {
4862 vm_pindex_t ptepindex;
4866 * Calculate the page table page index.
4868 ptepindex = pmap_pde_pindex(va);
4869 if (mpte && (mpte->pindex == ptepindex)) {
4873 * Get the page directory entry
4875 ptepa = pmap_pde(pmap, va);
4878 * If the page table page is mapped, we just increment
4879 * the hold count, and activate it. Otherwise, we
4880 * attempt to allocate a page table page. If this
4881 * attempt fails, we don't retry. Instead, we give up.
4883 if (ptepa && (*ptepa & PG_V) != 0) {
4886 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
4890 * Pass NULL instead of the PV list lock
4891 * pointer, because we don't intend to sleep.
4893 mpte = _pmap_allocpte(pmap, ptepindex, NULL);
4898 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
4899 pte = &pte[pmap_pte_index(va)];
4913 * Enter on the PV list if part of our managed memory.
4915 if ((m->oflags & VPO_UNMANAGED) == 0 &&
4916 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
4919 if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
4921 * Although "va" is not mapped, paging-
4922 * structure caches could nonetheless have
4923 * entries that refer to the freed page table
4924 * pages. Invalidate those entries.
4926 pmap_invalidate_page(pmap, va);
4927 pmap_free_zero_pages(&free);
4935 * Increment counters
4937 pmap_resident_count_inc(pmap, 1);
4939 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0);
4940 if ((prot & VM_PROT_EXECUTE) == 0)
4944 * Now validate mapping with RO protection
4946 if ((m->oflags & VPO_UNMANAGED) != 0)
4947 pte_store(pte, pa | PG_V | PG_U);
4949 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
4954 * Make a temporary mapping for a physical address. This is only intended
4955 * to be used for panic dumps.
4958 pmap_kenter_temporary(vm_paddr_t pa, int i)
4962 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
4963 pmap_kenter(va, pa);
4965 return ((void *)crashdumpmap);
4969 * This code maps large physical mmap regions into the
4970 * processor address space. Note that some shortcuts
4971 * are taken, but the code works.
4974 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
4975 vm_pindex_t pindex, vm_size_t size)
4978 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
4979 vm_paddr_t pa, ptepa;
4983 PG_A = pmap_accessed_bit(pmap);
4984 PG_M = pmap_modified_bit(pmap);
4985 PG_V = pmap_valid_bit(pmap);
4986 PG_RW = pmap_rw_bit(pmap);
4988 VM_OBJECT_ASSERT_WLOCKED(object);
4989 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4990 ("pmap_object_init_pt: non-device object"));
4991 if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
4992 if (!pmap_ps_enabled(pmap))
4994 if (!vm_object_populate(object, pindex, pindex + atop(size)))
4996 p = vm_page_lookup(object, pindex);
4997 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4998 ("pmap_object_init_pt: invalid page %p", p));
4999 pat_mode = p->md.pat_mode;
5002 * Abort the mapping if the first page is not physically
5003 * aligned to a 2MB page boundary.
5005 ptepa = VM_PAGE_TO_PHYS(p);
5006 if (ptepa & (NBPDR - 1))
5010 * Skip the first page. Abort the mapping if the rest of
5011 * the pages are not physically contiguous or have differing
5012 * memory attributes.
5014 p = TAILQ_NEXT(p, listq);
5015 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
5017 KASSERT(p->valid == VM_PAGE_BITS_ALL,
5018 ("pmap_object_init_pt: invalid page %p", p));
5019 if (pa != VM_PAGE_TO_PHYS(p) ||
5020 pat_mode != p->md.pat_mode)
5022 p = TAILQ_NEXT(p, listq);
5026 * Map using 2MB pages. Since "ptepa" is 2M aligned and
5027 * "size" is a multiple of 2M, adding the PAT setting to "pa"
5028 * will not affect the termination of this loop.
5031 for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
5032 pa < ptepa + size; pa += NBPDR) {
5033 pdpg = pmap_allocpde(pmap, addr, NULL);
5036 * The creation of mappings below is only an
5037 * optimization. If a page directory page
5038 * cannot be allocated without blocking,
5039 * continue on to the next mapping rather than
5045 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
5046 pde = &pde[pmap_pde_index(addr)];
5047 if ((*pde & PG_V) == 0) {
5048 pde_store(pde, pa | PG_PS | PG_M | PG_A |
5049 PG_U | PG_RW | PG_V);
5050 pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
5051 atomic_add_long(&pmap_pde_mappings, 1);
5053 /* Continue on if the PDE is already valid. */
5055 KASSERT(pdpg->wire_count > 0,
5056 ("pmap_object_init_pt: missing reference "
5057 "to page directory page, va: 0x%lx", addr));
5066 * Clear the wired attribute from the mappings for the specified range of
5067 * addresses in the given pmap. Every valid mapping within that range
5068 * must have the wired attribute set. In contrast, invalid mappings
5069 * cannot have the wired attribute set, so they are ignored.
5071 * The wired attribute of the page table entry is not a hardware
5072 * feature, so there is no need to invalidate any TLB entries.
5073 * Since pmap_demote_pde() for the wired entry must never fail,
5074 * pmap_delayed_invl_started()/finished() calls around the
5075 * function are not needed.
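 *
 * The loop below walks the page tables top-down: a missing PML4 or
 * PDP entry skips the entire region that it would map, a wired 2MB
 * mapping is either unwired whole or first demoted, and individual
 * PTEs are handled last.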
5078 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5080 vm_offset_t va_next;
5081 pml4_entry_t *pml4e;
5084 pt_entry_t *pte, PG_V;
5086 PG_V = pmap_valid_bit(pmap);
5088 for (; sva < eva; sva = va_next) {
5089 pml4e = pmap_pml4e(pmap, sva);
5090 if ((*pml4e & PG_V) == 0) {
5091 va_next = (sva + NBPML4) & ~PML4MASK;
5096 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
5097 if ((*pdpe & PG_V) == 0) {
5098 va_next = (sva + NBPDP) & ~PDPMASK;
5103 va_next = (sva + NBPDR) & ~PDRMASK;
5106 pde = pmap_pdpe_to_pde(pdpe, sva);
5107 if ((*pde & PG_V) == 0)
5109 if ((*pde & PG_PS) != 0) {
5110 if ((*pde & PG_W) == 0)
5111 panic("pmap_unwire: pde %#jx is missing PG_W",
5115 * Are we unwiring the entire large page? If not,
5116 * demote the mapping and fall through.
5118 if (sva + NBPDR == va_next && eva >= va_next) {
5119 atomic_clear_long(pde, PG_W);
5120 pmap->pm_stats.wired_count -= NBPDR /
5123 } else if (!pmap_demote_pde(pmap, pde, sva))
5124 panic("pmap_unwire: demotion failed");
5128 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
5130 if ((*pte & PG_V) == 0)
5132 if ((*pte & PG_W) == 0)
5133 panic("pmap_unwire: pte %#jx is missing PG_W",
5137 * PG_W must be cleared atomically. Although the pmap
5138 * lock synchronizes access to PG_W, another processor
5139 * could be setting PG_M and/or PG_A concurrently.
5141 atomic_clear_long(pte, PG_W);
5142 pmap->pm_stats.wired_count--;
5149 * Copy the range specified by src_addr/len
5150 * from the source map to the range dst_addr/len
5151 * in the destination map.
5153 * This routine is only advisory and need not do anything.
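 *
 * Aligned 2MB mappings are copied wholesale when possible; otherwise
 * individual 4KB mappings are copied with their wired, modified, and
 * accessed bits cleared, so the destination pmap starts with clean,
 * unwired mappings.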
5157 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
5158 vm_offset_t src_addr)
5160 struct rwlock *lock;
5161 struct spglist free;
5163 vm_offset_t end_addr = src_addr + len;
5164 vm_offset_t va_next;
5165 vm_page_t dst_pdpg, dstmpte, srcmpte;
5166 pt_entry_t PG_A, PG_M, PG_V;
5168 if (dst_addr != src_addr)
5171 if (dst_pmap->pm_type != src_pmap->pm_type)
5175 * EPT page table entries that require emulation of A/D bits are
5176 * sensitive to clearing the PG_A bit (aka EPT_PG_READ). Although
5177 * we clear PG_M (aka EPT_PG_WRITE) concomitantly, the PG_U bit
5178 * (aka EPT_PG_EXECUTE) could still be set. Since some EPT
5179 * implementations flag an EPT misconfiguration for exec-only
5180 * mappings we skip this function entirely for emulated pmaps.
5182 if (pmap_emulate_ad_bits(dst_pmap))
5186 if (dst_pmap < src_pmap) {
5187 PMAP_LOCK(dst_pmap);
5188 PMAP_LOCK(src_pmap);
5190 PMAP_LOCK(src_pmap);
5191 PMAP_LOCK(dst_pmap);
5194 PG_A = pmap_accessed_bit(dst_pmap);
5195 PG_M = pmap_modified_bit(dst_pmap);
5196 PG_V = pmap_valid_bit(dst_pmap);
5198 for (addr = src_addr; addr < end_addr; addr = va_next) {
5199 pt_entry_t *src_pte, *dst_pte;
5200 pml4_entry_t *pml4e;
5202 pd_entry_t srcptepaddr, *pde;
5204 KASSERT(addr < UPT_MIN_ADDRESS,
5205 ("pmap_copy: invalid to pmap_copy page tables"));
5207 pml4e = pmap_pml4e(src_pmap, addr);
5208 if ((*pml4e & PG_V) == 0) {
5209 va_next = (addr + NBPML4) & ~PML4MASK;
5215 pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
5216 if ((*pdpe & PG_V) == 0) {
5217 va_next = (addr + NBPDP) & ~PDPMASK;
5223 va_next = (addr + NBPDR) & ~PDRMASK;
5227 pde = pmap_pdpe_to_pde(pdpe, addr);
5229 if (srcptepaddr == 0)
5232 if (srcptepaddr & PG_PS) {
5233 if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
5235 dst_pdpg = pmap_allocpde(dst_pmap, addr, NULL);
5236 if (dst_pdpg == NULL)
5238 pde = (pd_entry_t *)
5239 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
5240 pde = &pde[pmap_pde_index(addr)];
5241 if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
5242 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
5243 PMAP_ENTER_NORECLAIM, &lock))) {
5244 *pde = srcptepaddr & ~PG_W;
5245 pmap_resident_count_inc(dst_pmap, NBPDR / PAGE_SIZE);
5246 atomic_add_long(&pmap_pde_mappings, 1);
5248 dst_pdpg->wire_count--;
5252 srcptepaddr &= PG_FRAME;
5253 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
5254 KASSERT(srcmpte->wire_count > 0,
5255 ("pmap_copy: source page table page is unused"));
5257 if (va_next > end_addr)
5260 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
5261 src_pte = &src_pte[pmap_pte_index(addr)];
5263 while (addr < va_next) {
5267 * We only virtual-copy managed pages.
5269 if ((ptetemp & PG_MANAGED) != 0) {
5270 if (dstmpte != NULL &&
5271 dstmpte->pindex == pmap_pde_pindex(addr))
5272 dstmpte->wire_count++;
5273 else if ((dstmpte = pmap_allocpte(dst_pmap,
5274 addr, NULL)) == NULL)
5276 dst_pte = (pt_entry_t *)
5277 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
5278 dst_pte = &dst_pte[pmap_pte_index(addr)];
5279 if (*dst_pte == 0 &&
5280 pmap_try_insert_pv_entry(dst_pmap, addr,
5281 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
5284 * Clear the wired, modified, and
5285 * accessed (referenced) bits
5288 *dst_pte = ptetemp & ~(PG_W | PG_M |
5290 pmap_resident_count_inc(dst_pmap, 1);
5293 if (pmap_unwire_ptp(dst_pmap, addr,
5296 * Although "addr" is not
5297 * mapped, paging-structure
5298 * caches could nonetheless
5299 * have entries that refer to
5300 * the freed page table pages.
5301 * Invalidate those entries.
5303 pmap_invalidate_page(dst_pmap,
5305 pmap_free_zero_pages(&free);
5309 if (dstmpte->wire_count >= srcmpte->wire_count)
5319 PMAP_UNLOCK(src_pmap);
5320 PMAP_UNLOCK(dst_pmap);
5324 * pmap_zero_page zeros the specified hardware page by mapping
5325 * the page into KVM and using bzero to clear its contents.
5328 pmap_zero_page(vm_page_t m)
5330 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5332 pagezero((void *)va);
5336 * pmap_zero_page_area zeros the specified hardware page by mapping
5337 * the page into KVM and using bzero to clear its contents.
5339 * off and size may not cover an area beyond a single hardware page.
5342 pmap_zero_page_area(vm_page_t m, int off, int size)
5344 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5346 if (off == 0 && size == PAGE_SIZE)
5347 pagezero((void *)va);
5349 bzero((char *)va + off, size);
5353 * pmap_zero_page_idle zeros the specified hardware page by mapping
5354 * the page into KVM and using bzero to clear its contents. This
5355 * is intended to be called from the vm_pagezero process only and outside of Giant.
5359 pmap_zero_page_idle(vm_page_t m)
5361 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5363 pagezero((void *)va);
5367 * pmap_copy_page copies the specified (machine independent)
5368 * page by mapping the page into virtual memory and using
5369 * bcopy to copy the page, one machine dependent page at a time.
5373 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
5375 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
5376 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
5378 pagecopy((void *)src, (void *)dst);
5381 int unmapped_buf_allowed = 1;
5384 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
5385 vm_offset_t b_offset, int xfersize)
5389 vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
5393 while (xfersize > 0) {
5394 a_pg_offset = a_offset & PAGE_MASK;
5395 pages[0] = ma[a_offset >> PAGE_SHIFT];
5396 b_pg_offset = b_offset & PAGE_MASK;
5397 pages[1] = mb[b_offset >> PAGE_SHIFT];
5398 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
5399 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
5400 mapped = pmap_map_io_transient(pages, vaddr, 2, FALSE);
5401 a_cp = (char *)vaddr[0] + a_pg_offset;
5402 b_cp = (char *)vaddr[1] + b_pg_offset;
5403 bcopy(a_cp, b_cp, cnt);
5404 if (__predict_false(mapped))
5405 pmap_unmap_io_transient(pages, vaddr, 2, FALSE);
5413 * Returns true if the pmap's pv is one of the first
5414 * 16 pvs linked to from this page. This count may
5415 * be changed upwards or downwards in the future; it
5416 * is only necessary that true be returned for a small
5417 * subset of pmaps for proper page aging.
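 *
 * Both the page's own PV list and, for non-fictitious pages, the PV
 * list of its containing 2MB page are consulted, subject to the same
 * 16-entry bound.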
5420 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
5422 struct md_page *pvh;
5423 struct rwlock *lock;
5428 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5429 ("pmap_page_exists_quick: page %p is not managed", m));
5431 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5433 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5434 if (PV_PMAP(pv) == pmap) {
5442 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
5443 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5444 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5445 if (PV_PMAP(pv) == pmap) {
5459 * pmap_page_wired_mappings:
5461 * Return the number of managed mappings to the given physical page that are wired.
5465 pmap_page_wired_mappings(vm_page_t m)
5467 struct rwlock *lock;
5468 struct md_page *pvh;
5472 int count, md_gen, pvh_gen;
5474 if ((m->oflags & VPO_UNMANAGED) != 0)
5476 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5480 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5482 if (!PMAP_TRYLOCK(pmap)) {
5483 md_gen = m->md.pv_gen;
5487 if (md_gen != m->md.pv_gen) {
5492 pte = pmap_pte(pmap, pv->pv_va);
5493 if ((*pte & PG_W) != 0)
5497 if ((m->flags & PG_FICTITIOUS) == 0) {
5498 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5499 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5501 if (!PMAP_TRYLOCK(pmap)) {
5502 md_gen = m->md.pv_gen;
5503 pvh_gen = pvh->pv_gen;
5507 if (md_gen != m->md.pv_gen ||
5508 pvh_gen != pvh->pv_gen) {
5513 pte = pmap_pde(pmap, pv->pv_va);
5514 if ((*pte & PG_W) != 0)
5524 * Returns TRUE if the given page is mapped individually or as part of
5525 * a 2mpage. Otherwise, returns FALSE.
5528 pmap_page_is_mapped(vm_page_t m)
5530 struct rwlock *lock;
5533 if ((m->oflags & VPO_UNMANAGED) != 0)
5535 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5537 rv = !TAILQ_EMPTY(&m->md.pv_list) ||
5538 ((m->flags & PG_FICTITIOUS) == 0 &&
5539 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
5545 * Destroy all managed, non-wired mappings in the given user-space
5546 * pmap. This pmap cannot be active on any processor besides the
5549 * This function cannot be applied to the kernel pmap. Moreover, it
5550 * is not intended for general use. It is only to be used during
5551 * process termination. Consequently, it can be implemented in ways
5552 * that make it faster than pmap_remove(). First, it can more quickly
5553 * destroy mappings by iterating over the pmap's collection of PV
5554 * entries, rather than searching the page table. Second, it doesn't
5555 * have to test and clear the page table entries atomically, because
5556 * no processor is currently accessing the user address space. In
5557 * particular, a page table entry's dirty bit won't change state once
5558 * this function starts.
5560 * Although this function destroys all of the pmap's managed,
5561 * non-wired mappings, it can delay and batch the invalidation of TLB
5562 * entries without calling pmap_delayed_invl_started() and
5563 * pmap_delayed_invl_finished(). Because the pmap is not active on
5564 * any other processor, none of these TLB entries will ever be used
5565 * before their eventual invalidation. Consequently, there is no need
5566 * for either pmap_remove_all() or pmap_remove_write() to wait for
5567 * that eventual TLB invalidation.
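 *
 * Accordingly, the loop below iterates over the pmap's PV chunks
 * rather than the page tables, reclaiming whole chunks as their
 * entries are freed and performing a single pmap_invalidate_all() at
 * the end.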
5570 pmap_remove_pages(pmap_t pmap)
5573 pt_entry_t *pte, tpte;
5574 pt_entry_t PG_M, PG_RW, PG_V;
5575 struct spglist free;
5576 vm_page_t m, mpte, mt;
5578 struct md_page *pvh;
5579 struct pv_chunk *pc, *npc;
5580 struct rwlock *lock;
5582 uint64_t inuse, bitmask;
5583 int allfree, field, freed, idx;
5584 boolean_t superpage;
5588 * Assert that the given pmap is only active on the current
5589 * CPU. Unfortunately, we cannot block another CPU from
5590 * activating the pmap while this function is executing.
5592 KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
5595 cpuset_t other_cpus;
5597 other_cpus = all_cpus;
5599 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
5600 CPU_AND(&other_cpus, &pmap->pm_active);
5602 KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
5607 PG_M = pmap_modified_bit(pmap);
5608 PG_V = pmap_valid_bit(pmap);
5609 PG_RW = pmap_rw_bit(pmap);
5613 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
5616 for (field = 0; field < _NPCM; field++) {
5617 inuse = ~pc->pc_map[field] & pc_freemask[field];
5618 while (inuse != 0) {
5620 bitmask = 1UL << bit;
5621 idx = field * 64 + bit;
5622 pv = &pc->pc_pventry[idx];
5625 pte = pmap_pdpe(pmap, pv->pv_va);
5627 pte = pmap_pdpe_to_pde(pte, pv->pv_va);
5629 if ((tpte & (PG_PS | PG_V)) == PG_V) {
5632 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
5634 pte = &pte[pmap_pte_index(pv->pv_va)];
5638 * Keep track whether 'tpte' is a
5639 * superpage explicitly instead of
5640 * relying on PG_PS being set.
5642 * This is because PG_PS is numerically
5643 * identical to PG_PTE_PAT and thus a
5644 * regular page could be mistaken for a superpage.
5650 if ((tpte & PG_V) == 0) {
5651 panic("bad pte va %lx pte %lx",
5656 * We cannot remove wired pages from a process' mapping at this time
5664 pa = tpte & PG_PS_FRAME;
5666 pa = tpte & PG_FRAME;
5668 m = PHYS_TO_VM_PAGE(pa);
5669 KASSERT(m->phys_addr == pa,
5670 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
5671 m, (uintmax_t)m->phys_addr,
5674 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
5675 m < &vm_page_array[vm_page_array_size],
5676 ("pmap_remove_pages: bad tpte %#jx",
5682 * Update the vm_page_t clean/reference bits.
5684 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5686 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
5692 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5695 pc->pc_map[field] |= bitmask;
5697 pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
5698 pvh = pa_to_pvh(tpte & PG_PS_FRAME);
5699 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
5701 if (TAILQ_EMPTY(&pvh->pv_list)) {
5702 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
5703 if ((mt->aflags & PGA_WRITEABLE) != 0 &&
5704 TAILQ_EMPTY(&mt->md.pv_list))
5705 vm_page_aflag_clear(mt, PGA_WRITEABLE);
5707 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
5709 pmap_resident_count_dec(pmap, 1);
5710 KASSERT(mpte->wire_count == NPTEPG,
5711 ("pmap_remove_pages: pte page wire count error"));
5712 mpte->wire_count = 0;
5713 pmap_add_delayed_free_list(mpte, &free, FALSE);
5716 pmap_resident_count_dec(pmap, 1);
5717 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
5719 if ((m->aflags & PGA_WRITEABLE) != 0 &&
5720 TAILQ_EMPTY(&m->md.pv_list) &&
5721 (m->flags & PG_FICTITIOUS) == 0) {
5722 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5723 if (TAILQ_EMPTY(&pvh->pv_list))
5724 vm_page_aflag_clear(m, PGA_WRITEABLE);
5727 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
5731 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5732 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5733 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5735 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5741 pmap_invalidate_all(pmap);
5743 pmap_free_zero_pages(&free);
5747 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
5749 struct rwlock *lock;
5751 struct md_page *pvh;
5752 pt_entry_t *pte, mask;
5753 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
5755 int md_gen, pvh_gen;
5759 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5762 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5764 if (!PMAP_TRYLOCK(pmap)) {
5765 md_gen = m->md.pv_gen;
5769 if (md_gen != m->md.pv_gen) {
5774 pte = pmap_pte(pmap, pv->pv_va);
5777 PG_M = pmap_modified_bit(pmap);
5778 PG_RW = pmap_rw_bit(pmap);
5779 mask |= PG_RW | PG_M;
5782 PG_A = pmap_accessed_bit(pmap);
5783 PG_V = pmap_valid_bit(pmap);
5784 mask |= PG_V | PG_A;
5786 rv = (*pte & mask) == mask;
5791 if ((m->flags & PG_FICTITIOUS) == 0) {
5792 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5793 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5795 if (!PMAP_TRYLOCK(pmap)) {
5796 md_gen = m->md.pv_gen;
5797 pvh_gen = pvh->pv_gen;
5801 if (md_gen != m->md.pv_gen ||
5802 pvh_gen != pvh->pv_gen) {
5807 pte = pmap_pde(pmap, pv->pv_va);
5810 PG_M = pmap_modified_bit(pmap);
5811 PG_RW = pmap_rw_bit(pmap);
5812 mask |= PG_RW | PG_M;
5815 PG_A = pmap_accessed_bit(pmap);
5816 PG_V = pmap_valid_bit(pmap);
5817 mask |= PG_V | PG_A;
5819 rv = (*pte & mask) == mask;
5833 * Return whether or not the specified physical page was modified
5834 * in any physical maps.
5837 pmap_is_modified(vm_page_t m)
5840 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5841 ("pmap_is_modified: page %p is not managed", m));
5844 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
5845 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
5846 * is clear, no PTEs can have PG_M set.
5848 VM_OBJECT_ASSERT_WLOCKED(m->object);
5849 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
5851 return (pmap_page_test_mappings(m, FALSE, TRUE));
5855 * pmap_is_prefaultable:
5857 * Return whether or not the specified virtual address is eligible for prefault.
5861 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
5864 pt_entry_t *pte, PG_V;
5867 PG_V = pmap_valid_bit(pmap);
5870 pde = pmap_pde(pmap, addr);
5871 if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
5872 pte = pmap_pde_to_pte(pde, addr);
5873 rv = (*pte & PG_V) == 0;
5880 * pmap_is_referenced:
5882 * Return whether or not the specified physical page was referenced
5883 * in any physical maps.
5886 pmap_is_referenced(vm_page_t m)
5889 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5890 ("pmap_is_referenced: page %p is not managed", m));
5891 return (pmap_page_test_mappings(m, TRUE, FALSE));
5895 * Clear the write and modified bits in each of the given page's mappings.
5898 pmap_remove_write(vm_page_t m)
5900 struct md_page *pvh;
5902 struct rwlock *lock;
5903 pv_entry_t next_pv, pv;
5905 pt_entry_t oldpte, *pte, PG_M, PG_RW;
5907 int pvh_gen, md_gen;
5909 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5910 ("pmap_remove_write: page %p is not managed", m));
5913 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
5914 * set by another thread while the object is locked. Thus,
5915 * if PGA_WRITEABLE is clear, no page table entries need updating.
5917 VM_OBJECT_ASSERT_WLOCKED(m->object);
5918 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
5920 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5921 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5922 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5925 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5927 if (!PMAP_TRYLOCK(pmap)) {
5928 pvh_gen = pvh->pv_gen;
5932 if (pvh_gen != pvh->pv_gen) {
5938 PG_RW = pmap_rw_bit(pmap);
5940 pde = pmap_pde(pmap, va);
5941 if ((*pde & PG_RW) != 0)
5942 (void)pmap_demote_pde_locked(pmap, pde, va, &lock);
5943 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
5944 ("inconsistent pv lock %p %p for page %p",
5945 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
5948 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5950 if (!PMAP_TRYLOCK(pmap)) {
5951 pvh_gen = pvh->pv_gen;
5952 md_gen = m->md.pv_gen;
5956 if (pvh_gen != pvh->pv_gen ||
5957 md_gen != m->md.pv_gen) {
5963 PG_M = pmap_modified_bit(pmap);
5964 PG_RW = pmap_rw_bit(pmap);
5965 pde = pmap_pde(pmap, pv->pv_va);
5966 KASSERT((*pde & PG_PS) == 0,
5967 ("pmap_remove_write: found a 2mpage in page %p's pv list",
5969 pte = pmap_pde_to_pte(pde, pv->pv_va);
5972 if (oldpte & PG_RW) {
5973 if (!atomic_cmpset_long(pte, oldpte, oldpte &
5976 if ((oldpte & PG_M) != 0)
5978 pmap_invalidate_page(pmap, pv->pv_va);
5983 vm_page_aflag_clear(m, PGA_WRITEABLE);
5984 pmap_delayed_invl_wait(m);
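
/*
 * Decide whether the accessed (referenced) bit in the given PTE can be
 * cleared safely without destroying the mapping. This only requires
 * care for pmaps that emulate the A/D bits (EPT); see the XWR notes
 * below.
 */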
5987 static __inline boolean_t
5988 safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
5991 if (!pmap_emulate_ad_bits(pmap))
5994 KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));
5997 * XWR = 010 or 110 will cause an unconditional EPT misconfiguration
5998 * so we don't let the referenced (aka EPT_PG_READ) bit be cleared
5999 * if the EPT_PG_WRITE bit is set.
6001 if ((pte & EPT_PG_WRITE) != 0)
6005 * XWR = 100 is allowed only if the PMAP_SUPPORTS_EXEC_ONLY flag is set.
6007 if ((pte & EPT_PG_EXECUTE) == 0 ||
6008 ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
6015 * pmap_ts_referenced:
6017 * Return a count of reference bits for a page, clearing those bits.
6018 * It is not necessary for every reference bit to be cleared, but it
6019 * is necessary that 0 only be returned when there are truly no
6020 * reference bits set.
6022 * As an optimization, update the page's dirty field if a modified bit is
6023 * found while counting reference bits. This opportunistic update can be
6024 * performed at low cost and can eliminate the need for some future calls
6025 * to pmap_is_modified(). However, since this function stops after
6026 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
6027 * dirty pages. Those dirty pages will only be detected by a future call
6028 * to pmap_is_modified().
6030 * A DI block is not needed within this function, because
6031 * invalidations are performed before the PV list lock is released.
6035 pmap_ts_referenced(vm_page_t m)
6037 struct md_page *pvh;
6040 struct rwlock *lock;
6041 pd_entry_t oldpde, *pde;
6042 pt_entry_t *pte, PG_A, PG_M, PG_RW;
6045 int cleared, md_gen, not_cleared, pvh_gen;
6046 struct spglist free;
6049 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6050 ("pmap_ts_referenced: page %p is not managed", m));
6053 pa = VM_PAGE_TO_PHYS(m);
6054 lock = PHYS_TO_PV_LIST_LOCK(pa);
6055 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
6059 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
6060 goto small_mappings;
6066 if (!PMAP_TRYLOCK(pmap)) {
6067 pvh_gen = pvh->pv_gen;
6071 if (pvh_gen != pvh->pv_gen) {
6076 PG_A = pmap_accessed_bit(pmap);
6077 PG_M = pmap_modified_bit(pmap);
6078 PG_RW = pmap_rw_bit(pmap);
6080 pde = pmap_pde(pmap, pv->pv_va);
6082 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
6084 * Although "oldpde" is mapping a 2MB page, because
6085 * this function is called at a 4KB page granularity,
6086 * we only update the 4KB page under test.
6090 if ((oldpde & PG_A) != 0) {
6092 * Since this reference bit is shared by 512 4KB
6093 * pages, it should not be cleared every time it is
6094 * tested. Apply a simple "hash" function on the
6095 * physical page number, the virtual superpage number,
6096 * and the pmap address to select one 4KB page out of
6097 * the 512 on which testing the reference bit will
6098 * result in clearing that reference bit. This
6099 * function is designed to avoid the selection of the
6100 * same 4KB page for every 2MB page mapping.
6102 * On demotion, a mapping that hasn't been referenced
6103 * is simply destroyed. To avoid the possibility of a
6104 * subsequent page fault on a demoted wired mapping,
6105 * always leave its reference bit set. Moreover,
6106 * since the superpage is wired, the current state of
6107 * its reference bit won't affect page replacement.
6109 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PDRSHIFT) ^
6110 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
6111 (oldpde & PG_W) == 0) {
6112 if (safe_to_clear_referenced(pmap, oldpde)) {
6113 atomic_clear_long(pde, PG_A);
6114 pmap_invalidate_page(pmap, pv->pv_va);
6116 } else if (pmap_demote_pde_locked(pmap, pde,
6117 pv->pv_va, &lock)) {
6119 * Remove the mapping to a single page
6120 * so that a subsequent access may
6121 * repromote. Since the underlying
6122 * page table page is fully populated,
6123 * this removal never frees a page table page.
6127 va += VM_PAGE_TO_PHYS(m) - (oldpde &
6129 pte = pmap_pde_to_pte(pde, va);
6130 pmap_remove_pte(pmap, pte, va, *pde,
6132 pmap_invalidate_page(pmap, va);
6138 * The superpage mapping was removed
6139 * entirely and therefore 'pv' is no longer valid.
6147 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
6148 ("inconsistent pv lock %p %p for page %p",
6149 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
6154 /* Rotate the PV list if it has more than one entry. */
6155 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
6156 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
6157 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
6160 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
6162 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
6164 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
6171 if (!PMAP_TRYLOCK(pmap)) {
6172 pvh_gen = pvh->pv_gen;
6173 md_gen = m->md.pv_gen;
6177 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
6182 PG_A = pmap_accessed_bit(pmap);
6183 PG_M = pmap_modified_bit(pmap);
6184 PG_RW = pmap_rw_bit(pmap);
6185 pde = pmap_pde(pmap, pv->pv_va);
6186 KASSERT((*pde & PG_PS) == 0,
6187 ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
6189 pte = pmap_pde_to_pte(pde, pv->pv_va);
6190 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
6192 if ((*pte & PG_A) != 0) {
6193 if (safe_to_clear_referenced(pmap, *pte)) {
6194 atomic_clear_long(pte, PG_A);
6195 pmap_invalidate_page(pmap, pv->pv_va);
6197 } else if ((*pte & PG_W) == 0) {
6199 * Wired pages cannot be paged out so
6200 * doing accessed bit emulation for
6201 * them is wasted effort. We do the
6202 * hard work for unwired pages only.
6204 pmap_remove_pte(pmap, pte, pv->pv_va,
6205 *pde, &free, &lock);
6206 pmap_invalidate_page(pmap, pv->pv_va);
6211 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
6212 ("inconsistent pv lock %p %p for page %p",
6213 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
6218 /* Rotate the PV list if it has more than one entry. */
6219 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
6220 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
6221 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
6224 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
6225 not_cleared < PMAP_TS_REFERENCED_MAX);
6228 pmap_free_zero_pages(&free);
6229 return (cleared + not_cleared);
6233 * Apply the given advice to the specified range of addresses within the
6234 * given pmap. Depending on the advice, clear the referenced and/or
6235 * modified flags in each mapping and set the mapped page's dirty field.
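 *
 * Only managed, valid mappings are affected. For MADV_DONTNEED, the
 * mapped page's dirty field is set before PG_M is cleared, so the
 * modification is not lost to later pmap_is_modified() checks.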
6238 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
6240 struct rwlock *lock;
6241 pml4_entry_t *pml4e;
6243 pd_entry_t oldpde, *pde;
6244 pt_entry_t *pte, PG_A, PG_G, PG_M, PG_RW, PG_V;
6245 vm_offset_t va, va_next;
6247 boolean_t anychanged;
6249 if (advice != MADV_DONTNEED && advice != MADV_FREE)
6253 * A/D bit emulation requires an alternate code path when clearing
6254 * the modified and accessed bits below. Since this function is
6255 * advisory in nature we skip it entirely for pmaps that require
6256 * A/D bit emulation.
6258 if (pmap_emulate_ad_bits(pmap))
6261 PG_A = pmap_accessed_bit(pmap);
6262 PG_G = pmap_global_bit(pmap);
6263 PG_M = pmap_modified_bit(pmap);
6264 PG_V = pmap_valid_bit(pmap);
6265 PG_RW = pmap_rw_bit(pmap);
6267 pmap_delayed_invl_started();
6269 for (; sva < eva; sva = va_next) {
6270 pml4e = pmap_pml4e(pmap, sva);
6271 if ((*pml4e & PG_V) == 0) {
6272 va_next = (sva + NBPML4) & ~PML4MASK;
6277 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
6278 if ((*pdpe & PG_V) == 0) {
6279 va_next = (sva + NBPDP) & ~PDPMASK;
6284 va_next = (sva + NBPDR) & ~PDRMASK;
6287 pde = pmap_pdpe_to_pde(pdpe, sva);
6289 if ((oldpde & PG_V) == 0)
6291 else if ((oldpde & PG_PS) != 0) {
6292 if ((oldpde & PG_MANAGED) == 0)
6295 if (!pmap_demote_pde_locked(pmap, pde, sva, &lock)) {
6300 * The large page mapping was destroyed.
6306 * Unless the page mappings are wired, remove the
6307 * mapping to a single page so that a subsequent
6308 * access may repromote. Since the underlying page
6309 * table page is fully populated, this removal never
6310 * frees a page table page.
6312 if ((oldpde & PG_W) == 0) {
6313 pte = pmap_pde_to_pte(pde, sva);
6314 KASSERT((*pte & PG_V) != 0,
6315 ("pmap_advise: invalid PTE"));
6316 pmap_remove_pte(pmap, pte, sva, *pde, NULL,
6326 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
6328 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
6330 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
6331 if (advice == MADV_DONTNEED) {
6333 * Future calls to pmap_is_modified()
6334 * can be avoided by making the page dirty now.
6337 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
6340 atomic_clear_long(pte, PG_M | PG_A);
6341 } else if ((*pte & PG_A) != 0)
6342 atomic_clear_long(pte, PG_A);
6346 if ((*pte & PG_G) != 0) {
6353 if (va != va_next) {
6354 pmap_invalidate_range(pmap, va, sva);
6359 pmap_invalidate_range(pmap, va, sva);
6362 pmap_invalidate_all(pmap);
6364 pmap_delayed_invl_finished();
6368 * Clear the modify bits on the specified physical page.
6371 pmap_clear_modify(vm_page_t m)
6373 struct md_page *pvh;
6375 pv_entry_t next_pv, pv;
6376 pd_entry_t oldpde, *pde;
6377 pt_entry_t oldpte, *pte, PG_M, PG_RW, PG_V;
6378 struct rwlock *lock;
6380 int md_gen, pvh_gen;
6382 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
6383 ("pmap_clear_modify: page %p is not managed", m));
6384 VM_OBJECT_ASSERT_WLOCKED(m->object);
6385 KASSERT(!vm_page_xbusied(m),
6386 ("pmap_clear_modify: page %p is exclusive busied", m));
6389 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
6390 * If the object containing the page is locked and the page is not
6391 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
6393 if ((m->aflags & PGA_WRITEABLE) == 0)
6395 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
6396 pa_to_pvh(VM_PAGE_TO_PHYS(m));
6397 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
6400 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
6402 if (!PMAP_TRYLOCK(pmap)) {
6403 pvh_gen = pvh->pv_gen;
6407 if (pvh_gen != pvh->pv_gen) {
6412 PG_M = pmap_modified_bit(pmap);
6413 PG_V = pmap_valid_bit(pmap);
6414 PG_RW = pmap_rw_bit(pmap);
6416 pde = pmap_pde(pmap, va);
6418 if ((oldpde & PG_RW) != 0) {
6419 if (pmap_demote_pde_locked(pmap, pde, va, &lock)) {
6420 if ((oldpde & PG_W) == 0) {
6422 * Write protect the mapping to a
6423 * single page so that a subsequent
6424 * write access may repromote.
6426 va += VM_PAGE_TO_PHYS(m) - (oldpde &
6428 pte = pmap_pde_to_pte(pde, va);
6430 if ((oldpte & PG_V) != 0) {
6431 while (!atomic_cmpset_long(pte,
6433 oldpte & ~(PG_M | PG_RW)))
6436 pmap_invalidate_page(pmap, va);
6443 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
6445 if (!PMAP_TRYLOCK(pmap)) {
6446 md_gen = m->md.pv_gen;
6447 pvh_gen = pvh->pv_gen;
6451 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
6456 PG_M = pmap_modified_bit(pmap);
6457 PG_RW = pmap_rw_bit(pmap);
6458 pde = pmap_pde(pmap, pv->pv_va);
6459 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
6460 " a 2mpage in page %p's pv list", m));
6461 pte = pmap_pde_to_pte(pde, pv->pv_va);
6462 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
6463 atomic_clear_long(pte, PG_M);
6464 pmap_invalidate_page(pmap, pv->pv_va);
6472 * Miscellaneous support routines follow
6475 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
6476 static __inline void
6477 pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask)
6482 * The cache mode bits are all in the low 32-bits of the
6483 * PTE, so we can just spin on updating the low 32-bits.
6486 opte = *(u_int *)pte;
6487 npte = opte & ~mask;
6489 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
6492 /* Adjust the cache mode for a 2MB page mapped via a PDE. */
6493 static __inline void
6494 pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask)
6499 * The cache mode bits are all in the low 32-bits of the
6500 * PDE, so we can just spin on updating the low 32-bits.
6503 opde = *(u_int *)pde;
6504 npde = opde & ~mask;
6506 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
6510 * Map a set of physical memory pages into the kernel virtual
6511 * address space. Return a pointer to where it is mapped. This
6512 * routine is intended to be used for mapping device memory, not real memory.
6516 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
6518 struct pmap_preinit_mapping *ppim;
6519 vm_offset_t va, offset;
6523 offset = pa & PAGE_MASK;
6524 size = round_page(offset + size);
6525 pa = trunc_page(pa);
6527 if (!pmap_initialized) {
6529 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
6530 ppim = pmap_preinit_mapping + i;
6531 if (ppim->va == 0) {
6535 ppim->va = virtual_avail;
6536 virtual_avail += size;
6542 panic("%s: too many preinit mappings", __func__);
6545 * If we have a preinit mapping, re-use it.
6547 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
6548 ppim = pmap_preinit_mapping + i;
6549 if (ppim->pa == pa && ppim->sz == size &&
6551 return ((void *)(ppim->va + offset));
6554 * If the specified range of physical addresses fits within
6555 * the direct map window, use the direct map.
6557 if (pa < dmaplimit && pa + size < dmaplimit) {
6558 va = PHYS_TO_DMAP(pa);
6559 if (!pmap_change_attr(va, size, mode))
6560 return ((void *)(va + offset));
6562 va = kva_alloc(size);
6564 panic("%s: Couldn't allocate KVA", __func__);
6566 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
6567 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
6568 pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
6569 pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
6570 return ((void *)(va + offset));
6574 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
6577 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
6581 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
6584 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
6588 pmap_unmapdev(vm_offset_t va, vm_size_t size)
6590 struct pmap_preinit_mapping *ppim;
6594 /* If we gave a direct map region in pmap_mapdev, do nothing */
6595 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
6597 offset = va & PAGE_MASK;
6598 size = round_page(offset + size);
6599 va = trunc_page(va);
6600 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
6601 ppim = pmap_preinit_mapping + i;
6602 if (ppim->va == va && ppim->sz == size) {
6603 if (pmap_initialized)
6609 if (va + size == virtual_avail)
6614 if (pmap_initialized)
6619 * Tries to demote a 1GB page mapping.
6622 pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
6624 pdp_entry_t newpdpe, oldpdpe;
6625 pd_entry_t *firstpde, newpde, *pde;
6626 pt_entry_t PG_A, PG_M, PG_RW, PG_V;
6630 PG_A = pmap_accessed_bit(pmap);
6631 PG_M = pmap_modified_bit(pmap);
6632 PG_V = pmap_valid_bit(pmap);
6633 PG_RW = pmap_rw_bit(pmap);
6635 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
6637 KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
6638 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
6639 if ((pdpg = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT |
6640 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
6641 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
6642 " in pmap %p", va, pmap);
6645 pdpgpa = VM_PAGE_TO_PHYS(pdpg);
6646 firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
6647 newpdpe = pdpgpa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
6648 KASSERT((oldpdpe & PG_A) != 0,
6649 ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
6650 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
6651 ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
6655 * Initialize the page directory page.
6657 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
6663 * Demote the mapping.
6668 * Invalidate a stale recursive mapping of the page directory page.
6670 pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
6672 pmap_pdpe_demotions++;
6673 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
6674 " in pmap %p", va, pmap);
6679 * Sets the memory attribute for the specified page.
6682 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
6685 m->md.pat_mode = ma;
6688 * If "m" is a normal page, update its direct mapping. This update
6689 * can be relied upon to perform any cache operations that are
6690 * required for data coherence.
6692 if ((m->flags & PG_FICTITIOUS) == 0 &&
6693 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
6695 panic("memory attribute change on the direct map failed");
6699 * Changes the specified virtual address range's memory type to that given by
6700 * the parameter "mode". The specified virtual address range must be
6701 * completely contained within either the direct map or the kernel map. If
6702 * the virtual address range is contained within the kernel map, then the
6703 * memory type for each of the corresponding ranges of the direct map is also
6704 * changed. (The corresponding ranges of the direct map are those ranges that
6705 * map the same physical pages as the specified virtual address range.) These
6706 * changes to the direct map are necessary because Intel describes the
6707 * behavior of their processors as "undefined" if two or more mappings to the
6708 * same physical page have different memory types.
6710 * Returns zero if the change completed successfully, and either EINVAL or
6711 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
6712 * of the virtual address range was not mapped, and ENOMEM is returned if
6713 * there was insufficient memory available to complete the change. In the
6714 * latter case, the memory type may have been changed on some part of the
6715 * virtual address range or the direct map.
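 *
 * Changing the attributes of only part of a 1GB or 2MB mapping first
 * requires demoting that mapping; see the calls to pmap_demote_pdpe()
 * and pmap_demote_pde() below.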
6718 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
6722 PMAP_LOCK(kernel_pmap);
6723 error = pmap_change_attr_locked(va, size, mode);
6724 PMAP_UNLOCK(kernel_pmap);
6729 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
6731 vm_offset_t base, offset, tmpva;
6732 vm_paddr_t pa_start, pa_end, pa_end1;
6736 int cache_bits_pte, cache_bits_pde, error;
6739 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
6740 base = trunc_page(va);
6741 offset = va & PAGE_MASK;
6742 size = round_page(offset + size);
6745 * Only supported on kernel virtual addresses, including the direct
6746 * map but excluding the recursive map.
6748 if (base < DMAP_MIN_ADDRESS)
6751 cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
6752 cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
6756 * Pages that aren't mapped aren't supported. Also break down 2MB pages
6757 * into 4KB pages if required.
6759 for (tmpva = base; tmpva < base + size; ) {
6760 pdpe = pmap_pdpe(kernel_pmap, tmpva);
6761 if (pdpe == NULL || *pdpe == 0)
6763 if (*pdpe & PG_PS) {
6765 * If the current 1GB page already has the required
6766 * memory type, then we need not demote this page. Just
6767 * increment tmpva to the next 1GB page frame.
6769 if ((*pdpe & X86_PG_PDE_CACHE) == cache_bits_pde) {
6770 tmpva = trunc_1gpage(tmpva) + NBPDP;
6775 * If the current offset aligns with a 1GB page frame
6776 * and there is at least 1GB left within the range, then
6777 * we need not break down this page into 2MB pages.
6779 if ((tmpva & PDPMASK) == 0 &&
6780 tmpva + PDPMASK < base + size) {
6784 if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
6787 pde = pmap_pdpe_to_pde(pdpe, tmpva);
6792 * If the current 2MB page already has the required
6793 * memory type, then we need not demote this page. Just
6794 * increment tmpva to the next 2MB page frame.
6796 if ((*pde & X86_PG_PDE_CACHE) == cache_bits_pde) {
6797 tmpva = trunc_2mpage(tmpva) + NBPDR;
6802 * If the current offset aligns with a 2MB page frame
6803 * and there is at least 2MB left within the range, then
6804 * we need not break down this page into 4KB pages.
6806 if ((tmpva & PDRMASK) == 0 &&
6807 tmpva + PDRMASK < base + size) {
6811 if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
6814 pte = pmap_pde_to_pte(pde, tmpva);
6822 * Ok, all the pages exist, so run through them updating their
6823 * cache mode if required.
6825 pa_start = pa_end = 0;
6826 for (tmpva = base; tmpva < base + size; ) {
6827 pdpe = pmap_pdpe(kernel_pmap, tmpva);
6828 if (*pdpe & PG_PS) {
6829 if ((*pdpe & X86_PG_PDE_CACHE) != cache_bits_pde) {
6830 pmap_pde_attr(pdpe, cache_bits_pde,
6834 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6835 (*pdpe & PG_PS_FRAME) < dmaplimit) {
6836 if (pa_start == pa_end) {
6837 /* Start physical address run. */
6838 pa_start = *pdpe & PG_PS_FRAME;
6839 pa_end = pa_start + NBPDP;
6840 } else if (pa_end == (*pdpe & PG_PS_FRAME))
6843 /* Run ended, update direct map. */
6844 error = pmap_change_attr_locked(
6845 PHYS_TO_DMAP(pa_start),
6846 pa_end - pa_start, mode);
6849 /* Start physical address run. */
6850 pa_start = *pdpe & PG_PS_FRAME;
6851 pa_end = pa_start + NBPDP;
6854 tmpva = trunc_1gpage(tmpva) + NBPDP;
6857 pde = pmap_pdpe_to_pde(pdpe, tmpva);
6859 if ((*pde & X86_PG_PDE_CACHE) != cache_bits_pde) {
6860 pmap_pde_attr(pde, cache_bits_pde,
6864 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6865 (*pde & PG_PS_FRAME) < dmaplimit) {
6866 if (pa_start == pa_end) {
6867 /* Start physical address run. */
6868 pa_start = *pde & PG_PS_FRAME;
6869 pa_end = pa_start + NBPDR;
6870 } else if (pa_end == (*pde & PG_PS_FRAME))
6873 /* Run ended, update direct map. */
6874 error = pmap_change_attr_locked(
6875 PHYS_TO_DMAP(pa_start),
6876 pa_end - pa_start, mode);
6879 /* Start physical address run. */
6880 pa_start = *pde & PG_PS_FRAME;
6881 pa_end = pa_start + NBPDR;
6884 tmpva = trunc_2mpage(tmpva) + NBPDR;
6886 pte = pmap_pde_to_pte(pde, tmpva);
6887 if ((*pte & X86_PG_PTE_CACHE) != cache_bits_pte) {
6888 pmap_pte_attr(pte, cache_bits_pte,
6892 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6893 (*pte & PG_FRAME) < dmaplimit) {
6894 if (pa_start == pa_end) {
6895 /* Start physical address run. */
6896 pa_start = *pte & PG_FRAME;
6897 pa_end = pa_start + PAGE_SIZE;
6898 } else if (pa_end == (*pte & PG_FRAME))
6899 pa_end += PAGE_SIZE;
6901 /* Run ended, update direct map. */
6902 error = pmap_change_attr_locked(
6903 PHYS_TO_DMAP(pa_start),
6904 pa_end - pa_start, mode);
6907 /* Start physical address run. */
6908 pa_start = *pte & PG_FRAME;
6909 pa_end = pa_start + PAGE_SIZE;
6915 if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
6916 pa_end1 = MIN(pa_end, dmaplimit);
6917 if (pa_start != pa_end1)
6918 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
6919 pa_end1 - pa_start, mode);
6923 * Flush the CPU caches if required, to ensure that no data remains
6924 * cached with the old memory type.
6927 pmap_invalidate_range(kernel_pmap, base, tmpva);
6928 pmap_invalidate_cache_range(base, tmpva, FALSE);
6934 * Demotes any mapping within the direct map region that covers more than the
6935 * specified range of physical addresses. This range's size must be a power
6936 * of two and its starting address must be a multiple of its size. Since the
6937 * demotion does not change any attributes of the mapping, a TLB invalidation
6938 * is not mandatory. The caller may, however, request a TLB invalidation.
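 *
 * For example, demoting a single 4KB page that is currently covered
 * by a 1GB direct map mapping first replaces that mapping with 2MB
 * mappings and then replaces the relevant 2MB mapping with 4KB
 * mappings.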
6941 pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
6950 KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
6951 KASSERT((base & (len - 1)) == 0,
6952 ("pmap_demote_DMAP: base is not a multiple of len"));
6953 if (len < NBPDP && base < dmaplimit) {
6954 va = PHYS_TO_DMAP(base);
6956 PMAP_LOCK(kernel_pmap);
6957 pdpe = pmap_pdpe(kernel_pmap, va);
6958 if ((*pdpe & X86_PG_V) == 0)
6959 panic("pmap_demote_DMAP: invalid PDPE");
6960 if ((*pdpe & PG_PS) != 0) {
6961 if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
6962 panic("pmap_demote_DMAP: PDPE failed");
6966 pde = pmap_pdpe_to_pde(pdpe, va);
6967 if ((*pde & X86_PG_V) == 0)
6968 panic("pmap_demote_DMAP: invalid PDE");
6969 if ((*pde & PG_PS) != 0) {
6970 if (!pmap_demote_pde(kernel_pmap, pde, va))
6971 panic("pmap_demote_DMAP: PDE failed");
6975 if (changed && invalidate)
6976 pmap_invalidate_page(kernel_pmap, va);
6977 PMAP_UNLOCK(kernel_pmap);
/*
 * Perform the pmap work for mincore(2).
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pd_entry_t *pdep;
	pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
	vm_paddr_t pa;
	int val;

	PG_A = pmap_accessed_bit(pmap);
	PG_M = pmap_modified_bit(pmap);
	PG_V = pmap_valid_bit(pmap);
	PG_RW = pmap_rw_bit(pmap);

	PMAP_LOCK(pmap);
retry:
	pdep = pmap_pde(pmap, addr);
	if (pdep != NULL && (*pdep & PG_V)) {
		if (*pdep & PG_PS) {
			pte = *pdep;
			/* Compute the physical address of the 4KB page. */
			pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
			    PG_FRAME;
			val = MINCORE_SUPER;
		} else {
			pte = *pmap_pde_to_pte(pdep, addr);
			pa = pte & PG_FRAME;
			val = 0;
		}
	} else {
		pte = 0;
		pa = 0;
		val = 0;
	}
	if ((pte & PG_V) != 0) {
		val |= MINCORE_INCORE;
		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((pte & PG_A) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}

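/*
 * Allocate a PCID for the pmap on the current CPU.  Returns CR3_PCID_SAVE
 * when the pmap's cached PCID for this CPU is still usable (the kernel
 * pmap, or a matching per-CPU generation), so the TLB contents tagged with
 * that PCID may be preserved across the %cr3 reload.  Otherwise a new PCID
 * is assigned, bumping the per-CPU generation when the PCID space wraps,
 * and 0 is returned.
 */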
static uint64_t
pmap_pcid_alloc(pmap_t pmap, u_int cpuid)
{
	uint32_t gen, new_gen, pcid_next;

	CRITICAL_ASSERT(curthread);
	gen = PCPU_GET(pcid_gen);
	if (pmap->pm_pcids[cpuid].pm_pcid == PMAP_PCID_KERN ||
	    pmap->pm_pcids[cpuid].pm_gen == gen)
		return (CR3_PCID_SAVE);
	pcid_next = PCPU_GET(pcid_next);
	KASSERT(pcid_next <= PMAP_PCID_OVERMAX, ("cpu %d pcid_next %#x",
	    cpuid, pcid_next));
	if (pcid_next == PMAP_PCID_OVERMAX) {
		new_gen = gen + 1;
		/* Handle wraparound of the generation counter. */
		if (new_gen == 0)
			new_gen = 1;
		PCPU_SET(pcid_gen, new_gen);
		pcid_next = PMAP_PCID_KERN + 1;
	} else {
		new_gen = gen;
	}
	pmap->pm_pcids[cpuid].pm_pcid = pcid_next;
	pmap->pm_pcids[cpuid].pm_gen = new_gen;
	PCPU_SET(pcid_next, pcid_next + 1);
	return (0);
}

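/*
 * Switch the current CPU to the given thread's pmap: mark the CPU active
 * in the new pmap, reload %cr3 (reusing the cached PCID when possible),
 * update curpmap, and finally mark the CPU inactive in the old pmap.
 */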
void
pmap_activate_sw(struct thread *td)
{
	pmap_t oldpmap, pmap;
	uint64_t cached, cr3;
	register_t rflags;
	u_int cpuid;

	oldpmap = PCPU_GET(curpmap);
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	if (oldpmap == pmap)
		return;
	cpuid = PCPU_GET(cpuid);
#ifdef SMP
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
	CPU_SET(cpuid, &pmap->pm_active);
#endif
	cr3 = rcr3();
	if (pmap_pcid_enabled) {
		cached = pmap_pcid_alloc(pmap, cpuid);
		KASSERT(pmap->pm_pcids[cpuid].pm_pcid >= 0 &&
		    pmap->pm_pcids[cpuid].pm_pcid < PMAP_PCID_OVERMAX,
		    ("pmap %p cpu %d pcid %#x", pmap, cpuid,
		    pmap->pm_pcids[cpuid].pm_pcid));
		KASSERT(pmap->pm_pcids[cpuid].pm_pcid != PMAP_PCID_KERN ||
		    pmap == kernel_pmap,
		    ("non-kernel pmap thread %p pmap %p cpu %d pcid %#x",
		    td, pmap, cpuid, pmap->pm_pcids[cpuid].pm_pcid));

		/*
		 * If the INVPCID instruction is not available, the
		 * invalidate_all IPI is handled by invltlb_pcid_handler(),
		 * which checks for curpmap == smp_tlb_pmap.  The sequence
		 * of operations below has a window where %cr3 is already
		 * loaded with the new pmap's PML4 address, but curpmap is
		 * not yet updated.  An invltlb IPI delivered between the
		 * two updates would then execute as a NOP, leaving stale
		 * TLB entries.
		 *
		 * Note that the most typical use of pmap_activate_sw(),
		 * from the context switch, is immune to this race, because
		 * interrupts are disabled (while the thread lock is owned),
		 * and the IPI happens after curpmap is updated.  Protect
		 * other callers in a similar way, by disabling interrupts
		 * around the %cr3 register reload and the curpmap
		 * assignment.
		 */
		if (!invpcid_works)
			rflags = intr_disable();
		if (!cached || (cr3 & ~CR3_PCID_MASK) != pmap->pm_cr3) {
			load_cr3(pmap->pm_cr3 | pmap->pm_pcids[cpuid].pm_pcid |
			    cached);
			if (cached)
				PCPU_INC(pm_save_cnt);
		}
		PCPU_SET(curpmap, pmap);
		if (!invpcid_works)
			intr_restore(rflags);
	} else if (cr3 != pmap->pm_cr3) {
		load_cr3(pmap->pm_cr3);
		PCPU_SET(curpmap, pmap);
	}
#ifdef SMP
	CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
#else
	CPU_CLR(cpuid, &oldpmap->pm_active);
#endif
}

void
pmap_activate(struct thread *td)
{

	critical_enter();
	pmap_activate_sw(td);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBPDR)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & PDRMASK;
	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
	    (*addr & PDRMASK) == superpage_offset)
		return;
	if ((*addr & PDRMASK) < superpage_offset)
		*addr = (*addr & ~PDRMASK) + superpage_offset;
	else
		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}

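/*
 * Counters for the software emulation of the accessed and dirty bits,
 * compiled in only when INVARIANTS is defined (see the #ifdef below).
 */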
#ifdef INVARIANTS
static unsigned long num_dirty_emulations;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_dirty_emulations, CTLFLAG_RW,
    &num_dirty_emulations, 0, NULL);

static unsigned long num_accessed_emulations;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_accessed_emulations, CTLFLAG_RW,
    &num_accessed_emulations, 0, NULL);

static unsigned long num_superpage_accessed_emulations;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, num_superpage_accessed_emulations, CTLFLAG_RW,
    &num_superpage_accessed_emulations, 0, NULL);

static unsigned long ad_emulation_superpage_promotions;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, ad_emulation_superpage_promotions, CTLFLAG_RW,
    &ad_emulation_superpage_promotions, 0, NULL);
#endif	/* INVARIANTS */

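/*
 * Emulate the hardware accessed and dirty bits for pmaps that request
 * software A/D-bit emulation (pmap_emulate_ad_bits()), such as EPT pmaps
 * lacking A/D-bit support.  Returns 0 if the fault was handled here by
 * setting PG_A (and PG_M for write faults), or -1 if it must be handled
 * by the regular fault path.
 */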
int
pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
{
	int rv;
	struct rwlock *lock;
	vm_page_t m, mpte;
	pd_entry_t *pde;
	pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;

	KASSERT(ftype == VM_PROT_READ || ftype == VM_PROT_WRITE,
	    ("pmap_emulate_accessed_dirty: invalid fault type %d", ftype));
	if (!pmap_emulate_ad_bits(pmap))
		return (-1);

	PG_A = pmap_accessed_bit(pmap);
	PG_M = pmap_modified_bit(pmap);
	PG_V = pmap_valid_bit(pmap);
	PG_RW = pmap_rw_bit(pmap);

	rv = -1;
	lock = NULL;
	PMAP_LOCK(pmap);

	pde = pmap_pde(pmap, va);
	if (pde == NULL || (*pde & PG_V) == 0)
		goto done;

	if ((*pde & PG_PS) != 0) {
		if (ftype == VM_PROT_READ) {
#ifdef INVARIANTS
			atomic_add_long(&num_superpage_accessed_emulations, 1);
#endif
			*pde |= PG_A;
			rv = 0;
		}
		goto done;
	}

	pte = pmap_pde_to_pte(pde, va);
	if ((*pte & PG_V) == 0)
		goto done;

	if (ftype == VM_PROT_WRITE) {
		if ((*pte & PG_RW) == 0)
			goto done;
		/*
		 * Set the modified and accessed bits simultaneously.
		 *
		 * Intel EPT PTEs that do software emulation of A/D bits map
		 * PG_A and PG_M to EPT_PG_READ and EPT_PG_WRITE respectively.
		 * An EPT misconfiguration is triggered if the PTE is writable
		 * but not readable (WR=10).  This is avoided by setting PG_A
		 * and PG_M simultaneously.
		 */
		*pte |= PG_M | PG_A;
	} else {
		*pte |= PG_A;
	}

	/* try to promote the mapping */
	if (va < VM_MAXUSER_ADDRESS)
		mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
	else
		mpte = NULL;

	m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);

	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
	    pmap_ps_enabled(pmap) &&
	    (m->flags & PG_FICTITIOUS) == 0 &&
	    vm_reserv_level_iffullpop(m) == 0) {
		pmap_promote_pde(pmap, pde, va, &lock);
#ifdef INVARIANTS
		atomic_add_long(&ad_emulation_superpage_promotions, 1);
#endif
	}
#ifdef INVARIANTS
	if (ftype == VM_PROT_WRITE)
		atomic_add_long(&num_dirty_emulations, 1);
	else
		atomic_add_long(&num_accessed_emulations, 1);
#endif
	rv = 0;		/* success */
done:
	if (lock != NULL)
		rw_wunlock(lock);
	PMAP_UNLOCK(pmap);
	return (rv);
}

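/*
 * Copy the page-table entries found while walking the paging structures
 * for "va" into ptr[], stopping at the first invalid or large-page entry,
 * and report the number of levels recorded via *num.
 */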
void
pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
{
	pml4_entry_t *pml4;
	pdp_entry_t *pdp;
	pd_entry_t *pde;
	pt_entry_t *pte, PG_V;
	int idx;

	idx = 0;
	PG_V = pmap_valid_bit(pmap);
	PMAP_LOCK(pmap);

	pml4 = pmap_pml4e(pmap, va);
	ptr[idx++] = *pml4;
	if ((*pml4 & PG_V) == 0)
		goto done;

	pdp = pmap_pml4e_to_pdpe(pml4, va);
	ptr[idx++] = *pdp;
	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0)
		goto done;

	pde = pmap_pdpe_to_pde(pdp, va);
	ptr[idx++] = *pde;
	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0)
		goto done;

	pte = pmap_pde_to_pte(pde, va);
	ptr[idx++] = *pte;

done:
	PMAP_UNLOCK(pmap);
	*num = idx;
}

/*
 * Get the kernel virtual address of a set of physical pages.  If there are
 * physical addresses not covered by the DMAP, perform a transient mapping
 * that will be removed when calling pmap_unmap_io_transient.
 *
 * \param page        The pages the caller wishes to obtain the virtual
 *                    address on the kernel memory map.
 * \param vaddr       On return contains the kernel virtual memory address
 *                    of the pages passed in the page parameter.
 * \param count       Number of pages passed in.
 * \param can_fault   TRUE if the thread using the mapped pages can take
 *                    page faults, FALSE otherwise.
 *
 * \returns TRUE if the caller must call pmap_unmap_io_transient when
 *          finished or FALSE otherwise.
 */
boolean_t
pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
    boolean_t can_fault)
{
	vm_paddr_t paddr;
	boolean_t needs_mapping;
	pt_entry_t *pte;
	int cache_bits, error, i;

	/*
	 * Allocate any KVA space that we need; this is done in a separate
	 * loop to prevent calling vmem_alloc while pinned.
	 */
	needs_mapping = FALSE;
	for (i = 0; i < count; i++) {
		paddr = VM_PAGE_TO_PHYS(page[i]);
		if (__predict_false(paddr >= dmaplimit)) {
			error = vmem_alloc(kernel_arena, PAGE_SIZE,
			    M_BESTFIT | M_WAITOK, &vaddr[i]);
			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
			needs_mapping = TRUE;
		} else {
			vaddr[i] = PHYS_TO_DMAP(paddr);
		}
	}

	/* Exit early if everything is covered by the DMAP. */
	if (!needs_mapping)
		return (FALSE);

	/*
	 * NB:  The sequence of updating a page table followed by accesses
	 * to the corresponding pages used in the !DMAP case is subject to
	 * the situation described in the "AMD64 Architecture Programmer's
	 * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special
	 * Coherency Considerations".  Therefore, issuing the INVLPG right
	 * after modifying the PTE bits is crucial.
	 */
	if (!can_fault)
		sched_pin();
	for (i = 0; i < count; i++) {
		paddr = VM_PAGE_TO_PHYS(page[i]);
		if (paddr >= dmaplimit) {
			if (can_fault) {
				/*
				 * Slow path: since we can take page faults
				 * while the mappings are active, don't pin
				 * the thread to the CPU; instead add a
				 * global mapping visible to all CPUs.
				 */
				pmap_qenter(vaddr[i], &page[i], 1);
			} else {
				pte = vtopte(vaddr[i]);
				cache_bits = pmap_cache_bits(kernel_pmap,
				    page[i]->md.pat_mode, 0);
				pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
				    cache_bits);
				invlpg(vaddr[i]);
			}
		}
	}

	return (needs_mapping);
}

void
pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
    boolean_t can_fault)
{
	vm_paddr_t paddr;
	int i;

	if (!can_fault)
		sched_unpin();
	for (i = 0; i < count; i++) {
		paddr = VM_PAGE_TO_PHYS(page[i]);
		if (paddr >= dmaplimit) {
			if (can_fault)
				pmap_qremove(vaddr[i], 1);
			vmem_free(kernel_arena, vaddr[i], PAGE_SIZE);
		}
	}
}

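/*
 * Illustrative usage sketch (not part of the original source): a caller
 * that may touch pages above dmaplimit pairs the two functions above,
 * e.g. for a single page "m":
 *
 *	vm_offset_t va[1];
 *	boolean_t mapped;
 *
 *	mapped = pmap_map_io_transient(&m, va, 1, FALSE);
 *	(access the page through va[0])
 *	if (mapped)
 *		pmap_unmap_io_transient(&m, va, 1, FALSE);
 */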
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
	vm_paddr_t paddr;

	paddr = VM_PAGE_TO_PHYS(m);
	if (paddr < dmaplimit)
		return (PHYS_TO_DMAP(paddr));
	mtx_lock_spin(&qframe_mtx);
	KASSERT(*vtopte(qframe) == 0, ("qframe busy"));
	pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
	    X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0));
	return (qframe);
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

	if (addr != qframe)
		return;
	pte_store(vtopte(qframe), 0);
	invlpg(qframe);
	mtx_unlock_spin(&qframe_mtx);
}

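/*
 * DDB commands for inspecting the page-table entries that map a virtual
 * address ("show pte") and for translating a physical address into its
 * direct map address ("show phys2dmap").
 */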
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(pte, pmap_print_pte)
{
	pmap_t pmap;
	pml4_entry_t *pml4;
	pdp_entry_t *pdp;
	pd_entry_t *pde;
	pt_entry_t *pte, PG_V;
	vm_offset_t va;

	if (have_addr) {
		va = (vm_offset_t)addr;
		pmap = PCPU_GET(curpmap); /* XXX */
	} else {
		db_printf("show pte addr\n");
		return;
	}
	PG_V = pmap_valid_bit(pmap);
	pml4 = pmap_pml4e(pmap, va);
	db_printf("VA %#016lx pml4e %#016lx", va, *pml4);
	if ((*pml4 & PG_V) == 0) {
		db_printf("\n");
		return;
	}
	pdp = pmap_pml4e_to_pdpe(pml4, va);
	db_printf(" pdpe %#016lx", *pdp);
	if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) {
		db_printf("\n");
		return;
	}
	pde = pmap_pdpe_to_pde(pdp, va);
	db_printf(" pde %#016lx", *pde);
	if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) {
		db_printf("\n");
		return;
	}
	pte = pmap_pde_to_pte(pde, va);
	db_printf(" pte %#016lx\n", *pte);
}

DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap)
{
	vm_paddr_t a;

	if (have_addr) {
		a = (vm_paddr_t)addr;
		db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a));
	} else
		db_printf("show phys2dmap addr\n");