/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2003 Peter Wemm
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Portions of this software were developed by Andrew Turner under
 * sponsorship from The FreeBSD Foundation.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/physmem.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sx.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/vm_dumpset.h>

#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/sbi.h>
#define	NUL1E		(Ln_ENTRIES * Ln_ENTRIES)
#define	NUL2E		(Ln_ENTRIES * NUL1E)
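/*
 * These constants size the page table page index space: pindexes below
 * NUL2E name L3 (leaf) page table pages, while pindexes at or above NUL2E
 * name L2 page table pages that are installed directly into an L1 slot;
 * see the ptepindex test in _pmap_alloc_l3() below.
 */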
#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define	PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define	PMAP_INLINE	extern inline
#endif
#else
#define	PMAP_INLINE
#endif

#ifdef PV_STATS
#define	PV_STAT(x)	do { x ; } while (0)
#else
#define	PV_STAT(x)	do { } while (0)
#endif
#define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
#define	pa_to_pvh(pa)		(&pv_table[pa_index(pa)])

#define	NPV_LIST_LOCKS	MAXCPU

#define	PHYS_TO_PV_LIST_LOCK(pa)	\
	(&pv_list_locks[pmap_l2_pindex(pa) % NPV_LIST_LOCKS])

#define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
	struct rwlock **_lockp = (lockp);		\
	struct rwlock *_new_lock;			\
							\
	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
	if (_new_lock != *_lockp) {			\
		if (*_lockp != NULL)			\
			rw_wunlock(*_lockp);		\
		*_lockp = _new_lock;			\
		rw_wlock(_new_lock);			\
	}						\
} while (0)

#define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))

#define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
	struct rwlock **_lockp = (lockp);		\
							\
	if (*_lockp != NULL) {				\
		rw_wunlock(*_lockp);			\
		*_lockp = NULL;				\
	}						\
} while (0)

#define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
	PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
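/*
 * PV list locks are assigned by hashing the 2MB-aligned physical address
 * into a fixed pool of NPV_LIST_LOCKS rwlocks, so every 4KB page within a
 * given 2MB frame shares one lock with its superpage.  The CHANGE_* macros
 * let a caller migrate from one pv list lock to another without ever
 * holding two of them at once.
 */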
/* The list of all the user pmaps */
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps = LIST_HEAD_INITIALIZER();

struct pmap kernel_pmap_store;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t kernel_vm_end = 0;

vm_paddr_t dmap_phys_base;	/* The start of the dmap region */
vm_paddr_t dmap_phys_max;	/* The limit of the dmap region */
vm_offset_t dmap_max_addr;	/* The virtual address limit of the dmap */

/* This code assumes all L1 DMAP entries will be used */
CTASSERT((DMAP_MIN_ADDRESS & ~L1_OFFSET) == DMAP_MIN_ADDRESS);
CTASSERT((DMAP_MAX_ADDRESS & ~L1_OFFSET) == DMAP_MAX_ADDRESS);
static struct rwlock_padalign pvh_global_lock;
static struct mtx_padalign allpmaps_lock;

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM/pmap parameters");

static int superpages_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
    CTLFLAG_RDTUN, &superpages_enabled, 0,
    "Enable support for transparent superpages");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "2MB page mapping counters");

static u_long pmap_l2_demotions;
SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_l2_demotions, 0,
    "2MB page demotions");

static u_long pmap_l2_mappings;
SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_l2_mappings, 0,
    "2MB page mappings");

static u_long pmap_l2_p_failures;
SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_l2_p_failures, 0,
    "2MB page promotion failures");

static u_long pmap_l2_promotions;
SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_l2_promotions, 0,
    "2MB page promotions");
/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static struct mtx pv_chunks_mutex;
static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
static struct md_page *pv_table;
static struct md_page pv_dummy;

extern cpuset_t all_harts;
/*
 * Internal flags for pmap_enter()'s helper functions.
 */
#define	PMAP_ENTER_NORECLAIM	0x1000000	/* Don't reclaim PV entries. */
#define	PMAP_ENTER_NOREPLACE	0x2000000	/* Don't replace mappings. */

static void	free_pv_chunk(struct pv_chunk *pc);
static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
		    vm_offset_t va);

static bool	pmap_demote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va);
static bool	pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2,
		    vm_offset_t va, struct rwlock **lockp);
static int	pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
		    u_int flags, vm_page_t m, struct rwlock **lockp);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
		    vm_page_t m, vm_prot_t prot, vm_page_t mpte,
		    struct rwlock **lockp);
static int	pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
		    pd_entry_t ptepde, struct spglist *free,
		    struct rwlock **lockp);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
		    vm_page_t m, struct rwlock **lockp);

static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
		    struct rwlock **lockp);

static void	_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
		    struct spglist *free);
static int	pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);

static int	pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
#define	pmap_clear(pte)			pmap_store(pte, 0)
#define	pmap_clear_bits(pte, bits)	atomic_clear_64(pte, bits)
#define	pmap_load_store(pte, entry)	atomic_swap_64(pte, entry)
#define	pmap_load_clear(pte)		pmap_load_store(pte, 0)
#define	pmap_load(pte)			atomic_load_64(pte)
#define	pmap_store(pte, entry)		atomic_store_64(pte, entry)
#define	pmap_store_bits(pte, bits)	atomic_set_64(pte, bits)
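/*
 * All page table entry updates go through these 64-bit atomics so that a
 * hardware page table walk, which may run concurrently on another hart,
 * always observes either the old or the new entry and never a torn value.
 */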
/********************/
/* Inline functions */
/********************/

static __inline void
pagecopy(void *s, void *d)
{

	memcpy(d, s, PAGE_SIZE);
}

static __inline void
pagezero(void *p)
{

	bzero(p, PAGE_SIZE);
}
#define	pmap_l1_index(va)	(((va) >> L1_SHIFT) & Ln_ADDR_MASK)
#define	pmap_l2_index(va)	(((va) >> L2_SHIFT) & Ln_ADDR_MASK)
#define	pmap_l3_index(va)	(((va) >> L3_SHIFT) & Ln_ADDR_MASK)

#define	PTE_TO_PHYS(pte) \
    ((((pte) & ~PTE_HI_MASK) >> PTE_PPN0_S) * PAGE_SIZE)
#define	L2PTE_TO_PHYS(l2) \
    ((((l2) & ~PTE_HI_MASK) >> PTE_PPN1_S) << L2_SHIFT)
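/*
 * The PPN field of a PTE begins at bit PTE_PPN0_S, so for a 4KB mapping the
 * physical address is recovered as (ppn << PAGE_SHIFT); for example, a PTE
 * whose PPN field holds 0x12345 maps physical page 0x12345000.
 * L2PTE_TO_PHYS() starts from PTE_PPN1_S instead because the low PPN bits
 * of a 2MB superpage entry must be zero.
 */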
static __inline pd_entry_t *
pmap_l1(pmap_t pmap, vm_offset_t va)
{

	KASSERT(VIRT_IS_VALID(va),
	    ("%s: malformed virtual address %#lx", __func__, va));
	return (&pmap->pm_l1[pmap_l1_index(va)]);
}

static __inline pd_entry_t *
pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
{
	pd_entry_t *l2;
	vm_paddr_t phys;

	phys = PTE_TO_PHYS(pmap_load(l1));
	l2 = (pd_entry_t *)PHYS_TO_DMAP(phys);

	return (&l2[pmap_l2_index(va)]);
}

static __inline pd_entry_t *
pmap_l2(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = pmap_l1(pmap, va);
	if ((pmap_load(l1) & PTE_V) == 0)
		return (NULL);
	if ((pmap_load(l1) & PTE_RX) != 0)
		return (NULL);

	return (pmap_l1_to_l2(l1, va));
}

static __inline pt_entry_t *
pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
{
	pt_entry_t *l3;
	vm_paddr_t phys;

	phys = PTE_TO_PHYS(pmap_load(l2));
	l3 = (pt_entry_t *)PHYS_TO_DMAP(phys);

	return (&l3[pmap_l3_index(va)]);
}

static __inline pt_entry_t *
pmap_l3(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l2;

	l2 = pmap_l2(pmap, va);
	if (l2 == NULL)
		return (NULL);
	if ((pmap_load(l2) & PTE_V) == 0)
		return (NULL);
	if ((pmap_load(l2) & PTE_RX) != 0)
		return (NULL);

	return (pmap_l2_to_l3(l2, va));
}
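/*
 * These walkers return a pointer into the page table itself (through the
 * direct map), or NULL when an upper level is missing or is itself a leaf
 * (PTE_RX set).  A typical lookup therefore reads:
 *
 *	l3 = pmap_l3(pmap, va);
 *	if (l3 != NULL && (pmap_load(l3) & PTE_V) != 0)
 *		pa = PTE_TO_PHYS(pmap_load(l3)) | (va & L3_OFFSET);
 *
 * which is the pattern used by pmap_extract() below.
 */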
static __inline void
pmap_resident_count_inc(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->pm_stats.resident_count += count;
}

static __inline void
pmap_resident_count_dec(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->pm_stats.resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->pm_stats.resident_count, count));
	pmap->pm_stats.resident_count -= count;
}
static void
pmap_distribute_l1(struct pmap *pmap, vm_pindex_t l1index,
    pd_entry_t entry)
{
	struct pmap *user_pmap;
	pd_entry_t *l1;

	/* Distribute new kernel L1 entry to all the user pmaps */
	if (pmap != kernel_pmap)
		return;

	mtx_lock(&allpmaps_lock);
	LIST_FOREACH(user_pmap, &allpmaps, pm_list) {
		l1 = &user_pmap->pm_l1[l1index];
		pmap_store(l1, entry);
	}
	mtx_unlock(&allpmaps_lock);
}
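/*
 * Every user pmap's L1 page starts as a full copy of the kernel L1 (see
 * pmap_pinit()), since RISC-V has a single root page table per hart rather
 * than separate user and kernel roots.  A later change to a kernel L1 slot
 * must therefore be replicated into each user L1 page, which is what the
 * walk over allpmaps above accomplishes.
 */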
static pt_entry_t *
pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
    u_int *l2_slot)
{
	pt_entry_t *l2;
	pd_entry_t *l1;

	l1 = (pd_entry_t *)l1pt;
	*l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;

	/* Check locore has used a table L1 map */
	KASSERT((l1[*l1_slot] & PTE_RX) == 0,
	    ("Invalid bootstrap L1 table"));

	/* Find the address of the L2 table */
	l2 = (pt_entry_t *)init_pt_va;
	*l2_slot = pmap_l2_index(va);

	return (l2);
}
static vm_paddr_t
pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
{
	u_int l1_slot, l2_slot;
	pt_entry_t *l2;
	vm_paddr_t ret;

	l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);

	/* Check locore has used L2 superpages */
	KASSERT((l2[l2_slot] & PTE_RX) != 0,
	    ("Invalid bootstrap L2 table"));

	/* L2 is superpages */
	ret = L2PTE_TO_PHYS(l2[l2_slot]);
	ret += (va & L2_OFFSET);

	return (ret);
}
static void
pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa, vm_paddr_t max_pa)
{
	vm_offset_t va;
	vm_paddr_t pa;
	pd_entry_t *l1;
	u_int l1_slot;
	pt_entry_t entry;
	pn_t pn;

	pa = dmap_phys_base = min_pa & ~L1_OFFSET;
	va = DMAP_MIN_ADDRESS;
	l1 = (pd_entry_t *)kern_l1;
	l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS);

	for (; va < DMAP_MAX_ADDRESS && pa < max_pa;
	    pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
		KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));

		/* superpages */
		pn = (pa / PAGE_SIZE);
		entry = PTE_KERN;
		entry |= (pn << PTE_PPN0_S);
		pmap_store(&l1[l1_slot], entry);
	}

	/* Set the upper limit of the DMAP region */
	dmap_phys_max = pa;
	dmap_max_addr = va;

	sfence_vma();
}
static vm_offset_t
pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
{
	vm_offset_t l3pt;
	pt_entry_t entry;
	pd_entry_t *l2;
	vm_paddr_t pa;
	u_int l2_slot;
	pn_t pn;

	KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));

	l2 = pmap_l2(kernel_pmap, va);
	l2 = (pd_entry_t *)((uintptr_t)l2 & ~(PAGE_SIZE - 1));
	l2_slot = pmap_l2_index(va);
	l3pt = l3_start;

	for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
		KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));

		pa = pmap_early_vtophys(l1pt, l3pt);
		pn = (pa / PAGE_SIZE);
		entry = (PTE_V);
		entry |= (pn << PTE_PPN0_S);
		pmap_store(&l2[l2_slot], entry);
		l3pt += PAGE_SIZE;
	}

	/* Clean the L2 page table */
	memset((void *)l3_start, 0, l3pt - l3_start);

	return (l3pt);
}
/*
 *	Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
{
	u_int l1_slot, l2_slot;
	vm_offset_t freemempos;
	vm_offset_t dpcpu, msgbufpv;
	vm_paddr_t max_pa, min_pa, pa;
	pd_entry_t *l2p;
	int i;

	printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);

	/* Set this early so we can use the pagetable walking functions */
	kernel_pmap_store.pm_l1 = (pd_entry_t *)l1pt;
	PMAP_LOCK_INIT(kernel_pmap);

	rw_init(&pvh_global_lock, "pmap pv global");

	/*
	 * Set the current CPU as active in the kernel pmap.  Secondary cores
	 * will add themselves later in init_secondary().  The SBI firmware
	 * may rely on this mask being precise, so CPU_FILL() is not used.
	 */
	CPU_SET(PCPU_GET(hart), &kernel_pmap->pm_active);

	/* Assume the address we were loaded to is a valid physical address. */
	min_pa = max_pa = kernstart;

	physmap_idx = physmem_avail(physmap, nitems(physmap));
	physmap_idx /= 2;

	/*
	 * Find the minimum physical address.  physmap is sorted,
	 * but may contain empty ranges.
	 */
	for (i = 0; i < physmap_idx * 2; i += 2) {
		if (physmap[i] == physmap[i + 1])
			continue;
		if (physmap[i] <= min_pa)
			min_pa = physmap[i];
		if (physmap[i + 1] > max_pa)
			max_pa = physmap[i + 1];
	}
	printf("physmap_idx %u\n", physmap_idx);
	printf("min_pa %lx\n", min_pa);
	printf("max_pa %lx\n", max_pa);

	/* Create a direct map region early so we can use it for pa -> va */
	pmap_bootstrap_dmap(l1pt, min_pa, max_pa);

	/*
	 * Read the page table to find out what is already mapped.
	 * This assumes we have mapped a block of memory from KERNBASE
	 * using a single L1 entry.
	 */
	(void)pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);

	/* Sanity check the index, KERNBASE should be the first VA */
	KASSERT(l2_slot == 0, ("The L2 index is non-zero"));

	freemempos = roundup2(KERNBASE + kernlen, PAGE_SIZE);

	/* Create the l3 tables for the early devmap */
	freemempos = pmap_bootstrap_l3(l1pt,
	    VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos);

	/*
	 * Invalidate the mapping we created for the DTB.  At this point a copy
	 * has been created, and we no longer need it.  We want to avoid the
	 * possibility of an aliased mapping in the future.
	 */
	l2p = pmap_l2(kernel_pmap, VM_EARLY_DTB_ADDRESS);
	if ((pmap_load(l2p) & PTE_V) != 0)
		pmap_clear(l2p);

	sfence_vma();

#define	alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np * PAGE_SIZE);					\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	/* Allocate dynamic per-cpu area. */
	alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
	alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
	msgbufp = (void *)msgbufpv;

	virtual_avail = roundup2(freemempos, L2_SIZE);
	virtual_end = VM_MAX_KERNEL_ADDRESS - L2_SIZE;
	kernel_vm_end = virtual_avail;

	pa = pmap_early_vtophys(l1pt, freemempos);

	physmem_exclude_region(kernstart, pa - kernstart, EXFLAG_NOALLOC);
}
/*
 *	Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
}
/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	vm_size_t s;
	int i, pv_npg;

	/*
	 * Initialize the pv chunk and pmap list mutexes.
	 */
	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_DEF);

	/*
	 * Initialize the pool of pv list locks.
	 */
	for (i = 0; i < NPV_LIST_LOCKS; i++)
		rw_init(&pv_list_locks[i], "pmap pv list");

	/*
	 * Calculate the size of the pv head table for superpages.
	 */
	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);

	/*
	 * Allocate memory for the pv head table for superpages.
	 */
	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
	s = round_page(s);
	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
	for (i = 0; i < pv_npg; i++)
		TAILQ_INIT(&pv_table[i].pv_list);
	TAILQ_INIT(&pv_dummy.pv_list);

	if (superpages_enabled)
		pagesizes[1] = L2_SIZE;
}
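/*
 * Setting pagesizes[1] above is what advertises 2MB superpage support to
 * the machine-independent VM layer; with it left at zero, the MI code does
 * not create the reservations that make transparent promotion possible.
 */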
#ifdef SMP
/*
 * For SMP, these functions have to use IPIs for coherence.
 *
 * In general, the calling thread uses a plain fence to order the
 * writes to the page tables before invoking an SBI callback to invoke
 * sfence_vma() on remote CPUs.
 */
static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	cpuset_t mask;

	sched_pin();
	mask = pmap->pm_active;
	CPU_CLR(PCPU_GET(hart), &mask);
	fence();
	if (!CPU_EMPTY(&mask) && smp_started)
		sbi_remote_sfence_vma(mask.__bits, va, 1);
	sfence_vma_page(va);
	sched_unpin();
}

static void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	cpuset_t mask;

	sched_pin();
	mask = pmap->pm_active;
	CPU_CLR(PCPU_GET(hart), &mask);
	fence();
	if (!CPU_EMPTY(&mask) && smp_started)
		sbi_remote_sfence_vma(mask.__bits, sva, eva - sva + 1);

	/*
	 * Might consider a loop of sfence_vma_page() for a small
	 * number of pages in the future.
	 */
	sfence_vma();
	sched_unpin();
}

static void
pmap_invalidate_all(pmap_t pmap)
{
	cpuset_t mask;

	sched_pin();
	mask = pmap->pm_active;
	CPU_CLR(PCPU_GET(hart), &mask);

	/*
	 * XXX: The SBI doc doesn't detail how to specify x0 as the
	 * address to perform a global fence.  BBL currently treats
	 * all sfence_vma requests as global however.
	 */
	fence();
	if (!CPU_EMPTY(&mask) && smp_started)
		sbi_remote_sfence_vma(mask.__bits, 0, 0);
	sfence_vma();
	sched_unpin();
}
#else
/*
 * Normal, non-SMP, invalidation functions.
 * We inline these within pmap.c for speed.
 */
static __inline void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	sfence_vma_page(va);
}

static __inline void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	/*
	 * Might consider a loop of sfence_vma_page() for a small
	 * number of pages in the future.
	 */
	sfence_vma();
}

static __inline void
pmap_invalidate_all(pmap_t pmap)
{

	sfence_vma();
}
#endif
/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l2p, l2;
	pt_entry_t *l3p, l3;
	vm_paddr_t pa;

	pa = 0;
	PMAP_LOCK(pmap);
	/*
	 * Start with the l2 table.  We are unable to allocate
	 * pages in the l1 table.
	 */
	l2p = pmap_l2(pmap, va);
	if (l2p != NULL && ((l2 = pmap_load(l2p)) & PTE_V) != 0) {
		if ((l2 & PTE_RX) == 0) {
			l3p = pmap_l2_to_l3(l2p, va);
			if (l3p != NULL) {
				l3 = pmap_load(l3p);
				pa = PTE_TO_PHYS(l3);
				pa |= (va & L3_OFFSET);
			}
		} else {
			/* L2 is a superpage mapping. */
			pa = L2PTE_TO_PHYS(l2);
			pa |= (va & L2_OFFSET);
		}
	}
	PMAP_UNLOCK(pmap);
	return (pa);
}
/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pt_entry_t *l3p, l3;
	vm_paddr_t phys;
	vm_page_t m;

	m = NULL;
	PMAP_LOCK(pmap);
	l3p = pmap_l3(pmap, va);
	if (l3p != NULL && (l3 = pmap_load(l3p)) != 0) {
		if ((l3 & PTE_W) != 0 || (prot & VM_PROT_WRITE) == 0) {
			phys = PTE_TO_PHYS(l3);
			m = PHYS_TO_VM_PAGE(phys);
			if (!vm_page_wire_mapped(m))
				m = NULL;
		}
	}
	PMAP_UNLOCK(pmap);
	return (m);
}
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	pd_entry_t *l2, l2e;
	pt_entry_t *l3;
	vm_paddr_t pa;

	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
		pa = DMAP_TO_PHYS(va);
	} else {
		l2 = pmap_l2(kernel_pmap, va);
		if (l2 == NULL)
			panic("pmap_kextract: No l2");
		l2e = pmap_load(l2);
		/*
		 * Beware of concurrent promotion and demotion! We must
		 * use l2e rather than loading from l2 multiple times to
		 * ensure we see a consistent state, including the
		 * implicit load in pmap_l2_to_l3.  It is, however, safe
		 * to use an old l2e because the L3 page is preserved by
		 * promotion.
		 */
		if ((l2e & PTE_RX) != 0) {
			/* L2 is a superpage mapping. */
			pa = L2PTE_TO_PHYS(l2e);
			pa |= (va & L2_OFFSET);
			return (pa);
		}

		l3 = pmap_l2_to_l3(&l2e, va);
		if (l3 == NULL)
			panic("pmap_kextract: No l3...");
		pa = PTE_TO_PHYS(pmap_load(l3));
		pa |= (va & PAGE_MASK);
	}
	return (pa);
}
/***************************************************
 * Low level mapping routines.....
 ***************************************************/

void
pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode __unused)
{
	pt_entry_t entry;
	pt_entry_t *l3;
	vm_offset_t va;
	pn_t pn;

	KASSERT((pa & L3_OFFSET) == 0,
	    ("pmap_kenter_device: Invalid physical address"));
	KASSERT((sva & L3_OFFSET) == 0,
	    ("pmap_kenter_device: Invalid virtual address"));
	KASSERT((size & PAGE_MASK) == 0,
	    ("pmap_kenter_device: Mapping is not page-sized"));

	va = sva;
	while (size != 0) {
		l3 = pmap_l3(kernel_pmap, va);
		KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));

		pn = (pa / PAGE_SIZE);
		entry = PTE_KERN;
		entry |= (pn << PTE_PPN0_S);
		pmap_store(l3, entry);

		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}

void
pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
{
	pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE);
}
/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *l3;

	l3 = pmap_l3(kernel_pmap, va);
	KASSERT(l3 != NULL, ("pmap_kremove: Invalid address"));

	pmap_clear(l3);
	sfence_vma();
}

void
pmap_kremove_device(vm_offset_t sva, vm_size_t size)
{
	pt_entry_t *l3;
	vm_offset_t va;

	KASSERT((sva & L3_OFFSET) == 0,
	    ("pmap_kremove_device: Invalid virtual address"));
	KASSERT((size & PAGE_MASK) == 0,
	    ("pmap_kremove_device: Mapping is not page-sized"));

	va = sva;
	while (size != 0) {
		l3 = pmap_l3(kernel_pmap, va);
		KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
		pmap_clear(l3);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	pmap_invalidate_range(kernel_pmap, sva, va);
}
/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping. Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged. Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	return PHYS_TO_DMAP(start);
}
/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
	pt_entry_t *l3, entry;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	pn_t pn;
	int i;

	va = sva;
	for (i = 0; i < count; i++) {
		m = ma[i];
		pa = VM_PAGE_TO_PHYS(m);
		pn = (pa / PAGE_SIZE);
		l3 = pmap_l3(kernel_pmap, va);

		entry = PTE_KERN;
		entry |= (pn << PTE_PPN0_S);
		pmap_store(l3, entry);

		va += L3_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}
/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	pt_entry_t *l3;
	vm_offset_t va;

	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));

	for (va = sva; count-- > 0; va += PAGE_SIZE) {
		l3 = pmap_l3(kernel_pmap, va);
		KASSERT(l3 != NULL, ("pmap_kremove: Invalid address"));
		pmap_clear(l3);
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}

bool
pmap_ps_enabled(pmap_t pmap __unused)
{

	return (superpages_enabled);
}
/***************************************************
 * Page table page management routines.....
 ***************************************************/
/*
 * Schedule the specified unused page table page to be freed.  Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}
/*
 * Inserts the specified page table page into the specified pmap's collection
 * of idle page table pages.  Each of a pmap's page table pages is responsible
 * for mapping a distinct range of virtual addresses.  The pmap's collection is
 * ordered by this virtual address range.
 *
 * If "promoted" is false, then the page table page "ml3" must be zero filled.
 */
static __inline int
pmap_insert_pt_page(pmap_t pmap, vm_page_t ml3, bool promoted)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	ml3->valid = promoted ? VM_PAGE_BITS_ALL : 0;
	return (vm_radix_insert(&pmap->pm_root, ml3));
}

/*
 * Removes the page table page mapping the specified virtual address from the
 * specified pmap's collection of idle page table pages, and returns it.
 * Otherwise, returns NULL if there is no page table page corresponding to the
 * specified virtual address.
 */
static __inline vm_page_t
pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
}
/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page.  If the reference count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	--m->ref_count;
	if (m->ref_count == 0) {
		_pmap_unwire_ptp(pmap, va, m, free);
		return (TRUE);
	}
	return (FALSE);
}

static void
_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
	vm_paddr_t phys;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if (m->pindex >= NUL2E) {
		pd_entry_t *l1;

		l1 = pmap_l1(pmap, va);
		pmap_clear(l1);
		pmap_distribute_l1(pmap, pmap_l1_index(va), 0);
	} else {
		pd_entry_t *l2;

		l2 = pmap_l2(pmap, va);
		pmap_clear(l2);
	}
	pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUL2E) {
		pd_entry_t *l1;
		vm_page_t pdpg;

		l1 = pmap_l1(pmap, va);
		phys = PTE_TO_PHYS(pmap_load(l1));
		pdpg = PHYS_TO_VM_PAGE(phys);
		pmap_unwire_ptp(pmap, va, pdpg, free);
	}
	pmap_invalidate_page(pmap, va);

	vm_wire_sub(1);

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	pmap_add_delayed_free_list(m, free, TRUE);
}
/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the reference count.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
    struct spglist *free)
{
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return (0);
	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
	mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(ptepde));
	return (pmap_unwire_ptp(pmap, va, mpte, free));
}
void
pmap_pinit0(pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_l1 = kernel_pmap->pm_l1;
	pmap->pm_satp = SATP_MODE_SV39 | (vtophys(pmap->pm_l1) >> PAGE_SHIFT);
	CPU_ZERO(&pmap->pm_active);
	pmap_activate_boot(pmap);
}
int
pmap_pinit(pmap_t pmap)
{
	vm_paddr_t l1phys;
	vm_page_t l1pt;

	/*
	 * allocate the l1 page
	 */
	l1pt = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
	    VM_ALLOC_WAITOK);

	l1phys = VM_PAGE_TO_PHYS(l1pt);
	pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
	pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);

	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));

	CPU_ZERO(&pmap->pm_active);

	mtx_lock(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
	mtx_unlock(&allpmaps_lock);

	memcpy(pmap->pm_l1, kernel_pmap->pm_l1, PAGE_SIZE);

	vm_radix_init(&pmap->pm_root);

	return (1);
}
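/*
 * pm_satp above is the value loaded into the satp CSR when this pmap is
 * activated: the Sv39 translation mode in the high bits and the physical
 * page number of the root (L1) page table in the low bits.
 */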
/*
 * This routine is called if the desired page table page does not exist.
 *
 * If page table page allocation fails, this routine may sleep before
 * returning NULL.  It sleeps only if a lock pointer was given.
 *
 * Note: If a page allocation fails at page table level two or three,
 * one or two pages may be held during the wait, only to be released
 * afterwards.  This conservative approach is easily argued to avoid
 * race conditions.
 */
static vm_page_t
_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
{
	vm_page_t m, /*pdppg, */pdpg;
	pt_entry_t entry;
	vm_paddr_t phys;
	pn_t pn;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Allocate a page table page.
	 */
	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m == NULL) {
		if (lockp != NULL) {
			RELEASE_PV_LIST_LOCK(lockp);
			PMAP_UNLOCK(pmap);
			rw_runlock(&pvh_global_lock);
			vm_wait(NULL);
			rw_rlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}

		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	m->pindex = ptepindex;

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */
	if (ptepindex >= NUL2E) {
		pd_entry_t *l1;
		vm_pindex_t l1index;

		l1index = ptepindex - NUL2E;
		l1 = &pmap->pm_l1[l1index];
		KASSERT((pmap_load(l1) & PTE_V) == 0,
		    ("%s: L1 entry %#lx is valid", __func__, pmap_load(l1)));

		pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE);
		entry = (PTE_V);
		entry |= (pn << PTE_PPN0_S);
		pmap_store(l1, entry);
		pmap_distribute_l1(pmap, l1index, entry);
	} else {
		vm_pindex_t l1index;
		pd_entry_t *l1, *l2;

		l1index = ptepindex >> (L1_SHIFT - L2_SHIFT);
		l1 = &pmap->pm_l1[l1index];
		if (pmap_load(l1) == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + l1index,
			    lockp) == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			phys = PTE_TO_PHYS(pmap_load(l1));
			pdpg = PHYS_TO_VM_PAGE(phys);
			pdpg->ref_count++;
		}

		phys = PTE_TO_PHYS(pmap_load(l1));
		l2 = (pd_entry_t *)PHYS_TO_DMAP(phys);
		l2 = &l2[ptepindex & Ln_ADDR_MASK];
		KASSERT((pmap_load(l2) & PTE_V) == 0,
		    ("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));

		pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE);
		entry = (PTE_V);
		entry |= (pn << PTE_PPN0_S);
		pmap_store(l2, entry);
	}

	pmap_resident_count_inc(pmap, 1);

	return (m);
}
static vm_page_t
pmap_alloc_l2(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
{
	pd_entry_t *l1;
	vm_page_t l2pg;
	vm_pindex_t l2pindex;

retry:
	l1 = pmap_l1(pmap, va);
	if (l1 != NULL && (pmap_load(l1) & PTE_V) != 0) {
		KASSERT((pmap_load(l1) & PTE_RWX) == 0,
		    ("%s: L1 entry %#lx for VA %#lx is a leaf", __func__,
		    pmap_load(l1), va));
		/* Add a reference to the L2 page. */
		l2pg = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l1)));
		l2pg->ref_count++;
	} else {
		/* Allocate a L2 page. */
		l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
		l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
		if (l2pg == NULL && lockp != NULL)
			goto retry;
	}
	return (l2pg);
}
static vm_page_t
pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
{
	vm_pindex_t ptepindex;
	pd_entry_t *l2;
	vm_paddr_t phys;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = pmap_l2_pindex(va);
retry:
	/*
	 * Get the page directory entry
	 */
	l2 = pmap_l2(pmap, va);

	/*
	 * If the page table page is mapped, we just increment the
	 * hold count, and activate it.
	 */
	if (l2 != NULL && pmap_load(l2) != 0) {
		phys = PTE_TO_PHYS(pmap_load(l2));
		m = PHYS_TO_VM_PAGE(phys);
		m->ref_count++;
	} else {
		/*
		 * Here if the pte page isn't mapped, or if it has been
		 * deallocated.
		 */
		m = _pmap_alloc_l3(pmap, ptepindex, lockp);
		if (m == NULL && lockp != NULL)
			goto retry;
	}
	return (m);
}
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	vm_page_t m;

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	KASSERT(CPU_EMPTY(&pmap->pm_active),
	    ("releasing active pmap %p", pmap));

	mtx_lock(&allpmaps_lock);
	LIST_REMOVE(pmap, pm_list);
	mtx_unlock(&allpmaps_lock);

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l1));
	vm_page_unwire_noq(m);
	vm_page_free(m);
}
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_size, "LU",
    "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_free, "LU",
    "Amount of KVM free");
/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	vm_paddr_t paddr;
	vm_page_t nkpg;
	pd_entry_t *l1, *l2;
	pt_entry_t entry;
	pn_t pn;

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);

	addr = roundup2(addr, L2_SIZE);
	if (addr - 1 >= vm_map_max(kernel_map))
		addr = vm_map_max(kernel_map);
	while (kernel_vm_end < addr) {
		l1 = pmap_l1(kernel_pmap, kernel_vm_end);
		if (pmap_load(l1) == 0) {
			/* We need a new PDP entry */
			nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
			if (nkpg == NULL)
				panic("pmap_growkernel: no memory to grow kernel");
			nkpg->pindex = kernel_vm_end >> L1_SHIFT;
			paddr = VM_PAGE_TO_PHYS(nkpg);

			pn = (paddr / PAGE_SIZE);
			entry = (PTE_V);
			entry |= (pn << PTE_PPN0_S);
			pmap_store(l1, entry);
			pmap_distribute_l1(kernel_pmap,
			    pmap_l1_index(kernel_vm_end), entry);
			continue; /* try again */
		}
		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
		if ((pmap_load(l2) & PTE_V) != 0 &&
		    (pmap_load(l2) & PTE_RWX) == 0) {
			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
				kernel_vm_end = vm_map_max(kernel_map);
				break;
			}
			continue;
		}

		nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		if (nkpg == NULL)
			panic("pmap_growkernel: no memory to grow kernel");
		nkpg->pindex = kernel_vm_end >> L2_SHIFT;
		paddr = VM_PAGE_TO_PHYS(nkpg);

		pn = (paddr / PAGE_SIZE);
		entry = (PTE_V);
		entry |= (pn << PTE_PPN0_S);
		pmap_store(l2, entry);

		pmap_invalidate_page(kernel_pmap, kernel_vm_end);

		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
			kernel_vm_end = vm_map_max(kernel_map);
			break;
		}
	}
}
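/*
 * Kernel VA is grown in L2_SIZE (2MB) steps: each new L3 page table maps
 * 2MB, so kernel_vm_end stays 2MB-aligned and a single page allocation per
 * step suffices, plus, rarely, a new L2 page when an L1 slot is empty.
 */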
/***************************************************
 * page management routines.
 ***************************************************/

CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
CTASSERT(_NPCM == 3);
CTASSERT(_NPCPV == 168);

static __inline struct pv_chunk *
pv_to_chunk(pv_entry_t pv)
{

	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
}

#define	PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)

#define	PC_FREE0	0xfffffffffffffffful
#define	PC_FREE1	0xfffffffffffffffful
#define	PC_FREE2	0x000000fffffffffful

static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
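/*
 * A pv_chunk occupies exactly one page (see the CTASSERTs above) and
 * carries _NPCPV == 168 pv entries, tracked by a three-word free bitmap:
 * the first two words cover 64 entries each and the third covers the
 * remaining 40 (64 + 64 + 40 == 168), which is why PC_FREE2 has only its
 * low 40 bits set.
 */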
#ifdef PV_STATS
static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
	"Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
	"Current number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
	"Current number of pv entry chunks frees");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
	"Number of times tried to get a chunk page but failed.");

static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
static int pv_entry_spare;

SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
	"Current number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
	"Current number of pv entry allocs");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
	"Current number of pv entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
	"Current number of spare pv entries");
#endif
/*
 * We are in a serious low memory condition.  Resort to
 * drastic measures to free some pages so we can allocate
 * another pv entry chunk.
 *
 * Returns NULL if PV entries were reclaimed from the specified pmap.
 *
 * We do not, however, unmap 2mpages because subsequent accesses will
 * allocate per-page pv entries until repromotion occurs, thereby
 * exacerbating the shortage of free pv entries.
 */
static vm_page_t
reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
{

	panic("RISCVTODO: reclaim_pv_chunk");
}
1597 * free the pv_entry back to the free list
1600 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1602 struct pv_chunk *pc;
1603 int idx, field, bit;
1605 rw_assert(&pvh_global_lock, RA_LOCKED);
1606 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1607 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1608 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1609 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1610 pc = pv_to_chunk(pv);
1611 idx = pv - &pc->pc_pventry[0];
1614 pc->pc_map[field] |= 1ul << bit;
1615 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
1616 pc->pc_map[2] != PC_FREE2) {
1617 /* 98% of the time, pc is already at the head of the list. */
1618 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1619 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1620 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1624 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1629 free_pv_chunk(struct pv_chunk *pc)
1633 mtx_lock(&pv_chunks_mutex);
1634 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1635 mtx_unlock(&pv_chunks_mutex);
1636 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1637 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1638 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1639 /* entire chunk is free, return it */
1640 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1641 dump_drop_page(m->phys_addr);
1642 vm_page_unwire_noq(m);
/*
 * Returns a new PV entry, allocating a new PV chunk from the system when
 * needed.  If this PV chunk allocation fails and a PV list lock pointer was
 * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
 * returned.
 *
 * The given PV list lock may be released.
 */
static pv_entry_t
get_pv_entry(pmap_t pmap, struct rwlock **lockp)
{
	int bit, field;
	pv_entry_t pv;
	struct pv_chunk *pc;
	vm_page_t m;

	rw_assert(&pvh_global_lock, RA_LOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
retry:
	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
	if (pc != NULL) {
		for (field = 0; field < _NPCM; field++) {
			if (pc->pc_map[field]) {
				bit = ffsl(pc->pc_map[field]) - 1;
				break;
			}
		}
		if (field < _NPCM) {
			pv = &pc->pc_pventry[field * 64 + bit];
			pc->pc_map[field] &= ~(1ul << bit);
			/* If this was the last item, move it to tail */
			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
			    pc->pc_map[2] == 0) {
				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
				    pc_list);
			}
			PV_STAT(atomic_add_long(&pv_entry_count, 1));
			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
			return (pv);
		}
	}
	/* No free items, allocate another chunk */
	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
	if (m == NULL) {
		if (lockp == NULL) {
			PV_STAT(pc_chunk_tryfail++);
			return (NULL);
		}
		m = reclaim_pv_chunk(pmap, lockp);
		if (m == NULL)
			goto retry;
	}
	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
	dump_add_page(m->phys_addr);
	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
	pc->pc_pmap = pmap;
	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
	pc->pc_map[1] = PC_FREE1;
	pc->pc_map[2] = PC_FREE2;
	mtx_lock(&pv_chunks_mutex);
	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
	mtx_unlock(&pv_chunks_mutex);
	pv = &pc->pc_pventry[0];
	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
	PV_STAT(atomic_add_long(&pv_entry_count, 1));
	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
	return (pv);
}
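/*
 * pv entries are thus allocated chunk-first: the front of the per-pmap
 * chunk list is scanned for a free bitmap bit, fully used chunks are
 * rotated to the tail, and only when every chunk is exhausted is a new
 * page allocated, or, when a lock pointer was supplied, reclaimed from
 * another pmap.
 */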
/*
 * Ensure that the number of spare PV entries in the specified pmap meets or
 * exceeds the given count, "needed".
 *
 * The given PV list lock may be released.
 */
static void
reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
{
	struct pch new_tail;
	struct pv_chunk *pc;
	vm_page_t m;
	int avail, free;
	bool reclaimed;

	rw_assert(&pvh_global_lock, RA_LOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));

	/*
	 * Newly allocated PV chunks must be stored in a private list until
	 * the required number of PV chunks have been allocated.  Otherwise,
	 * reclaim_pv_chunk() could recycle one of these chunks.  In
	 * contrast, these chunks must be added to the pmap upon allocation.
	 */
	TAILQ_INIT(&new_tail);
retry:
	avail = 0;
	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
		bit_count((bitstr_t *)pc->pc_map, 0,
		    sizeof(pc->pc_map) * NBBY, &free);
		if (free == 0)
			break;
		avail += free;
		if (avail >= needed)
			break;
	}
	for (reclaimed = false; avail < needed; avail += _NPCPV) {
		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
		if (m == NULL) {
			m = reclaim_pv_chunk(pmap, lockp);
			if (m == NULL)
				goto retry;
			reclaimed = true;
		}
		dump_add_page(m->phys_addr);
		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
		pc->pc_pmap = pmap;
		pc->pc_map[0] = PC_FREE0;
		pc->pc_map[1] = PC_FREE1;
		pc->pc_map[2] = PC_FREE2;
		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
		TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);

		/*
		 * The reclaim might have freed a chunk from the current pmap.
		 * If that chunk contained available entries, we need to
		 * re-count the number of available entries.
		 */
		if (reclaimed)
			goto retry;
	}
	if (!TAILQ_EMPTY(&new_tail)) {
		mtx_lock(&pv_chunks_mutex);
		TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
		mtx_unlock(&pv_chunks_mutex);
	}
}
/*
 * First find and then remove the pv entry for the specified pmap and virtual
 * address from the specified pv list.  Returns the pv entry if found and NULL
 * otherwise.  This operation can be performed on pv lists for either 4KB or
 * 2MB page mappings.
 */
static __inline pv_entry_t
pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	rw_assert(&pvh_global_lock, RA_LOCKED);
	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
			pvh->pv_gen++;
			break;
		}
	}
	return (pv);
}

/*
 * First find and then destroy the pv entry for the specified pmap and virtual
 * address.  This operation can be performed on pv lists for either 4KB or 2MB
 * page mappings.
 */
static void
pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	pv = pmap_pvh_remove(pvh, pmap, va);

	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found for %#lx", va));
	free_pv_entry(pmap, pv);
}
/*
 * Conditionally create the PV entry for a 4KB page mapping if the required
 * memory can be allocated without resorting to reclamation.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct rwlock **lockp)
{
	pv_entry_t pv;

	rw_assert(&pvh_global_lock, RA_LOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/* Pass NULL instead of the lock pointer to disable reclamation. */
	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
		pv->pv_va = va;
		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
		m->md.pv_gen++;
		return (TRUE);
	} else
		return (FALSE);
}
/*
 * After demotion from a 2MB page mapping to 512 4KB page mappings,
 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
 * entries for each of the 4KB page mappings.
 */
static void __unused
pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    struct rwlock **lockp)
{
	struct md_page *pvh;
	struct pv_chunk *pc;
	pv_entry_t pv;
	vm_page_t m;
	vm_offset_t va_last;
	int bit, field;

	rw_assert(&pvh_global_lock, RA_LOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);

	/*
	 * Transfer the 2mpage's pv entry for this mapping to the first
	 * page's pv list.  Once this transfer begins, the pv list lock
	 * must not be released until the last pv entry is reinstantiated.
	 */
	pvh = pa_to_pvh(pa);
	va &= ~L2_OFFSET;
	pv = pmap_pvh_remove(pvh, pmap, va);
	KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
	m = PHYS_TO_VM_PAGE(pa);
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
	m->md.pv_gen++;
	/* Instantiate the remaining 511 pv entries. */
	va_last = va + L2_SIZE - PAGE_SIZE;
	for (;;) {
		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
		KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
		    pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
		for (field = 0; field < _NPCM; field++) {
			while (pc->pc_map[field] != 0) {
				bit = ffsl(pc->pc_map[field]) - 1;
				pc->pc_map[field] &= ~(1ul << bit);
				pv = &pc->pc_pventry[field * 64 + bit];
				va += PAGE_SIZE;
				pv->pv_va = va;
				m++;
				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
				    ("pmap_pv_demote_l2: page %p is not managed", m));
				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
				m->md.pv_gen++;
				if (va == va_last)
					goto out;
			}
		}
		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
	}
out:
	if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
	}
}
#if VM_NRESERVLEVEL > 0
static void
pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    struct rwlock **lockp)
{
	struct md_page *pvh;
	pv_entry_t pv;
	vm_page_t m;
	vm_offset_t va_last;

	rw_assert(&pvh_global_lock, RA_LOCKED);
	KASSERT((va & L2_OFFSET) == 0,
	    ("pmap_pv_promote_l2: misaligned va %#lx", va));

	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);

	m = PHYS_TO_VM_PAGE(pa);
	pv = pmap_pvh_remove(&m->md, pmap, va);
	KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv for %#lx not found", va));
	pvh = pa_to_pvh(pa);
	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
	pvh->pv_gen++;

	va_last = va + L2_SIZE - PAGE_SIZE;
	do {
		m++;
		va += PAGE_SIZE;
		pmap_pvh_free(&m->md, pmap, va);
	} while (va < va_last);
}
#endif /* VM_NRESERVLEVEL > 0 */
/*
 * Create the PV entry for a 2MB page mapping.  Always returns true unless the
 * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
 * false if the PV entry cannot be allocated without resorting to reclamation.
 */
static bool
pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
    struct rwlock **lockp)
{
	struct md_page *pvh;
	pv_entry_t pv;
	vm_paddr_t pa;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/* Pass NULL instead of the lock pointer to disable reclamation. */
	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
	    NULL : lockp)) == NULL)
		return (false);
	pv->pv_va = va;
	pa = PTE_TO_PHYS(l2e);
	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
	pvh = pa_to_pvh(pa);
	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
	pvh->pv_gen++;
	return (true);
}
static void
pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
{
	pt_entry_t newl2, oldl2;
	vm_page_t ml3;
	vm_paddr_t ml3pa;

	KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
	KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	ml3 = pmap_remove_pt_page(pmap, va);
	if (ml3 == NULL)
		panic("pmap_remove_kernel_l2: Missing pt page");

	ml3pa = VM_PAGE_TO_PHYS(ml3);
	newl2 = ml3pa | PTE_V;

	/*
	 * If this page table page was unmapped by a promotion, then it
	 * contains valid mappings.  Zero it to invalidate those mappings.
	 */
	if (ml3->valid != 0)
		pagezero((void *)PHYS_TO_DMAP(ml3pa));

	/*
	 * Demote the mapping.
	 */
	oldl2 = pmap_load_store(l2, newl2);
	KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
	    __func__, l2, oldl2));
}
/*
 * pmap_remove_l2: Do the things to unmap a level 2 superpage.
 */
static int
pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
    pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
{
	struct md_page *pvh;
	pt_entry_t oldl2;
	vm_offset_t eva, va;
	vm_page_t m, ml3;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
	oldl2 = pmap_load_clear(l2);
	KASSERT((oldl2 & PTE_RWX) != 0,
	    ("pmap_remove_l2: L2e %lx is not a superpage mapping", oldl2));

	/*
	 * The sfence.vma documentation states that it is sufficient to specify
	 * a single address within a superpage mapping.  However, since we do
	 * not perform any invalidation upon promotion, TLBs may still be
	 * caching 4KB mappings within the superpage, so we must invalidate the
	 * entire range.
	 */
	pmap_invalidate_range(pmap, sva, sva + L2_SIZE);
	if ((oldl2 & PTE_SW_WIRED) != 0)
		pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
	pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
	if ((oldl2 & PTE_SW_MANAGED) != 0) {
		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, PTE_TO_PHYS(oldl2));
		pvh = pa_to_pvh(PTE_TO_PHYS(oldl2));
		pmap_pvh_free(pvh, pmap, sva);
		eva = sva + L2_SIZE;
		for (va = sva, m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(oldl2));
		    va < eva; va += PAGE_SIZE, m++) {
			if ((oldl2 & PTE_D) != 0)
				vm_page_dirty(m);
			if ((oldl2 & PTE_A) != 0)
				vm_page_aflag_set(m, PGA_REFERENCED);
			if (TAILQ_EMPTY(&m->md.pv_list) &&
			    TAILQ_EMPTY(&pvh->pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);
		}
	}
	if (pmap == kernel_pmap) {
		pmap_remove_kernel_l2(pmap, l2, sva);
	} else {
		ml3 = pmap_remove_pt_page(pmap, sva);
		if (ml3 != NULL) {
			KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
			    ("pmap_remove_l2: l3 page not promoted"));
			pmap_resident_count_dec(pmap, 1);
			KASSERT(ml3->ref_count == Ln_ENTRIES,
			    ("pmap_remove_l2: l3 page ref count error"));
			ml3->ref_count = 1;
			vm_page_unwire_noq(ml3);
			pmap_add_delayed_free_list(ml3, free, FALSE);
		}
	}
	return (pmap_unuse_pt(pmap, sva, l1e, free));
}
/*
 * pmap_remove_l3: do the things to unmap a page in a process
 */
static int
pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
    pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
{
	struct md_page *pvh;
	pt_entry_t old_l3;
	vm_paddr_t phys;
	vm_page_t m;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	old_l3 = pmap_load_clear(l3);
	pmap_invalidate_page(pmap, va);
	if (old_l3 & PTE_SW_WIRED)
		pmap->pm_stats.wired_count -= 1;
	pmap_resident_count_dec(pmap, 1);
	if (old_l3 & PTE_SW_MANAGED) {
		phys = PTE_TO_PHYS(old_l3);
		m = PHYS_TO_VM_PAGE(phys);
		if ((old_l3 & PTE_D) != 0)
			vm_page_dirty(m);
		if (old_l3 & PTE_A)
			vm_page_aflag_set(m, PGA_REFERENCED);
		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
		pmap_pvh_free(&m->md, pmap, va);
		if (TAILQ_EMPTY(&m->md.pv_list) &&
		    (m->flags & PG_FICTITIOUS) == 0) {
			pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
			if (TAILQ_EMPTY(&pvh->pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);
		}
	}

	return (pmap_unuse_pt(pmap, va, l2e, free));
}
/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct spglist free;
	struct rwlock *lock;
	vm_offset_t va, va_next;
	pd_entry_t *l1, *l2, l2e;
	pt_entry_t *l3;

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pmap->pm_stats.resident_count == 0)
		return;

	SLIST_INIT(&free);

	rw_rlock(&pvh_global_lock);
	PMAP_LOCK(pmap);

	lock = NULL;
	for (; sva < eva; sva = va_next) {
		if (pmap->pm_stats.resident_count == 0)
			break;

		l1 = pmap_l1(pmap, sva);
		if (pmap_load(l1) == 0) {
			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
			if (va_next < sva)
				va_next = eva;
			continue;
		}

		/*
		 * Calculate index for next page table.
		 */
		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
		if (va_next < sva)
			va_next = eva;

		l2 = pmap_l1_to_l2(l1, sva);
		if (l2 == NULL)
			continue;
		if ((l2e = pmap_load(l2)) == 0)
			continue;
		if ((l2e & PTE_RWX) != 0) {
			if (sva + L2_SIZE == va_next && eva >= va_next) {
				(void)pmap_remove_l2(pmap, l2, sva,
				    pmap_load(l1), &free, &lock);
				continue;
			} else if (!pmap_demote_l2_locked(pmap, l2, sva,
			    &lock)) {
				/*
				 * The large page mapping was destroyed.
				 */
				continue;
			}
			l2e = pmap_load(l2);
		}

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being removed.
		 */
		if (va_next > eva)
			va_next = eva;

		va = va_next;
		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
		    sva += L3_SIZE) {
			if (pmap_load(l3) == 0) {
				if (va != va_next) {
					pmap_invalidate_range(pmap, va, sva);
					va = va_next;
				}
				continue;
			}
			if (va == va_next)
				va = sva;
			if (pmap_remove_l3(pmap, l3, sva, l2e, &free, &lock)) {
				sva += L3_SIZE;
				break;
			}
		}
		if (va != va_next)
			pmap_invalidate_range(pmap, va, sva);
	}
	if (lock != NULL)
		rw_wunlock(lock);
	rw_runlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
	vm_page_free_pages_toq(&free, false);
}
/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 *	Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */
void
pmap_remove_all(vm_page_t m)
{
	struct spglist free;
	struct md_page *pvh;
	pmap_t pmap;
	pt_entry_t *l3, l3e;
	pd_entry_t *l2, l2e;
	pv_entry_t pv;
	vm_offset_t va;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_all: page %p is not managed", m));
	SLIST_INIT(&free);
	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
	    pa_to_pvh(VM_PAGE_TO_PHYS(m));

	rw_wlock(&pvh_global_lock);
	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		va = pv->pv_va;
		l2 = pmap_l2(pmap, va);
		(void)pmap_demote_l2(pmap, l2, va);
		PMAP_UNLOCK(pmap);
	}
	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pmap_resident_count_dec(pmap, 1);
		l2 = pmap_l2(pmap, pv->pv_va);
		KASSERT(l2 != NULL, ("pmap_remove_all: no l2 table found"));
		l2e = pmap_load(l2);

		KASSERT((l2e & PTE_RX) == 0,
		    ("pmap_remove_all: found a superpage in %p's pv list", m));

		l3 = pmap_l2_to_l3(l2, pv->pv_va);
		l3e = pmap_load_clear(l3);
		pmap_invalidate_page(pmap, pv->pv_va);
		if (l3e & PTE_SW_WIRED)
			pmap->pm_stats.wired_count--;
		if ((l3e & PTE_A) != 0)
			vm_page_aflag_set(m, PGA_REFERENCED);

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if ((l3e & PTE_D) != 0)
			vm_page_dirty(m);
		pmap_unuse_pt(pmap, pv->pv_va, pmap_load(l2), &free);
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
		m->md.pv_gen++;
		free_pv_entry(pmap, pv);
		PMAP_UNLOCK(pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
	vm_page_free_pages_toq(&free, false);
}
/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pd_entry_t *l1, *l2, l2e;
	pt_entry_t *l3, l3e, mask;
	vm_page_t m, mt;
	vm_paddr_t pa;
	vm_offset_t va_next;
	bool anychanged, pv_lists_locked;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
	    (VM_PROT_WRITE | VM_PROT_EXECUTE))
		return;

	anychanged = false;
	pv_lists_locked = false;
	mask = 0;
	if ((prot & VM_PROT_WRITE) == 0)
		mask |= PTE_W | PTE_D;
	if ((prot & VM_PROT_EXECUTE) == 0)
		mask |= PTE_X;

	PMAP_LOCK(pmap);
resume:
	for (; sva < eva; sva = va_next) {
		l1 = pmap_l1(pmap, sva);
		if (pmap_load(l1) == 0) {
			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
			if (va_next < sva)
				va_next = eva;
			continue;
		}

		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
		if (va_next < sva)
			va_next = eva;

		l2 = pmap_l1_to_l2(l1, sva);
		if (l2 == NULL || (l2e = pmap_load(l2)) == 0)
			continue;
		if ((l2e & PTE_RWX) != 0) {
			if (sva + L2_SIZE == va_next && eva >= va_next) {
retryl2:
				if ((prot & VM_PROT_WRITE) == 0 &&
				    (l2e & (PTE_SW_MANAGED | PTE_D)) ==
				    (PTE_SW_MANAGED | PTE_D)) {
					pa = PTE_TO_PHYS(l2e);
					m = PHYS_TO_VM_PAGE(pa);
					for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
						vm_page_dirty(mt);
				}
				if (!atomic_fcmpset_long(l2, &l2e, l2e & ~mask))
					goto retryl2;
				anychanged = true;
				continue;
			} else {
				if (!pv_lists_locked) {
					pv_lists_locked = true;
					if (!rw_try_rlock(&pvh_global_lock)) {
						if (anychanged)
							pmap_invalidate_all(
							    pmap);
						PMAP_UNLOCK(pmap);
						rw_rlock(&pvh_global_lock);
						goto resume;
					}
				}
				if (!pmap_demote_l2(pmap, l2, sva)) {
					/*
					 * The large page mapping was destroyed.
					 */
					continue;
				}
			}
		}

		if (va_next > eva)
			va_next = eva;

		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
		    sva += L3_SIZE) {
			l3e = pmap_load(l3);
retryl3:
			if ((l3e & PTE_V) == 0)
				continue;
			if ((prot & VM_PROT_WRITE) == 0 &&
			    (l3e & (PTE_SW_MANAGED | PTE_D)) ==
			    (PTE_SW_MANAGED | PTE_D)) {
				m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(l3e));
				vm_page_dirty(m);
			}
			if (!atomic_fcmpset_long(l3, &l3e, l3e & ~mask))
				goto retryl3;
			anychanged = true;
		}
	}
	if (anychanged)
		pmap_invalidate_all(pmap);
	if (pv_lists_locked)
		rw_runlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

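/*
 * Attempt to resolve a page fault entirely within the pmap: if the
 * existing PTE already grants the access described by "ftype", set the
 * accessed (and, for writes, dirty) bits and report success.  This covers
 * both software A/D-bit maintenance and spurious faults caused by stale
 * TLB entries.
 */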
int
pmap_fault(pmap_t pmap, vm_offset_t va, vm_prot_t ftype)
{
	pd_entry_t *l2, l2e;
	pt_entry_t bits, *pte, oldpte;
	int rv;

	rv = KERN_FAILURE;

	PMAP_LOCK(pmap);
	l2 = pmap_l2(pmap, va);
	if (l2 == NULL || ((l2e = pmap_load(l2)) & PTE_V) == 0)
		goto done;
	if ((l2e & PTE_RWX) == 0) {
		pte = pmap_l2_to_l3(l2, va);
		if (pte == NULL || ((oldpte = pmap_load(pte)) & PTE_V) == 0)
			goto done;
	} else {
		pte = l2;
		oldpte = l2e;
	}

	if ((pmap != kernel_pmap && (oldpte & PTE_U) == 0) ||
	    (ftype == VM_PROT_WRITE && (oldpte & PTE_W) == 0) ||
	    (ftype == VM_PROT_EXECUTE && (oldpte & PTE_X) == 0) ||
	    (ftype == VM_PROT_READ && (oldpte & PTE_R) == 0))
		goto done;

	bits = PTE_A;
	if (ftype == VM_PROT_WRITE)
		bits |= PTE_D;

	/*
	 * Spurious faults can occur if the implementation caches invalid
	 * entries in the TLB, or if simultaneous accesses on multiple CPUs
	 * race with each other.
	 */
	if ((oldpte & bits) != bits)
		pmap_store_bits(pte, bits);
	sfence_vma();
	rv = KERN_SUCCESS;
done:
	PMAP_UNLOCK(pmap);
	return (rv);
}

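/*
 * A minimal sketch of the expected call site (assumed; the actual caller
 * lives in the machine-dependent trap code, not in this file):
 *
 *	ftype = ...;	(VM_PROT_WRITE, VM_PROT_EXECUTE, or VM_PROT_READ,
 *			 derived from the fault cause)
 *	if (pmap_fault(map->pmap, va, ftype) == KERN_SUCCESS)
 *		return;		(fault fully handled by the pmap)
 *	(otherwise fall back to the full vm_fault() path)
 */
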
static bool
pmap_demote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va)
{
	struct rwlock *lock;
	bool rv;

	lock = NULL;
	rv = pmap_demote_l2_locked(pmap, l2, va, &lock);
	if (lock != NULL)
		rw_wunlock(lock);
	return (rv);
}

/*
 * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
 * mapping is invalidated.
 */
static bool
pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
    struct rwlock **lockp)
{
	struct spglist free;
	vm_page_t mpte;
	pd_entry_t newl2, oldl2;
	pt_entry_t *firstl3, newl3;
	vm_paddr_t mptepa;
	int i;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	oldl2 = pmap_load(l2);
	KASSERT((oldl2 & PTE_RWX) != 0,
	    ("pmap_demote_l2_locked: oldl2 is not a leaf entry"));
	if ((oldl2 & PTE_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
	    NULL) {
		if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc_noobj(
		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
		    VM_ALLOC_WIRED)) == NULL) {
			SLIST_INIT(&free);
			(void)pmap_remove_l2(pmap, l2, va & ~L2_OFFSET,
			    pmap_load(pmap_l1(pmap, va)), &free, lockp);
			vm_page_free_pages_toq(&free, true);
			CTR2(KTR_PMAP, "pmap_demote_l2_locked: "
			    "failure for va %#lx in pmap %p", va, pmap);
			return (false);
		}
		mpte->pindex = pmap_l2_pindex(va);
		if (va < VM_MAXUSER_ADDRESS) {
			mpte->ref_count = Ln_ENTRIES;
			pmap_resident_count_inc(pmap, 1);
		}
	}
	mptepa = VM_PAGE_TO_PHYS(mpte);
	firstl3 = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
	newl2 = ((mptepa / PAGE_SIZE) << PTE_PPN0_S) | PTE_V;
	KASSERT((oldl2 & PTE_A) != 0,
	    ("pmap_demote_l2_locked: oldl2 is missing PTE_A"));
	KASSERT((oldl2 & (PTE_D | PTE_W)) != PTE_W,
	    ("pmap_demote_l2_locked: oldl2 is missing PTE_D"));
	newl3 = oldl2;

	/*
	 * If the page table page is not leftover from an earlier promotion,
	 * initialize it.
	 */
	if (mpte->valid == 0) {
		for (i = 0; i < Ln_ENTRIES; i++)
			pmap_store(firstl3 + i, newl3 + (i << PTE_PPN0_S));
	}
	KASSERT(PTE_TO_PHYS(pmap_load(firstl3)) == PTE_TO_PHYS(newl3),
	    ("pmap_demote_l2_locked: firstl3 and newl3 map different physical "
	    "addresses"));

	/*
	 * If the mapping has changed attributes, update the page table
	 * entries.
	 */
	if ((pmap_load(firstl3) & PTE_PROMOTE) != (newl3 & PTE_PROMOTE))
		for (i = 0; i < Ln_ENTRIES; i++)
			pmap_store(firstl3 + i, newl3 + (i << PTE_PPN0_S));

	/*
	 * The spare PV entries must be reserved prior to demoting the
	 * mapping, that is, prior to changing the L2 entry.  Otherwise, the
	 * state of the L2 entry and the PV lists will be inconsistent, which
	 * can result in reclaim_pv_chunk() attempting to remove a PV entry from
	 * the wrong PV list and pmap_pv_demote_l2() failing to find the
	 * expected PV entry for the 2MB page mapping that is being demoted.
	 */
	if ((oldl2 & PTE_SW_MANAGED) != 0)
		reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);

	/*
	 * Demote the mapping.
	 */
	pmap_store(l2, newl2);

	/*
	 * Demote the PV entry.
	 */
	if ((oldl2 & PTE_SW_MANAGED) != 0)
		pmap_pv_demote_l2(pmap, va, PTE_TO_PHYS(oldl2), lockp);

	atomic_add_long(&pmap_l2_demotions, 1);
	CTR2(KTR_PMAP, "pmap_demote_l2_locked: success for va %#lx in pmap %p",
	    va, pmap);
	return (true);
}

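/*
 * Worked example of the demotion arithmetic above: with the Sv39 PTE
 * layout, PTE_PPN0_S is 10, so "newl3 + (i << PTE_PPN0_S)" advances the
 * physical page number by exactly i; entry i of the new L3 table thus
 * maps the i-th 4KB page of the old 2MB page with identical attribute
 * bits.
 */
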
#if VM_NRESERVLEVEL > 0
static void
pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
    struct rwlock **lockp)
{
	pt_entry_t *firstl3, firstl3e, *l3, l3e;
	vm_paddr_t pa;
	vm_page_t ml3;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	va &= ~L2_OFFSET;
	KASSERT((pmap_load(l2) & PTE_RWX) == 0,
	    ("pmap_promote_l2: invalid l2 entry %p", l2));

	firstl3 = (pt_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l2)));
	firstl3e = pmap_load(firstl3);
	pa = PTE_TO_PHYS(firstl3e);
	if ((pa & L2_OFFSET) != 0) {
		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx pmap %p",
		    va, pmap);
		atomic_add_long(&pmap_l2_p_failures, 1);
		return;
	}

	/*
	 * Downgrade a clean, writable mapping to read-only to ensure that the
	 * hardware does not set PTE_D while we are comparing PTEs.
	 *
	 * Upon a write access to a clean mapping, the implementation will
	 * either atomically check protections and set PTE_D, or raise a page
	 * fault.  In the latter case, the pmap lock provides atomicity.  Thus,
	 * we do not issue an sfence.vma here and instead rely on pmap_fault()
	 * to do it only if needed.
	 */
	while ((firstl3e & (PTE_W | PTE_D)) == PTE_W) {
		if (atomic_fcmpset_64(firstl3, &firstl3e, firstl3e & ~PTE_W)) {
			firstl3e &= ~PTE_W;
			break;
		}
	}

	pa += PAGE_SIZE;
	for (l3 = firstl3 + 1; l3 < firstl3 + Ln_ENTRIES; l3++) {
		l3e = pmap_load(l3);
		if (PTE_TO_PHYS(l3e) != pa) {
			CTR2(KTR_PMAP,
			    "pmap_promote_l2: failure for va %#lx pmap %p",
			    va, pmap);
			atomic_add_long(&pmap_l2_p_failures, 1);
			return;
		}
		while ((l3e & (PTE_W | PTE_D)) == PTE_W) {
			if (atomic_fcmpset_64(l3, &l3e, l3e & ~PTE_W)) {
				l3e &= ~PTE_W;
				break;
			}
		}
		if ((l3e & PTE_PROMOTE) != (firstl3e & PTE_PROMOTE)) {
			CTR2(KTR_PMAP,
			    "pmap_promote_l2: failure for va %#lx pmap %p",
			    va, pmap);
			atomic_add_long(&pmap_l2_p_failures, 1);
			return;
		}
		pa += PAGE_SIZE;
	}

	ml3 = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
	KASSERT(ml3->pindex == pmap_l2_pindex(va),
	    ("pmap_promote_l2: page table page's pindex is wrong"));
	if (pmap_insert_pt_page(pmap, ml3, true)) {
		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx pmap %p",
		    va, pmap);
		atomic_add_long(&pmap_l2_p_failures, 1);
		return;
	}

	if ((firstl3e & PTE_SW_MANAGED) != 0)
		pmap_pv_promote_l2(pmap, va, PTE_TO_PHYS(firstl3e), lockp);

	pmap_store(l2, firstl3e);

	atomic_add_long(&pmap_l2_promotions, 1);
	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
	    pmap);
}
#endif

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{
	struct rwlock *lock;
	pd_entry_t *l1, *l2, l2e;
	pt_entry_t new_l3, orig_l3;
	pt_entry_t *l3;
	pv_entry_t pv;
	vm_paddr_t opa, pa, l2_pa, l3_pa;
	vm_page_t mpte, om, l2_m, l3_m;
	pt_entry_t entry;
	pn_t l2_pn, l3_pn, pn;
	int rv;
	bool nosleep;

	va = trunc_page(va);
	if ((m->oflags & VPO_UNMANAGED) == 0)
		VM_PAGE_OBJECT_BUSY_ASSERT(m);
	pa = VM_PAGE_TO_PHYS(m);
	pn = (pa / PAGE_SIZE);

	new_l3 = PTE_V | PTE_R | PTE_A;
	if (prot & VM_PROT_EXECUTE)
		new_l3 |= PTE_X;
	if (flags & VM_PROT_WRITE)
		new_l3 |= PTE_D;
	if (prot & VM_PROT_WRITE)
		new_l3 |= PTE_W;
	if (va < VM_MAX_USER_ADDRESS)
		new_l3 |= PTE_U;

	new_l3 |= (pn << PTE_PPN0_S);
	if ((flags & PMAP_ENTER_WIRED) != 0)
		new_l3 |= PTE_SW_WIRED;

	/*
	 * Set modified bit gratuitously for writeable mappings if
	 * the page is unmanaged. We do not want to take a fault
	 * to do the dirty bit accounting for these mappings.
	 */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		if (prot & VM_PROT_WRITE)
			new_l3 |= PTE_D;
	} else
		new_l3 |= PTE_SW_MANAGED;

	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);

	lock = NULL;
	mpte = NULL;
	rw_rlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	if (psind == 1) {
		/* Assert the required virtual and physical alignment. */
		KASSERT((va & L2_OFFSET) == 0,
		    ("pmap_enter: va %#lx unaligned", va));
		KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
		rv = pmap_enter_l2(pmap, va, new_l3, flags, m, &lock);
		goto out;
	}

	l2 = pmap_l2(pmap, va);
	if (l2 != NULL && ((l2e = pmap_load(l2)) & PTE_V) != 0 &&
	    ((l2e & PTE_RWX) == 0 || pmap_demote_l2_locked(pmap, l2,
	    va, &lock))) {
		l3 = pmap_l2_to_l3(l2, va);
		if (va < VM_MAXUSER_ADDRESS) {
			mpte = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
			mpte->ref_count++;
		}
	} else if (va < VM_MAXUSER_ADDRESS) {
		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
		mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
		if (mpte == NULL && nosleep) {
			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
			if (lock != NULL)
				rw_wunlock(lock);
			rw_runlock(&pvh_global_lock);
			PMAP_UNLOCK(pmap);
			return (KERN_RESOURCE_SHORTAGE);
		}
		l3 = pmap_l3(pmap, va);
	} else {
		l3 = pmap_l3(pmap, va);
		/* TODO: This is not optimal, but should mostly work */
		if (l3 == NULL) {
			if (l2 == NULL) {
				l2_m = vm_page_alloc_noobj(VM_ALLOC_WIRED |
				    VM_ALLOC_ZERO);
				if (l2_m == NULL)
					panic("pmap_enter: l2 pte_m == NULL");

				l2_pa = VM_PAGE_TO_PHYS(l2_m);
				l2_pn = (l2_pa / PAGE_SIZE);

				l1 = pmap_l1(pmap, va);
				entry = (PTE_V);
				entry |= (l2_pn << PTE_PPN0_S);
				pmap_store(l1, entry);
				pmap_distribute_l1(pmap, pmap_l1_index(va), entry);
				l2 = pmap_l1_to_l2(l1, va);
			}

			l3_m = vm_page_alloc_noobj(VM_ALLOC_WIRED |
			    VM_ALLOC_ZERO);
			if (l3_m == NULL)
				panic("pmap_enter: l3 pte_m == NULL");

			l3_pa = VM_PAGE_TO_PHYS(l3_m);
			l3_pn = (l3_pa / PAGE_SIZE);
			entry = (PTE_V);
			entry |= (l3_pn << PTE_PPN0_S);
			pmap_store(l2, entry);
			l3 = pmap_l2_to_l3(l2, va);
		}
		pmap_invalidate_page(pmap, va);
	}

	orig_l3 = pmap_load(l3);
	opa = PTE_TO_PHYS(orig_l3);
	pv = NULL;

	/*
	 * Is the specified virtual address already mapped?
	 */
	if ((orig_l3 & PTE_V) != 0) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if ((flags & PMAP_ENTER_WIRED) != 0 &&
		    (orig_l3 & PTE_SW_WIRED) == 0)
			pmap->pm_stats.wired_count++;
		else if ((flags & PMAP_ENTER_WIRED) == 0 &&
		    (orig_l3 & PTE_SW_WIRED) != 0)
			pmap->pm_stats.wired_count--;

		/*
		 * Remove the extra PT page reference.
		 */
		if (mpte != NULL) {
			mpte->ref_count--;
			KASSERT(mpte->ref_count > 0,
			    ("pmap_enter: missing reference to page table page,"
			    " va: 0x%lx", va));
		}

		/*
		 * Has the physical page changed?
		 */
		if (opa == pa) {
			/*
			 * No, might be a protection or wiring change.
			 */
			if ((orig_l3 & PTE_SW_MANAGED) != 0 &&
			    (new_l3 & PTE_W) != 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
			goto validate;
		}

		/*
		 * The physical page has changed.  Temporarily invalidate
		 * the mapping.  This ensures that all threads sharing the
		 * pmap keep a consistent view of the mapping, which is
		 * necessary for the correct handling of COW faults.  It
		 * also permits reuse of the old mapping's PV entry,
		 * avoiding an allocation.
		 *
		 * For consistency, handle unmanaged mappings the same way.
		 */
		orig_l3 = pmap_load_clear(l3);
		KASSERT(PTE_TO_PHYS(orig_l3) == opa,
		    ("pmap_enter: unexpected pa update for %#lx", va));
		if ((orig_l3 & PTE_SW_MANAGED) != 0) {
			om = PHYS_TO_VM_PAGE(opa);

			/*
			 * The pmap lock is sufficient to synchronize with
			 * concurrent calls to pmap_page_test_mappings() and
			 * pmap_ts_referenced().
			 */
			if ((orig_l3 & PTE_D) != 0)
				vm_page_dirty(om);
			if ((orig_l3 & PTE_A) != 0)
				vm_page_aflag_set(om, PGA_REFERENCED);
			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
			pv = pmap_pvh_remove(&om->md, pmap, va);
			KASSERT(pv != NULL,
			    ("pmap_enter: no PV entry for %#lx", va));
			if ((new_l3 & PTE_SW_MANAGED) == 0)
				free_pv_entry(pmap, pv);
			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
			    TAILQ_EMPTY(&om->md.pv_list) &&
			    ((om->flags & PG_FICTITIOUS) != 0 ||
			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
				vm_page_aflag_clear(om, PGA_WRITEABLE);
		}
		pmap_invalidate_page(pmap, va);
		orig_l3 = 0;
	} else {
		/*
		 * Increment the counters.
		 */
		if ((new_l3 & PTE_SW_WIRED) != 0)
			pmap->pm_stats.wired_count++;
		pmap_resident_count_inc(pmap, 1);
	}

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((new_l3 & PTE_SW_MANAGED) != 0) {
		if (pv == NULL) {
			pv = get_pv_entry(pmap, &lock);
			pv->pv_va = va;
		}
		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
		m->md.pv_gen++;
		if ((new_l3 & PTE_W) != 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	}

validate:
	/*
	 * Sync the i-cache on all harts before updating the PTE
	 * if the new PTE is executable.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_sync_icache(pmap, va, PAGE_SIZE);

	/*
	 * Update the L3 entry.
	 */
	if (orig_l3 != 0) {
		orig_l3 = pmap_load_store(l3, new_l3);
		pmap_invalidate_page(pmap, va);
		KASSERT(PTE_TO_PHYS(orig_l3) == pa,
		    ("pmap_enter: invalid update"));
		if ((orig_l3 & (PTE_D | PTE_SW_MANAGED)) ==
		    (PTE_D | PTE_SW_MANAGED))
			vm_page_dirty(m);
	} else {
		pmap_store(l3, new_l3);
	}

#if VM_NRESERVLEVEL > 0
	if (mpte != NULL && mpte->ref_count == Ln_ENTRIES &&
	    pmap_ps_enabled(pmap) &&
	    (m->flags & PG_FICTITIOUS) == 0 &&
	    vm_reserv_level_iffullpop(m) == 0)
		pmap_promote_l2(pmap, l2, va, &lock);
#endif

	rv = KERN_SUCCESS;
out:
	if (lock != NULL)
		rw_wunlock(lock);
	rw_runlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
	return (rv);
}

/*
 * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
 * if successful.  Returns false if (1) a page table page cannot be allocated
 * without sleeping, (2) a mapping already exists at the specified virtual
 * address, or (3) a PV entry cannot be allocated without reclaiming another
 * PV entry.
 */
static bool
pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    struct rwlock **lockp)
{
	pd_entry_t new_l2;
	pn_t pn;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	pn = VM_PAGE_TO_PHYS(m) / PAGE_SIZE;
	new_l2 = (pd_entry_t)((pn << PTE_PPN0_S) | PTE_R | PTE_V);
	if ((m->oflags & VPO_UNMANAGED) == 0)
		new_l2 |= PTE_SW_MANAGED;
	if ((prot & VM_PROT_EXECUTE) != 0)
		new_l2 |= PTE_X;
	if (va < VM_MAXUSER_ADDRESS)
		new_l2 |= PTE_U;
	return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
	    KERN_SUCCESS);
}

/*
 * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
 * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
 * a mapping already exists at the specified virtual address.  Returns
 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
 * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
 *
 * The parameter "m" is only used when creating a managed, writeable mapping.
 */
static int
pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
    vm_page_t m, struct rwlock **lockp)
{
	struct spglist free;
	pd_entry_t *l2, *l3, oldl2;
	vm_offset_t sva;
	vm_page_t l2pg, mt;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
	    NULL : lockp)) == NULL) {
		CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
		    va, pmap);
		return (KERN_RESOURCE_SHORTAGE);
	}

	l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
	l2 = &l2[pmap_l2_index(va)];
	if ((oldl2 = pmap_load(l2)) != 0) {
		KASSERT(l2pg->ref_count > 1,
		    ("pmap_enter_l2: l2pg's ref count is too low"));
		if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
			l2pg->ref_count--;
			CTR2(KTR_PMAP,
			    "pmap_enter_l2: failure for va %#lx in pmap %p",
			    va, pmap);
			return (KERN_FAILURE);
		}
		SLIST_INIT(&free);
		if ((oldl2 & PTE_RWX) != 0)
			(void)pmap_remove_l2(pmap, l2, va,
			    pmap_load(pmap_l1(pmap, va)), &free, lockp);
		else
			for (sva = va; sva < va + L2_SIZE; sva += PAGE_SIZE) {
				l3 = pmap_l2_to_l3(l2, sva);
				if ((pmap_load(l3) & PTE_V) != 0 &&
				    pmap_remove_l3(pmap, l3, sva, oldl2, &free,
				    lockp) != 0)
					break;
			}
		vm_page_free_pages_toq(&free, true);
		if (va >= VM_MAXUSER_ADDRESS) {
			/*
			 * Both pmap_remove_l2() and pmap_remove_l3() will
			 * leave the kernel page table page zero filled.
			 */
			mt = PHYS_TO_VM_PAGE(PTE_TO_PHYS(pmap_load(l2)));
			if (pmap_insert_pt_page(pmap, mt, false))
				panic("pmap_enter_l2: trie insert failed");
		} else
			KASSERT(pmap_load(l2) == 0,
			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
	}

	if ((new_l2 & PTE_SW_MANAGED) != 0) {
		/*
		 * Abort this mapping if its PV entry could not be created.
		 */
		if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
			SLIST_INIT(&free);
			if (pmap_unwire_ptp(pmap, va, l2pg, &free)) {
				/*
				 * Although "va" is not mapped, paging-structure
				 * caches could nonetheless have entries that
				 * refer to the freed page table pages.
				 * Invalidate those entries.
				 */
				pmap_invalidate_page(pmap, va);
				vm_page_free_pages_toq(&free, true);
			}
			CTR2(KTR_PMAP,
			    "pmap_enter_l2: failure for va %#lx in pmap %p",
			    va, pmap);
			return (KERN_RESOURCE_SHORTAGE);
		}
		if ((new_l2 & PTE_W) != 0)
			for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
				vm_page_aflag_set(mt, PGA_WRITEABLE);
	}

	/*
	 * Increment counters.
	 */
	if ((new_l2 & PTE_SW_WIRED) != 0)
		pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
	pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;

	/*
	 * Map the superpage.
	 */
	pmap_store(l2, new_l2);

	atomic_add_long(&pmap_l2_mappings, 1);
	CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
	    va, pmap);

	return (KERN_SUCCESS);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	struct rwlock *lock;
	vm_offset_t va;
	vm_page_t m, mpte;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	mpte = NULL;
	m = m_start;
	lock = NULL;
	rw_rlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		va = start + ptoa(diff);
		if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
		    m->psind == 1 && pmap_ps_enabled(pmap) &&
		    pmap_enter_2mpage(pmap, va, m, prot, &lock))
			m = &m[L2_SIZE / PAGE_SIZE - 1];
		else
			mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
			    &lock);
		m = TAILQ_NEXT(m, listq);
	}
	if (lock != NULL)
		rw_wunlock(lock);
	rw_runlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * but is *MUCH* faster than pmap_enter...
 */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
	struct rwlock *lock;

	lock = NULL;
	rw_rlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
	if (lock != NULL)
		rw_wunlock(lock);
	rw_runlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
{
	struct spglist free;
	vm_paddr_t phys;
	pd_entry_t *l2;
	pt_entry_t *l3, newl3;

	KASSERT(!VA_IS_CLEANMAP(va) ||
	    (m->oflags & VPO_UNMANAGED) != 0,
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	rw_assert(&pvh_global_lock, RA_LOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		vm_pindex_t l2pindex;

		/*
		 * Calculate pagetable page index
		 */
		l2pindex = pmap_l2_pindex(va);
		if (mpte && (mpte->pindex == l2pindex)) {
			mpte->ref_count++;
		} else {
			/*
			 * Get the l2 entry
			 */
			l2 = pmap_l2(pmap, va);

			/*
			 * If the page table page is mapped, we just increment
			 * the hold count, and activate it.  Otherwise, we
			 * attempt to allocate a page table page.  If this
			 * attempt fails, we don't retry.  Instead, we give up.
			 */
			if (l2 != NULL && pmap_load(l2) != 0) {
				phys = PTE_TO_PHYS(pmap_load(l2));
				mpte = PHYS_TO_VM_PAGE(phys);
				mpte->ref_count++;
			} else {
				/*
				 * Pass NULL instead of the PV list lock
				 * pointer, because we don't intend to sleep.
				 */
				mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
				if (mpte == NULL)
					return (mpte);
			}
		}
		l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
		l3 = &l3[pmap_l3_index(va)];
	} else {
		mpte = NULL;
		l3 = pmap_l3(kernel_pmap, va);
	}
	if (l3 == NULL)
		panic("pmap_enter_quick_locked: No l3");
	if (pmap_load(l3) != 0) {
		if (mpte != NULL) {
			mpte->ref_count--;
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0 &&
	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
		if (mpte != NULL) {
			SLIST_INIT(&free);
			if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
				pmap_invalidate_page(pmap, va);
				vm_page_free_pages_toq(&free, false);
			}
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Increment counters
	 */
	pmap_resident_count_inc(pmap, 1);

	newl3 = ((VM_PAGE_TO_PHYS(m) / PAGE_SIZE) << PTE_PPN0_S) |
	    PTE_V | PTE_R;
	if ((prot & VM_PROT_EXECUTE) != 0)
		newl3 |= PTE_X;
	if ((m->oflags & VPO_UNMANAGED) == 0)
		newl3 |= PTE_SW_MANAGED;
	if (va < VM_MAX_USER_ADDRESS)
		newl3 |= PTE_U;

	/*
	 * Sync the i-cache on all harts before updating the PTE
	 * if the new PTE is executable.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_sync_icache(pmap, va, PAGE_SIZE);

	pmap_store(l3, newl3);

	pmap_invalidate_page(pmap, va);
	return (mpte);
}

/*
 * This code maps large physical mmap regions into the
 * processor address space.  Note that some shortcuts
 * are taken, but the code works.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("pmap_object_init_pt: non-device object"));
}

/*
 * Clear the wired attribute from the mappings for the specified range of
 * addresses in the given pmap.  Every valid mapping within that range
 * must have the wired attribute set.  In contrast, invalid mappings
 * cannot have the wired attribute set, so they are ignored.
 *
 * The wired attribute of the page table entry is not a hardware feature,
 * so there is no need to invalidate any TLB entries.
 */
void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t va_next;
	pd_entry_t *l1, *l2, l2e;
	pt_entry_t *l3, l3e;
	bool pv_lists_locked;

	pv_lists_locked = false;
retry:
	PMAP_LOCK(pmap);
	for (; sva < eva; sva = va_next) {
		l1 = pmap_l1(pmap, sva);
		if (pmap_load(l1) == 0) {
			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
			if (va_next < sva)
				va_next = eva;
			continue;
		}

		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
		if (va_next < sva)
			va_next = eva;

		l2 = pmap_l1_to_l2(l1, sva);
		if ((l2e = pmap_load(l2)) == 0)
			continue;
		if ((l2e & PTE_RWX) != 0) {
			if (sva + L2_SIZE == va_next && eva >= va_next) {
				if ((l2e & PTE_SW_WIRED) == 0)
					panic("pmap_unwire: l2 %#jx is missing "
					    "PTE_SW_WIRED", (uintmax_t)l2e);
				pmap_clear_bits(l2, PTE_SW_WIRED);
				continue;
			} else {
				if (!pv_lists_locked) {
					pv_lists_locked = true;
					if (!rw_try_rlock(&pvh_global_lock)) {
						PMAP_UNLOCK(pmap);
						rw_rlock(&pvh_global_lock);
						/* Repeat sva. */
						goto retry;
					}
				}
				if (!pmap_demote_l2(pmap, l2, sva))
					panic("pmap_unwire: demotion failed");
			}
		}

		if (va_next > eva)
			va_next = eva;
		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
		    sva += L3_SIZE) {
			if ((l3e = pmap_load(l3)) == 0)
				continue;
			if ((l3e & PTE_SW_WIRED) == 0)
				panic("pmap_unwire: l3 %#jx is missing "
				    "PTE_SW_WIRED", (uintmax_t)l3e);

			/*
			 * PG_W must be cleared atomically.  Although the pmap
			 * lock synchronizes access to PG_W, another processor
			 * could be setting PG_M and/or PG_A concurrently.
			 */
			pmap_clear_bits(l3, PTE_SW_WIRED);
			pmap->pm_stats.wired_count--;
		}
	}
	if (pv_lists_locked)
		rw_runlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{
}

/*
 *	pmap_zero_page zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.
 */
void
pmap_zero_page(vm_page_t m)
{
	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

	pagezero((void *)va);
}

/*
 *	pmap_zero_page_area zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.
 *
 *	off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

	if (off == 0 && size == PAGE_SIZE)
		pagezero((void *)va);
	else
		bzero((char *)va + off, size);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));

	pagecopy((void *)src, (void *)dst);
}

int unmapped_buf_allowed = 1;

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_page_t m_a, m_b;
	vm_paddr_t p_a, p_b;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		m_a = ma[a_offset >> PAGE_SHIFT];
		p_a = m_a->phys_addr;
		b_pg_offset = b_offset & PAGE_MASK;
		m_b = mb[b_offset >> PAGE_SHIFT];
		p_b = m_b->phys_addr;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		if (__predict_false(!PHYS_IN_DMAP(p_a))) {
			panic("!DMAP a %lx", p_a);
		} else {
			a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
		}
		if (__predict_false(!PHYS_IN_DMAP(p_b))) {
			panic("!DMAP b %lx", p_b);
		} else {
			b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
		}
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{

	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{
}

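/*
 * The two functions above assume that the page is covered by the direct
 * map, so no transient kernel mapping needs to be created or destroyed.
 */
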
/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	struct md_page *pvh;
	struct rwlock *lock;
	pv_entry_t pv;
	int loops = 0;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_page_exists_quick: page %p is not managed", m));
	rv = FALSE;
	rw_rlock(&pvh_global_lock);
	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
	rw_rlock(lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
		if (PV_PMAP(pv) == pmap) {
			rv = TRUE;
			break;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
			if (PV_PMAP(pv) == pmap) {
				rv = TRUE;
				break;
			}
			loops++;
			if (loops >= 16)
				break;
		}
	}
	rw_runlock(lock);
	rw_runlock(&pvh_global_lock);
	return (rv);
}

/*
 *	pmap_page_wired_mappings:
 *
 *	Return the number of managed mappings to the given physical page
 *	that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	struct md_page *pvh;
	struct rwlock *lock;
	pmap_t pmap;
	pd_entry_t *l2;
	pt_entry_t *l3;
	pv_entry_t pv;
	int count, md_gen, pvh_gen;

	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (0);
	rw_rlock(&pvh_global_lock);
	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
	rw_rlock(lock);
restart:
	count = 0;
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
		pmap = PV_PMAP(pv);
		if (!PMAP_TRYLOCK(pmap)) {
			md_gen = m->md.pv_gen;
			rw_runlock(lock);
			PMAP_LOCK(pmap);
			rw_rlock(lock);
			if (md_gen != m->md.pv_gen) {
				PMAP_UNLOCK(pmap);
				goto restart;
			}
		}
		l2 = pmap_l2(pmap, pv->pv_va);
		KASSERT((pmap_load(l2) & PTE_RWX) == 0,
		    ("%s: found a 2mpage in page %p's pv list", __func__, m));
		l3 = pmap_l2_to_l3(l2, pv->pv_va);
		if ((pmap_load(l3) & PTE_SW_WIRED) != 0)
			count++;
		PMAP_UNLOCK(pmap);
	}
	if ((m->flags & PG_FICTITIOUS) == 0) {
		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
			pmap = PV_PMAP(pv);
			if (!PMAP_TRYLOCK(pmap)) {
				md_gen = m->md.pv_gen;
				pvh_gen = pvh->pv_gen;
				rw_runlock(lock);
				PMAP_LOCK(pmap);
				rw_rlock(lock);
				if (md_gen != m->md.pv_gen ||
				    pvh_gen != pvh->pv_gen) {
					PMAP_UNLOCK(pmap);
					goto restart;
				}
			}
			l2 = pmap_l2(pmap, pv->pv_va);
			if ((pmap_load(l2) & PTE_SW_WIRED) != 0)
				count++;
			PMAP_UNLOCK(pmap);
		}
	}
	rw_runlock(lock);
	rw_runlock(&pvh_global_lock);
	return (count);
}

/*
 * Returns true if the given page is mapped individually or as part of
 * a 2mpage.  Otherwise, returns false.
 */
bool
pmap_page_is_mapped(vm_page_t m)
{
	struct rwlock *lock;
	bool rv;

	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (false);
	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
	rw_rlock(lock);
	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
	    ((m->flags & PG_FICTITIOUS) == 0 &&
	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
	rw_runlock(lock);
	return (rv);
}

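/*
 * Helper for pmap_remove_pages(): tear down the PV state for the 4KB page
 * "m" or, when "superpage" is true, for the 2MB superpage starting at "m",
 * adjusting resident counts, releasing any preserved page table page, and
 * clearing PGA_WRITEABLE once the last mapping of a page goes away.
 */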
static void
pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
    struct spglist *free, bool superpage)
{
	struct md_page *pvh;
	vm_page_t mpte, mt;

	if (superpage) {
		pmap_resident_count_dec(pmap, Ln_ENTRIES);
		pvh = pa_to_pvh(m->phys_addr);
		TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
		pvh->pv_gen++;
		if (TAILQ_EMPTY(&pvh->pv_list)) {
			for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
				if (TAILQ_EMPTY(&mt->md.pv_list) &&
				    (mt->a.flags & PGA_WRITEABLE) != 0)
					vm_page_aflag_clear(mt, PGA_WRITEABLE);
		}
		mpte = pmap_remove_pt_page(pmap, pv->pv_va);
		if (mpte != NULL) {
			KASSERT(mpte->valid == VM_PAGE_BITS_ALL,
			    ("pmap_remove_pages: pte page not promoted"));
			pmap_resident_count_dec(pmap, 1);
			KASSERT(mpte->ref_count == Ln_ENTRIES,
			    ("pmap_remove_pages: pte page ref count error"));
			mpte->ref_count = 0;
			pmap_add_delayed_free_list(mpte, free, FALSE);
		}
	} else {
		pmap_resident_count_dec(pmap, 1);
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
		m->md.pv_gen++;
		if (TAILQ_EMPTY(&m->md.pv_list) &&
		    (m->a.flags & PGA_WRITEABLE) != 0) {
			pvh = pa_to_pvh(m->phys_addr);
			if (TAILQ_EMPTY(&pvh->pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);
		}
	}
}

/*
 * Destroy all managed, non-wired mappings in the given user-space
 * pmap.  This pmap cannot be active on any processor besides the
 * caller.
 *
 * This function cannot be applied to the kernel pmap.  Moreover, it
 * is not intended for general use.  It is only to be used during
 * process termination.  Consequently, it can be implemented in ways
 * that make it faster than pmap_remove().  First, it can more quickly
 * destroy mappings by iterating over the pmap's collection of PV
 * entries, rather than searching the page table.  Second, it doesn't
 * have to test and clear the page table entries atomically, because
 * no processor is currently accessing the user address space.  In
 * particular, a page table entry's dirty bit won't change state once
 * this function starts.
 */
void
pmap_remove_pages(pmap_t pmap)
{
	struct spglist free;
	pd_entry_t ptepde;
	pt_entry_t *pte, tpte;
	vm_page_t m, mt;
	pv_entry_t pv;
	struct pv_chunk *pc, *npc;
	struct rwlock *lock;
	int64_t bit;
	uint64_t inuse, bitmask;
	int allfree, field, freed, idx;
	bool superpage;

	lock = NULL;

	SLIST_INIT(&free);
	rw_rlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
		allfree = 1;
		freed = 0;
		for (field = 0; field < _NPCM; field++) {
			inuse = ~pc->pc_map[field] & pc_freemask[field];
			while (inuse != 0) {
				bit = ffsl(inuse) - 1;
				bitmask = 1UL << bit;
				idx = field * 64 + bit;
				pv = &pc->pc_pventry[idx];
				inuse &= ~bitmask;

				pte = pmap_l1(pmap, pv->pv_va);
				ptepde = pmap_load(pte);
				pte = pmap_l1_to_l2(pte, pv->pv_va);
				tpte = pmap_load(pte);
				if ((tpte & PTE_RWX) != 0) {
					superpage = true;
				} else {
					ptepde = tpte;
					pte = pmap_l2_to_l3(pte, pv->pv_va);
					tpte = pmap_load(pte);
					superpage = false;
				}

				/*
				 * We cannot remove wired pages from a
				 * process' mapping at this time.
				 */
				if (tpte & PTE_SW_WIRED) {
					allfree = 0;
					continue;
				}

				m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(tpte));
				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
				    m < &vm_page_array[vm_page_array_size],
				    ("pmap_remove_pages: bad pte %#jx",
				    (uintmax_t)tpte));

				pmap_clear(pte);

				/*
				 * Update the vm_page_t clean/reference bits.
				 */
				if ((tpte & (PTE_D | PTE_W)) ==
				    (PTE_D | PTE_W)) {
					if (superpage)
						for (mt = m;
						    mt < &m[Ln_ENTRIES]; mt++)
							vm_page_dirty(mt);
					else
						vm_page_dirty(m);
				}

				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);

				/* Mark free */
				pc->pc_map[field] |= bitmask;

				pmap_remove_pages_pv(pmap, m, pv, &free,
				    superpage);
				pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
				freed++;
			}
		}
		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
		if (allfree) {
			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
			free_pv_chunk(pc);
		}
	}
	if (lock != NULL)
		rw_wunlock(lock);
	pmap_invalidate_all(pmap);
	rw_runlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
	vm_page_free_pages_toq(&free, false);
}

static boolean_t
pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
{
	struct md_page *pvh;
	struct rwlock *lock;
	pd_entry_t *l2;
	pt_entry_t *l3, mask;
	pv_entry_t pv;
	pmap_t pmap;
	int md_gen, pvh_gen;
	boolean_t rv;

	mask = 0;
	if (modified)
		mask |= PTE_D;
	if (accessed)
		mask |= PTE_A;

	rv = FALSE;
	rw_rlock(&pvh_global_lock);
	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
	rw_rlock(lock);
restart:
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
		pmap = PV_PMAP(pv);
		if (!PMAP_TRYLOCK(pmap)) {
			md_gen = m->md.pv_gen;
			rw_runlock(lock);
			PMAP_LOCK(pmap);
			rw_rlock(lock);
			if (md_gen != m->md.pv_gen) {
				PMAP_UNLOCK(pmap);
				goto restart;
			}
		}
		l2 = pmap_l2(pmap, pv->pv_va);
		KASSERT((pmap_load(l2) & PTE_RWX) == 0,
		    ("%s: found a 2mpage in page %p's pv list", __func__, m));
		l3 = pmap_l2_to_l3(l2, pv->pv_va);
		rv = (pmap_load(l3) & mask) == mask;
		PMAP_UNLOCK(pmap);
		if (rv)
			goto out;
	}
	if ((m->flags & PG_FICTITIOUS) == 0) {
		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
			pmap = PV_PMAP(pv);
			if (!PMAP_TRYLOCK(pmap)) {
				md_gen = m->md.pv_gen;
				pvh_gen = pvh->pv_gen;
				rw_runlock(lock);
				PMAP_LOCK(pmap);
				rw_rlock(lock);
				if (md_gen != m->md.pv_gen ||
				    pvh_gen != pvh->pv_gen) {
					PMAP_UNLOCK(pmap);
					goto restart;
				}
			}
			l2 = pmap_l2(pmap, pv->pv_va);
			rv = (pmap_load(l2) & mask) == mask;
			PMAP_UNLOCK(pmap);
			if (rv)
				goto out;
		}
	}
out:
	rw_runlock(lock);
	rw_runlock(&pvh_global_lock);
	return (rv);
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));

	/*
	 * If the page is not busied then this check is racy.
	 */
	if (!pmap_page_is_write_mapped(m))
		return (FALSE);
	return (pmap_page_test_mappings(m, FALSE, TRUE));
}

/*
 *	pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pt_entry_t *l3;
	boolean_t rv;

	/*
	 * Return TRUE if and only if the L3 entry for the specified virtual
	 * address is allocated but invalid.
	 */
	rv = FALSE;
	PMAP_LOCK(pmap);
	l3 = pmap_l3(pmap, addr);
	if (l3 != NULL && pmap_load(l3) == 0) {
		rv = TRUE;
	}
	PMAP_UNLOCK(pmap);
	return (rv);
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	return (pmap_page_test_mappings(m, TRUE, FALSE));
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{
	struct md_page *pvh;
	struct rwlock *lock;
	pmap_t pmap;
	pd_entry_t *l2;
	pt_entry_t *l3, oldl3, newl3;
	pv_entry_t next_pv, pv;
	vm_offset_t va;
	int md_gen, pvh_gen;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));
	vm_page_assert_busied(m);

	if (!pmap_page_is_write_mapped(m))
		return;
	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
	rw_rlock(&pvh_global_lock);
retry_pv_loop:
	rw_wlock(lock);
	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
		pmap = PV_PMAP(pv);
		if (!PMAP_TRYLOCK(pmap)) {
			pvh_gen = pvh->pv_gen;
			rw_wunlock(lock);
			PMAP_LOCK(pmap);
			rw_wlock(lock);
			if (pvh_gen != pvh->pv_gen) {
				PMAP_UNLOCK(pmap);
				rw_wunlock(lock);
				goto retry_pv_loop;
			}
		}
		va = pv->pv_va;
		l2 = pmap_l2(pmap, va);
		if ((pmap_load(l2) & PTE_W) != 0)
			(void)pmap_demote_l2_locked(pmap, l2, va, &lock);
		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
		    ("inconsistent pv lock %p %p for page %p",
		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
		PMAP_UNLOCK(pmap);
	}
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
		pmap = PV_PMAP(pv);
		if (!PMAP_TRYLOCK(pmap)) {
			pvh_gen = pvh->pv_gen;
			md_gen = m->md.pv_gen;
			rw_wunlock(lock);
			PMAP_LOCK(pmap);
			rw_wlock(lock);
			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
				PMAP_UNLOCK(pmap);
				rw_wunlock(lock);
				goto retry_pv_loop;
			}
		}
		l2 = pmap_l2(pmap, pv->pv_va);
		KASSERT((pmap_load(l2) & PTE_RWX) == 0,
		    ("%s: found a 2mpage in page %p's pv list", __func__, m));
		l3 = pmap_l2_to_l3(l2, pv->pv_va);
		oldl3 = pmap_load(l3);
retry:
		if ((oldl3 & PTE_W) != 0) {
			newl3 = oldl3 & ~(PTE_D | PTE_W);
			if (!atomic_fcmpset_long(l3, &oldl3, newl3))
				goto retry;
			if ((oldl3 & PTE_D) != 0)
				vm_page_dirty(m);
			pmap_invalidate_page(pmap, pv->pv_va);
		}
		PMAP_UNLOCK(pmap);
	}
	rw_wunlock(lock);
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_runlock(&pvh_global_lock);
}

/*
 *	pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	As an optimization, update the page's dirty field if a modified bit is
 *	found while counting reference bits.  This opportunistic update can be
 *	performed at low cost and can eliminate the need for some future calls
 *	to pmap_is_modified().  However, since this function stops after
 *	finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
 *	dirty pages.  Those dirty pages will only be detected by a future call
 *	to pmap_is_modified().
 */
int
pmap_ts_referenced(vm_page_t m)
{
	struct spglist free;
	struct md_page *pvh;
	struct rwlock *lock;
	pv_entry_t pv, pvf;
	pmap_t pmap;
	pd_entry_t *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_paddr_t pa;
	vm_offset_t va;
	int cleared, md_gen, not_cleared, pvh_gen;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	SLIST_INIT(&free);
	cleared = 0;
	pa = VM_PAGE_TO_PHYS(m);
	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);

	lock = PHYS_TO_PV_LIST_LOCK(pa);
	rw_rlock(&pvh_global_lock);
	rw_wlock(lock);
retry:
	not_cleared = 0;
	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
		goto small_mappings;
	pv = pvf;
	do {
		pmap = PV_PMAP(pv);
		if (!PMAP_TRYLOCK(pmap)) {
			pvh_gen = pvh->pv_gen;
			rw_wunlock(lock);
			PMAP_LOCK(pmap);
			rw_wlock(lock);
			if (pvh_gen != pvh->pv_gen) {
				PMAP_UNLOCK(pmap);
				goto retry;
			}
		}
		va = pv->pv_va;
		l2 = pmap_l2(pmap, va);
		l2e = pmap_load(l2);
		if ((l2e & (PTE_W | PTE_D)) == (PTE_W | PTE_D)) {
			/*
			 * Although l2e is mapping a 2MB page, because
			 * this function is called at a 4KB page granularity,
			 * we only update the 4KB page under test.
			 */
			vm_page_dirty(m);
		}
		if ((l2e & PTE_A) != 0) {
			/*
			 * Since this reference bit is shared by 512 4KB
			 * pages, it should not be cleared every time it is
			 * tested.  Apply a simple "hash" function on the
			 * physical page number, the virtual superpage number,
			 * and the pmap address to select one 4KB page out of
			 * the 512 on which testing the reference bit will
			 * result in clearing that reference bit.  This
			 * function is designed to avoid the selection of the
			 * same 4KB page for every 2MB page mapping.
			 *
			 * On demotion, a mapping that hasn't been referenced
			 * is simply destroyed.  To avoid the possibility of a
			 * subsequent page fault on a demoted wired mapping,
			 * always leave its reference bit set.  Moreover,
			 * since the superpage is wired, the current state of
			 * its reference bit won't affect page replacement.
			 */
			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
			    (l2e & PTE_SW_WIRED) == 0) {
				pmap_clear_bits(l2, PTE_A);
				pmap_invalidate_page(pmap, va);
				cleared++;
			} else
				not_cleared++;
		}
		PMAP_UNLOCK(pmap);
		/* Rotate the PV list if it has more than one entry. */
		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
			pvh->pv_gen++;
		}
		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
			goto out;
	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
small_mappings:
	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
		goto out;
	pv = pvf;
	do {
		pmap = PV_PMAP(pv);
		if (!PMAP_TRYLOCK(pmap)) {
			pvh_gen = pvh->pv_gen;
			md_gen = m->md.pv_gen;
			rw_wunlock(lock);
			PMAP_LOCK(pmap);
			rw_wlock(lock);
			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
				PMAP_UNLOCK(pmap);
				goto retry;
			}
		}
		l2 = pmap_l2(pmap, pv->pv_va);

		KASSERT((pmap_load(l2) & PTE_RX) == 0,
		    ("pmap_ts_referenced: found an invalid l2 table"));

		l3 = pmap_l2_to_l3(l2, pv->pv_va);
		l3e = pmap_load(l3);
		if ((l3e & PTE_D) != 0)
			vm_page_dirty(m);
		if ((l3e & PTE_A) != 0) {
			if ((l3e & PTE_SW_WIRED) == 0) {
				/*
				 * Wired pages cannot be paged out so
				 * doing accessed bit emulation for
				 * them is wasted effort. We do the
				 * hard work for unwired pages only.
				 */
				pmap_clear_bits(l3, PTE_A);
				pmap_invalidate_page(pmap, pv->pv_va);
				cleared++;
			} else
				not_cleared++;
		}
		PMAP_UNLOCK(pmap);
		/* Rotate the PV list if it has more than one entry. */
		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
			m->md.pv_gen++;
		}
	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
	    not_cleared < PMAP_TS_REFERENCED_MAX);
out:
	rw_wunlock(lock);
	rw_runlock(&pvh_global_lock);
	vm_page_free_pages_toq(&free, false);
	return (cleared + not_cleared);
}

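/*
 * Illustration of the superpage "hash" above (values purely for example):
 * for pa = 0x80240000 and a 2MB mapping at va = 0x40600000,
 * (pa >> PAGE_SHIFT) = 0x80240 and (va >> L2_SHIFT) = 0x203; the two are
 * xor'ed together with the pmap pointer and masked with Ln_ENTRIES - 1 =
 * 511, so roughly one 4KB page per 2MB mapping has its shared reference
 * bit cleared on each pass.
 */
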
/*
 *	Apply the given advice to the specified range of addresses within the
 *	given pmap.  Depending on the advice, clear the referenced and/or
 *	modified flags in each mapping and set the mapped page's dirty field.
 */
void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	struct md_page *pvh;
	struct rwlock *lock;
	pmap_t pmap;
	pv_entry_t next_pv, pv;
	pd_entry_t *l2, oldl2;
	pt_entry_t *l3;
	vm_offset_t va;
	int md_gen, pvh_gen;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	vm_page_assert_busied(m);

	if (!pmap_page_is_write_mapped(m))
		return;

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
	 * If the object containing the page is locked and the page is not
	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->a.flags & PGA_WRITEABLE) == 0)
		return;
	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
	rw_rlock(&pvh_global_lock);
	rw_wlock(lock);
restart:
	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
		pmap = PV_PMAP(pv);
		if (!PMAP_TRYLOCK(pmap)) {
			pvh_gen = pvh->pv_gen;
			rw_wunlock(lock);
			PMAP_LOCK(pmap);
			rw_wlock(lock);
			if (pvh_gen != pvh->pv_gen) {
				PMAP_UNLOCK(pmap);
				goto restart;
			}
		}
		va = pv->pv_va;
		l2 = pmap_l2(pmap, va);
		oldl2 = pmap_load(l2);
		/* If oldl2 has PTE_W set, then it also has PTE_D set. */
		if ((oldl2 & PTE_W) != 0 &&
		    pmap_demote_l2_locked(pmap, l2, va, &lock) &&
		    (oldl2 & PTE_SW_WIRED) == 0) {
			/*
			 * Write protect the mapping to a single page so that
			 * a subsequent write access may repromote.
			 */
			va += VM_PAGE_TO_PHYS(m) - PTE_TO_PHYS(oldl2);
			l3 = pmap_l2_to_l3(l2, va);
			pmap_clear_bits(l3, PTE_D | PTE_W);
			vm_page_dirty(m);
			pmap_invalidate_page(pmap, va);
		}
		PMAP_UNLOCK(pmap);
	}
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
		pmap = PV_PMAP(pv);
		if (!PMAP_TRYLOCK(pmap)) {
			md_gen = m->md.pv_gen;
			pvh_gen = pvh->pv_gen;
			rw_wunlock(lock);
			PMAP_LOCK(pmap);
			rw_wlock(lock);
			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
				PMAP_UNLOCK(pmap);
				goto restart;
			}
		}
		l2 = pmap_l2(pmap, pv->pv_va);
		KASSERT((pmap_load(l2) & PTE_RWX) == 0,
		    ("%s: found a 2mpage in page %p's pv list", __func__, m));
		l3 = pmap_l2_to_l3(l2, pv->pv_va);
		if ((pmap_load(l3) & (PTE_D | PTE_W)) == (PTE_D | PTE_W)) {
			pmap_clear_bits(l3, PTE_D | PTE_W);
			pmap_invalidate_page(pmap, pv->pv_va);
		}
		PMAP_UNLOCK(pmap);
	}
	rw_wunlock(lock);
	rw_runlock(&pvh_global_lock);
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

	return ((void *)PHYS_TO_DMAP(pa));
}

void
pmap_unmapbios(vm_paddr_t pa, vm_size_t size)
{
}

/*
 * Sets the memory attribute for the specified page.
 */
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	m->md.pv_memattr = ma;

	/*
	 * If "m" is a normal page, update its direct mapping.  This update
	 * can be relied upon to perform any cache operations that are
	 * required for data coherence.
	 */
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
	    m->md.pv_memattr) != 0)
		panic("memory attribute change on the direct map failed");
}

/*
 * Changes the specified virtual address range's memory type to that given by
 * the parameter "mode".  The specified virtual address range must be
 * completely contained within either the direct map or the kernel map.
 *
 * Returns zero if the change completed successfully, and either EINVAL or
 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 * of the virtual address range was not mapped, and ENOMEM is returned if
 * there was insufficient memory available to complete the change.  In the
 * latter case, the memory type may have been changed on some part of the
 * virtual address range.
 */
int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
	int error;

	PMAP_LOCK(kernel_pmap);
	error = pmap_change_attr_locked(va, size, mode);
	PMAP_UNLOCK(kernel_pmap);
	return (error);
}

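/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * to request an uncached view of a direct-mapped buffer,
 *
 *	error = pmap_change_attr(PHYS_TO_DMAP(pa), size,
 *	    VM_MEMATTR_UNCACHEABLE);
 *
 * Note that until the Svpbmt support sketched in the TODOs below is
 * implemented, pmap_change_attr_locked() only validates the range and
 * leaves the PTEs unmodified.
 */
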
static int
pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
{
	vm_offset_t base, offset, tmpva;
	pd_entry_t *l1, l1e;
	pd_entry_t *l2, l2e;
	pt_entry_t *l3, l3e;

	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = round_page(offset + size);

	if (!VIRT_IN_DMAP(base) &&
	    !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
		return (EINVAL);

	for (tmpva = base; tmpva < base + size; ) {
		l1 = pmap_l1(kernel_pmap, tmpva);
		if (l1 == NULL || ((l1e = pmap_load(l1)) & PTE_V) == 0)
			return (EINVAL);
		if ((l1e & PTE_RWX) != 0) {
			/*
			 * TODO: Demote if attributes don't match and there
			 * isn't an L1 page left in the range, and update the
			 * L1 entry if the attributes don't match but there is
			 * an L1 page left in the range, once we support the
			 * upcoming Svpbmt extension.
			 */
			tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
			continue;
		}
		l2 = pmap_l1_to_l2(l1, tmpva);
		if (l2 == NULL || ((l2e = pmap_load(l2)) & PTE_V) == 0)
			return (EINVAL);
		if ((l2e & PTE_RWX) != 0) {
			/*
			 * TODO: Demote if attributes don't match and there
			 * isn't an L2 page left in the range, and update the
			 * L2 entry if the attributes don't match but there is
			 * an L2 page left in the range, once we support the
			 * upcoming Svpbmt extension.
			 */
			tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
			continue;
		}
		l3 = pmap_l2_to_l3(l2, tmpva);
		if (l3 == NULL || ((l3e = pmap_load(l3)) & PTE_V) == 0)
			return (EINVAL);
		/*
		 * TODO: Update the L3 entry if the attributes don't match once
		 * we support the upcoming Svpbmt extension.
		 */
		tmpva += PAGE_SIZE;
	}

	return (0);
}

/*
 * Perform the pmap work for mincore(2).  If the page is not both referenced and
 * modified by this pmap, returns its physical address so that the caller can
 * find other mappings.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{
	pt_entry_t *l2, *l3, tpte;
	vm_paddr_t pa;
	int val;
	bool managed;

	managed = false;
	val = 0;

	PMAP_LOCK(pmap);
	l2 = pmap_l2(pmap, addr);
	if (l2 != NULL && ((tpte = pmap_load(l2)) & PTE_V) != 0) {
		if ((tpte & PTE_RWX) != 0) {
			pa = PTE_TO_PHYS(tpte) | (addr & L2_OFFSET);
			val = MINCORE_INCORE | MINCORE_PSIND(1);
		} else {
			l3 = pmap_l2_to_l3(l2, addr);
			tpte = pmap_load(l3);
			if ((tpte & PTE_V) == 0)
				goto done;
			pa = PTE_TO_PHYS(tpte) | (addr & L3_OFFSET);
			val = MINCORE_INCORE;
		}

		if ((tpte & PTE_D) != 0)
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((tpte & PTE_A) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
		managed = (tpte & PTE_SW_MANAGED) == PTE_SW_MANAGED;
	}

done:
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
		*pap = pa;
	}
	PMAP_UNLOCK(pmap);
	return (val);
}

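/*
 * Switch the current hart to the address space of the given thread's
 * process: load the new SATP value, move the hart between the old and new
 * pmaps' active sets, and update curpmap.  The caller is expected to hold
 * a critical section, as pmap_activate() below arranges.
 */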
static void
pmap_activate_sw(struct thread *td)
{
	pmap_t oldpmap, pmap;
	u_int hart;

	oldpmap = PCPU_GET(curpmap);
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	if (pmap == oldpmap)
		return;
	load_satp(pmap->pm_satp);

	hart = PCPU_GET(hart);
#ifdef SMP
	CPU_SET_ATOMIC(hart, &pmap->pm_active);
	CPU_CLR_ATOMIC(hart, &oldpmap->pm_active);
#else
	CPU_SET(hart, &pmap->pm_active);
	CPU_CLR(hart, &oldpmap->pm_active);
#endif
	PCPU_SET(curpmap, pmap);

	sfence_vma();
}

void
pmap_activate(struct thread *td)
{

	critical_enter();
	pmap_activate_sw(td);
	critical_exit();
}

void
pmap_activate_boot(pmap_t pmap)
{
	u_int hart;

	hart = PCPU_GET(hart);
#ifdef SMP
	CPU_SET_ATOMIC(hart, &pmap->pm_active);
#else
	CPU_SET(hart, &pmap->pm_active);
#endif
	PCPU_SET(curpmap, pmap);
}

void
pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
{
	cpuset_t mask;

	/*
	 * From the RISC-V User-Level ISA V2.2:
	 *
	 * "To make a store to instruction memory visible to all
	 * RISC-V harts, the writing hart has to execute a data FENCE
	 * before requesting that all remote RISC-V harts execute a
	 * FENCE.I."
	 *
	 * However, this is slightly misleading; we still need to
	 * perform a FENCE.I for the local hart, as FENCE does nothing
	 * for its icache. FENCE.I alone is also sufficient for the
	 * local hart.
	 */
	sched_pin();
	mask = all_harts;
	CPU_CLR(PCPU_GET(hart), &mask);
	fence_i();
	if (!CPU_EMPTY(&mask) && smp_started) {
		fence();
		sbi_remote_fence_i(mask.__bits);
	}
	sched_unpin();
}

/*
 *	Increase the starting virtual address of the given mapping if a
 *	different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < L2_SIZE)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & L2_OFFSET;
	if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
	    (*addr & L2_OFFSET) == superpage_offset)
		return;
	if ((*addr & L2_OFFSET) < superpage_offset)
		*addr = (*addr & ~L2_OFFSET) + superpage_offset;
	else
		*addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
}

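/*
 * Worked example for the alignment above (illustrative values only): if
 * the object's backing store begins 0x1000 bytes past a 2MB boundary
 * (superpage_offset = 0x1000) and *addr = 0x40000000, then *addr is
 * advanced to 0x40001000 so that the mapping's virtual and physical
 * addresses share the same 2MB phase, allowing fully populated runs to be
 * promoted later.
 */
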
/*
 * Get the kernel virtual address of a set of physical pages. If there are
 * physical addresses not covered by the DMAP perform a transient mapping
 * that will be removed when calling pmap_unmap_io_transient.
 *
 * \param page        The pages the caller wishes to obtain the virtual
 *                    address on the kernel memory map.
 * \param vaddr       On return contains the kernel virtual memory address
 *                    of the pages passed in the page parameter.
 * \param count       Number of pages passed in.
 * \param can_fault   TRUE if the thread using the mapped pages can take
 *                    page faults, FALSE otherwise.
 *
 * \returns TRUE if the caller must call pmap_unmap_io_transient when
 *          finished or FALSE otherwise.
 */
boolean_t
pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
    boolean_t can_fault)
{
	vm_paddr_t paddr;
	boolean_t needs_mapping;
	int error, i;

	/*
	 * Allocate any KVA space that we need, this is done in a separate
	 * loop to prevent calling vmem_alloc while pinned.
	 */
	needs_mapping = FALSE;
	for (i = 0; i < count; i++) {
		paddr = VM_PAGE_TO_PHYS(page[i]);
		if (__predict_false(paddr >= DMAP_MAX_PHYSADDR)) {
			error = vmem_alloc(kernel_arena, PAGE_SIZE,
			    M_BESTFIT | M_WAITOK, &vaddr[i]);
			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
			needs_mapping = TRUE;
		} else {
			vaddr[i] = PHYS_TO_DMAP(paddr);
		}
	}

	/* Exit early if everything is covered by the DMAP */
	if (!needs_mapping)
		return (FALSE);

	if (!can_fault)
		sched_pin();
	for (i = 0; i < count; i++) {
		paddr = VM_PAGE_TO_PHYS(page[i]);
		if (paddr >= DMAP_MAX_PHYSADDR) {
			panic(
			    "pmap_map_io_transient: TODO: Map out of DMAP data");
		}
	}

	return (needs_mapping);
}

void
pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
    boolean_t can_fault)
{
	vm_paddr_t paddr;
	int i;

	if (!can_fault)
		sched_unpin();
	for (i = 0; i < count; i++) {
		paddr = VM_PAGE_TO_PHYS(page[i]);
		if (paddr >= DMAP_MAX_PHYSADDR) {
			panic("RISCVTODO: pmap_unmap_io_transient: Unmap data");
		}
	}
}

boolean_t
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{

	return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_BACK);
}

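/*
 * Walk the page table for "va" and hand back pointers to the entries at
 * each level, for callers outside the pmap proper.  Returns false if the
 * walk hits an invalid entry; *l2 and *l3 are set to NULL when the walk
 * terminates early at a leaf (superpage) entry.
 */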
bool
pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l1, pd_entry_t **l2,
    pt_entry_t **l3)
{
	pd_entry_t *l1p, *l2p;

	/* Get l1 directory entry. */
	l1p = pmap_l1(pmap, va);
	*l1 = l1p;

	if (l1p == NULL || (pmap_load(l1p) & PTE_V) == 0)
		return (false);

	if ((pmap_load(l1p) & PTE_RX) != 0) {
		*l2 = NULL;
		*l3 = NULL;
		return (true);
	}

	/* Get l2 directory entry. */
	l2p = pmap_l1_to_l2(l1p, va);
	*l2 = l2p;

	if (l2p == NULL || (pmap_load(l2p) & PTE_V) == 0)
		return (false);

	if ((pmap_load(l2p) & PTE_RX) != 0) {
		*l3 = NULL;
		return (true);
	}

	/* Get l3 page table entry. */
	*l3 = pmap_l2_to_l3(l2p, va);

	return (true);
}

/*
 * Track a range of the kernel's virtual address space that is contiguous
 * in various mapping attributes.
 */
struct pmap_kernel_map_range {
	vm_offset_t sva;
	pt_entry_t attrs;
	int l3pages;
	int l2pages;
	int l1pages;
};

static void
sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
    vm_offset_t eva)
{

	if (eva <= range->sva)
		return;

	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %d %d %d\n",
	    range->sva, eva,
	    (range->attrs & PTE_W) == PTE_W ? 'w' : '-',
	    (range->attrs & PTE_X) == PTE_X ? 'x' : '-',
	    (range->attrs & PTE_U) == PTE_U ? 'u' : 's',
	    (range->attrs & PTE_G) == PTE_G ? 'g' : '-',
	    range->l1pages, range->l2pages, range->l3pages);

	/* Reset to sentinel value. */
	range->sva = 0xfffffffffffffffful;
}

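/*
 * With the format above, each output line looks like, e.g.:
 *
 *	0xffffffc000000000-0xffffffc000200000 rw-sg 0 1 0
 *
 * giving the address range, the access bits ('r', 'w', 'x'), user vs.
 * supervisor, the global bit, and the number of L1, L2, and L3 pages
 * that back the range.
 */
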
/*
 * Determine whether the attributes specified by a page table entry match those
 * being tracked by the current range.
 */
static bool
sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
{

	return (range->attrs == attrs);
}

static void
sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
    pt_entry_t attrs)
{

	memset(range, 0, sizeof(*range));
	range->sva = va;
	range->attrs = attrs;
}

/*
 * Given a leaf PTE, derive the mapping's attributes. If they do not match
 * those of the current run, dump the address range and its attributes, and
 * begin a new run.
 */
static void
sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
    vm_offset_t va, pd_entry_t l1e, pd_entry_t l2e, pt_entry_t l3e)
{
	pt_entry_t attrs;

	/* The PTE global bit is inherited by lower levels. */
	attrs = l1e & PTE_G;
	if ((l1e & PTE_RWX) != 0)
		attrs |= l1e & (PTE_RWX | PTE_U);
	else if (l2e != 0)
		attrs |= l2e & PTE_G;

	if ((l2e & PTE_RWX) != 0)
		attrs |= l2e & (PTE_RWX | PTE_U);
	else if (l3e != 0)
		attrs |= l3e & (PTE_RWX | PTE_U | PTE_G);

	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
		sysctl_kmaps_dump(sb, range, va);
		sysctl_kmaps_reinit(range, va, attrs);
	}
}

static int
sysctl_kmaps(SYSCTL_HANDLER_ARGS)
{
	struct pmap_kernel_map_range range;
	struct sbuf sbuf, *sb;
	pd_entry_t l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_offset_t sva;
	vm_paddr_t pa;
	int error, i, j, k;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = &sbuf;
	sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);

	/* Sentinel value. */
	range.sva = 0xfffffffffffffffful;

	/*
	 * Iterate over the kernel page tables without holding the kernel pmap
	 * lock. Kernel page table pages are never freed, so at worst we will
	 * observe inconsistencies in the output.
	 */
	sva = VM_MIN_KERNEL_ADDRESS;
	for (i = pmap_l1_index(sva); i < Ln_ENTRIES; i++) {
		if (i == pmap_l1_index(DMAP_MIN_ADDRESS))
			sbuf_printf(sb, "\nDirect map:\n");
		else if (i == pmap_l1_index(VM_MIN_KERNEL_ADDRESS))
			sbuf_printf(sb, "\nKernel map:\n");

		l1e = kernel_pmap->pm_l1[i];
		if ((l1e & PTE_V) == 0) {
			sysctl_kmaps_dump(sb, &range, sva);
			sva += L1_SIZE;
			continue;
		}
		if ((l1e & PTE_RWX) != 0) {
			sysctl_kmaps_check(sb, &range, sva, l1e, 0, 0);
			range.l1pages++;
			sva += L1_SIZE;
			continue;
		}
		pa = PTE_TO_PHYS(l1e);
		l2 = (pd_entry_t *)PHYS_TO_DMAP(pa);

		for (j = pmap_l2_index(sva); j < Ln_ENTRIES; j++) {
			l2e = l2[j];
			if ((l2e & PTE_V) == 0) {
				sysctl_kmaps_dump(sb, &range, sva);
				sva += L2_SIZE;
				continue;
			}
			if ((l2e & PTE_RWX) != 0) {
				sysctl_kmaps_check(sb, &range, sva, l1e, l2e, 0);
				range.l2pages++;
				sva += L2_SIZE;
				continue;
			}
			pa = PTE_TO_PHYS(l2e);
			l3 = (pd_entry_t *)PHYS_TO_DMAP(pa);

			for (k = pmap_l3_index(sva); k < Ln_ENTRIES; k++,
			    sva += L3_SIZE) {
				l3e = l3[k];
				if ((l3e & PTE_V) == 0) {
					sysctl_kmaps_dump(sb, &range, sva);
					continue;
				}
				sysctl_kmaps_check(sb, &range, sva,
				    l1e, l2e, l3e);
				range.l3pages++;
			}
		}
	}

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
    NULL, 0, sysctl_kmaps, "A",
    "Dump kernel address layout");