2 * Copyright (c) 1991 Regents of the University of California.
4 * Copyright (c) 1994 John S. Dyson
6 * Copyright (c) 1994 David Greenman
8 * Copyright (c) 2003 Peter Wemm
10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11 * All rights reserved.
12 * Copyright (c) 2014 Andrew Turner
13 * All rights reserved.
14 * Copyright (c) 2014-2016 The FreeBSD Foundation
15 * All rights reserved.
17 * This code is derived from software contributed to Berkeley by
18 * the Systems Programming Group of the University of Utah Computer
19 * Science Department and William Jolitz of UUNET Technologies Inc.
21 * This software was developed by Andrew Turner under sponsorship from
22 * the FreeBSD Foundation.
24 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 * must display the following acknowledgement:
34 * This product includes software developed by the University of
35 * California, Berkeley and its contributors.
36 * 4. Neither the name of the University nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
40 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
52 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
55 * Copyright (c) 2003 Networks Associates Technology, Inc.
56 * All rights reserved.
58 * This software was developed for the FreeBSD Project by Jake Burkholder,
59 * Safeport Network Services, and Network Associates Laboratories, the
60 * Security Research Division of Network Associates, Inc. under
61 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
62 * CHATS research program.
64 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
67 * 1. Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * 2. Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in the
71 * documentation and/or other materials provided with the distribution.
73 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
86 #include <sys/cdefs.h>
87 __FBSDID("$FreeBSD$");
/*
 *	Manages physical address maps.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and as to when physical maps must be made correct.
 */
110 #include <sys/param.h>
111 #include <sys/bitstring.h>
113 #include <sys/systm.h>
114 #include <sys/kernel.h>
116 #include <sys/lock.h>
117 #include <sys/malloc.h>
118 #include <sys/mman.h>
119 #include <sys/msgbuf.h>
120 #include <sys/mutex.h>
121 #include <sys/proc.h>
122 #include <sys/rwlock.h>
124 #include <sys/vmem.h>
125 #include <sys/vmmeter.h>
126 #include <sys/sched.h>
127 #include <sys/sysctl.h>
128 #include <sys/_unrhdr.h>
132 #include <vm/vm_param.h>
133 #include <vm/vm_kern.h>
134 #include <vm/vm_page.h>
135 #include <vm/vm_map.h>
136 #include <vm/vm_object.h>
137 #include <vm/vm_extern.h>
138 #include <vm/vm_pageout.h>
139 #include <vm/vm_pager.h>
140 #include <vm/vm_phys.h>
141 #include <vm/vm_radix.h>
142 #include <vm/vm_reserv.h>
145 #include <machine/machdep.h>
146 #include <machine/md_var.h>
147 #include <machine/pcb.h>
149 #include <arm/include/physmem.h>
151 #define NL0PG (PAGE_SIZE/(sizeof (pd_entry_t)))
152 #define NL1PG (PAGE_SIZE/(sizeof (pd_entry_t)))
153 #define NL2PG (PAGE_SIZE/(sizeof (pd_entry_t)))
154 #define NL3PG (PAGE_SIZE/(sizeof (pt_entry_t)))
156 #define NUL0E L0_ENTRIES
157 #define NUL1E (NUL0E * NL1PG)
158 #define NUL2E (NUL1E * NL2PG)
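/*
 * NUL1E and NUL2E count the L1 and L2 entries reachable from the L0 table.
 * They also partition the page table page pindex space used below:
 * L3 table pages take pindexes [0, NUL2E), L2 table pages take
 * [NUL2E, NUL2E + NUL1E), and L1 table pages come after that, which is how
 * _pmap_alloc_l3() and _pmap_unwire_l3() tell the levels apart.
 */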
#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE	extern inline
#endif
#else
#define PMAP_INLINE
#endif
171 * These are configured by the mair_el1 register. This is set up in locore.S
173 #define DEVICE_MEMORY 0
174 #define UNCACHED_MEMORY 1
175 #define CACHED_MEMORY 2
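/*
 * Each value is an index into the MAIR_EL1 attribute fields programmed in
 * locore.S; a mapping selects one of them through the ATTR_IDX() field of
 * its block or page descriptor.
 */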
179 #define PV_STAT(x) do { x ; } while (0)
181 #define PV_STAT(x) do { } while (0)
184 #define pmap_l2_pindex(v) ((v) >> L2_SHIFT)
185 #define pa_to_pvh(pa) (&pv_table[pmap_l2_pindex(pa)])
187 #define NPV_LIST_LOCKS MAXCPU
189 #define PHYS_TO_PV_LIST_LOCK(pa) \
190 (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
192 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
193 struct rwlock **_lockp = (lockp); \
194 struct rwlock *_new_lock; \
196 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
197 if (_new_lock != *_lockp) { \
198 if (*_lockp != NULL) \
199 rw_wunlock(*_lockp); \
200 *_lockp = _new_lock; \
205 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
206 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
208 #define RELEASE_PV_LIST_LOCK(lockp) do { \
209 struct rwlock **_lockp = (lockp); \
211 if (*_lockp != NULL) { \
212 rw_wunlock(*_lockp); \
217 #define VM_PAGE_TO_PV_LIST_LOCK(m) \
218 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
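/*
 * Illustrative usage sketch (not part of the original code): callers keep a
 * single lock pointer and let the macros above switch pv list locks as they
 * move from page to page:
 *
 *	struct rwlock *lock = NULL;
 *	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 *	... manipulate m's pv list ...
 *	RELEASE_PV_LIST_LOCK(&lock);
 */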
221 * The presence of this flag indicates that the mapping is writeable.
222 * If the ATTR_AP_RO bit is also set, then the mapping is clean, otherwise it is
223 * dirty. This flag may only be set on managed mappings.
225 static pt_entry_t ATTR_SW_DBM;
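/*
 * ATTR_SW_DBM is a variable rather than a constant: pmap_bootstrap() points
 * it at the hardware ATTR_DBM bit when the CPU implements hardware dirty
 * state management, and at the spare software bit _ATTR_SW_DBM otherwise.
 */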
227 struct pmap kernel_pmap_store;
229 /* Used for mapping ACPI memory before VM is initialized */
230 #define PMAP_PREINIT_MAPPING_COUNT 32
231 #define PMAP_PREINIT_MAPPING_SIZE (PMAP_PREINIT_MAPPING_COUNT * L2_SIZE)
232 static vm_offset_t preinit_map_va; /* Start VA of pre-init mapping space */
233 static int vm_initialized = 0; /* No need to use pre-init maps when set */
236 * Reserve a few L2 blocks starting from 'preinit_map_va' pointer.
237 * Always map entire L2 block for simplicity.
238 * VA of L2 block = preinit_map_va + i * L2_SIZE
240 static struct pmap_preinit_mapping {
244 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
246 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
247 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
248 vm_offset_t kernel_vm_end = 0;
251 * Data for the pv entry allocation mechanism.
253 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
254 static struct mtx pv_chunks_mutex;
255 static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
256 static struct md_page *pv_table;
257 static struct md_page pv_dummy;
259 vm_paddr_t dmap_phys_base; /* The start of the dmap region */
260 vm_paddr_t dmap_phys_max; /* The limit of the dmap region */
261 vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */
263 /* This code assumes all L1 DMAP entries will be used */
264 CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
265 CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
267 #define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
268 extern pt_entry_t pagetable_dmap[];
270 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
271 static vm_paddr_t physmap[PHYSMAP_SIZE];
272 static u_int physmap_idx;
274 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
276 static int superpages_enabled = 1;
277 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
278 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
279 "Are large page mappings enabled?");
282 * Internal flags for pmap_enter()'s helper functions.
284 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
285 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
287 static void free_pv_chunk(struct pv_chunk *pc);
288 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
289 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
290 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
291 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
292 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
295 static int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
296 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
297 static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
298 static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
299 vm_offset_t va, struct rwlock **lockp);
300 static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
301 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
302 vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
303 static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
304 u_int flags, vm_page_t m, struct rwlock **lockp);
305 static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
306 pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
307 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
308 pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
309 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
310 vm_page_t m, struct rwlock **lockp);
312 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
313 struct rwlock **lockp);
315 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
316 struct spglist *free);
317 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
318 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
321 * These load the old table data and store the new value.
322 * They need to be atomic as the System MMU may write to the table at
323 * the same time as the CPU.
325 #define pmap_clear(table) atomic_store_64(table, 0)
326 #define pmap_clear_bits(table, bits) atomic_clear_64(table, bits)
327 #define pmap_load(table) (*table)
328 #define pmap_load_clear(table) atomic_swap_64(table, 0)
329 #define pmap_load_store(table, entry) atomic_swap_64(table, entry)
330 #define pmap_set_bits(table, bits) atomic_set_64(table, bits)
331 #define pmap_store(table, entry) atomic_store_64(table, entry)
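/*
 * Typical update pattern (illustrative, not original code): an entry that
 * may already be live is replaced with pmap_load_store() so the old value
 * is captured atomically, and the VA is then invalidated:
 *
 *	old = pmap_load_store(pte, new);
 *	pmap_invalidate_page(pmap, va);
 *
 * pmap_store() is used for entries that are known to be invalid, where no
 * stale TLB entry can exist.
 */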
333 /********************/
334 /* Inline functions */
335 /********************/
338 pagecopy(void *s, void *d)
341 memcpy(d, s, PAGE_SIZE);
344 static __inline pd_entry_t *
345 pmap_l0(pmap_t pmap, vm_offset_t va)
348 return (&pmap->pm_l0[pmap_l0_index(va)]);
351 static __inline pd_entry_t *
352 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
356 l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
357 return (&l1[pmap_l1_index(va)]);
360 static __inline pd_entry_t *
361 pmap_l1(pmap_t pmap, vm_offset_t va)
365 l0 = pmap_l0(pmap, va);
366 if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
369 return (pmap_l0_to_l1(l0, va));
372 static __inline pd_entry_t *
373 pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
377 l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
378 return (&l2[pmap_l2_index(va)]);
381 static __inline pd_entry_t *
382 pmap_l2(pmap_t pmap, vm_offset_t va)
386 l1 = pmap_l1(pmap, va);
387 if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
390 return (pmap_l1_to_l2(l1, va));
393 static __inline pt_entry_t *
394 pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
398 l3 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
399 return (&l3[pmap_l3_index(va)]);
403 * Returns the lowest valid pde for a given virtual address.
404 * The next level may or may not point to a valid page or block.
406 static __inline pd_entry_t *
407 pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
409 pd_entry_t *l0, *l1, *l2, desc;
411 l0 = pmap_l0(pmap, va);
412 desc = pmap_load(l0) & ATTR_DESCR_MASK;
413 if (desc != L0_TABLE) {
418 l1 = pmap_l0_to_l1(l0, va);
419 desc = pmap_load(l1) & ATTR_DESCR_MASK;
420 if (desc != L1_TABLE) {
425 l2 = pmap_l1_to_l2(l1, va);
426 desc = pmap_load(l2) & ATTR_DESCR_MASK;
427 if (desc != L2_TABLE) {
437 * Returns the lowest valid pte block or table entry for a given virtual
 * address.  If there are no valid entries, return NULL and set the level to
439 * the first invalid level.
441 static __inline pt_entry_t *
442 pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
444 pd_entry_t *l1, *l2, desc;
447 l1 = pmap_l1(pmap, va);
452 desc = pmap_load(l1) & ATTR_DESCR_MASK;
453 if (desc == L1_BLOCK) {
458 if (desc != L1_TABLE) {
463 l2 = pmap_l1_to_l2(l1, va);
464 desc = pmap_load(l2) & ATTR_DESCR_MASK;
465 if (desc == L2_BLOCK) {
470 if (desc != L2_TABLE) {
476 l3 = pmap_l2_to_l3(l2, va);
477 if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
484 pmap_ps_enabled(pmap_t pmap __unused)
487 return (superpages_enabled != 0);
491 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
492 pd_entry_t **l2, pt_entry_t **l3)
494 pd_entry_t *l0p, *l1p, *l2p;
496 if (pmap->pm_l0 == NULL)
499 l0p = pmap_l0(pmap, va);
502 if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
505 l1p = pmap_l0_to_l1(l0p, va);
508 if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
514 if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
517 l2p = pmap_l1_to_l2(l1p, va);
520 if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
525 if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE)
528 *l3 = pmap_l2_to_l3(l2p, va);
534 pmap_l3_valid(pt_entry_t l3)
537 return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
541 CTASSERT(L1_BLOCK == L2_BLOCK);
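/*
 * Illustrative lookup sketch (not original code): pmap_pte() reports the
 * level of the entry it found, letting callers distinguish 4KB pages from
 * 2MB/1GB blocks:
 *
 *	int lvl;
 *	pt_entry_t *pte = pmap_pte(pmap, va, &lvl);
 *	if (pte != NULL && lvl == 3)
 *		... pmap_load(pte) is an L3_PAGE entry ...
 */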
544 * Checks if the PTE is dirty.
547 pmap_pte_dirty(pt_entry_t pte)
550 KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
551 KASSERT((pte & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) != 0,
552 ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
554 return ((pte & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
555 (ATTR_AP(ATTR_AP_RW) | ATTR_SW_DBM));
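/*
 * Under this scheme a clean but writable mapping carries ATTR_AP_RO together
 * with ATTR_SW_DBM; only once the read-only restriction has been removed on
 * the first write (by software, or directly by hardware DBM when ATTR_SW_DBM
 * aliases ATTR_DBM) does pmap_pte_dirty() report the mapping as dirty.
 */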
559 pmap_resident_count_inc(pmap_t pmap, int count)
562 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
563 pmap->pm_stats.resident_count += count;
567 pmap_resident_count_dec(pmap_t pmap, int count)
570 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
571 KASSERT(pmap->pm_stats.resident_count >= count,
572 ("pmap %p resident count underflow %ld %d", pmap,
573 pmap->pm_stats.resident_count, count));
574 pmap->pm_stats.resident_count -= count;
578 pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
584 l1 = (pd_entry_t *)l1pt;
585 *l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
	/* Check that locore used an L1 table mapping */
588 KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
589 ("Invalid bootstrap L1 table"));
590 /* Find the address of the L2 table */
591 l2 = (pt_entry_t *)init_pt_va;
592 *l2_slot = pmap_l2_index(va);
598 pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
600 u_int l1_slot, l2_slot;
603 l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);
605 return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
609 pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
610 vm_offset_t freemempos)
614 vm_paddr_t l2_pa, pa;
615 u_int l1_slot, l2_slot, prev_l1_slot;
618 dmap_phys_base = min_pa & ~L1_OFFSET;
625 memset(pagetable_dmap, 0, PAGE_SIZE * DMAP_TABLES);
627 for (i = 0; i < (physmap_idx * 2); i += 2) {
628 pa = physmap[i] & ~L2_OFFSET;
629 va = pa - dmap_phys_base + DMAP_MIN_ADDRESS;
631 /* Create L2 mappings at the start of the region */
632 if ((pa & L1_OFFSET) != 0) {
633 l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
634 if (l1_slot != prev_l1_slot) {
635 prev_l1_slot = l1_slot;
636 l2 = (pt_entry_t *)freemempos;
637 l2_pa = pmap_early_vtophys(kern_l1,
639 freemempos += PAGE_SIZE;
641 pmap_store(&pagetable_dmap[l1_slot],
642 (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
644 memset(l2, 0, PAGE_SIZE);
647 ("pmap_bootstrap_dmap: NULL l2 map"));
648 for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
649 pa += L2_SIZE, va += L2_SIZE) {
 * We are on an L1 boundary; stop here so the
 * rest of the region can be mapped with L1 blocks.
654 if ((pa & L1_OFFSET) == 0)
657 l2_slot = pmap_l2_index(va);
658 KASSERT(l2_slot != 0, ("..."));
659 pmap_store(&l2[l2_slot],
660 (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
661 ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
663 KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS),
667 for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] &&
668 (physmap[i + 1] - pa) >= L1_SIZE;
669 pa += L1_SIZE, va += L1_SIZE) {
670 l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
671 pmap_store(&pagetable_dmap[l1_slot],
672 (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
673 ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
676 /* Create L2 mappings at the end of the region */
677 if (pa < physmap[i + 1]) {
678 l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
679 if (l1_slot != prev_l1_slot) {
680 prev_l1_slot = l1_slot;
681 l2 = (pt_entry_t *)freemempos;
682 l2_pa = pmap_early_vtophys(kern_l1,
684 freemempos += PAGE_SIZE;
686 pmap_store(&pagetable_dmap[l1_slot],
687 (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
689 memset(l2, 0, PAGE_SIZE);
692 ("pmap_bootstrap_dmap: NULL l2 map"));
693 for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
694 pa += L2_SIZE, va += L2_SIZE) {
695 l2_slot = pmap_l2_index(va);
696 pmap_store(&l2[l2_slot],
697 (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
698 ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
702 if (pa > dmap_phys_max) {
714 pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
721 KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
723 l1 = (pd_entry_t *)l1pt;
724 l1_slot = pmap_l1_index(va);
727 for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
728 KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
730 pa = pmap_early_vtophys(l1pt, l2pt);
731 pmap_store(&l1[l1_slot],
732 (pa & ~Ln_TABLE_MASK) | L1_TABLE);
736 /* Clean the L2 page table */
737 memset((void *)l2_start, 0, l2pt - l2_start);
743 pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
750 KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
752 l2 = pmap_l2(kernel_pmap, va);
753 l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE);
754 l2_slot = pmap_l2_index(va);
757 for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
758 KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
760 pa = pmap_early_vtophys(l1pt, l3pt);
761 pmap_store(&l2[l2_slot],
762 (pa & ~Ln_TABLE_MASK) | L2_TABLE);
	/* Clean the L3 page table */
767 memset((void *)l3_start, 0, l3pt - l3_start);
773 * Bootstrap the system enough to run with virtual memory.
776 pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
779 u_int l1_slot, l2_slot;
781 vm_offset_t va, freemempos;
782 vm_offset_t dpcpu, msgbufpv;
783 vm_paddr_t start_pa, pa, min_pa;
788 /* Determine whether the hardware implements DBM management. */
789 uint64_t reg = READ_SPECIALREG(ID_AA64MMFR1_EL1);
790 ATTR_SW_DBM = ID_AA64MMFR1_HAFDBS(reg) == ID_AA64MMFR1_HAFDBS_AF_DBS ?
791 ATTR_DBM : _ATTR_SW_DBM;
793 ATTR_SW_DBM = _ATTR_SW_DBM;
796 kern_delta = KERNBASE - kernstart;
798 printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
799 printf("%lx\n", l1pt);
800 printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
802 /* Set this early so we can use the pagetable walking functions */
803 kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
804 PMAP_LOCK_INIT(kernel_pmap);
806 /* Assume the address we were loaded to is a valid physical address */
807 min_pa = KERNBASE - kern_delta;
809 physmap_idx = arm_physmem_avail(physmap, nitems(physmap));
813 * Find the minimum physical address. physmap is sorted,
814 * but may contain empty ranges.
816 for (i = 0; i < (physmap_idx * 2); i += 2) {
817 if (physmap[i] == physmap[i + 1])
819 if (physmap[i] <= min_pa)
823 freemempos = KERNBASE + kernlen;
824 freemempos = roundup2(freemempos, PAGE_SIZE);
826 /* Create a direct map region early so we can use it for pa -> va */
827 freemempos = pmap_bootstrap_dmap(l1pt, min_pa, freemempos);
830 start_pa = pa = KERNBASE - kern_delta;
833 * Read the page table to find out what is already mapped.
834 * This assumes we have mapped a block of memory from KERNBASE
835 * using a single L1 entry.
837 l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);
839 /* Sanity check the index, KERNBASE should be the first VA */
840 KASSERT(l2_slot == 0, ("The L2 index is non-zero"));
842 /* Find how many pages we have mapped */
843 for (; l2_slot < Ln_ENTRIES; l2_slot++) {
844 if ((l2[l2_slot] & ATTR_DESCR_MASK) == 0)
847 /* Check locore used L2 blocks */
848 KASSERT((l2[l2_slot] & ATTR_DESCR_MASK) == L2_BLOCK,
849 ("Invalid bootstrap L2 table"));
850 KASSERT((l2[l2_slot] & ~ATTR_MASK) == pa,
851 ("Incorrect PA in L2 table"));
857 va = roundup2(va, L1_SIZE);
859 /* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS */
860 freemempos = pmap_bootstrap_l2(l1pt, va, freemempos);
861 /* And the l3 tables for the early devmap */
862 freemempos = pmap_bootstrap_l3(l1pt,
863 VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE), freemempos);
867 #define alloc_pages(var, np) \
868 (var) = freemempos; \
	freemempos += ((np) * PAGE_SIZE);	\
870 memset((char *)(var), 0, ((np) * PAGE_SIZE));
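/*
 * alloc_pages() bump-allocates zeroed, page-aligned memory from the
 * bootstrap 'freemempos' cursor; everything handed out here is excluded
 * from the physical memory made available to the VM system by the
 * arm_physmem_exclude_region() call at the end of pmap_bootstrap().
 */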
872 /* Allocate dynamic per-cpu area. */
873 alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
874 dpcpu_init((void *)dpcpu, 0);
876 /* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
877 alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
878 msgbufp = (void *)msgbufpv;
880 /* Reserve some VA space for early BIOS/ACPI mapping */
881 preinit_map_va = roundup2(freemempos, L2_SIZE);
883 virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
884 virtual_avail = roundup2(virtual_avail, L1_SIZE);
885 virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
886 kernel_vm_end = virtual_avail;
888 pa = pmap_early_vtophys(l1pt, freemempos);
890 arm_physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
896 * Initialize a vm_page's machine-dependent fields.
899 pmap_page_init(vm_page_t m)
902 TAILQ_INIT(&m->md.pv_list);
903 m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
907 * Initialize the pmap module.
908 * Called by vm_init, to initialize any structures that the pmap
909 * system needs to map virtual memory.
918 * Are large page mappings enabled?
920 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
921 if (superpages_enabled) {
922 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
923 ("pmap_init: can't assign to pagesizes[1]"));
924 pagesizes[1] = L2_SIZE;
928 * Initialize the pv chunk list mutex.
930 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
933 * Initialize the pool of pv list locks.
935 for (i = 0; i < NPV_LIST_LOCKS; i++)
936 rw_init(&pv_list_locks[i], "pmap pv list");
939 * Calculate the size of the pv head table for superpages.
941 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
944 * Allocate memory for the pv head table for superpages.
946 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
948 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
949 for (i = 0; i < pv_npg; i++)
950 TAILQ_INIT(&pv_table[i].pv_list);
951 TAILQ_INIT(&pv_dummy.pv_list);
956 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD, 0,
957 "2MB page mapping counters");
959 static u_long pmap_l2_demotions;
960 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
961 &pmap_l2_demotions, 0, "2MB page demotions");
963 static u_long pmap_l2_mappings;
964 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
965 &pmap_l2_mappings, 0, "2MB page mappings");
967 static u_long pmap_l2_p_failures;
968 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
969 &pmap_l2_p_failures, 0, "2MB page promotion failures");
971 static u_long pmap_l2_promotions;
972 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
973 &pmap_l2_promotions, 0, "2MB page promotions");
976 * Invalidate a single TLB entry.
979 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
985 "tlbi vaae1is, %0 \n"
988 : : "r"(va >> PAGE_SHIFT));
993 pmap_invalidate_range_nopin(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
998 for (addr = sva; addr < eva; addr += PAGE_SIZE) {
1000 "tlbi vaae1is, %0" : : "r"(addr >> PAGE_SHIFT));
1007 static __inline void
1008 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1012 pmap_invalidate_range_nopin(pmap, sva, eva);
1016 static __inline void
1017 pmap_invalidate_all(pmap_t pmap)
1030 * Routine: pmap_extract
1032 * Extract the physical page address associated
1033 * with the given map/virtual_address pair.
1036 pmap_extract(pmap_t pmap, vm_offset_t va)
1038 pt_entry_t *pte, tpte;
1045 * Find the block or page map for this virtual address. pmap_pte
1046 * will return either a valid block/page entry, or NULL.
1048 pte = pmap_pte(pmap, va, &lvl);
1050 tpte = pmap_load(pte);
1051 pa = tpte & ~ATTR_MASK;
1054 KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1055 ("pmap_extract: Invalid L1 pte found: %lx",
1056 tpte & ATTR_DESCR_MASK));
1057 pa |= (va & L1_OFFSET);
1060 KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1061 ("pmap_extract: Invalid L2 pte found: %lx",
1062 tpte & ATTR_DESCR_MASK));
1063 pa |= (va & L2_OFFSET);
1066 KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1067 ("pmap_extract: Invalid L3 pte found: %lx",
1068 tpte & ATTR_DESCR_MASK));
1069 pa |= (va & L3_OFFSET);
1078 * Routine: pmap_extract_and_hold
1080 * Atomically extract and hold the physical page
1081 * with the given pmap and virtual address pair
1082 * if that mapping permits the given protection.
1085 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1087 pt_entry_t *pte, tpte;
1097 pte = pmap_pte(pmap, va, &lvl);
1099 tpte = pmap_load(pte);
1101 KASSERT(lvl > 0 && lvl <= 3,
1102 ("pmap_extract_and_hold: Invalid level %d", lvl));
1103 CTASSERT(L1_BLOCK == L2_BLOCK);
1104 KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
1105 (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
1106 ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
1107 tpte & ATTR_DESCR_MASK));
1108 if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
1109 ((prot & VM_PROT_WRITE) == 0)) {
1112 off = va & L1_OFFSET;
1115 off = va & L2_OFFSET;
1121 if (vm_page_pa_tryrelock(pmap,
1122 (tpte & ~ATTR_MASK) | off, &pa))
1124 m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
1134 pmap_kextract(vm_offset_t va)
1136 pt_entry_t *pte, tpte;
1140 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1141 pa = DMAP_TO_PHYS(va);
1144 pte = pmap_pte(kernel_pmap, va, &lvl);
1146 tpte = pmap_load(pte);
1147 pa = tpte & ~ATTR_MASK;
1150 KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1151 ("pmap_kextract: Invalid L1 pte found: %lx",
1152 tpte & ATTR_DESCR_MASK));
1153 pa |= (va & L1_OFFSET);
1156 KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1157 ("pmap_kextract: Invalid L2 pte found: %lx",
1158 tpte & ATTR_DESCR_MASK));
1159 pa |= (va & L2_OFFSET);
1162 KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1163 ("pmap_kextract: Invalid L3 pte found: %lx",
1164 tpte & ATTR_DESCR_MASK));
1165 pa |= (va & L3_OFFSET);
1173 /***************************************************
1174 * Low level mapping routines.....
1175 ***************************************************/
1178 pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
1181 pt_entry_t *pte, attr;
1185 KASSERT((pa & L3_OFFSET) == 0,
1186 ("pmap_kenter: Invalid physical address"));
1187 KASSERT((sva & L3_OFFSET) == 0,
1188 ("pmap_kenter: Invalid virtual address"));
1189 KASSERT((size & PAGE_MASK) == 0,
1190 ("pmap_kenter: Mapping is not page-sized"));
1192 attr = ATTR_DEFAULT | ATTR_IDX(mode) | L3_PAGE;
1193 if (mode == DEVICE_MEMORY)
1198 pde = pmap_pde(kernel_pmap, va, &lvl);
1199 KASSERT(pde != NULL,
1200 ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
1201 KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
1203 pte = pmap_l2_to_l3(pde, va);
1204 pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
1210 pmap_invalidate_range(kernel_pmap, sva, va);
1214 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
1217 pmap_kenter(sva, size, pa, DEVICE_MEMORY);
1221 * Remove a page from the kernel pagetables.
1224 pmap_kremove(vm_offset_t va)
1229 pte = pmap_pte(kernel_pmap, va, &lvl);
1230 KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
1231 KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));
1234 pmap_invalidate_page(kernel_pmap, va);
1238 pmap_kremove_device(vm_offset_t sva, vm_size_t size)
1244 KASSERT((sva & L3_OFFSET) == 0,
1245 ("pmap_kremove_device: Invalid virtual address"));
1246 KASSERT((size & PAGE_MASK) == 0,
1247 ("pmap_kremove_device: Mapping is not page-sized"));
1251 pte = pmap_pte(kernel_pmap, va, &lvl);
1252 KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
1254 ("Invalid device pagetable level: %d != 3", lvl));
1260 pmap_invalidate_range(kernel_pmap, sva, va);
1264 * Used to map a range of physical addresses into kernel
1265 * virtual address space.
1267 * The value passed in '*virt' is a suggested virtual address for
1268 * the mapping. Architectures which can support a direct-mapped
1269 * physical to virtual region can return the appropriate address
1270 * within that region, leaving '*virt' unchanged. Other
1271 * architectures should map the pages starting at '*virt' and
1272 * update '*virt' with the first usable address after the mapped
1276 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1278 return PHYS_TO_DMAP(start);
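/*
 * On arm64 physical memory is reachable through the direct map, so
 * pmap_map() above simply returns the DMAP address of 'start' and leaves
 * '*virt' unchanged.
 */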
 * Add a list of wired pages to the kva.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged TLB invalidation.
1292 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1295 pt_entry_t *pte, pa;
1301 for (i = 0; i < count; i++) {
1302 pde = pmap_pde(kernel_pmap, va, &lvl);
1303 KASSERT(pde != NULL,
1304 ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
1306 ("pmap_qenter: Invalid level %d", lvl));
1309 pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
1310 ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
1311 if (m->md.pv_memattr == DEVICE_MEMORY)
1313 pte = pmap_l2_to_l3(pde, va);
1314 pmap_load_store(pte, pa);
1318 pmap_invalidate_range(kernel_pmap, sva, va);
1322 * This routine tears out page mappings from the
1323 * kernel -- it is meant only for temporary mappings.
1326 pmap_qremove(vm_offset_t sva, int count)
1332 KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
1335 while (count-- > 0) {
1336 pte = pmap_pte(kernel_pmap, va, &lvl);
1338 ("Invalid device pagetable level: %d != 3", lvl));
1345 pmap_invalidate_range(kernel_pmap, sva, va);
1348 /***************************************************
1349 * Page table page management routines.....
1350 ***************************************************/
1352 * Schedule the specified unused page table page to be freed. Specifically,
1353 * add the page to the specified list of pages that will be released to the
1354 * physical memory manager after the TLB has been updated.
1356 static __inline void
1357 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1358 boolean_t set_PG_ZERO)
1362 m->flags |= PG_ZERO;
1364 m->flags &= ~PG_ZERO;
1365 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1369 * Decrements a page table page's wire count, which is used to record the
1370 * number of valid page table entries within the page. If the wire count
1371 * drops to zero, then the page table page is unmapped. Returns TRUE if the
1372 * page table page was unmapped and FALSE otherwise.
1374 static inline boolean_t
1375 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1379 if (m->wire_count == 0) {
1380 _pmap_unwire_l3(pmap, va, m, free);
1387 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1390 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1392 * unmap the page table page
1394 if (m->pindex >= (NUL2E + NUL1E)) {
1398 l0 = pmap_l0(pmap, va);
1400 } else if (m->pindex >= NUL2E) {
1404 l1 = pmap_l1(pmap, va);
1410 l2 = pmap_l2(pmap, va);
1413 pmap_resident_count_dec(pmap, 1);
1414 if (m->pindex < NUL2E) {
1415 /* We just released an l3, unhold the matching l2 */
1416 pd_entry_t *l1, tl1;
1419 l1 = pmap_l1(pmap, va);
1420 tl1 = pmap_load(l1);
1421 l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1422 pmap_unwire_l3(pmap, va, l2pg, free);
1423 } else if (m->pindex < (NUL2E + NUL1E)) {
1424 /* We just released an l2, unhold the matching l1 */
1425 pd_entry_t *l0, tl0;
1428 l0 = pmap_l0(pmap, va);
1429 tl0 = pmap_load(l0);
1430 l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1431 pmap_unwire_l3(pmap, va, l1pg, free);
1433 pmap_invalidate_page(pmap, va);
1436 * Put page on a list so that it is released after
1437 * *ALL* TLB shootdown is done
1439 pmap_add_delayed_free_list(m, free, TRUE);
1443 * After removing a page table entry, this routine is used to
1444 * conditionally free the page, and manage the hold/wire counts.
1447 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1448 struct spglist *free)
1452 if (va >= VM_MAXUSER_ADDRESS)
1454 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1455 mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
1456 return (pmap_unwire_l3(pmap, va, mpte, free));
1460 pmap_pinit0(pmap_t pmap)
1463 PMAP_LOCK_INIT(pmap);
1464 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1465 pmap->pm_l0 = kernel_pmap->pm_l0;
1466 pmap->pm_root.rt_root = 0;
1470 pmap_pinit(pmap_t pmap)
1476 * allocate the l0 page
1478 while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
1479 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1482 l0phys = VM_PAGE_TO_PHYS(l0pt);
1483 pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys);
1485 if ((l0pt->flags & PG_ZERO) == 0)
1486 pagezero(pmap->pm_l0);
1488 pmap->pm_root.rt_root = 0;
1489 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1495 * This routine is called if the desired page table page does not exist.
1497 * If page table page allocation fails, this routine may sleep before
1498 * returning NULL. It sleeps only if a lock pointer was given.
1500 * Note: If a page allocation fails at page table level two or three,
1501 * one or two pages may be held during the wait, only to be released
 * afterwards.  This conservative approach is easily argued to avoid race
 * conditions.
1506 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1508 vm_page_t m, l1pg, l2pg;
1510 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1513 * Allocate a page table page.
1515 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1516 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1517 if (lockp != NULL) {
1518 RELEASE_PV_LIST_LOCK(lockp);
1525 * Indicate the need to retry. While waiting, the page table
1526 * page may have been allocated.
1530 if ((m->flags & PG_ZERO) == 0)
1534 * Map the pagetable page into the process address space, if
1535 * it isn't already there.
1538 if (ptepindex >= (NUL2E + NUL1E)) {
1540 vm_pindex_t l0index;
1542 l0index = ptepindex - (NUL2E + NUL1E);
1543 l0 = &pmap->pm_l0[l0index];
1544 pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
1545 } else if (ptepindex >= NUL2E) {
1546 vm_pindex_t l0index, l1index;
1547 pd_entry_t *l0, *l1;
1550 l1index = ptepindex - NUL2E;
1551 l0index = l1index >> L0_ENTRIES_SHIFT;
1553 l0 = &pmap->pm_l0[l0index];
1554 tl0 = pmap_load(l0);
1556 /* recurse for allocating page dir */
1557 if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
1559 vm_page_unwire_noq(m);
1560 vm_page_free_zero(m);
1564 l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1568 l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
1569 l1 = &l1[ptepindex & Ln_ADDR_MASK];
1570 pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
1572 vm_pindex_t l0index, l1index;
1573 pd_entry_t *l0, *l1, *l2;
1574 pd_entry_t tl0, tl1;
1576 l1index = ptepindex >> Ln_ENTRIES_SHIFT;
1577 l0index = l1index >> L0_ENTRIES_SHIFT;
1579 l0 = &pmap->pm_l0[l0index];
1580 tl0 = pmap_load(l0);
1582 /* recurse for allocating page dir */
1583 if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1585 vm_page_unwire_noq(m);
1586 vm_page_free_zero(m);
1589 tl0 = pmap_load(l0);
1590 l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1591 l1 = &l1[l1index & Ln_ADDR_MASK];
1593 l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1594 l1 = &l1[l1index & Ln_ADDR_MASK];
1595 tl1 = pmap_load(l1);
1597 /* recurse for allocating page dir */
1598 if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1600 vm_page_unwire_noq(m);
1601 vm_page_free_zero(m);
1605 l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1610 l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
1611 l2 = &l2[ptepindex & Ln_ADDR_MASK];
1612 pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
1615 pmap_resident_count_inc(pmap, 1);
1621 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1625 vm_pindex_t l2pindex;
1628 l1 = pmap_l1(pmap, va);
1629 if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
1630 /* Add a reference to the L2 page. */
1631 l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
1634 /* Allocate a L2 page. */
1635 l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
1636 l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
1637 if (l2pg == NULL && lockp != NULL)
1644 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1646 vm_pindex_t ptepindex;
1647 pd_entry_t *pde, tpde;
1655 * Calculate pagetable page index
1657 ptepindex = pmap_l2_pindex(va);
1660 * Get the page directory entry
1662 pde = pmap_pde(pmap, va, &lvl);
1665 * If the page table page is mapped, we just increment the hold count,
 * and activate it.  If we get a level 2 pde it will point to a level 3
 * table.
1674 pte = pmap_l0_to_l1(pde, va);
1675 KASSERT(pmap_load(pte) == 0,
1676 ("pmap_alloc_l3: TODO: l0 superpages"));
1681 pte = pmap_l1_to_l2(pde, va);
1682 KASSERT(pmap_load(pte) == 0,
1683 ("pmap_alloc_l3: TODO: l1 superpages"));
1687 tpde = pmap_load(pde);
1689 m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
1695 panic("pmap_alloc_l3: Invalid level %d", lvl);
1699 * Here if the pte page isn't mapped, or if it has been deallocated.
1701 m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1702 if (m == NULL && lockp != NULL)
1708 /***************************************************
1709 * Pmap allocation/deallocation routines.
1710 ***************************************************/
1713 * Release any resources held by the given physical map.
1714 * Called when a pmap initialized by pmap_pinit is being released.
1715 * Should only be called if the map contains no valid mappings.
1718 pmap_release(pmap_t pmap)
1722 KASSERT(pmap->pm_stats.resident_count == 0,
1723 ("pmap_release: pmap resident count %ld != 0",
1724 pmap->pm_stats.resident_count));
1725 KASSERT(vm_radix_is_empty(&pmap->pm_root),
1726 ("pmap_release: pmap has reserved page table page(s)"));
1728 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l0));
1730 vm_page_unwire_noq(m);
1731 vm_page_free_zero(m);
1735 kvm_size(SYSCTL_HANDLER_ARGS)
1737 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
1739 return sysctl_handle_long(oidp, &ksize, 0, req);
1741 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1742 0, 0, kvm_size, "LU", "Size of KVM");
1745 kvm_free(SYSCTL_HANDLER_ARGS)
1747 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1749 return sysctl_handle_long(oidp, &kfree, 0, req);
1751 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1752 0, 0, kvm_free, "LU", "Amount of KVM free");
1755 * grow the number of kernel page table entries, if needed
1758 pmap_growkernel(vm_offset_t addr)
1762 pd_entry_t *l0, *l1, *l2;
1764 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1766 addr = roundup2(addr, L2_SIZE);
1767 if (addr - 1 >= vm_map_max(kernel_map))
1768 addr = vm_map_max(kernel_map);
1769 while (kernel_vm_end < addr) {
1770 l0 = pmap_l0(kernel_pmap, kernel_vm_end);
1771 KASSERT(pmap_load(l0) != 0,
1772 ("pmap_growkernel: No level 0 kernel entry"));
1774 l1 = pmap_l0_to_l1(l0, kernel_vm_end);
1775 if (pmap_load(l1) == 0) {
			/* The L1 entry is empty; allocate a new L2 table page */
1777 nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
1778 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
1779 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1781 panic("pmap_growkernel: no memory to grow kernel");
1782 if ((nkpg->flags & PG_ZERO) == 0)
1783 pmap_zero_page(nkpg);
1784 paddr = VM_PAGE_TO_PHYS(nkpg);
1785 pmap_store(l1, paddr | L1_TABLE);
1786 continue; /* try again */
1788 l2 = pmap_l1_to_l2(l1, kernel_vm_end);
1789 if ((pmap_load(l2) & ATTR_AF) != 0) {
1790 kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1791 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1792 kernel_vm_end = vm_map_max(kernel_map);
1798 nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
1799 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1802 panic("pmap_growkernel: no memory to grow kernel");
1803 if ((nkpg->flags & PG_ZERO) == 0)
1804 pmap_zero_page(nkpg);
1805 paddr = VM_PAGE_TO_PHYS(nkpg);
1806 pmap_load_store(l2, paddr | L2_TABLE);
1807 pmap_invalidate_page(kernel_pmap, kernel_vm_end);
1809 kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1810 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1811 kernel_vm_end = vm_map_max(kernel_map);
1818 /***************************************************
1819 * page management routines.
1820 ***************************************************/
1822 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1823 CTASSERT(_NPCM == 3);
1824 CTASSERT(_NPCPV == 168);
1826 static __inline struct pv_chunk *
1827 pv_to_chunk(pv_entry_t pv)
1830 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1833 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1835 #define PC_FREE0 0xfffffffffffffffful
1836 #define PC_FREE1 0xfffffffffffffffful
1837 #define PC_FREE2 0x000000fffffffffful
1839 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
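/*
 * A pv_chunk occupies exactly one page (see the CTASSERT above) and holds
 * _NPCPV (168) pv entries; the three masks mark which entries are free,
 * 64 + 64 + 40 set bits in PC_FREE0..PC_FREE2.
 */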
1843 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1845 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1846 "Current number of pv entry chunks");
1847 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1848 "Current number of pv entry chunks allocated");
1849 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1850 "Current number of pv entry chunks frees");
1851 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1852 "Number of times tried to get a chunk page but failed.");
1854 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
1855 static int pv_entry_spare;
1857 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1858 "Current number of pv entry frees");
1859 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1860 "Current number of pv entry allocs");
1861 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1862 "Current number of pv entries");
1863 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1864 "Current number of spare pv entries");
1869 * We are in a serious low memory condition. Resort to
1870 * drastic measures to free some pages so we can allocate
1871 * another pv entry chunk.
1873 * Returns NULL if PV entries were reclaimed from the specified pmap.
1875 * We do not, however, unmap 2mpages because subsequent accesses will
1876 * allocate per-page pv entries until repromotion occurs, thereby
1877 * exacerbating the shortage of free pv entries.
1880 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1882 struct pv_chunk *pc, *pc_marker, *pc_marker_end;
1883 struct pv_chunk_header pc_marker_b, pc_marker_end_b;
1884 struct md_page *pvh;
1886 pmap_t next_pmap, pmap;
1887 pt_entry_t *pte, tpte;
1891 struct spglist free;
1893 int bit, field, freed, lvl;
1894 static int active_reclaims = 0;
1896 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1897 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
1902 bzero(&pc_marker_b, sizeof(pc_marker_b));
1903 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
1904 pc_marker = (struct pv_chunk *)&pc_marker_b;
1905 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
1907 mtx_lock(&pv_chunks_mutex);
1909 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
1910 TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
1911 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
1912 SLIST_EMPTY(&free)) {
1913 next_pmap = pc->pc_pmap;
1914 if (next_pmap == NULL) {
1916 * The next chunk is a marker. However, it is
1917 * not our marker, so active_reclaims must be
1918 * > 1. Consequently, the next_chunk code
1919 * will not rotate the pv_chunks list.
1923 mtx_unlock(&pv_chunks_mutex);
1926 * A pv_chunk can only be removed from the pc_lru list
1927 * when both pv_chunks_mutex is owned and the
1928 * corresponding pmap is locked.
1930 if (pmap != next_pmap) {
1931 if (pmap != NULL && pmap != locked_pmap)
1934 /* Avoid deadlock and lock recursion. */
1935 if (pmap > locked_pmap) {
1936 RELEASE_PV_LIST_LOCK(lockp);
1938 mtx_lock(&pv_chunks_mutex);
1940 } else if (pmap != locked_pmap) {
1941 if (PMAP_TRYLOCK(pmap)) {
1942 mtx_lock(&pv_chunks_mutex);
1945 pmap = NULL; /* pmap is not locked */
1946 mtx_lock(&pv_chunks_mutex);
1947 pc = TAILQ_NEXT(pc_marker, pc_lru);
1949 pc->pc_pmap != next_pmap)
1957 * Destroy every non-wired, 4 KB page mapping in the chunk.
1960 for (field = 0; field < _NPCM; field++) {
1961 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1962 inuse != 0; inuse &= ~(1UL << bit)) {
1963 bit = ffsl(inuse) - 1;
1964 pv = &pc->pc_pventry[field * 64 + bit];
1966 pde = pmap_pde(pmap, va, &lvl);
1969 pte = pmap_l2_to_l3(pde, va);
1970 tpte = pmap_load(pte);
1971 if ((tpte & ATTR_SW_WIRED) != 0)
1973 tpte = pmap_load_clear(pte);
1974 pmap_invalidate_page(pmap, va);
1975 m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
1976 if (pmap_pte_dirty(tpte))
1978 if ((tpte & ATTR_AF) != 0)
1979 vm_page_aflag_set(m, PGA_REFERENCED);
1980 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1981 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
1983 if (TAILQ_EMPTY(&m->md.pv_list) &&
1984 (m->flags & PG_FICTITIOUS) == 0) {
1985 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
1986 if (TAILQ_EMPTY(&pvh->pv_list)) {
1987 vm_page_aflag_clear(m,
1991 pc->pc_map[field] |= 1UL << bit;
1992 pmap_unuse_pt(pmap, va, pmap_load(pde), &free);
1997 mtx_lock(&pv_chunks_mutex);
2000 /* Every freed mapping is for a 4 KB page. */
2001 pmap_resident_count_dec(pmap, freed);
2002 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
2003 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
2004 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
2005 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2006 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
2007 pc->pc_map[2] == PC_FREE2) {
2008 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2009 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2010 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2011 /* Entire chunk is free; return it. */
2012 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2013 dump_drop_page(m_pc->phys_addr);
2014 mtx_lock(&pv_chunks_mutex);
2015 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2018 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2019 mtx_lock(&pv_chunks_mutex);
2020 /* One freed pv entry in locked_pmap is sufficient. */
2021 if (pmap == locked_pmap)
2025 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2026 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
2027 if (active_reclaims == 1 && pmap != NULL) {
2029 * Rotate the pv chunks list so that we do not
2030 * scan the same pv chunks that could not be
2031 * freed (because they contained a wired
2032 * and/or superpage mapping) on every
2033 * invocation of reclaim_pv_chunk().
2035 while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
2036 MPASS(pc->pc_pmap != NULL);
2037 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2038 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2042 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2043 TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
2045 mtx_unlock(&pv_chunks_mutex);
2046 if (pmap != NULL && pmap != locked_pmap)
2048 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
2049 m_pc = SLIST_FIRST(&free);
2050 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2051 /* Recycle a freed page table page. */
2052 m_pc->wire_count = 1;
2054 vm_page_free_pages_toq(&free, true);
2059 * free the pv_entry back to the free list
2062 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2064 struct pv_chunk *pc;
2065 int idx, field, bit;
2067 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2068 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
2069 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
2070 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
2071 pc = pv_to_chunk(pv);
2072 idx = pv - &pc->pc_pventry[0];
2075 pc->pc_map[field] |= 1ul << bit;
2076 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
2077 pc->pc_map[2] != PC_FREE2) {
2078 /* 98% of the time, pc is already at the head of the list. */
2079 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
2080 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2081 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2085 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2090 free_pv_chunk(struct pv_chunk *pc)
2094 mtx_lock(&pv_chunks_mutex);
2095 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2096 mtx_unlock(&pv_chunks_mutex);
2097 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2098 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2099 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2100 /* entire chunk is free, return it */
2101 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2102 dump_drop_page(m->phys_addr);
2103 vm_page_unwire_noq(m);
2108 * Returns a new PV entry, allocating a new PV chunk from the system when
2109 * needed. If this PV chunk allocation fails and a PV list lock pointer was
 * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
 * returned.
2113 * The given PV list lock may be released.
2116 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
2120 struct pv_chunk *pc;
2123 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2124 PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
2126 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2128 for (field = 0; field < _NPCM; field++) {
2129 if (pc->pc_map[field]) {
2130 bit = ffsl(pc->pc_map[field]) - 1;
2134 if (field < _NPCM) {
2135 pv = &pc->pc_pventry[field * 64 + bit];
2136 pc->pc_map[field] &= ~(1ul << bit);
2137 /* If this was the last item, move it to tail */
2138 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
2139 pc->pc_map[2] == 0) {
2140 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2141 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
2144 PV_STAT(atomic_add_long(&pv_entry_count, 1));
2145 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
2149 /* No free items, allocate another chunk */
2150 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2153 if (lockp == NULL) {
2154 PV_STAT(pc_chunk_tryfail++);
2157 m = reclaim_pv_chunk(pmap, lockp);
2161 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2162 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2163 dump_add_page(m->phys_addr);
2164 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2166 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
2167 pc->pc_map[1] = PC_FREE1;
2168 pc->pc_map[2] = PC_FREE2;
2169 mtx_lock(&pv_chunks_mutex);
2170 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2171 mtx_unlock(&pv_chunks_mutex);
2172 pv = &pc->pc_pventry[0];
2173 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2174 PV_STAT(atomic_add_long(&pv_entry_count, 1));
2175 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
2180 * Ensure that the number of spare PV entries in the specified pmap meets or
2181 * exceeds the given count, "needed".
2183 * The given PV list lock may be released.
2186 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
2188 struct pch new_tail;
2189 struct pv_chunk *pc;
2194 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2195 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
2198 * Newly allocated PV chunks must be stored in a private list until
2199 * the required number of PV chunks have been allocated. Otherwise,
2200 * reclaim_pv_chunk() could recycle one of these chunks. In
2201 * contrast, these chunks must be added to the pmap upon allocation.
2203 TAILQ_INIT(&new_tail);
2206 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
2207 bit_count((bitstr_t *)pc->pc_map, 0,
2208 sizeof(pc->pc_map) * NBBY, &free);
2212 if (avail >= needed)
2215 for (reclaimed = false; avail < needed; avail += _NPCPV) {
2216 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2219 m = reclaim_pv_chunk(pmap, lockp);
2224 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2225 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2226 dump_add_page(m->phys_addr);
2227 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2229 pc->pc_map[0] = PC_FREE0;
2230 pc->pc_map[1] = PC_FREE1;
2231 pc->pc_map[2] = PC_FREE2;
2232 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2233 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
2234 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
2237 * The reclaim might have freed a chunk from the current pmap.
2238 * If that chunk contained available entries, we need to
2239 * re-count the number of available entries.
2244 if (!TAILQ_EMPTY(&new_tail)) {
2245 mtx_lock(&pv_chunks_mutex);
2246 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
2247 mtx_unlock(&pv_chunks_mutex);
2252 * First find and then remove the pv entry for the specified pmap and virtual
2253 * address from the specified pv list. Returns the pv entry if found and NULL
2254 * otherwise. This operation can be performed on pv lists for either 4KB or
2255 * 2MB page mappings.
2257 static __inline pv_entry_t
2258 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2262 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
2263 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2264 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
2273 * After demotion from a 2MB page mapping to 512 4KB page mappings,
2274 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
2275 * entries for each of the 4KB page mappings.
2278 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2279 struct rwlock **lockp)
2281 struct md_page *pvh;
2282 struct pv_chunk *pc;
2284 vm_offset_t va_last;
2288 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2289 KASSERT((va & L2_OFFSET) == 0,
2290 ("pmap_pv_demote_l2: va is not 2mpage aligned"));
2291 KASSERT((pa & L2_OFFSET) == 0,
2292 ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
2293 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2296 * Transfer the 2mpage's pv entry for this mapping to the first
2297 * page's pv list. Once this transfer begins, the pv list lock
2298 * must not be released until the last pv entry is reinstantiated.
2300 pvh = pa_to_pvh(pa);
2301 pv = pmap_pvh_remove(pvh, pmap, va);
2302 KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
2303 m = PHYS_TO_VM_PAGE(pa);
2304 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2306 /* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
2307 PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
2308 va_last = va + L2_SIZE - PAGE_SIZE;
2310 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2311 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
2312 pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
2313 for (field = 0; field < _NPCM; field++) {
2314 while (pc->pc_map[field]) {
2315 bit = ffsl(pc->pc_map[field]) - 1;
2316 pc->pc_map[field] &= ~(1ul << bit);
2317 pv = &pc->pc_pventry[field * 64 + bit];
2321 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2322 ("pmap_pv_demote_l2: page %p is not managed", m));
2323 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2329 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2330 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2333 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
2334 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2335 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2337 PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
2338 PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
2342 * First find and then destroy the pv entry for the specified pmap and virtual
2343 * address. This operation can be performed on pv lists for either 4KB or 2MB page mappings.
2347 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2351 pv = pmap_pvh_remove(pvh, pmap, va);
2352 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2353 free_pv_entry(pmap, pv);
2357 * Conditionally create the PV entry for a 4KB page mapping if the required
2358 * memory can be allocated without resorting to reclamation.
2361 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
2362 struct rwlock **lockp)
2366 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2367 /* Pass NULL instead of the lock pointer to disable reclamation. */
2368 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
2370 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2371 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2379 * Create the PV entry for a 2MB page mapping. Always returns true unless the
2380 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
2381 * false if the PV entry cannot be allocated without resorting to reclamation.
2384 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
2385 struct rwlock **lockp)
2387 struct md_page *pvh;
2391 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2392 /* Pass NULL instead of the lock pointer to disable reclamation. */
2393 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
2394 NULL : lockp)) == NULL)
2397 pa = l2e & ~ATTR_MASK;
2398 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2399 pvh = pa_to_pvh(pa);
2400 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2406 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
2408 pt_entry_t newl2, oldl2;
2412 KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
2413 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
2414 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2416 ml3 = pmap_remove_pt_page(pmap, va);
2418 panic("pmap_remove_kernel_l2: Missing pt page");
2420 ml3pa = VM_PAGE_TO_PHYS(ml3);
2421 newl2 = ml3pa | L2_TABLE;
2424 * If this page table page was unmapped by a promotion, then it
2425 * contains valid mappings. Zero it to invalidate those mappings.
2427 if (ml3->valid != 0)
2428 pagezero((void *)PHYS_TO_DMAP(ml3pa));
2431 * Demote the mapping. The caller must have already invalidated the
2432 * mapping (i.e., the "break" in break-before-make).
2434 oldl2 = pmap_load_store(l2, newl2);
2435 KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
2436 __func__, l2, oldl2));
2440 * pmap_remove_l2: Unmap a level 2 superpage (2MB block mapping).
2443 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
2444 pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
2446 struct md_page *pvh;
2448 vm_offset_t eva, va;
2451 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2452 KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
2453 old_l2 = pmap_load_clear(l2);
2454 KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
2455 ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
2458 * Since a promotion must break the 4KB page mappings before making
2459 * the 2MB page mapping, a pmap_invalidate_page() suffices.
2461 pmap_invalidate_page(pmap, sva);
2463 if (old_l2 & ATTR_SW_WIRED)
2464 pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
2465 pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
2466 if (old_l2 & ATTR_SW_MANAGED) {
2467 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK);
2468 pvh = pa_to_pvh(old_l2 & ~ATTR_MASK);
2469 pmap_pvh_free(pvh, pmap, sva);
2470 eva = sva + L2_SIZE;
2471 for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
2472 va < eva; va += PAGE_SIZE, m++) {
2473 if (pmap_pte_dirty(old_l2))
2475 if (old_l2 & ATTR_AF)
2476 vm_page_aflag_set(m, PGA_REFERENCED);
2477 if (TAILQ_EMPTY(&m->md.pv_list) &&
2478 TAILQ_EMPTY(&pvh->pv_list))
2479 vm_page_aflag_clear(m, PGA_WRITEABLE);
2482 if (pmap == kernel_pmap) {
2483 pmap_remove_kernel_l2(pmap, l2, sva);
2485 ml3 = pmap_remove_pt_page(pmap, sva);
2487 KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
2488 ("pmap_remove_l2: l3 page not promoted"));
2489 pmap_resident_count_dec(pmap, 1);
2490 KASSERT(ml3->wire_count == NL3PG,
2491 ("pmap_remove_l2: l3 page wire count error"));
2492 ml3->wire_count = 0;
2493 pmap_add_delayed_free_list(ml3, free, FALSE);
2496 return (pmap_unuse_pt(pmap, sva, l1e, free));
2500 * pmap_remove_l3: Unmap a single 4KB page belonging to a process.
2503 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2504 pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
2506 struct md_page *pvh;
2510 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2511 old_l3 = pmap_load_clear(l3);
2512 pmap_invalidate_page(pmap, va);
2513 if (old_l3 & ATTR_SW_WIRED)
2514 pmap->pm_stats.wired_count -= 1;
2515 pmap_resident_count_dec(pmap, 1);
2516 if (old_l3 & ATTR_SW_MANAGED) {
2517 m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2518 if (pmap_pte_dirty(old_l3))
2520 if (old_l3 & ATTR_AF)
2521 vm_page_aflag_set(m, PGA_REFERENCED);
2522 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2523 pmap_pvh_free(&m->md, pmap, va);
2524 if (TAILQ_EMPTY(&m->md.pv_list) &&
2525 (m->flags & PG_FICTITIOUS) == 0) {
2526 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2527 if (TAILQ_EMPTY(&pvh->pv_list))
2528 vm_page_aflag_clear(m, PGA_WRITEABLE);
2531 return (pmap_unuse_pt(pmap, va, l2e, free));
2535 * Remove the specified range of addresses from the L3 page table that is
2536 * identified by the given L2 entry.
2539 pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
2540 vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
2542 struct md_page *pvh;
2543 struct rwlock *new_lock;
2544 pt_entry_t *l3, old_l3;
2548 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2549 KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
2550 ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
2552 for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
2553 if (!pmap_l3_valid(pmap_load(l3))) {
2555 pmap_invalidate_range(pmap, va, sva);
2560 old_l3 = pmap_load_clear(l3);
2561 if ((old_l3 & ATTR_SW_WIRED) != 0)
2562 pmap->pm_stats.wired_count--;
2563 pmap_resident_count_dec(pmap, 1);
2564 if ((old_l3 & ATTR_SW_MANAGED) != 0) {
2565 m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2566 if (pmap_pte_dirty(old_l3))
2568 if ((old_l3 & ATTR_AF) != 0)
2569 vm_page_aflag_set(m, PGA_REFERENCED);
2570 new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m));
2571 if (new_lock != *lockp) {
2572 if (*lockp != NULL) {
2574 * Pending TLB invalidations must be
2575 * performed before the PV list lock is
2576 * released. Otherwise, a concurrent
2577 * pmap_remove_all() on a physical page
2578 * could return while a stale TLB entry
2579 * still provides access to that page.
2582 pmap_invalidate_range(pmap, va,
2591 pmap_pvh_free(&m->md, pmap, sva);
2592 if (TAILQ_EMPTY(&m->md.pv_list) &&
2593 (m->flags & PG_FICTITIOUS) == 0) {
2594 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2595 if (TAILQ_EMPTY(&pvh->pv_list))
2596 vm_page_aflag_clear(m, PGA_WRITEABLE);
2601 if (pmap_unuse_pt(pmap, sva, l2e, free)) {
2607 pmap_invalidate_range(pmap, va, sva);
2611 * Remove the given range of addresses from the specified map.
2613 * It is assumed that the start and end are properly
2614 * rounded to the page size.
2617 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2619 struct rwlock *lock;
2620 vm_offset_t va_next;
2621 pd_entry_t *l0, *l1, *l2;
2622 pt_entry_t l3_paddr;
2623 struct spglist free;
2626 * Perform an unsynchronized read. This is, however, safe.
2628 if (pmap->pm_stats.resident_count == 0)
2636 for (; sva < eva; sva = va_next) {
2638 if (pmap->pm_stats.resident_count == 0)
2641 l0 = pmap_l0(pmap, sva);
2642 if (pmap_load(l0) == 0) {
2643 va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2649 l1 = pmap_l0_to_l1(l0, sva);
2650 if (pmap_load(l1) == 0) {
2651 va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2658 * Calculate the virtual address boundary of the next L2 table.
2660 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2664 l2 = pmap_l1_to_l2(l1, sva);
2668 l3_paddr = pmap_load(l2);
2670 if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
2671 if (sva + L2_SIZE == va_next && eva >= va_next) {
2672 pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
2675 } else if (pmap_demote_l2_locked(pmap, l2, sva,
2678 l3_paddr = pmap_load(l2);
2682 * Weed out invalid mappings.
2684 if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
2688 * Limit our scan to either the end of the va represented
2689 * by the current page table page, or to the end of the
2690 * range being removed.
2695 pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
2701 vm_page_free_pages_toq(&free, true);
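/*
 * Usage sketch (hypothetical, not from the original source): a caller that
 * honors the rounding assumption documented above might prepare its bounds
 * with the standard page-rounding macros before calling pmap_remove():
 *
 *	vm_offset_t sva = trunc_page(addr);
 *	vm_offset_t eva = round_page(addr + len);
 *
 *	pmap_remove(pmap, sva, eva);
 *
 * trunc_page() rounds down and round_page() rounds up to a PAGE_SIZE
 * boundary, so every partially covered page is included in the removal.
 */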
2705 * Routine: pmap_remove_all
2707 * Removes this physical page from
2708 * all physical maps in which it resides.
2709 * Reflects back modify bits to the pager.
2712 * Original versions of this routine were very
2713 * inefficient because they iteratively called
2714 * pmap_remove (slow...)
2718 pmap_remove_all(vm_page_t m)
2720 struct md_page *pvh;
2723 struct rwlock *lock;
2724 pd_entry_t *pde, tpde;
2725 pt_entry_t *pte, tpte;
2727 struct spglist free;
2728 int lvl, pvh_gen, md_gen;
2730 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2731 ("pmap_remove_all: page %p is not managed", m));
2733 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2734 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2735 pa_to_pvh(VM_PAGE_TO_PHYS(m));
2738 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2740 if (!PMAP_TRYLOCK(pmap)) {
2741 pvh_gen = pvh->pv_gen;
2745 if (pvh_gen != pvh->pv_gen) {
2752 pte = pmap_pte(pmap, va, &lvl);
2753 KASSERT(pte != NULL,
2754 ("pmap_remove_all: no page table entry found"));
2756 ("pmap_remove_all: invalid pte level %d", lvl));
2758 pmap_demote_l2_locked(pmap, pte, va, &lock);
2761 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2763 if (!PMAP_TRYLOCK(pmap)) {
2764 pvh_gen = pvh->pv_gen;
2765 md_gen = m->md.pv_gen;
2769 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2775 pmap_resident_count_dec(pmap, 1);
2777 pde = pmap_pde(pmap, pv->pv_va, &lvl);
2778 KASSERT(pde != NULL,
2779 ("pmap_remove_all: no page directory entry found"));
2781 ("pmap_remove_all: invalid pde level %d", lvl));
2782 tpde = pmap_load(pde);
2784 pte = pmap_l2_to_l3(pde, pv->pv_va);
2785 tpte = pmap_load_clear(pte);
2786 pmap_invalidate_page(pmap, pv->pv_va);
2787 if (tpte & ATTR_SW_WIRED)
2788 pmap->pm_stats.wired_count--;
2789 if ((tpte & ATTR_AF) != 0)
2790 vm_page_aflag_set(m, PGA_REFERENCED);
2793 * Update the vm_page_t clean and reference bits.
2795 if (pmap_pte_dirty(tpte))
2797 pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
2798 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2800 free_pv_entry(pmap, pv);
2803 vm_page_aflag_clear(m, PGA_WRITEABLE);
2805 vm_page_free_pages_toq(&free, true);
2809 * pmap_protect_l2: Apply the requested protection to a 2MB page mapping in a pmap.
2812 pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
2818 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2819 KASSERT((sva & L2_OFFSET) == 0,
2820 ("pmap_protect_l2: sva is not 2mpage aligned"));
2821 old_l2 = pmap_load(l2);
2822 KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
2823 ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2));
2826 * Return if the L2 entry already has the desired access restrictions in place.
2830 if ((old_l2 & mask) == nbits)
2834 * When a dirty read/write superpage mapping is write protected,
2835 * update the dirty field of each of the superpage's constituent 4KB pages.
2838 if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
2839 (nbits & ATTR_AP(ATTR_AP_RO)) != 0 && pmap_pte_dirty(old_l2)) {
2840 m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
2841 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
2845 if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
2849 * Since a promotion must break the 4KB page mappings before making
2850 * the 2MB page mapping, a pmap_invalidate_page() suffices.
2852 pmap_invalidate_page(pmap, sva);
2856 * Set the physical protection on the
2857 * specified range of this map as requested.
2860 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2862 vm_offset_t va, va_next;
2863 pd_entry_t *l0, *l1, *l2;
2864 pt_entry_t *l3p, l3, mask, nbits;
2866 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
2867 if (prot == VM_PROT_NONE) {
2868 pmap_remove(pmap, sva, eva);
2873 if ((prot & VM_PROT_WRITE) == 0) {
2874 mask |= ATTR_AP_RW_BIT | ATTR_SW_DBM;
2875 nbits |= ATTR_AP(ATTR_AP_RO);
2877 if ((prot & VM_PROT_EXECUTE) == 0) {
2885 for (; sva < eva; sva = va_next) {
2887 l0 = pmap_l0(pmap, sva);
2888 if (pmap_load(l0) == 0) {
2889 va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2895 l1 = pmap_l0_to_l1(l0, sva);
2896 if (pmap_load(l1) == 0) {
2897 va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2903 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2907 l2 = pmap_l1_to_l2(l1, sva);
2908 if (pmap_load(l2) == 0)
2911 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
2912 if (sva + L2_SIZE == va_next && eva >= va_next) {
2913 pmap_protect_l2(pmap, l2, sva, mask, nbits);
2915 } else if (pmap_demote_l2(pmap, l2, sva) == NULL)
2918 KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
2919 ("pmap_protect: Invalid L2 entry after demotion"));
2925 for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
2927 l3 = pmap_load(l3p);
2930 * Go to the next L3 entry if the current one is
2931 * invalid or already has the desired access
2932 * restrictions in place. (The latter case occurs
2933 * frequently. For example, in a "buildworld"
2934 * workload, almost 1 out of 4 L3 entries already
2935 * have the desired restrictions.)
2937 if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
2938 if (va != va_next) {
2939 pmap_invalidate_range(pmap, va, sva);
2946 * When a dirty read/write mapping is write protected,
2947 * update the page's dirty field.
2949 if ((l3 & ATTR_SW_MANAGED) != 0 &&
2950 (nbits & ATTR_AP(ATTR_AP_RO)) != 0 &&
2952 vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
2954 if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
2960 pmap_invalidate_range(pmap, va, sva);
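/*
 * Usage sketch (hypothetical, not from the original source): write-protecting
 * a range while keeping it readable and executable reduces to
 *
 *	pmap_protect(pmap, sva, eva, VM_PROT_READ | VM_PROT_EXECUTE);
 *
 * which builds mask/nbits so that ATTR_AP_RW_BIT and ATTR_SW_DBM are cleared
 * and ATTR_AP(ATTR_AP_RO) is set while the execute permission bits are left
 * alone; VM_PROT_NONE instead falls through to pmap_remove() above.
 */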
2966 * Inserts the specified page table page into the specified pmap's collection
2967 * of idle page table pages. Each of a pmap's page table pages is responsible
2968 * for mapping a distinct range of virtual addresses. The pmap's collection is
2969 * ordered by this virtual address range.
2971 * If "promoted" is false, then the page table page "mpte" must be zero filled.
2974 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
2977 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2978 mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
2979 return (vm_radix_insert(&pmap->pm_root, mpte));
2983 * Removes the page table page mapping the specified virtual address from the
2984 * specified pmap's collection of idle page table pages, and returns it.
2985 * Returns NULL if there is no page table page corresponding to the
2986 * specified virtual address.
2988 static __inline vm_page_t
2989 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
2992 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2993 return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
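/*
 * Illustrative note (assuming the usual 4KB granule, where pmap_l2_pindex()
 * reduces to va >> L2_SHIFT with L2_SHIFT == 21): the radix trie is keyed by
 * the 2MB region containing "va", so, for example, va = 0x40000000 and
 * va = 0x401ff000 both yield pindex 0x200 and refer to the same idle page
 * table page.
 */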
2997 * Performs a break-before-make update of a pmap entry. This is needed when
2998 * either promoting or demoting pages to ensure the TLB doesn't get into an
2999 * inconsistent state.
3002 pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
3003 vm_offset_t va, vm_size_t size)
3007 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3010 * Ensure we don't get switched out with the page table in an
3011 * inconsistent state. We also need to ensure no interrupts fire
3012 * as they may make use of an address we are about to invalidate.
3014 intr = intr_disable();
3017 /* Clear the old mapping */
3019 pmap_invalidate_range_nopin(pmap, va, va + size);
3021 /* Create the new mapping */
3022 pmap_store(pte, newpte);
3029 #if VM_NRESERVLEVEL > 0
3031 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
3032 * replace the many pv entries for the 4KB page mappings by a single pv entry
3033 * for the 2MB page mapping.
3036 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3037 struct rwlock **lockp)
3039 struct md_page *pvh;
3041 vm_offset_t va_last;
3044 KASSERT((pa & L2_OFFSET) == 0,
3045 ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
3046 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3049 * Transfer the first page's pv entry for this mapping to the 2mpage's
3050 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
3051 * a transfer avoids the possibility that get_pv_entry() calls
3052 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
3053 * mappings that is being promoted.
3055 m = PHYS_TO_VM_PAGE(pa);
3056 va = va & ~L2_OFFSET;
3057 pv = pmap_pvh_remove(&m->md, pmap, va);
3058 KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
3059 pvh = pa_to_pvh(pa);
3060 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3062 /* Free the remaining NPTEPG - 1 pv entries. */
3063 va_last = va + L2_SIZE - PAGE_SIZE;
3067 pmap_pvh_free(&m->md, pmap, va);
3068 } while (va < va_last);
3072 * Tries to promote the 512, contiguous 4KB page mappings that are within a
3073 * single level 2 table entry to a single 2MB page mapping. For promotion
3074 * to occur, two conditions must be met: (1) the 4KB page mappings must map
3075 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
3076 * identical characteristics.
3079 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
3080 struct rwlock **lockp)
3082 pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
3086 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3088 sva = va & ~L2_OFFSET;
3089 firstl3 = pmap_l2_to_l3(l2, sva);
3090 newl2 = pmap_load(firstl3);
3093 if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF) {
3094 atomic_add_long(&pmap_l2_p_failures, 1);
3095 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3096 " in pmap %p", va, pmap);
3100 if ((newl2 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
3101 (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
3102 if (!atomic_fcmpset_64(l2, &newl2, newl2 & ~ATTR_SW_DBM))
3104 newl2 &= ~ATTR_SW_DBM;
3107 pa = newl2 + L2_SIZE - PAGE_SIZE;
3108 for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
3109 oldl3 = pmap_load(l3);
3111 if ((oldl3 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
3112 (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
3113 if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
3116 oldl3 &= ~ATTR_SW_DBM;
3119 atomic_add_long(&pmap_l2_p_failures, 1);
3120 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3121 " in pmap %p", va, pmap);
3128 * Save the page table page in its current state until the L2
3129 * mapping the superpage is demoted by pmap_demote_l2() or
3130 * destroyed by pmap_remove_l3().
3132 mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3133 KASSERT(mpte >= vm_page_array &&
3134 mpte < &vm_page_array[vm_page_array_size],
3135 ("pmap_promote_l2: page table page is out of range"));
3136 KASSERT(mpte->pindex == pmap_l2_pindex(va),
3137 ("pmap_promote_l2: page table page's pindex is wrong"));
3138 if (pmap_insert_pt_page(pmap, mpte, true)) {
3139 atomic_add_long(&pmap_l2_p_failures, 1);
3141 "pmap_promote_l2: failure for va %#lx in pmap %p", va,
3146 if ((newl2 & ATTR_SW_MANAGED) != 0)
3147 pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
3149 newl2 &= ~ATTR_DESCR_MASK;
3152 pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
3154 atomic_add_long(&pmap_l2_promotions, 1);
3155 CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
3158 #endif /* VM_NRESERVLEVEL > 0 */
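/*
 * Worked example (illustrative only, assuming the usual 4KB granule where
 * L2_SIZE == 2MB and Ln_ENTRIES == 512): a promotion candidate spans the
 * range [va & ~L2_OFFSET, (va & ~L2_OFFSET) + L2_SIZE).  The first L3 entry
 * must map a 2MB-aligned physical address with ATTR_AF set, the i-th entry
 * must map that address + i * PAGE_SIZE, and all 512 entries must carry
 * identical attributes; otherwise the checks above bump pmap_l2_p_failures
 * and the mappings are left as 4KB pages.
 */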
3161 * Insert the given physical page (p) at
3162 * the specified virtual address (v) in the
3163 * target physical map with the protection requested.
3165 * If specified, the page will be wired down, meaning
3166 * that the related pte can not be reclaimed.
3168 * NB: This is the only routine which MAY NOT lazy-evaluate
3169 * or lose information. That is, this routine must actually
3170 * insert this page into the given map NOW.
3173 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3174 u_int flags, int8_t psind)
3176 struct rwlock *lock;
3178 pt_entry_t new_l3, orig_l3;
3179 pt_entry_t *l2, *l3;
3186 va = trunc_page(va);
3187 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
3188 VM_OBJECT_ASSERT_LOCKED(m->object);
3189 pa = VM_PAGE_TO_PHYS(m);
3190 new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
3192 if ((prot & VM_PROT_WRITE) == 0)
3193 new_l3 |= ATTR_AP(ATTR_AP_RO);
3194 if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
3196 if ((flags & PMAP_ENTER_WIRED) != 0)
3197 new_l3 |= ATTR_SW_WIRED;
3198 if (va < VM_MAXUSER_ADDRESS)
3199 new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
3200 if ((m->oflags & VPO_UNMANAGED) == 0) {
3201 new_l3 |= ATTR_SW_MANAGED;
3202 if ((prot & VM_PROT_WRITE) != 0) {
3203 new_l3 |= ATTR_SW_DBM;
3204 if ((flags & VM_PROT_WRITE) == 0)
3205 new_l3 |= ATTR_AP(ATTR_AP_RO);
3209 CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
3214 /* Assert the required virtual and physical alignment. */
3215 KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
3216 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
3217 rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
3224 * In the case that a page table page is not
3225 * resident, we are creating it here.
3228 pde = pmap_pde(pmap, va, &lvl);
3229 if (pde != NULL && lvl == 2) {
3230 l3 = pmap_l2_to_l3(pde, va);
3231 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
3232 mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3236 } else if (pde != NULL && lvl == 1) {
3237 l2 = pmap_l1_to_l2(pde, va);
3238 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
3239 (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
3240 l3 = &l3[pmap_l3_index(va)];
3241 if (va < VM_MAXUSER_ADDRESS) {
3242 mpte = PHYS_TO_VM_PAGE(
3243 pmap_load(l2) & ~ATTR_MASK);
3248 /* We need to allocate an L3 table. */
3250 if (va < VM_MAXUSER_ADDRESS) {
3251 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
3254 * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order
3255 * to handle the possibility that a superpage mapping for "va"
3256 * was created while we slept.
3258 mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va),
3259 nosleep ? NULL : &lock);
3260 if (mpte == NULL && nosleep) {
3261 CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
3262 rv = KERN_RESOURCE_SHORTAGE;
3267 panic("pmap_enter: missing L3 table for kernel va %#lx", va);
3270 orig_l3 = pmap_load(l3);
3271 opa = orig_l3 & ~ATTR_MASK;
3275 * Is the specified virtual address already mapped?
3277 if (pmap_l3_valid(orig_l3)) {
3279 * Wiring change, just update stats. We don't worry about
3280 * wiring PT pages as they remain resident as long as there
3281 * are valid mappings in them. Hence, if a user page is wired,
3282 * the PT page will be also.
3284 if ((flags & PMAP_ENTER_WIRED) != 0 &&
3285 (orig_l3 & ATTR_SW_WIRED) == 0)
3286 pmap->pm_stats.wired_count++;
3287 else if ((flags & PMAP_ENTER_WIRED) == 0 &&
3288 (orig_l3 & ATTR_SW_WIRED) != 0)
3289 pmap->pm_stats.wired_count--;
3292 * Remove the extra PT page reference.
3296 KASSERT(mpte->wire_count > 0,
3297 ("pmap_enter: missing reference to page table page,"
3302 * Has the physical page changed?
3306 * No, might be a protection or wiring change.
3308 if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3309 (new_l3 & ATTR_SW_DBM) != 0)
3310 vm_page_aflag_set(m, PGA_WRITEABLE);
3315 * The physical page has changed. Temporarily invalidate
3318 orig_l3 = pmap_load_clear(l3);
3319 KASSERT((orig_l3 & ~ATTR_MASK) == opa,
3320 ("pmap_enter: unexpected pa update for %#lx", va));
3321 if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
3322 om = PHYS_TO_VM_PAGE(opa);
3325 * The pmap lock is sufficient to synchronize with
3326 * concurrent calls to pmap_page_test_mappings() and
3327 * pmap_ts_referenced().
3329 if (pmap_pte_dirty(orig_l3))
3331 if ((orig_l3 & ATTR_AF) != 0)
3332 vm_page_aflag_set(om, PGA_REFERENCED);
3333 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3334 pv = pmap_pvh_remove(&om->md, pmap, va);
3335 if ((m->oflags & VPO_UNMANAGED) != 0)
3336 free_pv_entry(pmap, pv);
3337 if ((om->aflags & PGA_WRITEABLE) != 0 &&
3338 TAILQ_EMPTY(&om->md.pv_list) &&
3339 ((om->flags & PG_FICTITIOUS) != 0 ||
3340 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3341 vm_page_aflag_clear(om, PGA_WRITEABLE);
3343 pmap_invalidate_page(pmap, va);
3347 * Increment the counters.
3349 if ((new_l3 & ATTR_SW_WIRED) != 0)
3350 pmap->pm_stats.wired_count++;
3351 pmap_resident_count_inc(pmap, 1);
3354 * Enter on the PV list if part of our managed memory.
3356 if ((m->oflags & VPO_UNMANAGED) == 0) {
3358 pv = get_pv_entry(pmap, &lock);
3361 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3362 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3364 if ((new_l3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
3365 vm_page_aflag_set(m, PGA_WRITEABLE);
3370 * Sync the icache if exec permission is requested and the page's attribute
3371 * is VM_MEMATTR_WRITE_BACK. Do it now, before the mapping is stored and made
3372 * valid for the hardware table walk. If done later, other CPUs could
3373 * access this page before the caches are properly synced.
3374 * Don't do it for kernel memory, which is mapped with exec
3375 * permission even if the memory isn't going to hold executable
3376 * code. The only time an icache sync is needed for kernel memory is after a
3377 * kernel module is loaded and its relocation info is processed,
3378 * and that is done in elf_cpu_load_file().
3380 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3381 m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
3382 (opa != pa || (orig_l3 & ATTR_XN)))
3383 cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3386 * Update the L3 entry
3388 if (pmap_l3_valid(orig_l3)) {
3389 KASSERT(opa == pa, ("pmap_enter: invalid update"));
3390 if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
3391 /* same PA, different attributes */
3392 /* XXXMJ need to reload orig_l3 for hardware DBM. */
3393 pmap_load_store(l3, new_l3);
3394 pmap_invalidate_page(pmap, va);
3395 if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3396 pmap_pte_dirty(orig_l3))
3401 * This can happen if multiple threads simultaneously
3402 * access a page that is not yet mapped. This is bad for performance
3403 * since it can cause a full demotion-NOP-promotion cycle.
3405 * Other possible reasons are:
3406 * - the VM and pmap memory layouts have diverged
3407 * - a TLB flush is missing somewhere and the CPU doesn't see the actual mapping.
3410 CTR4(KTR_PMAP, "%s: already mapped page - "
3411 "pmap %p va 0x%#lx pte 0x%lx",
3412 __func__, pmap, va, new_l3);
3416 pmap_store(l3, new_l3);
3420 #if VM_NRESERVLEVEL > 0
3421 if (pmap != pmap_kernel() &&
3422 (mpte == NULL || mpte->wire_count == NL3PG) &&
3423 pmap_ps_enabled(pmap) &&
3424 (m->flags & PG_FICTITIOUS) == 0 &&
3425 vm_reserv_level_iffullpop(m) == 0) {
3426 pmap_promote_l2(pmap, pde, va, &lock);
3439 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
3440 * if successful. Returns false if (1) a page table page cannot be allocated
3441 * without sleeping, (2) a mapping already exists at the specified virtual
3442 * address, or (3) a PV entry cannot be allocated without reclaiming another
3446 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3447 struct rwlock **lockp)
3451 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3453 new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
3454 ATTR_IDX(m->md.pv_memattr) | ATTR_AP(ATTR_AP_RO) | L2_BLOCK);
3455 if ((m->oflags & VPO_UNMANAGED) == 0) {
3456 new_l2 |= ATTR_SW_MANAGED;
3459 if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
3461 if (va < VM_MAXUSER_ADDRESS)
3462 new_l2 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
3463 return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
3464 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3469 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
3470 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3471 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3472 * a mapping already exists at the specified virtual address. Returns
3473 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3474 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
3475 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3477 * The parameter "m" is only used when creating a managed, writeable mapping.
3480 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
3481 vm_page_t m, struct rwlock **lockp)
3483 struct spglist free;
3484 pd_entry_t *l2, old_l2;
3487 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3489 if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
3490 NULL : lockp)) == NULL) {
3491 CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
3493 return (KERN_RESOURCE_SHORTAGE);
3496 l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
3497 l2 = &l2[pmap_l2_index(va)];
3498 if ((old_l2 = pmap_load(l2)) != 0) {
3499 KASSERT(l2pg->wire_count > 1,
3500 ("pmap_enter_l2: l2pg's wire count is too low"));
3501 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
3504 "pmap_enter_l2: failure for va %#lx in pmap %p",
3506 return (KERN_FAILURE);
3509 if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
3510 (void)pmap_remove_l2(pmap, l2, va,
3511 pmap_load(pmap_l1(pmap, va)), &free, lockp);
3513 pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
3515 vm_page_free_pages_toq(&free, true);
3516 if (va >= VM_MAXUSER_ADDRESS) {
3518 * Both pmap_remove_l2() and pmap_remove_l3_range()
3519 * will leave the kernel page table page zero filled.
3520 * Nonetheless, the TLB could have an intermediate
3521 * entry for the kernel page table page.
3523 mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3524 if (pmap_insert_pt_page(pmap, mt, false))
3525 panic("pmap_enter_l2: trie insert failed");
3527 pmap_invalidate_page(pmap, va);
3529 KASSERT(pmap_load(l2) == 0,
3530 ("pmap_enter_l2: non-zero L2 entry %p", l2));
3533 if ((new_l2 & ATTR_SW_MANAGED) != 0) {
3535 * Abort this mapping if its PV entry could not be created.
3537 if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
3539 if (pmap_unwire_l3(pmap, va, l2pg, &free)) {
3541 * Although "va" is not mapped, the TLB could
3542 * nonetheless have intermediate entries that
3543 * refer to the freed page table pages.
3544 * Invalidate those entries.
3546 * XXX redundant invalidation (See
3547 * _pmap_unwire_l3().)
3549 pmap_invalidate_page(pmap, va);
3550 vm_page_free_pages_toq(&free, true);
3553 "pmap_enter_l2: failure for va %#lx in pmap %p",
3555 return (KERN_RESOURCE_SHORTAGE);
3557 if ((new_l2 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
3558 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3559 vm_page_aflag_set(mt, PGA_WRITEABLE);
3563 * Increment counters.
3565 if ((new_l2 & ATTR_SW_WIRED) != 0)
3566 pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
3567 pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
3570 * Map the superpage.
3572 pmap_store(l2, new_l2);
3575 atomic_add_long(&pmap_l2_mappings, 1);
3576 CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
3579 return (KERN_SUCCESS);
3583 * Maps a sequence of resident pages belonging to the same object.
3584 * The sequence begins with the given page m_start. This page is
3585 * mapped at the given virtual address start. Each subsequent page is
3586 * mapped at a virtual address that is offset from start by the same
3587 * amount as the page is offset from m_start within the object. The
3588 * last page in the sequence is the page with the largest offset from
3589 * m_start that can be mapped at a virtual address less than the given
3590 * virtual address end. Not every virtual page between start and end
3591 * is mapped; only those for which a resident page exists with the
3592 * corresponding offset from m_start are mapped.
3595 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3596 vm_page_t m_start, vm_prot_t prot)
3598 struct rwlock *lock;
3601 vm_pindex_t diff, psize;
3603 VM_OBJECT_ASSERT_LOCKED(m_start->object);
3605 psize = atop(end - start);
3610 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3611 va = start + ptoa(diff);
3612 if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
3613 m->psind == 1 && pmap_ps_enabled(pmap) &&
3614 pmap_enter_2mpage(pmap, va, m, prot, &lock))
3615 m = &m[L2_SIZE / PAGE_SIZE - 1];
3617 mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
3619 m = TAILQ_NEXT(m, listq);
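/*
 * Worked example (illustrative only): if start is 0x20000000 and m_start has
 * pindex 5, then a resident page with pindex 8 gives diff = 3 and is entered
 * at va = start + ptoa(3) = 0x20003000 with 4KB pages; a 2MB-aligned run
 * whose first page has psind == 1 is instead entered in one step via
 * pmap_enter_2mpage(), and pages whose offset would place them at or beyond
 * "end" are skipped because diff >= psize.
 */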
3627 * this code makes some *MAJOR* assumptions:
3628 * 1. Current pmap & pmap exists.
3629 * 2. Not wired.
3630 * 3. Read access.
3631 * 4. No page table pages.
3632 * but is *MUCH* faster than pmap_enter...
3636 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3638 struct rwlock *lock;
3642 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
3649 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3650 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
3652 struct spglist free;
3654 pt_entry_t *l2, *l3, l3_val;
3658 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3659 (m->oflags & VPO_UNMANAGED) != 0,
3660 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3661 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3663 CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
3665 * In the case that a page table page is not
3666 * resident, we are creating it here.
3668 if (va < VM_MAXUSER_ADDRESS) {
3669 vm_pindex_t l2pindex;
3672 * Calculate pagetable page index
3674 l2pindex = pmap_l2_pindex(va);
3675 if (mpte && (mpte->pindex == l2pindex)) {
3681 pde = pmap_pde(pmap, va, &lvl);
3684 * If the page table page is mapped, we just increment
3685 * the hold count, and activate it. Otherwise, we
3686 * attempt to allocate a page table page. If this
3687 * attempt fails, we don't retry. Instead, we give up.
3690 l2 = pmap_l1_to_l2(pde, va);
3691 if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
3695 if (lvl == 2 && pmap_load(pde) != 0) {
3697 PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3701 * Pass NULL instead of the PV list lock
3702 * pointer, because we don't intend to sleep.
3704 mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
3709 l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3710 l3 = &l3[pmap_l3_index(va)];
3713 pde = pmap_pde(kernel_pmap, va, &lvl);
3714 KASSERT(pde != NULL,
3715 ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
3718 ("pmap_enter_quick_locked: Invalid level %d", lvl));
3719 l3 = pmap_l2_to_l3(pde, va);
3723 * Abort if a mapping already exists.
3725 if (pmap_load(l3) != 0) {
3734 * Enter on the PV list if part of our managed memory.
3736 if ((m->oflags & VPO_UNMANAGED) == 0 &&
3737 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3740 if (pmap_unwire_l3(pmap, va, mpte, &free)) {
3741 pmap_invalidate_page(pmap, va);
3742 vm_page_free_pages_toq(&free, true);
3750 * Increment counters
3752 pmap_resident_count_inc(pmap, 1);
3754 pa = VM_PAGE_TO_PHYS(m);
3755 l3_val = pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
3756 ATTR_AP(ATTR_AP_RO) | L3_PAGE;
3757 if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
3759 else if (va < VM_MAXUSER_ADDRESS)
3763 * Now validate mapping with RO protection
3765 if ((m->oflags & VPO_UNMANAGED) == 0) {
3766 l3_val |= ATTR_SW_MANAGED;
3770 /* Sync icache before the mapping is stored to PTE */
3771 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3772 m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
3773 cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3775 pmap_store(l3, l3_val);
3782 * This code maps large physical mmap regions into the
3783 * processor address space. Note that some shortcuts
3784 * are taken, but the code works.
3787 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3788 vm_pindex_t pindex, vm_size_t size)
3791 VM_OBJECT_ASSERT_WLOCKED(object);
3792 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3793 ("pmap_object_init_pt: non-device object"));
3797 * Clear the wired attribute from the mappings for the specified range of
3798 * addresses in the given pmap. Every valid mapping within that range
3799 * must have the wired attribute set. In contrast, invalid mappings
3800 * cannot have the wired attribute set, so they are ignored.
3802 * The wired attribute of the page table entry is not a hardware feature,
3803 * so there is no need to invalidate any TLB entries.
3806 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3808 vm_offset_t va_next;
3809 pd_entry_t *l0, *l1, *l2;
3813 for (; sva < eva; sva = va_next) {
3814 l0 = pmap_l0(pmap, sva);
3815 if (pmap_load(l0) == 0) {
3816 va_next = (sva + L0_SIZE) & ~L0_OFFSET;
3822 l1 = pmap_l0_to_l1(l0, sva);
3823 if (pmap_load(l1) == 0) {
3824 va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3830 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3834 l2 = pmap_l1_to_l2(l1, sva);
3835 if (pmap_load(l2) == 0)
3838 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
3839 if ((pmap_load(l2) & ATTR_SW_WIRED) == 0)
3840 panic("pmap_unwire: l2 %#jx is missing "
3841 "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2));
3844 * Are we unwiring the entire large page? If not,
3845 * demote the mapping and fall through.
3847 if (sva + L2_SIZE == va_next && eva >= va_next) {
3848 pmap_clear_bits(l2, ATTR_SW_WIRED);
3849 pmap->pm_stats.wired_count -= L2_SIZE /
3852 } else if (pmap_demote_l2(pmap, l2, sva) == NULL)
3853 panic("pmap_unwire: demotion failed");
3855 KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
3856 ("pmap_unwire: Invalid l2 entry after demotion"));
3860 for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
3862 if (pmap_load(l3) == 0)
3864 if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
3865 panic("pmap_unwire: l3 %#jx is missing "
3866 "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
3869 * ATTR_SW_WIRED must be cleared atomically. Although
3870 * the pmap lock synchronizes access to ATTR_SW_WIRED,
3871 * the System MMU may write to the entry concurrently.
3873 pmap_clear_bits(l3, ATTR_SW_WIRED);
3874 pmap->pm_stats.wired_count--;
3881 * Copy the range specified by src_addr/len
3882 * from the source map to the range dst_addr/len
3883 * in the destination map.
3885 * This routine is only advisory and need not do anything.
3887 * Because the executable mappings created by this routine are copied,
3888 * it should not have to flush the instruction cache.
3891 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
3892 vm_offset_t src_addr)
3894 struct rwlock *lock;
3895 struct spglist free;
3896 pd_entry_t *l0, *l1, *l2, srcptepaddr;
3897 pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte;
3898 vm_offset_t addr, end_addr, va_next;
3899 vm_page_t dst_l2pg, dstmpte, srcmpte;
3901 if (dst_addr != src_addr)
3903 end_addr = src_addr + len;
3905 if (dst_pmap < src_pmap) {
3906 PMAP_LOCK(dst_pmap);
3907 PMAP_LOCK(src_pmap);
3909 PMAP_LOCK(src_pmap);
3910 PMAP_LOCK(dst_pmap);
3912 for (addr = src_addr; addr < end_addr; addr = va_next) {
3913 l0 = pmap_l0(src_pmap, addr);
3914 if (pmap_load(l0) == 0) {
3915 va_next = (addr + L0_SIZE) & ~L0_OFFSET;
3920 l1 = pmap_l0_to_l1(l0, addr);
3921 if (pmap_load(l1) == 0) {
3922 va_next = (addr + L1_SIZE) & ~L1_OFFSET;
3927 va_next = (addr + L2_SIZE) & ~L2_OFFSET;
3930 l2 = pmap_l1_to_l2(l1, addr);
3931 srcptepaddr = pmap_load(l2);
3932 if (srcptepaddr == 0)
3934 if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
3935 if ((addr & L2_OFFSET) != 0 ||
3936 addr + L2_SIZE > end_addr)
3938 dst_l2pg = pmap_alloc_l2(dst_pmap, addr, NULL);
3939 if (dst_l2pg == NULL)
3942 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_l2pg));
3943 l2 = &l2[pmap_l2_index(addr)];
3944 if (pmap_load(l2) == 0 &&
3945 ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
3946 pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
3947 PMAP_ENTER_NORECLAIM, &lock))) {
3948 mask = ATTR_AF | ATTR_SW_WIRED;
3950 if ((srcptepaddr & ATTR_SW_DBM) != 0)
3951 nbits |= ATTR_AP_RW_BIT;
3952 pmap_store(l2, (srcptepaddr & ~mask) | nbits);
3953 pmap_resident_count_inc(dst_pmap, L2_SIZE /
3955 atomic_add_long(&pmap_l2_mappings, 1);
3957 dst_l2pg->wire_count--;
3960 KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
3961 ("pmap_copy: invalid L2 entry"));
3962 srcptepaddr &= ~ATTR_MASK;
3963 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
3964 KASSERT(srcmpte->wire_count > 0,
3965 ("pmap_copy: source page table page is unused"));
3966 if (va_next > end_addr)
3968 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
3969 src_pte = &src_pte[pmap_l3_index(addr)];
3971 for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
3972 ptetemp = pmap_load(src_pte);
3975 * We only virtual copy managed pages.
3977 if ((ptetemp & ATTR_SW_MANAGED) == 0)
3980 if (dstmpte != NULL) {
3981 KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
3982 ("dstmpte pindex/addr mismatch"));
3983 dstmpte->wire_count++;
3984 } else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
3987 dst_pte = (pt_entry_t *)
3988 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
3989 dst_pte = &dst_pte[pmap_l3_index(addr)];
3990 if (pmap_load(dst_pte) == 0 &&
3991 pmap_try_insert_pv_entry(dst_pmap, addr,
3992 PHYS_TO_VM_PAGE(ptetemp & ~ATTR_MASK), &lock)) {
3994 * Clear the wired, modified, and accessed
3995 * (referenced) bits during the copy.
3997 mask = ATTR_AF | ATTR_SW_WIRED;
3999 if ((ptetemp & ATTR_SW_DBM) != 0)
4000 nbits |= ATTR_AP_RW_BIT;
4001 pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
4002 pmap_resident_count_inc(dst_pmap, 1);
4005 if (pmap_unwire_l3(dst_pmap, addr, dstmpte,
4008 * Although "addr" is not mapped,
4009 * the TLB could nonetheless have
4010 * intermediate entries that refer
4011 * to the freed page table pages.
4012 * Invalidate those entries.
4014 * XXX redundant invalidation
4016 pmap_invalidate_page(dst_pmap, addr);
4017 vm_page_free_pages_toq(&free, true);
4021 /* Have we copied all of the valid mappings? */
4022 if (dstmpte->wire_count >= srcmpte->wire_count)
4028 * XXX This barrier may not be needed because the destination pmap is
4035 PMAP_UNLOCK(src_pmap);
4036 PMAP_UNLOCK(dst_pmap);
4040 * pmap_zero_page zeros the specified hardware page by mapping
4041 * the page into KVM and using bzero to clear its contents.
4044 pmap_zero_page(vm_page_t m)
4046 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4048 pagezero((void *)va);
4052 * pmap_zero_page_area zeros the specified hardware page by mapping
4053 * the page into KVM and using bzero to clear its contents.
4055 * off and size may not cover an area beyond a single hardware page.
4058 pmap_zero_page_area(vm_page_t m, int off, int size)
4060 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4062 if (off == 0 && size == PAGE_SIZE)
4063 pagezero((void *)va);
4065 bzero((char *)va + off, size);
4069 * pmap_copy_page copies the specified (machine independent)
4070 * page by mapping the page into virtual memory and using
4071 * bcopy to copy the page, one machine dependent page at a time.
4075 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
4077 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
4078 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
4080 pagecopy((void *)src, (void *)dst);
4083 int unmapped_buf_allowed = 1;
4086 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
4087 vm_offset_t b_offset, int xfersize)
4091 vm_paddr_t p_a, p_b;
4092 vm_offset_t a_pg_offset, b_pg_offset;
4095 while (xfersize > 0) {
4096 a_pg_offset = a_offset & PAGE_MASK;
4097 m_a = ma[a_offset >> PAGE_SHIFT];
4098 p_a = m_a->phys_addr;
4099 b_pg_offset = b_offset & PAGE_MASK;
4100 m_b = mb[b_offset >> PAGE_SHIFT];
4101 p_b = m_b->phys_addr;
4102 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4103 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4104 if (__predict_false(!PHYS_IN_DMAP(p_a))) {
4105 panic("!DMAP a %lx", p_a);
4107 a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
4109 if (__predict_false(!PHYS_IN_DMAP(p_b))) {
4110 panic("!DMAP b %lx", p_b);
4112 b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
4114 bcopy(a_cp, b_cp, cnt);
4122 pmap_quick_enter_page(vm_page_t m)
4125 return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
4129 pmap_quick_remove_page(vm_offset_t addr)
4134 * Returns true if the pmap's pv is one of the first
4135 * 16 pvs linked to from this page. This count may
4136 * be changed upwards or downwards in the future; it
4137 * is only necessary that true be returned for a small
4138 * subset of pmaps for proper page aging.
4141 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4143 struct md_page *pvh;
4144 struct rwlock *lock;
4149 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4150 ("pmap_page_exists_quick: page %p is not managed", m));
4152 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4154 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4155 if (PV_PMAP(pv) == pmap) {
4163 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4164 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4165 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4166 if (PV_PMAP(pv) == pmap) {
4180 * pmap_page_wired_mappings:
4182 * Return the number of managed mappings to the given physical page that are wired.
4186 pmap_page_wired_mappings(vm_page_t m)
4188 struct rwlock *lock;
4189 struct md_page *pvh;
4193 int count, lvl, md_gen, pvh_gen;
4195 if ((m->oflags & VPO_UNMANAGED) != 0)
4197 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4201 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4203 if (!PMAP_TRYLOCK(pmap)) {
4204 md_gen = m->md.pv_gen;
4208 if (md_gen != m->md.pv_gen) {
4213 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4214 if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4218 if ((m->flags & PG_FICTITIOUS) == 0) {
4219 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4220 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4222 if (!PMAP_TRYLOCK(pmap)) {
4223 md_gen = m->md.pv_gen;
4224 pvh_gen = pvh->pv_gen;
4228 if (md_gen != m->md.pv_gen ||
4229 pvh_gen != pvh->pv_gen) {
4234 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4236 (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4246 * Destroy all managed, non-wired mappings in the given user-space
4247 * pmap. This pmap cannot be active on any processor besides the current one.
4250 * This function cannot be applied to the kernel pmap. Moreover, it
4251 * is not intended for general use. It is only to be used during
4252 * process termination. Consequently, it can be implemented in ways
4253 * that make it faster than pmap_remove(). First, it can more quickly
4254 * destroy mappings by iterating over the pmap's collection of PV
4255 * entries, rather than searching the page table. Second, it doesn't
4256 * have to test and clear the page table entries atomically, because
4257 * no processor is currently accessing the user address space. In
4258 * particular, a page table entry's dirty bit won't change state once
4259 * this function starts.
4262 pmap_remove_pages(pmap_t pmap)
4265 pt_entry_t *pte, tpte;
4266 struct spglist free;
4267 vm_page_t m, ml3, mt;
4269 struct md_page *pvh;
4270 struct pv_chunk *pc, *npc;
4271 struct rwlock *lock;
4273 uint64_t inuse, bitmask;
4274 int allfree, field, freed, idx, lvl;
4281 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4284 for (field = 0; field < _NPCM; field++) {
4285 inuse = ~pc->pc_map[field] & pc_freemask[field];
4286 while (inuse != 0) {
4287 bit = ffsl(inuse) - 1;
4288 bitmask = 1UL << bit;
4289 idx = field * 64 + bit;
4290 pv = &pc->pc_pventry[idx];
4293 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4294 KASSERT(pde != NULL,
4295 ("Attempting to remove an unmapped page"));
4299 pte = pmap_l1_to_l2(pde, pv->pv_va);
4300 tpte = pmap_load(pte);
4301 KASSERT((tpte & ATTR_DESCR_MASK) ==
4303 ("Attempting to remove an invalid "
4304 "block: %lx", tpte));
4305 tpte = pmap_load(pte);
4308 pte = pmap_l2_to_l3(pde, pv->pv_va);
4309 tpte = pmap_load(pte);
4310 KASSERT((tpte & ATTR_DESCR_MASK) ==
4312 ("Attempting to remove an invalid "
4313 "page: %lx", tpte));
4317 "Invalid page directory level: %d",
4322 * We cannot remove wired pages from a process' mapping at this time
4324 if (tpte & ATTR_SW_WIRED) {
4329 pa = tpte & ~ATTR_MASK;
4331 m = PHYS_TO_VM_PAGE(pa);
4332 KASSERT(m->phys_addr == pa,
4333 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4334 m, (uintmax_t)m->phys_addr,
4337 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4338 m < &vm_page_array[vm_page_array_size],
4339 ("pmap_remove_pages: bad pte %#jx",
4343 * Because this pmap is not active on other
4344 * processors, the dirty bit cannot have
4345 * changed state since we last loaded pte.
4350 * Update the vm_page_t clean/reference bits.
4352 if (pmap_pte_dirty(tpte)) {
4355 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4364 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
4367 pc->pc_map[field] |= bitmask;
4370 pmap_resident_count_dec(pmap,
4371 L2_SIZE / PAGE_SIZE);
4372 pvh = pa_to_pvh(tpte & ~ATTR_MASK);
4373 TAILQ_REMOVE(&pvh->pv_list, pv,pv_next);
4375 if (TAILQ_EMPTY(&pvh->pv_list)) {
4376 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4377 if ((mt->aflags & PGA_WRITEABLE) != 0 &&
4378 TAILQ_EMPTY(&mt->md.pv_list))
4379 vm_page_aflag_clear(mt, PGA_WRITEABLE);
4381 ml3 = pmap_remove_pt_page(pmap,
4384 KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
4385 ("pmap_remove_pages: l3 page not promoted"));
4386 pmap_resident_count_dec(pmap, 1);
4387 KASSERT(ml3->wire_count == NL3PG,
4388 ("pmap_remove_pages: l3 page wire count error"));
4389 ml3->wire_count = 0;
4390 pmap_add_delayed_free_list(ml3,
4395 pmap_resident_count_dec(pmap, 1);
4396 TAILQ_REMOVE(&m->md.pv_list, pv,
4399 if ((m->aflags & PGA_WRITEABLE) != 0 &&
4400 TAILQ_EMPTY(&m->md.pv_list) &&
4401 (m->flags & PG_FICTITIOUS) == 0) {
4403 VM_PAGE_TO_PHYS(m));
4404 if (TAILQ_EMPTY(&pvh->pv_list))
4405 vm_page_aflag_clear(m,
4410 pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde),
4415 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
4416 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
4417 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
4419 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4423 pmap_invalidate_all(pmap);
4427 vm_page_free_pages_toq(&free, true);
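/*
 * Usage note (illustrative, not from the original source): this is meant for
 * the process-termination path, conceptually something like
 *
 *	pmap_remove_pages(vmspace_pmap(vm));
 *
 * invoked once no other processor can be using the address space, which is
 * what makes the non-atomic PTE handling described above safe.
 */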
4431 * This is used to check if a page has been accessed or modified. As we
4432 * don't have a bit to see if it has been modified, we have to assume it
4433 * has been if the page is read/write.
4436 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
4438 struct rwlock *lock;
4440 struct md_page *pvh;
4441 pt_entry_t *pte, mask, value;
4443 int lvl, md_gen, pvh_gen;
4447 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4450 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4452 if (!PMAP_TRYLOCK(pmap)) {
4453 md_gen = m->md.pv_gen;
4457 if (md_gen != m->md.pv_gen) {
4462 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4464 ("pmap_page_test_mappings: Invalid level %d", lvl));
4468 mask |= ATTR_AP_RW_BIT;
4469 value |= ATTR_AP(ATTR_AP_RW);
4472 mask |= ATTR_AF | ATTR_DESCR_MASK;
4473 value |= ATTR_AF | L3_PAGE;
4475 rv = (pmap_load(pte) & mask) == value;
4480 if ((m->flags & PG_FICTITIOUS) == 0) {
4481 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4482 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4484 if (!PMAP_TRYLOCK(pmap)) {
4485 md_gen = m->md.pv_gen;
4486 pvh_gen = pvh->pv_gen;
4490 if (md_gen != m->md.pv_gen ||
4491 pvh_gen != pvh->pv_gen) {
4496 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4498 ("pmap_page_test_mappings: Invalid level %d", lvl));
4502 mask |= ATTR_AP_RW_BIT;
4503 value |= ATTR_AP(ATTR_AP_RW);
4506 mask |= ATTR_AF | ATTR_DESCR_MASK;
4507 value |= ATTR_AF | L2_BLOCK;
4509 rv = (pmap_load(pte) & mask) == value;
4523 * Return whether or not the specified physical page was modified
4524 * in any physical maps.
4527 pmap_is_modified(vm_page_t m)
4530 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4531 ("pmap_is_modified: page %p is not managed", m));
4534 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
4535 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
4536 * is clear, no PTEs can have PG_M set.
4538 VM_OBJECT_ASSERT_WLOCKED(m->object);
4539 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
4541 return (pmap_page_test_mappings(m, FALSE, TRUE));
4545 * pmap_is_prefaultable:
4547 * Return whether or not the specified virtual address is eligible for prefault.
4551 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
4559 pte = pmap_pte(pmap, addr, &lvl);
4560 if (pte != NULL && pmap_load(pte) != 0) {
4568 * pmap_is_referenced:
4570 * Return whether or not the specified physical page was referenced
4571 * in any physical maps.
4574 pmap_is_referenced(vm_page_t m)
4577 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4578 ("pmap_is_referenced: page %p is not managed", m));
4579 return (pmap_page_test_mappings(m, TRUE, FALSE));
4583 * Clear the write and modified bits in each of the given page's mappings.
4586 pmap_remove_write(vm_page_t m)
4588 struct md_page *pvh;
4590 struct rwlock *lock;
4591 pv_entry_t next_pv, pv;
4592 pt_entry_t oldpte, *pte;
4594 int lvl, md_gen, pvh_gen;
4596 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4597 ("pmap_remove_write: page %p is not managed", m));
4600 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
4601 * set by another thread while the object is locked. Thus,
4602 * if PGA_WRITEABLE is clear, no page table entries need updating.
4604 VM_OBJECT_ASSERT_WLOCKED(m->object);
4605 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
4607 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4608 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
4609 pa_to_pvh(VM_PAGE_TO_PHYS(m));
4612 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
4614 if (!PMAP_TRYLOCK(pmap)) {
4615 pvh_gen = pvh->pv_gen;
4619 if (pvh_gen != pvh->pv_gen) {
4626 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4627 if ((pmap_load(pte) & ATTR_SW_DBM) != 0)
4628 (void)pmap_demote_l2_locked(pmap, pte, va, &lock);
4629 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4630 ("inconsistent pv lock %p %p for page %p",
4631 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4634 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4636 if (!PMAP_TRYLOCK(pmap)) {
4637 pvh_gen = pvh->pv_gen;
4638 md_gen = m->md.pv_gen;
4642 if (pvh_gen != pvh->pv_gen ||
4643 md_gen != m->md.pv_gen) {
4649 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4650 oldpte = pmap_load(pte);
4652 if ((oldpte & ATTR_SW_DBM) != 0) {
4653 if (!atomic_fcmpset_long(pte, &oldpte,
4654 (oldpte | ATTR_AP_RW_BIT) & ~ATTR_SW_DBM))
4656 if ((oldpte & ATTR_AP_RW_BIT) ==
4657 ATTR_AP(ATTR_AP_RW))
4659 pmap_invalidate_page(pmap, pv->pv_va);
4664 vm_page_aflag_clear(m, PGA_WRITEABLE);
4668 * pmap_ts_referenced:
4670 * Return a count of reference bits for a page, clearing those bits.
4671 * It is not necessary for every reference bit to be cleared, but it
4672 * is necessary that 0 only be returned when there are truly no
4673 * reference bits set.
4675 * As an optimization, update the page's dirty field if a modified bit is
4676 * found while counting reference bits. This opportunistic update can be
4677 * performed at low cost and can eliminate the need for some future calls
4678 * to pmap_is_modified(). However, since this function stops after
4679 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
4680 * dirty pages. Those dirty pages will only be detected by a future call
4681 * to pmap_is_modified().
4684 pmap_ts_referenced(vm_page_t m)
4686 struct md_page *pvh;
4689 struct rwlock *lock;
4690 pd_entry_t *pde, tpde;
4691 pt_entry_t *pte, tpte;
4694 int cleared, lvl, md_gen, not_cleared, pvh_gen;
4695 struct spglist free;
4697 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4698 ("pmap_ts_referenced: page %p is not managed", m));
4701 pa = VM_PAGE_TO_PHYS(m);
4702 lock = PHYS_TO_PV_LIST_LOCK(pa);
4703 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
4707 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
4708 goto small_mappings;
4714 if (!PMAP_TRYLOCK(pmap)) {
4715 pvh_gen = pvh->pv_gen;
4719 if (pvh_gen != pvh->pv_gen) {
4725 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4726 KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found"));
4728 ("pmap_ts_referenced: invalid pde level %d", lvl));
4729 tpde = pmap_load(pde);
4730 KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE,
4731 ("pmap_ts_referenced: found an invalid l1 table"));
4732 pte = pmap_l1_to_l2(pde, pv->pv_va);
4733 tpte = pmap_load(pte);
4734 if (pmap_pte_dirty(tpte)) {
4736 * Although "tpte" maps a 2MB page, this function is
4737 * called at a 4KB page granularity, so only the dirty
4738 * field of the 4KB page under test is updated.
4743 if ((tpte & ATTR_AF) != 0) {
4745 * Since this reference bit is shared by 512 4KB pages,
4746 * it should not be cleared every time it is tested.
4747 * Apply a simple "hash" function on the physical page
4748 * number, the virtual superpage number, and the pmap
4749 * address to select one 4KB page out of the 512 on
4750 * which testing the reference bit will result in
4751 * clearing that reference bit. This function is
4752 * designed to avoid the selection of the same 4KB page
4753 * for every 2MB page mapping.
4755 * On demotion, a mapping that hasn't been referenced
4756 * is simply destroyed. To avoid the possibility of a
4757 * subsequent page fault on a demoted wired mapping,
4758 * always leave its reference bit set. Moreover,
4759 * since the superpage is wired, the current state of
4760 * its reference bit won't affect page replacement.
4762 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
4763 (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
4764 (tpte & ATTR_SW_WIRED) == 0) {
4765 pmap_clear_bits(pte, ATTR_AF);
4766 pmap_invalidate_page(pmap, pv->pv_va);
4772 /* Rotate the PV list if it has more than one entry. */
4773 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4774 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4775 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4778 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
4780 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
4782 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
4789 if (!PMAP_TRYLOCK(pmap)) {
4790 pvh_gen = pvh->pv_gen;
4791 md_gen = m->md.pv_gen;
4795 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4800 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4801 KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
4803 ("pmap_ts_referenced: invalid pde level %d", lvl));
4804 tpde = pmap_load(pde);
4805 KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE,
4806 ("pmap_ts_referenced: found an invalid l2 table"));
4807 pte = pmap_l2_to_l3(pde, pv->pv_va);
4808 tpte = pmap_load(pte);
4809 if (pmap_pte_dirty(tpte))
4811 if ((tpte & ATTR_AF) != 0) {
4812 if ((tpte & ATTR_SW_WIRED) == 0) {
4813 pmap_clear_bits(pte, ATTR_AF);
4814 pmap_invalidate_page(pmap, pv->pv_va);
4820 /* Rotate the PV list if it has more than one entry. */
4821 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4822 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4823 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4826 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
4827 not_cleared < PMAP_TS_REFERENCED_MAX);
4830 vm_page_free_pages_toq(&free, true);
4831 return (cleared + not_cleared);
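/*
 * Illustrative sketch, not part of the original code: the superpage
 * reference-bit "hash" used above, isolated as a helper.  With
 * Ln_ENTRIES == 512 it selects roughly one (page, superpage, pmap)
 * combination in 512, so each 2MB mapping has its ATTR_AF bit cleared via
 * only one of the 512 4KB pages that share it.
 */
static __inline bool
ts_referenced_would_clear_af(vm_paddr_t pa, vm_offset_t va, pmap_t pmap)
{

	return ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^ (uintptr_t)pmap) &
	    (Ln_ENTRIES - 1)) == 0);
}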
4835 * Apply the given advice to the specified range of addresses within the
4836 * given pmap. Depending on the advice, clear the referenced and/or
4837 * modified flags in each mapping and set the mapped page's dirty field.
4840 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
4845 * Clear the modify bits on the specified physical page.
4848 pmap_clear_modify(vm_page_t m)
4850 struct md_page *pvh;
4851 struct rwlock *lock;
4853 pv_entry_t next_pv, pv;
4854 pd_entry_t *l2, oldl2;
4855 pt_entry_t *l3, oldl3;
4857 int md_gen, pvh_gen;
4859 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4860 ("pmap_clear_modify: page %p is not managed", m));
4861 VM_OBJECT_ASSERT_WLOCKED(m->object);
4862 KASSERT(!vm_page_xbusied(m),
4863 ("pmap_clear_modify: page %p is exclusive busied", m));
4866 * If the page is not PGA_WRITEABLE, then no PTEs can have ATTR_SW_DBM
4867 * set. If the object containing the page is locked and the page is not
4868 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
4870 if ((m->aflags & PGA_WRITEABLE) == 0)
4872 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
4873 pa_to_pvh(VM_PAGE_TO_PHYS(m));
4874 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4877 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
4879 if (!PMAP_TRYLOCK(pmap)) {
4880 pvh_gen = pvh->pv_gen;
4884 if (pvh_gen != pvh->pv_gen) {
4890 l2 = pmap_l2(pmap, va);
4891 oldl2 = pmap_load(l2);
4892 if ((oldl2 & ATTR_SW_DBM) != 0) {
4893 if (pmap_demote_l2_locked(pmap, l2, va, &lock)) {
4894 if ((oldl2 & ATTR_SW_WIRED) == 0) {
4896 * Write protect the mapping to a
4897 * single page so that a subsequent
4898 * write access may repromote.
4900 va += VM_PAGE_TO_PHYS(m) -
4901 (oldl2 & ~ATTR_MASK);
4902 l3 = pmap_l2_to_l3(l2, va);
4903 oldl3 = pmap_load(l3);
4904 if (pmap_l3_valid(oldl3)) {
4905 while (!atomic_fcmpset_long(l3,
4906 &oldl3, (oldl3 & ~ATTR_SW_DBM) |
4907 ATTR_AP(ATTR_AP_RO)))
4910 pmap_invalidate_page(pmap, va);
4917 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4919 if (!PMAP_TRYLOCK(pmap)) {
4920 md_gen = m->md.pv_gen;
4921 pvh_gen = pvh->pv_gen;
4925 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4930 l2 = pmap_l2(pmap, pv->pv_va);
4931 l3 = pmap_l2_to_l3(l2, pv->pv_va);
4932 oldl3 = pmap_load(l3);
4933 if (pmap_l3_valid(oldl3) &&
4934 (oldl3 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM) {
4935 pmap_set_bits(l3, ATTR_AP(ATTR_AP_RO));
4936 pmap_invalidate_page(pmap, pv->pv_va);
4944 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
4946 struct pmap_preinit_mapping *ppim;
4947 vm_offset_t va, offset;
4950 int i, lvl, l2_blocks, free_l2_count, start_idx;
4952 if (!vm_initialized) {
4954 * No L3 ptables yet, so map entire L2 blocks, where the start VA is
4955 * preinit_map_va + start_idx * L2_SIZE.
4956 * There may be duplicate mappings (multiple VAs -> same PA), but the
4957 * ARM64 dcache is always PIPT, so that is acceptable.
4962 /* Calculate how many L2 blocks are needed for the mapping */
4963 l2_blocks = (roundup2(pa + size, L2_SIZE) -
4964 rounddown2(pa, L2_SIZE)) >> L2_SHIFT;
4966 offset = pa & L2_OFFSET;
4968 if (preinit_map_va == 0)
4971 /* Map 2MiB L2 blocks from reserved VA space */
4975 /* Find enough free contiguous VA space */
4976 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
4977 ppim = pmap_preinit_mapping + i;
4978 if (free_l2_count > 0 && ppim->pa != 0) {
4979 /* Not enough space here */
4985 if (ppim->pa == 0) {
4987 if (start_idx == -1)
4990 if (free_l2_count == l2_blocks)
4994 if (free_l2_count != l2_blocks)
4995 panic("%s: too many preinit mappings", __func__);
4997 va = preinit_map_va + (start_idx * L2_SIZE);
4998 for (i = start_idx; i < start_idx + l2_blocks; i++) {
4999 /* Mark entries as allocated */
5000 ppim = pmap_preinit_mapping + i;
5002 ppim->va = va + offset;
5007 pa = rounddown2(pa, L2_SIZE);
5008 for (i = 0; i < l2_blocks; i++) {
5009 pde = pmap_pde(kernel_pmap, va, &lvl);
5010 KASSERT(pde != NULL,
5011 ("pmap_mapbios: Invalid page entry, va: 0x%lx",
5014 ("pmap_mapbios: Invalid level %d", lvl));
5016 /* Insert L2_BLOCK */
5017 l2 = pmap_l1_to_l2(pde, va);
5019 pa | ATTR_DEFAULT | ATTR_XN |
5020 ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
5025 pmap_invalidate_all(kernel_pmap);
5027 va = preinit_map_va + (start_idx * L2_SIZE);
5030 /* kva_alloc may be used to map the pages */
5031 offset = pa & PAGE_MASK;
5032 size = round_page(offset + size);
5034 va = kva_alloc(size);
5036 panic("%s: Couldn't allocate KVA", __func__);
5038 pde = pmap_pde(kernel_pmap, va, &lvl);
5039 KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl));
5041 /* An L3 table is already linked here, so pmap_kenter() can install L3 entries */
5042 va = trunc_page(va);
5043 pa = trunc_page(pa);
5044 pmap_kenter(va, size, pa, CACHED_MEMORY);
5047 return ((void *)(va + offset));
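/*
 * Illustrative sketch, not part of the original code: the usual calling
 * pattern for pmap_mapbios()/pmap_unmapbios().  The physical address and
 * size below are placeholders, not real firmware locations.
 */
static __inline void
mapbios_usage_sketch(void)
{
	void *p;

	p = pmap_mapbios((vm_paddr_t)0x80000000, 2 * PAGE_SIZE);
	/* ... parse the firmware table through "p" ... */
	pmap_unmapbios((vm_offset_t)p, 2 * PAGE_SIZE);
}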
5051 pmap_unmapbios(vm_offset_t va, vm_size_t size)
5053 struct pmap_preinit_mapping *ppim;
5054 vm_offset_t offset, tmpsize, va_trunc;
5057 int i, lvl, l2_blocks, block;
5061 (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
5062 KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
5064 /* Remove preinit mapping */
5065 preinit_map = false;
5067 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5068 ppim = pmap_preinit_mapping + i;
5069 if (ppim->va == va) {
5070 KASSERT(ppim->size == size,
5071 ("pmap_unmapbios: size mismatch"));
5076 offset = block * L2_SIZE;
5077 va_trunc = rounddown2(va, L2_SIZE) + offset;
5079 /* Remove L2_BLOCK */
5080 pde = pmap_pde(kernel_pmap, va_trunc, &lvl);
5081 KASSERT(pde != NULL,
5082 ("pmap_unmapbios: Invalid page entry, va: 0x%lx",
5084 l2 = pmap_l1_to_l2(pde, va_trunc);
5087 if (block == (l2_blocks - 1))
5093 pmap_invalidate_all(kernel_pmap);
5097 /* Unmap the pages reserved with kva_alloc. */
5098 if (vm_initialized) {
5099 offset = va & PAGE_MASK;
5100 size = round_page(offset + size);
5101 va = trunc_page(va);
5103 pde = pmap_pde(kernel_pmap, va, &lvl);
5104 KASSERT(pde != NULL,
5105 ("pmap_unmapbios: Invalid page entry, va: 0x%lx", va));
5106 KASSERT(lvl == 2, ("pmap_unmapbios: Invalid level %d", lvl));
5108 /* Unmap and invalidate the pages */
5109 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
5110 pmap_kremove(va + tmpsize);
5117 * Sets the memory attribute for the specified page.
5120 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5123 m->md.pv_memattr = ma;
5126 * If "m" is a normal page, update its direct mapping. This update
5127 * can be relied upon to perform any cache operations that are
5128 * required for data coherence.
5130 if ((m->flags & PG_FICTITIOUS) == 0 &&
5131 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
5132 m->md.pv_memattr) != 0)
5133 panic("memory attribute change on the direct map failed");
5137 * Changes the specified virtual address range's memory type to that given by
5138 * the parameter "mode". The specified virtual address range must be
5139 * completely contained within either the direct map or the kernel map. If
5140 * the virtual address range is contained within the kernel map, then the
5141 * memory type for each of the corresponding ranges of the direct map is also
5142 * changed. (The corresponding ranges of the direct map are those ranges that
5143 * map the same physical pages as the specified virtual address range.) These
5144 * changes to the direct map are necessary because the architecture does not
5145 * guarantee coherent behavior when two or more mappings of the same physical
5146 * page have different memory attributes.
5148 * Returns zero if the change completed successfully, and either EINVAL or
5149 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
5150 * of the virtual address range was not mapped, and ENOMEM is returned if
5151 * there was insufficient memory available to complete the change. In the
5152 * latter case, the memory type may have been changed on some part of the
5153 * virtual address range or the direct map.
5156 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
5160 PMAP_LOCK(kernel_pmap);
5161 error = pmap_change_attr_locked(va, size, mode);
5162 PMAP_UNLOCK(kernel_pmap);
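/*
 * Illustrative sketch, not part of the original code: a typical call,
 * remapping part of the direct map as uncacheable, for example for a buffer
 * shared with a non-coherent device.  "pa" and "len" are placeholders.
 */
static __inline int
change_attr_usage_sketch(vm_paddr_t pa, vm_size_t len)
{

	return (pmap_change_attr(PHYS_TO_DMAP(pa), len,
	    VM_MEMATTR_UNCACHEABLE));
}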
5167 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
5169 vm_offset_t base, offset, tmpva;
5170 pt_entry_t l3, *pte, *newpte;
5173 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
5174 base = trunc_page(va);
5175 offset = va & PAGE_MASK;
5176 size = round_page(offset + size);
5178 if (!VIRT_IN_DMAP(base))
5181 for (tmpva = base; tmpva < base + size; ) {
5182 pte = pmap_pte(kernel_pmap, tmpva, &lvl);
5186 if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
5188 * We already have the correct attribute,
5189 * ignore this entry.
5193 panic("Invalid DMAP table level: %d\n", lvl);
5195 tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
5198 tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
5206 * Split the entry into a level 3 table, then
5207 * set the new attribute.
5211 panic("Invalid DMAP table level: %d\n", lvl);
5213 newpte = pmap_demote_l1(kernel_pmap, pte,
5214 tmpva & ~L1_OFFSET);
5217 pte = pmap_l1_to_l2(pte, tmpva);
5219 newpte = pmap_demote_l2(kernel_pmap, pte,
5223 pte = pmap_l2_to_l3(pte, tmpva);
5225 /* Update the entry */
5226 l3 = pmap_load(pte);
5227 l3 &= ~ATTR_IDX_MASK;
5228 l3 |= ATTR_IDX(mode);
5229 if (mode == DEVICE_MEMORY)
5232 pmap_update_entry(kernel_pmap, pte, l3, tmpva,
5236 * If moving to a non-cacheable entry, flush the data cache.
5239 if (mode == VM_MEMATTR_UNCACHEABLE)
5240 cpu_dcache_wbinv_range(tmpva, L3_SIZE);
5252 * Create an L2 table to map all addresses within an L1 mapping.
5255 pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
5257 pt_entry_t *l2, newl2, oldl1;
5259 vm_paddr_t l2phys, phys;
5263 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5264 oldl1 = pmap_load(l1);
5265 KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
5266 ("pmap_demote_l1: Demoting a non-block entry"));
5267 KASSERT((va & L1_OFFSET) == 0,
5268 ("pmap_demote_l1: Invalid virtual address %#lx", va));
5269 KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
5270 ("pmap_demote_l1: Level 1 table shouldn't be managed"));
5273 if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
5274 tmpl1 = kva_alloc(PAGE_SIZE);
5279 if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
5280 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
5281 CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
5282 " in pmap %p", va, pmap);
5286 l2phys = VM_PAGE_TO_PHYS(ml2);
5287 l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
5289 /* The physical address that the range maps */
5290 phys = oldl1 & ~ATTR_MASK;
5291 /* The attributes from the old l1 entry, to be copied to the new l2 entries */
5292 newl2 = oldl1 & ATTR_MASK;
5294 /* Create the new entries */
5295 for (i = 0; i < Ln_ENTRIES; i++) {
5296 l2[i] = newl2 | phys;
5299 KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
5300 ("Invalid l2 page (%lx != %lx)", l2[0],
5301 (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
5304 pmap_kenter(tmpl1, PAGE_SIZE,
5305 DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, CACHED_MEMORY);
5306 l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
5309 pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
5312 pmap_kremove(tmpl1);
5313 kva_free(tmpl1, PAGE_SIZE);
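/*
 * Worked example, not part of the original code: demoting a 1GB L1 block
 * that maps physical address 0x40000000 makes the fill loop above write 512
 * L2 block entries covering 0x40000000, 0x40200000, ..., 0x7fe00000, each
 * carrying the attribute bits copied from the old L1 entry.  pmap_fill_l3()
 * below performs the analogous job one level down, at 4KB granularity.
 */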
5320 pmap_fill_l3(pt_entry_t *firstl3, pt_entry_t newl3)
5324 for (l3 = firstl3; l3 - firstl3 < Ln_ENTRIES; l3++) {
5331 pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
5332 struct rwlock **lockp)
5334 struct spglist free;
5337 (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
5339 vm_page_free_pages_toq(&free, true);
5343 * Create an L3 table to map all addresses within an L2 mapping.
5346 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
5347 struct rwlock **lockp)
5349 pt_entry_t *l3, newl3, oldl2;
5354 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5356 oldl2 = pmap_load(l2);
5357 KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
5358 ("pmap_demote_l2: Demoting a non-block entry"));
5362 if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
5363 tmpl2 = kva_alloc(PAGE_SIZE);
5369 * Invalidate the 2MB page mapping and return "failure" if the
5370 * mapping was never accessed.
5372 if ((oldl2 & ATTR_AF) == 0) {
5373 KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5374 ("pmap_demote_l2: a wired mapping is missing ATTR_AF"));
5375 pmap_demote_l2_abort(pmap, va, l2, lockp);
5376 CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
5381 if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
5382 KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5383 ("pmap_demote_l2: page table page for a wired mapping"
5387 * If the page table page is missing and the mapping
5388 * is for a kernel address, the mapping must belong to
5389 * the direct map. Page table pages are preallocated
5390 * for every other part of the kernel address space,
5391 * so the direct map region is the only part of the
5392 * kernel address space that must be handled here.
5394 KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
5395 ("pmap_demote_l2: No saved mpte for va %#lx", va));
5398 * If the 2MB page mapping belongs to the direct map
5399 * region of the kernel's address space, then the page
5400 * allocation request specifies the highest possible
5401 * priority (VM_ALLOC_INTERRUPT). Otherwise, the
5402 * priority is normal.
5404 ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
5405 (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
5406 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
5409 * If the allocation of the new page table page fails,
5410 * invalidate the 2MB page mapping and return "failure".
5413 pmap_demote_l2_abort(pmap, va, l2, lockp);
5414 CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
5415 " in pmap %p", va, pmap);
5419 if (va < VM_MAXUSER_ADDRESS) {
5420 ml3->wire_count = NL3PG;
5421 pmap_resident_count_inc(pmap, 1);
5424 l3phys = VM_PAGE_TO_PHYS(ml3);
5425 l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
5426 newl3 = (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
5427 KASSERT((oldl2 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) !=
5428 (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM),
5429 ("pmap_demote_l2: L2 entry is writeable but not dirty"));
5432 * If the page table page is not leftover from an earlier promotion,
5433 * or the mapping attributes have changed, (re)initialize the L3 table.
5435 if (ml3->valid == 0 || (l3[0] & ATTR_MASK) != (newl3 & ATTR_MASK))
5436 pmap_fill_l3(l3, newl3);
5439 * Map the temporary page so we don't lose access to the l2 table.
5442 pmap_kenter(tmpl2, PAGE_SIZE,
5443 DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, CACHED_MEMORY);
5444 l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
5448 * The spare PV entries must be reserved prior to demoting the
5449 * mapping, that is, prior to changing the PDE. Otherwise, the state
5450 * of the L2 and the PV lists will be inconsistent, which can result
5451 * in reclaim_pv_chunk() attempting to remove a PV entry from the
5452 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
5453 * PV entry for the 2MB page mapping that is being demoted.
5455 if ((oldl2 & ATTR_SW_MANAGED) != 0)
5456 reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
5459 * Pass PAGE_SIZE so that a single TLB invalidation is performed on
5460 * the 2MB page mapping.
5462 pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
5465 * Demote the PV entry.
5467 if ((oldl2 & ATTR_SW_MANAGED) != 0)
5468 pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
5470 atomic_add_long(&pmap_l2_demotions, 1);
5471 CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
5472 " in pmap %p %lx", va, pmap, l3[0]);
5476 pmap_kremove(tmpl2);
5477 kva_free(tmpl2, PAGE_SIZE);
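/*
 * Note, not part of the original code: reserve_pv_entries() above asks for
 * Ln_ENTRIES - 1 (511) entries because the existing PV entry for the 2MB
 * mapping is converted by pmap_pv_demote_l2() into the PV entry for one of
 * the 4KB pages; only the remaining 511 pages need new entries.
 */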
5485 pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
5487 struct rwlock *lock;
5491 l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
5498 * perform the pmap work for mincore
5501 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5503 pt_entry_t *pte, tpte;
5504 vm_paddr_t mask, pa;
5511 pte = pmap_pte(pmap, addr, &lvl);
5513 tpte = pmap_load(pte);
5526 panic("pmap_mincore: invalid level %d", lvl);
5529 managed = (tpte & ATTR_SW_MANAGED) != 0;
5530 val = MINCORE_INCORE;
5532 val |= MINCORE_SUPER;
5533 if ((managed && pmap_pte_dirty(tpte)) || (!managed &&
5534 (tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)))
5535 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5536 if ((tpte & ATTR_AF) == ATTR_AF)
5537 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5539 pa = (tpte & ~ATTR_MASK) | (addr & mask);
5543 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5544 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
5545 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
5546 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
5549 PA_UNLOCK_COND(*locked_pa);
5556 pmap_activate(struct thread *td)
5561 pmap = vmspace_pmap(td->td_proc->p_vmspace);
5562 td->td_proc->p_md.md_l0addr = vtophys(pmap->pm_l0);
5564 "msr ttbr0_el1, %0 \n"
5566 : : "r"(td->td_proc->p_md.md_l0addr));
5567 pmap_invalidate_all(pmap);
5572 pmap_switch(struct thread *old, struct thread *new)
5574 pcpu_bp_harden bp_harden;
5577 /* Store the new curthread */
5578 PCPU_SET(curthread, new);
5580 /* And the new pcb */
5582 PCPU_SET(curpcb, pcb);
5585 * TODO: We may need to flush the cache here if switching
5586 * to a user process.
5590 old->td_proc->p_md.md_l0addr != new->td_proc->p_md.md_l0addr) {
5592 /* Switch to the new pmap */
5593 "msr ttbr0_el1, %0 \n"
5596 /* Invalidate the TLB */
5601 : : "r"(new->td_proc->p_md.md_l0addr));
5604 * Stop userspace from training the branch predictor against
5605 * other processes. This will call into a CPU specific
5606 * function that clears the branch predictor state.
5608 bp_harden = PCPU_GET(bp_harden);
5609 if (bp_harden != NULL)
5617 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
5620 if (va >= VM_MIN_KERNEL_ADDRESS) {
5621 cpu_icache_sync_range(va, sz);
5626 /* Find the length of data in this page to flush */
5627 offset = va & PAGE_MASK;
5628 len = imin(PAGE_SIZE - offset, sz);
5631 /* Extract the physical address & find it in the DMAP */
5632 pa = pmap_extract(pmap, va);
5634 cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
5636 /* Move to the next page */
5639 /* Set the length for the next iteration */
5640 len = imin(PAGE_SIZE, sz);
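/*
 * Note, not part of the original code: pmap_fault() below handles three
 * classes of faults without entering the full VM fault path: access-flag
 * faults, resolved by setting ATTR_AF in the PTE; permission faults on a
 * write to a mapping carrying ATTR_SW_DBM, resolved by clearing the
 * read-only bit (the software dirty-bit emulation); and translation faults,
 * which are re-checked with an address-translation instruction because a
 * concurrent break-before-make sequence may have made the mapping
 * transiently invalid.
 */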
5646 pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
5655 ec = ESR_ELx_EXCEPTION(esr);
5657 case EXCP_INSN_ABORT_L:
5658 case EXCP_INSN_ABORT:
5659 case EXCP_DATA_ABORT_L:
5660 case EXCP_DATA_ABORT:
5666 /* Data and instruction aborts use the same encoding for the FSC field. */
5667 switch (esr & ISS_DATA_DFSC_MASK) {
5668 case ISS_DATA_DFSC_AFF_L1:
5669 case ISS_DATA_DFSC_AFF_L2:
5670 case ISS_DATA_DFSC_AFF_L3:
5672 pte = pmap_pte(pmap, far, &lvl);
5674 pmap_set_bits(pte, ATTR_AF);
5677 * XXXMJ as an optimization we could mark the entry
5678 * dirty if this is a write fault.
5683 case ISS_DATA_DFSC_PF_L1:
5684 case ISS_DATA_DFSC_PF_L2:
5685 case ISS_DATA_DFSC_PF_L3:
5686 if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) ||
5687 (esr & ISS_DATA_WnR) == 0)
5690 pte = pmap_pte(pmap, far, &lvl);
5692 (pmap_load(pte) & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
5693 (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
5694 pmap_clear_bits(pte, ATTR_AP_RW_BIT);
5695 pmap_invalidate_page(pmap, trunc_page(far));
5700 case ISS_DATA_DFSC_TF_L0:
5701 case ISS_DATA_DFSC_TF_L1:
5702 case ISS_DATA_DFSC_TF_L2:
5703 case ISS_DATA_DFSC_TF_L3:
5705 /* Ask the MMU to check the address */
5706 intr = intr_disable();
5707 if (pmap == kernel_pmap)
5708 par = arm64_address_translate_s1e1r(far);
5710 par = arm64_address_translate_s1e0r(far);
5715 * If the translation now succeeds, the fault was caused by a
5716 * mapping that was transiently invalid during a break-before-make
5717 * sequence. We can unlock and return success to the trap handler.
5719 if (PAR_SUCCESS(par))
5728 * Increase the starting virtual address of the given mapping if a
5729 * different alignment might result in more superpage mappings.
5732 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
5733 vm_offset_t *addr, vm_size_t size)
5735 vm_offset_t superpage_offset;
5739 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5740 offset += ptoa(object->pg_color);
5741 superpage_offset = offset & L2_OFFSET;
5742 if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
5743 (*addr & L2_OFFSET) == superpage_offset)
5745 if ((*addr & L2_OFFSET) < superpage_offset)
5746 *addr = (*addr & ~L2_OFFSET) + superpage_offset;
5748 *addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
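/*
 * Worked example, not part of the original code: with L2_SIZE == 2MB, an
 * object offset of 0x12345000 yields superpage_offset == 0x145000.  If the
 * caller proposed *addr == 0x20000000, then (*addr & L2_OFFSET) == 0 is
 * less than 0x145000, so *addr is advanced to 0x20145000.  Virtual
 * addresses are then congruent to object offsets modulo 2MB, so the fully
 * populated, 2MB-aligned portions of the mapping can later be mapped with
 * L2 blocks.
 */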
5752 * Get the kernel virtual address of a set of physical pages. If there are
5753 * physical addresses not covered by the DMAP, create a transient mapping
5754 * that will be removed by a later call to pmap_unmap_io_transient.
5756 * \param page The pages the caller wishes to obtain the virtual
5757 * address on the kernel memory map.
5758 * \param vaddr On return contains the kernel virtual memory address
5759 * of the pages passed in the page parameter.
5760 * \param count Number of pages passed in.
5761 * \param can_fault TRUE if the thread using the mapped pages can take
5762 * page faults, FALSE otherwise.
5764 * \returns TRUE if the caller must call pmap_unmap_io_transient when
5765 * finished or FALSE otherwise.
5769 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
5770 boolean_t can_fault)
5773 boolean_t needs_mapping;
5777 * Allocate any KVA space that we need; this is done in a separate
5778 * loop to avoid calling vmem_alloc while pinned.
5780 needs_mapping = FALSE;
5781 for (i = 0; i < count; i++) {
5782 paddr = VM_PAGE_TO_PHYS(page[i]);
5783 if (__predict_false(!PHYS_IN_DMAP(paddr))) {
5784 error = vmem_alloc(kernel_arena, PAGE_SIZE,
5785 M_BESTFIT | M_WAITOK, &vaddr[i]);
5786 KASSERT(error == 0, ("vmem_alloc failed: %d", error));
5787 needs_mapping = TRUE;
5789 vaddr[i] = PHYS_TO_DMAP(paddr);
5793 /* Exit early if everything is covered by the DMAP */
5799 for (i = 0; i < count; i++) {
5800 paddr = VM_PAGE_TO_PHYS(page[i]);
5801 if (!PHYS_IN_DMAP(paddr)) {
5803 "pmap_map_io_transient: TODO: Map out of DMAP data");
5807 return (needs_mapping);
5811 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
5812 boolean_t can_fault)
5819 for (i = 0; i < count; i++) {
5820 paddr = VM_PAGE_TO_PHYS(page[i]);
5821 if (!PHYS_IN_DMAP(paddr)) {
5822 panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
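/*
 * Illustrative sketch, not part of the original code: the usual calling
 * pattern for the transient-mapping pair above, shown for a single page.
 */
static __inline void
io_transient_usage_sketch(vm_page_t m)
{
	vm_offset_t va;
	boolean_t mapped;

	mapped = pmap_map_io_transient(&m, &va, 1, FALSE);
	/* ... access the page's contents through "va" ... */
	if (mapped)
		pmap_unmap_io_transient(&m, &va, 1, FALSE);
}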
5828 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
5831 return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);