2 * Copyright (c) 1991 Regents of the University of California.
4 * Copyright (c) 1994 John S. Dyson
6 * Copyright (c) 1994 David Greenman
8 * Copyright (c) 2003 Peter Wemm
10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11 * All rights reserved.
12 * Copyright (c) 2014 Andrew Turner
13 * All rights reserved.
14 * Copyright (c) 2014-2016 The FreeBSD Foundation
15 * All rights reserved.
17 * This code is derived from software contributed to Berkeley by
18 * the Systems Programming Group of the University of Utah Computer
19 * Science Department and William Jolitz of UUNET Technologies Inc.
21 * This software was developed by Andrew Turner under sponsorship from
22 * the FreeBSD Foundation.
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 * must display the following acknowledgement:
34 * This product includes software developed by the University of
35 * California, Berkeley and its contributors.
36 * 4. Neither the name of the University nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
40 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
55 * Copyright (c) 2003 Networks Associates Technology, Inc.
56 * All rights reserved.
58 * This software was developed for the FreeBSD Project by Jake Burkholder,
59 * Safeport Network Services, and Network Associates Laboratories, the
60 * Security Research Division of Network Associates, Inc. under
61 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
62 * CHATS research program.
64 * Redistribution and use in source and binary forms, with or without
65 * modification, are permitted provided that the following conditions
67 * 1. Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * 2. Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in the
71 * documentation and/or other materials provided with the distribution.
73 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
86 #include <sys/cdefs.h>
87 __FBSDID("$FreeBSD$");
90 * Manages physical address maps.
92 * Since the information managed by this module is
93 * also stored by the logical address mapping module,
94 * this module may throw away valid virtual-to-physical
95 * mappings at almost any time. However, invalidations
96 * of virtual-to-physical mappings must be done as requested.
99 * In order to cope with hardware architectures which
100 * make virtual-to-physical map invalidates expensive,
101 * this module may delay invalidate or reduced protection
102 * operations until such time as they are actually
103 * necessary. This module is given full information as
104 * to which processors are currently using which maps,
105 * and to when physical maps must be made correct.
110 #include <sys/param.h>
111 #include <sys/bitstring.h>
113 #include <sys/systm.h>
114 #include <sys/kernel.h>
116 #include <sys/limits.h>
117 #include <sys/lock.h>
118 #include <sys/malloc.h>
119 #include <sys/mman.h>
120 #include <sys/msgbuf.h>
121 #include <sys/mutex.h>
122 #include <sys/proc.h>
123 #include <sys/rwlock.h>
125 #include <sys/vmem.h>
126 #include <sys/vmmeter.h>
127 #include <sys/sched.h>
128 #include <sys/sysctl.h>
129 #include <sys/_unrhdr.h>
133 #include <vm/vm_param.h>
134 #include <vm/vm_kern.h>
135 #include <vm/vm_page.h>
136 #include <vm/vm_map.h>
137 #include <vm/vm_object.h>
138 #include <vm/vm_extern.h>
139 #include <vm/vm_pageout.h>
140 #include <vm/vm_pager.h>
141 #include <vm/vm_phys.h>
142 #include <vm/vm_radix.h>
143 #include <vm/vm_reserv.h>
146 #include <machine/machdep.h>
147 #include <machine/md_var.h>
148 #include <machine/pcb.h>
150 #include <arm/include/physmem.h>
152 #define NL0PG (PAGE_SIZE/(sizeof (pd_entry_t)))
153 #define NL1PG (PAGE_SIZE/(sizeof (pd_entry_t)))
154 #define NL2PG (PAGE_SIZE/(sizeof (pd_entry_t)))
155 #define NL3PG (PAGE_SIZE/(sizeof (pt_entry_t)))
157 #define NUL0E L0_ENTRIES
158 #define NUL1E (NUL0E * NL1PG)
159 #define NUL2E (NUL1E * NL2PG)
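/*
 * Worked example (assuming the 4 KB translation granule used by this pmap):
 * PAGE_SIZE / sizeof(pd_entry_t) == 4096 / 8 == 512, so NL1PG, NL2PG, and
 * NL3PG are all 512.  NUL1E is therefore NUL0E * 512 L1 entries and NUL2E is
 * NUL1E * 512 L2 entries; these totals are used below to partition page
 * table page pindex values by level (see _pmap_alloc_l3()).
 */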
161 #if !defined(DIAGNOSTIC)
162 #ifdef __GNUC_GNU_INLINE__
163 #define PMAP_INLINE __attribute__((__gnu_inline__)) inline
165 #define PMAP_INLINE extern inline
172 * These are configured by the mair_el1 register. This is set up in locore.S
174 #define DEVICE_MEMORY 0
175 #define UNCACHED_MEMORY 1
176 #define CACHED_MEMORY 2
180 #define PV_STAT(x) do { x ; } while (0)
182 #define PV_STAT(x) do { } while (0)
185 #define pmap_l2_pindex(v) ((v) >> L2_SHIFT)
186 #define pa_to_pvh(pa) (&pv_table[pmap_l2_pindex(pa)])
188 #define NPV_LIST_LOCKS MAXCPU
190 #define PHYS_TO_PV_LIST_LOCK(pa) \
191 (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
193 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
194 struct rwlock **_lockp = (lockp); \
195 struct rwlock *_new_lock; \
197 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
198 if (_new_lock != *_lockp) { \
199 if (*_lockp != NULL) \
200 rw_wunlock(*_lockp); \
201 *_lockp = _new_lock; \
206 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
207 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
209 #define RELEASE_PV_LIST_LOCK(lockp) do { \
210 struct rwlock **_lockp = (lockp); \
212 if (*_lockp != NULL) { \
213 rw_wunlock(*_lockp); \
218 #define VM_PAGE_TO_PV_LIST_LOCK(m) \
219 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
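/*
 * Illustrative note: pv list locks are striped by physical address, so
 * PHYS_TO_PV_LIST_LOCK(pa) hashes "pa" onto one of NPV_LIST_LOCKS rwlocks.
 * CHANGE_PV_LIST_LOCK_TO_PHYS() lets a caller visit mappings of different
 * pages while holding at most one of these locks: whenever the next page
 * hashes to a different lock, the currently held lock (if any) is dropped
 * and the new one is taken.
 */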
222 * The presence of this flag indicates that the mapping is writeable.
223 * If the ATTR_AP_RO bit is also set, then the mapping is clean, otherwise it is
224 * dirty. This flag may only be set on managed mappings.
226 * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it
227 * as a software managed bit.
229 #define ATTR_SW_DBM ATTR_DBM
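/*
 * Example of the resulting encoding (see pmap_pte_dirty()): a managed,
 * writeable mapping starts out with ATTR_SW_DBM set and its access
 * permissions read-only, i.e. clean; once it has been written (via a
 * permission fault, or by DBM-capable hardware clearing the read-only bit)
 * it carries ATTR_SW_DBM together with ATTR_AP(ATTR_AP_RW) and is dirty.
 */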
231 struct pmap kernel_pmap_store;
233 /* Used for mapping ACPI memory before VM is initialized */
234 #define PMAP_PREINIT_MAPPING_COUNT 32
235 #define PMAP_PREINIT_MAPPING_SIZE (PMAP_PREINIT_MAPPING_COUNT * L2_SIZE)
236 static vm_offset_t preinit_map_va; /* Start VA of pre-init mapping space */
237 static int vm_initialized = 0; /* No need to use pre-init maps when set */
240 * Reserve a few L2 blocks starting from 'preinit_map_va' pointer.
241 * Always map entire L2 block for simplicity.
242 * VA of L2 block = preinit_map_va + i * L2_SIZE
244 static struct pmap_preinit_mapping {
248 } pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
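/*
 * For illustration: with 2 MB L2 blocks (4 KB granule), the 32 slots above
 * reserve PMAP_PREINIT_MAPPING_SIZE == 32 * L2_SIZE == 64 MB of KVA for
 * early ACPI mappings; pmap_bootstrap() places this window at
 * preinit_map_va, below virtual_avail.
 */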
250 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
251 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
252 vm_offset_t kernel_vm_end = 0;
255 * Data for the pv entry allocation mechanism.
257 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
258 static struct mtx pv_chunks_mutex;
259 static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
260 static struct md_page *pv_table;
261 static struct md_page pv_dummy;
263 vm_paddr_t dmap_phys_base; /* The start of the dmap region */
264 vm_paddr_t dmap_phys_max; /* The limit of the dmap region */
265 vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */
267 /* This code assumes all L1 DMAP entries will be used */
268 CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
269 CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
271 #define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
272 extern pt_entry_t pagetable_dmap[];
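/*
 * Illustrative note (4 KB granule): DMAP_TABLES is the number of 512 GB L0
 * slots spanned by the DMAP, and pagetable_dmap[] supplies one page of L1
 * entries for each of them.  pmap_bootstrap_dmap() fills these L1 entries
 * with 1 GB blocks, or with L2 tables of 2 MB blocks for the unaligned ends
 * of physical ranges.
 */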
274 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
275 static vm_paddr_t physmap[PHYSMAP_SIZE];
276 static u_int physmap_idx;
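/*
 * physmap[] holds {start, end} physical address pairs (two entries per
 * usable segment, filled by arm_physmem_avail()), which is why the
 * bootstrap loops below step through it two entries at a time.
 */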
278 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
281 * This ASID allocator uses a bit vector ("asid_set") to remember which ASIDs
282 * that it has currently allocated to a pmap, a cursor ("asid_next") to
283 * optimize its search for a free ASID in the bit vector, and an epoch number
284 * ("asid_epoch") to indicate when it has reclaimed all previously allocated
285 * ASIDs that are not currently active on a processor.
287 * The current epoch number is always in the range [0, INT_MAX). Negative
288 * numbers and INT_MAX are reserved for special cases that are described below.
291 static SYSCTL_NODE(_vm_pmap, OID_AUTO, asid, CTLFLAG_RD, 0, "ASID allocator");
292 static int asid_bits;
293 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, bits, CTLFLAG_RD, &asid_bits, 0,
294 "The number of bits in an ASID");
295 static bitstr_t *asid_set;
296 static int asid_set_size;
297 static int asid_next;
298 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, next, CTLFLAG_RD, &asid_next, 0,
299 "The last allocated ASID plus one");
300 static int asid_epoch;
301 SYSCTL_INT(_vm_pmap_asid, OID_AUTO, epoch, CTLFLAG_RD, &asid_epoch, 0,
302 "The current epoch number");
303 static struct mtx asid_set_mutex;
306 * A pmap's cookie encodes an ASID and epoch number. Cookies for reserved
307 * ASIDs have a negative epoch number, specifically, INT_MIN. Cookies for
308 * dynamically allocated ASIDs have a non-negative epoch number.
310 * An invalid ASID is represented by -1.
312 * There are two special-case cookie values: (1) COOKIE_FROM(-1, INT_MIN),
313 * which indicates that an ASID should never be allocated to the pmap, and
314 * (2) COOKIE_FROM(-1, INT_MAX), which indicates that an ASID should be
315 * allocated when the pmap is next activated.
317 #define COOKIE_FROM(asid, epoch) ((long)((u_int)(asid) | \
318 ((u_long)(epoch) << 32)))
319 #define COOKIE_TO_ASID(cookie) ((int)(cookie))
320 #define COOKIE_TO_EPOCH(cookie) ((int)((u_long)(cookie) >> 32))
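/*
 * Worked example: COOKIE_FROM(5, 2) stores ASID 5 in the low 32 bits and
 * epoch 2 in the high 32 bits, so COOKIE_TO_ASID() recovers 5 and
 * COOKIE_TO_EPOCH() recovers 2.  The reserved cookies described above use
 * the same layout; their INT_MIN/INT_MAX epochs can never equal asid_epoch,
 * which is confined to [0, INT_MAX).
 */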
322 static int superpages_enabled = 1;
323 SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
324 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
325 "Are large page mappings enabled?");
328 * Internal flags for pmap_enter()'s helper functions.
330 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
331 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
333 static void free_pv_chunk(struct pv_chunk *pc);
334 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
335 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
336 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
337 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
338 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
341 static bool pmap_activate_int(pmap_t pmap);
342 static void pmap_alloc_asid(pmap_t pmap);
343 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
344 static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
345 static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
346 vm_offset_t va, struct rwlock **lockp);
347 static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
348 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
349 vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
350 static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
351 u_int flags, vm_page_t m, struct rwlock **lockp);
352 static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
353 pd_entry_t l1e, struct spglist *free, struct rwlock **lockp);
354 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
355 pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
356 static void pmap_reset_asid_set(void);
357 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
358 vm_page_t m, struct rwlock **lockp);
360 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
361 struct rwlock **lockp);
363 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
364 struct spglist *free);
365 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
366 static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
369 * These load the old table data and store the new value.
370 * They need to be atomic as the System MMU may write to the table at
371 * the same time as the CPU.
373 #define pmap_clear(table) atomic_store_64(table, 0)
374 #define pmap_clear_bits(table, bits) atomic_clear_64(table, bits)
375 #define pmap_load(table) (*table)
376 #define pmap_load_clear(table) atomic_swap_64(table, 0)
377 #define pmap_load_store(table, entry) atomic_swap_64(table, entry)
378 #define pmap_set_bits(table, bits) atomic_set_64(table, bits)
379 #define pmap_store(table, entry) atomic_store_64(table, entry)
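/*
 * For example, pmap_load_clear() is used when tearing down a mapping: it
 * atomically exchanges the entry with zero and returns the old value, so the
 * caller can transfer the access and dirty state recorded in the old PTE
 * (ATTR_AF, ATTR_SW_DBM/ATTR_AP) to the vm_page without racing a concurrent
 * hardware or System MMU table walk.
 */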
381 /********************/
382 /* Inline functions */
383 /********************/
386 pagecopy(void *s, void *d)
389 memcpy(d, s, PAGE_SIZE);
392 static __inline pd_entry_t *
393 pmap_l0(pmap_t pmap, vm_offset_t va)
396 return (&pmap->pm_l0[pmap_l0_index(va)]);
399 static __inline pd_entry_t *
400 pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
404 l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
405 return (&l1[pmap_l1_index(va)]);
408 static __inline pd_entry_t *
409 pmap_l1(pmap_t pmap, vm_offset_t va)
413 l0 = pmap_l0(pmap, va);
414 if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
417 return (pmap_l0_to_l1(l0, va));
420 static __inline pd_entry_t *
421 pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
425 l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
426 return (&l2[pmap_l2_index(va)]);
429 static __inline pd_entry_t *
430 pmap_l2(pmap_t pmap, vm_offset_t va)
434 l1 = pmap_l1(pmap, va);
435 if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
438 return (pmap_l1_to_l2(l1, va));
441 static __inline pt_entry_t *
442 pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
446 l3 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
447 return (&l3[pmap_l3_index(va)]);
451 * Returns the lowest valid pde for a given virtual address.
452 * The next level may or may not point to a valid page or block.
454 static __inline pd_entry_t *
455 pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
457 pd_entry_t *l0, *l1, *l2, desc;
459 l0 = pmap_l0(pmap, va);
460 desc = pmap_load(l0) & ATTR_DESCR_MASK;
461 if (desc != L0_TABLE) {
466 l1 = pmap_l0_to_l1(l0, va);
467 desc = pmap_load(l1) & ATTR_DESCR_MASK;
468 if (desc != L1_TABLE) {
473 l2 = pmap_l1_to_l2(l1, va);
474 desc = pmap_load(l2) & ATTR_DESCR_MASK;
475 if (desc != L2_TABLE) {
485 * Returns the lowest valid pte block or table entry for a given virtual
486 * address. If there are no valid entries return NULL and set the level to
487 * the first invalid level.
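 *
 * For example, a VA covered by a 2 MB L2 block yields the L2 entry with
 * *level set to 2, a VA backed by a 4 KB page yields the L3 entry with
 * *level set to 3, and a VA whose L1 entry is invalid yields NULL with
 * *level set to 1.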
489 static __inline pt_entry_t *
490 pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
492 pd_entry_t *l1, *l2, desc;
495 l1 = pmap_l1(pmap, va);
500 desc = pmap_load(l1) & ATTR_DESCR_MASK;
501 if (desc == L1_BLOCK) {
506 if (desc != L1_TABLE) {
511 l2 = pmap_l1_to_l2(l1, va);
512 desc = pmap_load(l2) & ATTR_DESCR_MASK;
513 if (desc == L2_BLOCK) {
518 if (desc != L2_TABLE) {
524 l3 = pmap_l2_to_l3(l2, va);
525 if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
532 pmap_ps_enabled(pmap_t pmap __unused)
535 return (superpages_enabled != 0);
539 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
540 pd_entry_t **l2, pt_entry_t **l3)
542 pd_entry_t *l0p, *l1p, *l2p;
544 if (pmap->pm_l0 == NULL)
547 l0p = pmap_l0(pmap, va);
550 if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
553 l1p = pmap_l0_to_l1(l0p, va);
556 if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
562 if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
565 l2p = pmap_l1_to_l2(l1p, va);
568 if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
573 if ((pmap_load(l2p) & ATTR_DESCR_MASK) != L2_TABLE)
576 *l3 = pmap_l2_to_l3(l2p, va);
582 pmap_l3_valid(pt_entry_t l3)
585 return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
589 CTASSERT(L1_BLOCK == L2_BLOCK);
592 * Checks if the PTE is dirty.
595 pmap_pte_dirty(pt_entry_t pte)
598 KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
599 KASSERT((pte & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) != 0,
600 ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
602 return ((pte & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
603 (ATTR_AP(ATTR_AP_RW) | ATTR_SW_DBM));
607 pmap_resident_count_inc(pmap_t pmap, int count)
610 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
611 pmap->pm_stats.resident_count += count;
615 pmap_resident_count_dec(pmap_t pmap, int count)
618 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
619 KASSERT(pmap->pm_stats.resident_count >= count,
620 ("pmap %p resident count underflow %ld %d", pmap,
621 pmap->pm_stats.resident_count, count));
622 pmap->pm_stats.resident_count -= count;
626 pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
632 l1 = (pd_entry_t *)l1pt;
633 *l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
635 /* Check locore has used a table L1 map */
636 KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
637 ("Invalid bootstrap L1 table"));
638 /* Find the address of the L2 table */
639 l2 = (pt_entry_t *)init_pt_va;
640 *l2_slot = pmap_l2_index(va);
646 pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
648 u_int l1_slot, l2_slot;
651 l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);
653 return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
657 pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
658 vm_offset_t freemempos)
662 vm_paddr_t l2_pa, pa;
663 u_int l1_slot, l2_slot, prev_l1_slot;
666 dmap_phys_base = min_pa & ~L1_OFFSET;
673 memset(pagetable_dmap, 0, PAGE_SIZE * DMAP_TABLES);
675 for (i = 0; i < (physmap_idx * 2); i += 2) {
676 pa = physmap[i] & ~L2_OFFSET;
677 va = pa - dmap_phys_base + DMAP_MIN_ADDRESS;
679 /* Create L2 mappings at the start of the region */
680 if ((pa & L1_OFFSET) != 0) {
681 l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
682 if (l1_slot != prev_l1_slot) {
683 prev_l1_slot = l1_slot;
684 l2 = (pt_entry_t *)freemempos;
685 l2_pa = pmap_early_vtophys(kern_l1,
687 freemempos += PAGE_SIZE;
689 pmap_store(&pagetable_dmap[l1_slot],
690 (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
692 memset(l2, 0, PAGE_SIZE);
695 ("pmap_bootstrap_dmap: NULL l2 map"));
696 for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
697 pa += L2_SIZE, va += L2_SIZE) {
699 * We are on a boundary, stop to
700 * create a level 1 block
702 if ((pa & L1_OFFSET) == 0)
705 l2_slot = pmap_l2_index(va);
706 KASSERT(l2_slot != 0, ("..."));
707 pmap_store(&l2[l2_slot],
708 (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
709 ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
711 KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS),
715 for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1] &&
716 (physmap[i + 1] - pa) >= L1_SIZE;
717 pa += L1_SIZE, va += L1_SIZE) {
718 l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
719 pmap_store(&pagetable_dmap[l1_slot],
720 (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
721 ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
724 /* Create L2 mappings at the end of the region */
725 if (pa < physmap[i + 1]) {
726 l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
727 if (l1_slot != prev_l1_slot) {
728 prev_l1_slot = l1_slot;
729 l2 = (pt_entry_t *)freemempos;
730 l2_pa = pmap_early_vtophys(kern_l1,
732 freemempos += PAGE_SIZE;
734 pmap_store(&pagetable_dmap[l1_slot],
735 (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
737 memset(l2, 0, PAGE_SIZE);
740 ("pmap_bootstrap_dmap: NULL l2 map"));
741 for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
742 pa += L2_SIZE, va += L2_SIZE) {
743 l2_slot = pmap_l2_index(va);
744 pmap_store(&l2[l2_slot],
745 (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
746 ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
750 if (pa > dmap_phys_max) {
762 pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
769 KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
771 l1 = (pd_entry_t *)l1pt;
772 l1_slot = pmap_l1_index(va);
775 for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
776 KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
778 pa = pmap_early_vtophys(l1pt, l2pt);
779 pmap_store(&l1[l1_slot],
780 (pa & ~Ln_TABLE_MASK) | L1_TABLE);
784 /* Clean the L2 page table */
785 memset((void *)l2_start, 0, l2pt - l2_start);
791 pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
798 KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
800 l2 = pmap_l2(kernel_pmap, va);
801 l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE);
802 l2_slot = pmap_l2_index(va);
805 for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
806 KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
808 pa = pmap_early_vtophys(l1pt, l3pt);
809 pmap_store(&l2[l2_slot],
810 (pa & ~Ln_TABLE_MASK) | ATTR_UXN | L2_TABLE);
814 /* Clean the L3 page tables */
815 memset((void *)l3_start, 0, l3pt - l3_start);
821 * Bootstrap the system enough to run with virtual memory.
824 pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
827 u_int l1_slot, l2_slot;
829 vm_offset_t va, freemempos;
830 vm_offset_t dpcpu, msgbufpv;
831 vm_paddr_t start_pa, pa, min_pa;
835 /* Verify that the ASID is set through TTBR0. */
836 KASSERT((READ_SPECIALREG(tcr_el1) & TCR_A1) == 0,
837 ("pmap_bootstrap: TCR_EL1.A1 != 0"));
839 kern_delta = KERNBASE - kernstart;
841 printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
842 printf("%lx\n", l1pt);
843 printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
845 /* Set this early so we can use the pagetable walking functions */
846 kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
847 PMAP_LOCK_INIT(kernel_pmap);
848 kernel_pmap->pm_l0_paddr = l0pt - kern_delta;
849 kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
851 /* Assume the address we were loaded to is a valid physical address */
852 min_pa = KERNBASE - kern_delta;
854 physmap_idx = arm_physmem_avail(physmap, nitems(physmap));
858 * Find the minimum physical address. physmap is sorted,
859 * but may contain empty ranges.
861 for (i = 0; i < (physmap_idx * 2); i += 2) {
862 if (physmap[i] == physmap[i + 1])
864 if (physmap[i] <= min_pa)
868 freemempos = KERNBASE + kernlen;
869 freemempos = roundup2(freemempos, PAGE_SIZE);
871 /* Create a direct map region early so we can use it for pa -> va */
872 freemempos = pmap_bootstrap_dmap(l1pt, min_pa, freemempos);
875 start_pa = pa = KERNBASE - kern_delta;
878 * Read the page table to find out what is already mapped.
879 * This assumes we have mapped a block of memory from KERNBASE
880 * using a single L1 entry.
882 l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);
884 /* Sanity check the index, KERNBASE should be the first VA */
885 KASSERT(l2_slot == 0, ("The L2 index is non-zero"));
887 /* Find how many pages we have mapped */
888 for (; l2_slot < Ln_ENTRIES; l2_slot++) {
889 if ((l2[l2_slot] & ATTR_DESCR_MASK) == 0)
892 /* Check locore used L2 blocks */
893 KASSERT((l2[l2_slot] & ATTR_DESCR_MASK) == L2_BLOCK,
894 ("Invalid bootstrap L2 table"));
895 KASSERT((l2[l2_slot] & ~ATTR_MASK) == pa,
896 ("Incorrect PA in L2 table"));
902 va = roundup2(va, L1_SIZE);
904 /* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS */
905 freemempos = pmap_bootstrap_l2(l1pt, va, freemempos);
906 /* And the l3 tables for the early devmap */
907 freemempos = pmap_bootstrap_l3(l1pt,
908 VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE), freemempos);
912 #define alloc_pages(var, np) \
913 (var) = freemempos; \
914 freemempos += (np * PAGE_SIZE); \
915 memset((char *)(var), 0, ((np) * PAGE_SIZE));
917 /* Allocate dynamic per-cpu area. */
918 alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
919 dpcpu_init((void *)dpcpu, 0);
921 /* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
922 alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
923 msgbufp = (void *)msgbufpv;
925 /* Reserve some VA space for early BIOS/ACPI mapping */
926 preinit_map_va = roundup2(freemempos, L2_SIZE);
928 virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
929 virtual_avail = roundup2(virtual_avail, L1_SIZE);
930 virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
931 kernel_vm_end = virtual_avail;
933 pa = pmap_early_vtophys(l1pt, freemempos);
935 arm_physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);
941 * Initialize a vm_page's machine-dependent fields.
944 pmap_page_init(vm_page_t m)
947 TAILQ_INIT(&m->md.pv_list);
948 m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
952 * Initialize the pmap module.
953 * Called by vm_init, to initialize any structures that the pmap
954 * system needs to map virtual memory.
963 * Determine whether an ASID is 8 or 16 bits in size.
965 asid_bits = (READ_SPECIALREG(tcr_el1) & TCR_ASID_16) != 0 ? 16 : 8;
968 * Are large page mappings enabled?
970 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
971 if (superpages_enabled) {
972 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
973 ("pmap_init: can't assign to pagesizes[1]"));
974 pagesizes[1] = L2_SIZE;
978 * Initialize the ASID allocator. At this point, we are still too
979 * early in the overall initialization process to use bit_alloc().
981 asid_set_size = 1 << asid_bits;
982 asid_set = (bitstr_t *)kmem_malloc(bitstr_size(asid_set_size),
984 for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
985 bit_set(asid_set, i);
986 asid_next = ASID_FIRST_AVAILABLE;
987 mtx_init(&asid_set_mutex, "asid set", NULL, MTX_SPIN);
990 * Initialize the pv chunk list mutex.
992 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
995 * Initialize the pool of pv list locks.
997 for (i = 0; i < NPV_LIST_LOCKS; i++)
998 rw_init(&pv_list_locks[i], "pmap pv list");
1001 * Calculate the size of the pv head table for superpages.
1003 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
1006 * Allocate memory for the pv head table for superpages.
1008 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
1010 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
1011 for (i = 0; i < pv_npg; i++)
1012 TAILQ_INIT(&pv_table[i].pv_list);
1013 TAILQ_INIT(&pv_dummy.pv_list);
1018 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD, 0,
1019 "2MB page mapping counters");
1021 static u_long pmap_l2_demotions;
1022 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
1023 &pmap_l2_demotions, 0, "2MB page demotions");
1025 static u_long pmap_l2_mappings;
1026 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, mappings, CTLFLAG_RD,
1027 &pmap_l2_mappings, 0, "2MB page mappings");
1029 static u_long pmap_l2_p_failures;
1030 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
1031 &pmap_l2_p_failures, 0, "2MB page promotion failures");
1033 static u_long pmap_l2_promotions;
1034 SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
1035 &pmap_l2_promotions, 0, "2MB page promotions");
1038 * Invalidate a single TLB entry.
1040 static __inline void
1041 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1047 if (pmap == kernel_pmap) {
1049 __asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1051 r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | atop(va);
1052 __asm __volatile("tlbi vae1is, %0" : : "r" (r));
1059 static __inline void
1060 pmap_invalidate_range_nopin(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1062 uint64_t end, r, start;
1065 if (pmap == kernel_pmap) {
1068 for (r = start; r < end; r++)
1069 __asm __volatile("tlbi vaae1is, %0" : : "r" (r));
1071 start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1074 for (r = start; r < end; r++)
1075 __asm __volatile("tlbi vae1is, %0" : : "r" (r));
1081 static __inline void
1082 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1086 pmap_invalidate_range_nopin(pmap, sva, eva);
1090 static __inline void
1091 pmap_invalidate_all(pmap_t pmap)
1097 if (pmap == kernel_pmap) {
1098 __asm __volatile("tlbi vmalle1is");
1100 r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
1101 __asm __volatile("tlbi aside1is, %0" : : "r" (r));
1109 * Routine: pmap_extract
1111 * Extract the physical page address associated
1112 * with the given map/virtual_address pair.
1115 pmap_extract(pmap_t pmap, vm_offset_t va)
1117 pt_entry_t *pte, tpte;
1124 * Find the block or page map for this virtual address. pmap_pte
1125 * will return either a valid block/page entry, or NULL.
1127 pte = pmap_pte(pmap, va, &lvl);
1129 tpte = pmap_load(pte);
1130 pa = tpte & ~ATTR_MASK;
1133 KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1134 ("pmap_extract: Invalid L1 pte found: %lx",
1135 tpte & ATTR_DESCR_MASK));
1136 pa |= (va & L1_OFFSET);
1139 KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1140 ("pmap_extract: Invalid L2 pte found: %lx",
1141 tpte & ATTR_DESCR_MASK));
1142 pa |= (va & L2_OFFSET);
1145 KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1146 ("pmap_extract: Invalid L3 pte found: %lx",
1147 tpte & ATTR_DESCR_MASK));
1148 pa |= (va & L3_OFFSET);
1157 * Routine: pmap_extract_and_hold
1159 * Atomically extract and hold the physical page
1160 * with the given pmap and virtual address pair
1161 * if that mapping permits the given protection.
1164 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1166 pt_entry_t *pte, tpte;
1173 pte = pmap_pte(pmap, va, &lvl);
1175 tpte = pmap_load(pte);
1177 KASSERT(lvl > 0 && lvl <= 3,
1178 ("pmap_extract_and_hold: Invalid level %d", lvl));
1179 CTASSERT(L1_BLOCK == L2_BLOCK);
1180 KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
1181 (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
1182 ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
1183 tpte & ATTR_DESCR_MASK));
1184 if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
1185 ((prot & VM_PROT_WRITE) == 0)) {
1188 off = va & L1_OFFSET;
1191 off = va & L2_OFFSET;
1197 m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
1198 if (!vm_page_wire_mapped(m))
1207 pmap_kextract(vm_offset_t va)
1209 pt_entry_t *pte, tpte;
1211 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
1212 return (DMAP_TO_PHYS(va));
1213 pte = pmap_l1(kernel_pmap, va);
1218 * A concurrent pmap_update_entry() will clear the entry's valid bit
1219 * but leave the rest of the entry unchanged. Therefore, we treat a
1220 * non-zero entry as being valid, and we ignore the valid bit when
1221 * determining whether the entry maps a block, page, or table.
1223 tpte = pmap_load(pte);
1226 if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
1227 return ((tpte & ~ATTR_MASK) | (va & L1_OFFSET));
1228 pte = pmap_l1_to_l2(&tpte, va);
1229 tpte = pmap_load(pte);
1232 if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
1233 return ((tpte & ~ATTR_MASK) | (va & L2_OFFSET));
1234 pte = pmap_l2_to_l3(&tpte, va);
1235 tpte = pmap_load(pte);
1238 return ((tpte & ~ATTR_MASK) | (va & L3_OFFSET));
1241 /***************************************************
1242 * Low level mapping routines.....
1243 ***************************************************/
1246 pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
1249 pt_entry_t *pte, attr;
1253 KASSERT((pa & L3_OFFSET) == 0,
1254 ("pmap_kenter: Invalid physical address"));
1255 KASSERT((sva & L3_OFFSET) == 0,
1256 ("pmap_kenter: Invalid virtual address"));
1257 KASSERT((size & PAGE_MASK) == 0,
1258 ("pmap_kenter: Mapping is not page-sized"));
1260 attr = ATTR_DEFAULT | ATTR_IDX(mode) | L3_PAGE;
1261 if (mode == DEVICE_MEMORY)
1268 pde = pmap_pde(kernel_pmap, va, &lvl);
1269 KASSERT(pde != NULL,
1270 ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
1271 KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
1273 pte = pmap_l2_to_l3(pde, va);
1274 pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
1280 pmap_invalidate_range(kernel_pmap, sva, va);
1284 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
1287 pmap_kenter(sva, size, pa, DEVICE_MEMORY);
1291 * Remove a page from the kernel pagetables.
1294 pmap_kremove(vm_offset_t va)
1299 pte = pmap_pte(kernel_pmap, va, &lvl);
1300 KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
1301 KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));
1304 pmap_invalidate_page(kernel_pmap, va);
1308 pmap_kremove_device(vm_offset_t sva, vm_size_t size)
1314 KASSERT((sva & L3_OFFSET) == 0,
1315 ("pmap_kremove_device: Invalid virtual address"));
1316 KASSERT((size & PAGE_MASK) == 0,
1317 ("pmap_kremove_device: Mapping is not page-sized"));
1321 pte = pmap_pte(kernel_pmap, va, &lvl);
1322 KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
1324 ("Invalid device pagetable level: %d != 3", lvl));
1330 pmap_invalidate_range(kernel_pmap, sva, va);
1334 * Used to map a range of physical addresses into kernel
1335 * virtual address space.
1337 * The value passed in '*virt' is a suggested virtual address for
1338 * the mapping. Architectures which can support a direct-mapped
1339 * physical to virtual region can return the appropriate address
1340 * within that region, leaving '*virt' unchanged. Other
1341 * architectures should map the pages starting at '*virt' and
1342 * update '*virt' with the first usable address after the mapped
1346 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1348 return PHYS_TO_DMAP(start);
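/*
 * On arm64 all of physical memory is covered by the direct map (see
 * dmap_phys_base/dmap_max_addr above), so the simplest correct
 * implementation is to return the DMAP address of 'start' and leave
 * '*virt' untouched, as the contract above permits.
 */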
1353 * Add a list of wired pages to the kva
1354 * this routine is only used for temporary
1355 * kernel mappings that do not need to have
1356 * page modification or references recorded.
1357 * Note that old mappings are simply written
1358 * over. The page *must* be wired.
1359 * Note: SMP coherent. Uses a ranged shootdown IPI.
1362 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1365 pt_entry_t *pte, pa;
1371 for (i = 0; i < count; i++) {
1372 pde = pmap_pde(kernel_pmap, va, &lvl);
1373 KASSERT(pde != NULL,
1374 ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
1376 ("pmap_qenter: Invalid level %d", lvl));
1379 pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
1380 ATTR_UXN | ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
1381 if (m->md.pv_memattr == DEVICE_MEMORY)
1383 pte = pmap_l2_to_l3(pde, va);
1384 pmap_load_store(pte, pa);
1388 pmap_invalidate_range(kernel_pmap, sva, va);
1392 * This routine tears out page mappings from the
1393 * kernel -- it is meant only for temporary mappings.
1396 pmap_qremove(vm_offset_t sva, int count)
1402 KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
1405 while (count-- > 0) {
1406 pte = pmap_pte(kernel_pmap, va, &lvl);
1408 ("Invalid device pagetable level: %d != 3", lvl));
1415 pmap_invalidate_range(kernel_pmap, sva, va);
1418 /***************************************************
1419 * Page table page management routines.....
1420 ***************************************************/
1422 * Schedule the specified unused page table page to be freed. Specifically,
1423 * add the page to the specified list of pages that will be released to the
1424 * physical memory manager after the TLB has been updated.
1426 static __inline void
1427 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1428 boolean_t set_PG_ZERO)
1432 m->flags |= PG_ZERO;
1434 m->flags &= ~PG_ZERO;
1435 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1439 * Decrements a page table page's reference count, which is used to record the
1440 * number of valid page table entries within the page. If the reference count
1441 * drops to zero, then the page table page is unmapped. Returns TRUE if the
1442 * page table page was unmapped and FALSE otherwise.
1444 static inline boolean_t
1445 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1449 if (m->ref_count == 0) {
1450 _pmap_unwire_l3(pmap, va, m, free);
1457 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1460 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1462 * unmap the page table page
1464 if (m->pindex >= (NUL2E + NUL1E)) {
1468 l0 = pmap_l0(pmap, va);
1470 } else if (m->pindex >= NUL2E) {
1474 l1 = pmap_l1(pmap, va);
1480 l2 = pmap_l2(pmap, va);
1483 pmap_resident_count_dec(pmap, 1);
1484 if (m->pindex < NUL2E) {
1485 /* We just released an l3, unhold the matching l2 */
1486 pd_entry_t *l1, tl1;
1489 l1 = pmap_l1(pmap, va);
1490 tl1 = pmap_load(l1);
1491 l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1492 pmap_unwire_l3(pmap, va, l2pg, free);
1493 } else if (m->pindex < (NUL2E + NUL1E)) {
1494 /* We just released an l2, unhold the matching l1 */
1495 pd_entry_t *l0, tl0;
1498 l0 = pmap_l0(pmap, va);
1499 tl0 = pmap_load(l0);
1500 l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1501 pmap_unwire_l3(pmap, va, l1pg, free);
1503 pmap_invalidate_page(pmap, va);
1506 * Put page on a list so that it is released after
1507 * *ALL* TLB shootdown is done
1509 pmap_add_delayed_free_list(m, free, TRUE);
1513 * After removing a page table entry, this routine is used to
1514 * conditionally free the page, and manage the reference count.
1517 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1518 struct spglist *free)
1522 if (va >= VM_MAXUSER_ADDRESS)
1524 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1525 mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
1526 return (pmap_unwire_l3(pmap, va, mpte, free));
1530 pmap_pinit0(pmap_t pmap)
1533 PMAP_LOCK_INIT(pmap);
1534 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1535 pmap->pm_l0_paddr = READ_SPECIALREG(ttbr0_el1);
1536 pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
1537 pmap->pm_root.rt_root = 0;
1538 pmap->pm_cookie = COOKIE_FROM(ASID_RESERVED_FOR_PID_0, INT_MIN);
1540 PCPU_SET(curpmap, pmap);
1544 pmap_pinit(pmap_t pmap)
1549 * allocate the l0 page
1551 while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
1552 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1555 pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(l0pt);
1556 pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);
1558 if ((l0pt->flags & PG_ZERO) == 0)
1559 pagezero(pmap->pm_l0);
1561 pmap->pm_root.rt_root = 0;
1562 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1563 pmap->pm_cookie = COOKIE_FROM(-1, INT_MAX);
1569 * This routine is called if the desired page table page does not exist.
1571 * If page table page allocation fails, this routine may sleep before
1572 * returning NULL. It sleeps only if a lock pointer was given.
1574 * Note: If a page allocation fails at page table level two or three,
1575 * one or two pages may be held during the wait, only to be released
1576 * afterwards. This conservative approach is easily argued to avoid race conditions.
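 *
 * For example, a ptepindex below NUL2E names an L3 page table page, one in
 * [NUL2E, NUL2E + NUL1E) names an L2 page, and one at or above NUL2E + NUL1E
 * names an L1 page; the three branches below follow this encoding.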
1580 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1582 vm_page_t m, l1pg, l2pg;
1584 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1587 * Allocate a page table page.
1589 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1590 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1591 if (lockp != NULL) {
1592 RELEASE_PV_LIST_LOCK(lockp);
1599 * Indicate the need to retry. While waiting, the page table
1600 * page may have been allocated.
1604 if ((m->flags & PG_ZERO) == 0)
1608 * Because of AArch64's weak memory consistency model, we must have a
1609 * barrier here to ensure that the stores for zeroing "m", whether by
1610 * pmap_zero_page() or an earlier function, are visible before adding
1611 * "m" to the page table. Otherwise, a page table walk by another
1612 * processor's MMU could see the mapping to "m" and a stale, non-zero
1618 * Map the pagetable page into the process address space, if
1619 * it isn't already there.
1622 if (ptepindex >= (NUL2E + NUL1E)) {
1624 vm_pindex_t l0index;
1626 l0index = ptepindex - (NUL2E + NUL1E);
1627 l0 = &pmap->pm_l0[l0index];
1628 pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
1629 } else if (ptepindex >= NUL2E) {
1630 vm_pindex_t l0index, l1index;
1631 pd_entry_t *l0, *l1;
1634 l1index = ptepindex - NUL2E;
1635 l0index = l1index >> L0_ENTRIES_SHIFT;
1637 l0 = &pmap->pm_l0[l0index];
1638 tl0 = pmap_load(l0);
1640 /* recurse for allocating page dir */
1641 if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
1643 vm_page_unwire_noq(m);
1644 vm_page_free_zero(m);
1648 l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1652 l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
1653 l1 = &l1[ptepindex & Ln_ADDR_MASK];
1654 pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
1656 vm_pindex_t l0index, l1index;
1657 pd_entry_t *l0, *l1, *l2;
1658 pd_entry_t tl0, tl1;
1660 l1index = ptepindex >> Ln_ENTRIES_SHIFT;
1661 l0index = l1index >> L0_ENTRIES_SHIFT;
1663 l0 = &pmap->pm_l0[l0index];
1664 tl0 = pmap_load(l0);
1666 /* recurse for allocating page dir */
1667 if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1669 vm_page_unwire_noq(m);
1670 vm_page_free_zero(m);
1673 tl0 = pmap_load(l0);
1674 l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1675 l1 = &l1[l1index & Ln_ADDR_MASK];
1677 l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1678 l1 = &l1[l1index & Ln_ADDR_MASK];
1679 tl1 = pmap_load(l1);
1681 /* recurse for allocating page dir */
1682 if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1684 vm_page_unwire_noq(m);
1685 vm_page_free_zero(m);
1689 l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1694 l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
1695 l2 = &l2[ptepindex & Ln_ADDR_MASK];
1696 pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
1699 pmap_resident_count_inc(pmap, 1);
1705 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1709 vm_pindex_t l2pindex;
1712 l1 = pmap_l1(pmap, va);
1713 if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
1714 /* Add a reference to the L2 page. */
1715 l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
1718 /* Allocate a L2 page. */
1719 l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
1720 l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
1721 if (l2pg == NULL && lockp != NULL)
1728 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1730 vm_pindex_t ptepindex;
1731 pd_entry_t *pde, tpde;
1739 * Calculate pagetable page index
1741 ptepindex = pmap_l2_pindex(va);
1744 * Get the page directory entry
1746 pde = pmap_pde(pmap, va, &lvl);
1749 * If the page table page is mapped, we just increment the hold count,
1750 * and activate it. If we get a level 2 pde it will point to a level 3 table.
1758 pte = pmap_l0_to_l1(pde, va);
1759 KASSERT(pmap_load(pte) == 0,
1760 ("pmap_alloc_l3: TODO: l0 superpages"));
1765 pte = pmap_l1_to_l2(pde, va);
1766 KASSERT(pmap_load(pte) == 0,
1767 ("pmap_alloc_l3: TODO: l1 superpages"));
1771 tpde = pmap_load(pde);
1773 m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
1779 panic("pmap_alloc_l3: Invalid level %d", lvl);
1783 * Here if the pte page isn't mapped, or if it has been deallocated.
1785 m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1786 if (m == NULL && lockp != NULL)
1792 /***************************************************
1793 * Pmap allocation/deallocation routines.
1794 ***************************************************/
1797 * Release any resources held by the given physical map.
1798 * Called when a pmap initialized by pmap_pinit is being released.
1799 * Should only be called if the map contains no valid mappings.
1802 pmap_release(pmap_t pmap)
1807 KASSERT(pmap->pm_stats.resident_count == 0,
1808 ("pmap_release: pmap resident count %ld != 0",
1809 pmap->pm_stats.resident_count));
1810 KASSERT(vm_radix_is_empty(&pmap->pm_root),
1811 ("pmap_release: pmap has reserved page table page(s)"));
1813 mtx_lock_spin(&asid_set_mutex);
1814 if (COOKIE_TO_EPOCH(pmap->pm_cookie) == asid_epoch) {
1815 asid = COOKIE_TO_ASID(pmap->pm_cookie);
1816 KASSERT(asid >= ASID_FIRST_AVAILABLE && asid < asid_set_size,
1817 ("pmap_release: pmap cookie has out-of-range asid"));
1818 bit_clear(asid_set, asid);
1820 mtx_unlock_spin(&asid_set_mutex);
1822 m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
1823 vm_page_unwire_noq(m);
1824 vm_page_free_zero(m);
1828 kvm_size(SYSCTL_HANDLER_ARGS)
1830 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
1832 return sysctl_handle_long(oidp, &ksize, 0, req);
1834 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1835 0, 0, kvm_size, "LU", "Size of KVM");
1838 kvm_free(SYSCTL_HANDLER_ARGS)
1840 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1842 return sysctl_handle_long(oidp, &kfree, 0, req);
1844 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1845 0, 0, kvm_free, "LU", "Amount of KVM free");
1848 * grow the number of kernel page table entries, if needed
1851 pmap_growkernel(vm_offset_t addr)
1855 pd_entry_t *l0, *l1, *l2;
1857 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1859 addr = roundup2(addr, L2_SIZE);
1860 if (addr - 1 >= vm_map_max(kernel_map))
1861 addr = vm_map_max(kernel_map);
1862 while (kernel_vm_end < addr) {
1863 l0 = pmap_l0(kernel_pmap, kernel_vm_end);
1864 KASSERT(pmap_load(l0) != 0,
1865 ("pmap_growkernel: No level 0 kernel entry"));
1867 l1 = pmap_l0_to_l1(l0, kernel_vm_end);
1868 if (pmap_load(l1) == 0) {
1869 /* We need a new L1 table entry */
1870 nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
1871 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
1872 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1874 panic("pmap_growkernel: no memory to grow kernel");
1875 if ((nkpg->flags & PG_ZERO) == 0)
1876 pmap_zero_page(nkpg);
1877 /* See the dmb() in _pmap_alloc_l3(). */
1879 paddr = VM_PAGE_TO_PHYS(nkpg);
1880 pmap_store(l1, paddr | L1_TABLE);
1881 continue; /* try again */
1883 l2 = pmap_l1_to_l2(l1, kernel_vm_end);
1884 if (pmap_load(l2) != 0) {
1885 kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1886 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1887 kernel_vm_end = vm_map_max(kernel_map);
1893 nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
1894 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1897 panic("pmap_growkernel: no memory to grow kernel");
1898 if ((nkpg->flags & PG_ZERO) == 0)
1899 pmap_zero_page(nkpg);
1900 /* See the dmb() in _pmap_alloc_l3(). */
1902 paddr = VM_PAGE_TO_PHYS(nkpg);
1903 pmap_store(l2, paddr | L2_TABLE);
1905 kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1906 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1907 kernel_vm_end = vm_map_max(kernel_map);
1914 /***************************************************
1915 * page management routines.
1916 ***************************************************/
1918 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1919 CTASSERT(_NPCM == 3);
1920 CTASSERT(_NPCPV == 168);
1922 static __inline struct pv_chunk *
1923 pv_to_chunk(pv_entry_t pv)
1926 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1929 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1931 #define PC_FREE0 0xfffffffffffffffful
1932 #define PC_FREE1 0xfffffffffffffffful
1933 #define PC_FREE2 0x000000fffffffffful
1935 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
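/*
 * Worked example of the constants above: a pv_chunk tracks free entries in
 * three 64-bit maps.  PC_FREE0 and PC_FREE1 have all 64 bits set, while
 * PC_FREE2 (0x000000ffffffffff) has only its low 40 bits set, so a chunk
 * holds 64 + 64 + 40 == 168 == _NPCPV pv entries, sized so that the chunk
 * header plus the entries fill exactly one page (see the CTASSERTs above).
 */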
1939 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1941 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1942 "Current number of pv entry chunks");
1943 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1944 "Current number of pv entry chunks allocated");
1945 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1946 "Current number of pv entry chunks frees");
1947 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1948 "Number of times tried to get a chunk page but failed.");
1950 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
1951 static int pv_entry_spare;
1953 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1954 "Current number of pv entry frees");
1955 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1956 "Current number of pv entry allocs");
1957 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1958 "Current number of pv entries");
1959 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1960 "Current number of spare pv entries");
1965 * We are in a serious low memory condition. Resort to
1966 * drastic measures to free some pages so we can allocate
1967 * another pv entry chunk.
1969 * Returns NULL if PV entries were reclaimed from the specified pmap.
1971 * We do not, however, unmap 2mpages because subsequent accesses will
1972 * allocate per-page pv entries until repromotion occurs, thereby
1973 * exacerbating the shortage of free pv entries.
1976 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1978 struct pv_chunk *pc, *pc_marker, *pc_marker_end;
1979 struct pv_chunk_header pc_marker_b, pc_marker_end_b;
1980 struct md_page *pvh;
1982 pmap_t next_pmap, pmap;
1983 pt_entry_t *pte, tpte;
1987 struct spglist free;
1989 int bit, field, freed, lvl;
1990 static int active_reclaims = 0;
1992 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1993 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
1998 bzero(&pc_marker_b, sizeof(pc_marker_b));
1999 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
2000 pc_marker = (struct pv_chunk *)&pc_marker_b;
2001 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
2003 mtx_lock(&pv_chunks_mutex);
2005 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
2006 TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
2007 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
2008 SLIST_EMPTY(&free)) {
2009 next_pmap = pc->pc_pmap;
2010 if (next_pmap == NULL) {
2012 * The next chunk is a marker. However, it is
2013 * not our marker, so active_reclaims must be
2014 * > 1. Consequently, the next_chunk code
2015 * will not rotate the pv_chunks list.
2019 mtx_unlock(&pv_chunks_mutex);
2022 * A pv_chunk can only be removed from the pc_lru list
2023 * when both pv_chunks_mutex is owned and the
2024 * corresponding pmap is locked.
2026 if (pmap != next_pmap) {
2027 if (pmap != NULL && pmap != locked_pmap)
2030 /* Avoid deadlock and lock recursion. */
2031 if (pmap > locked_pmap) {
2032 RELEASE_PV_LIST_LOCK(lockp);
2034 mtx_lock(&pv_chunks_mutex);
2036 } else if (pmap != locked_pmap) {
2037 if (PMAP_TRYLOCK(pmap)) {
2038 mtx_lock(&pv_chunks_mutex);
2041 pmap = NULL; /* pmap is not locked */
2042 mtx_lock(&pv_chunks_mutex);
2043 pc = TAILQ_NEXT(pc_marker, pc_lru);
2045 pc->pc_pmap != next_pmap)
2053 * Destroy every non-wired, 4 KB page mapping in the chunk.
2056 for (field = 0; field < _NPCM; field++) {
2057 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
2058 inuse != 0; inuse &= ~(1UL << bit)) {
2059 bit = ffsl(inuse) - 1;
2060 pv = &pc->pc_pventry[field * 64 + bit];
2062 pde = pmap_pde(pmap, va, &lvl);
2065 pte = pmap_l2_to_l3(pde, va);
2066 tpte = pmap_load(pte);
2067 if ((tpte & ATTR_SW_WIRED) != 0)
2069 tpte = pmap_load_clear(pte);
2070 pmap_invalidate_page(pmap, va);
2071 m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
2072 if (pmap_pte_dirty(tpte))
2074 if ((tpte & ATTR_AF) != 0)
2075 vm_page_aflag_set(m, PGA_REFERENCED);
2076 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2077 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2079 if (TAILQ_EMPTY(&m->md.pv_list) &&
2080 (m->flags & PG_FICTITIOUS) == 0) {
2081 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2082 if (TAILQ_EMPTY(&pvh->pv_list)) {
2083 vm_page_aflag_clear(m,
2087 pc->pc_map[field] |= 1UL << bit;
2088 pmap_unuse_pt(pmap, va, pmap_load(pde), &free);
2093 mtx_lock(&pv_chunks_mutex);
2096 /* Every freed mapping is for a 4 KB page. */
2097 pmap_resident_count_dec(pmap, freed);
2098 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
2099 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
2100 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
2101 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2102 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
2103 pc->pc_map[2] == PC_FREE2) {
2104 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2105 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2106 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2107 /* Entire chunk is free; return it. */
2108 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2109 dump_drop_page(m_pc->phys_addr);
2110 mtx_lock(&pv_chunks_mutex);
2111 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2114 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2115 mtx_lock(&pv_chunks_mutex);
2116 /* One freed pv entry in locked_pmap is sufficient. */
2117 if (pmap == locked_pmap)
2121 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2122 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
2123 if (active_reclaims == 1 && pmap != NULL) {
2125 * Rotate the pv chunks list so that we do not
2126 * scan the same pv chunks that could not be
2127 * freed (because they contained a wired
2128 * and/or superpage mapping) on every
2129 * invocation of reclaim_pv_chunk().
2131 while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
2132 MPASS(pc->pc_pmap != NULL);
2133 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2134 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2138 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
2139 TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
2141 mtx_unlock(&pv_chunks_mutex);
2142 if (pmap != NULL && pmap != locked_pmap)
2144 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
2145 m_pc = SLIST_FIRST(&free);
2146 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2147 /* Recycle a freed page table page. */
2148 m_pc->ref_count = 1;
2150 vm_page_free_pages_toq(&free, true);
2155 * free the pv_entry back to the free list
2158 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2160 struct pv_chunk *pc;
2161 int idx, field, bit;
2163 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2164 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
2165 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
2166 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
2167 pc = pv_to_chunk(pv);
2168 idx = pv - &pc->pc_pventry[0];
2169 field = idx / 64;
2170 bit = idx % 64;
2171 pc->pc_map[field] |= 1ul << bit;
2172 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
2173 pc->pc_map[2] != PC_FREE2) {
2174 /* 98% of the time, pc is already at the head of the list. */
2175 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
2176 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2177 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2181 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2186 free_pv_chunk(struct pv_chunk *pc)
2190 mtx_lock(&pv_chunks_mutex);
2191 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
2192 mtx_unlock(&pv_chunks_mutex);
2193 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
2194 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
2195 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
2196 /* entire chunk is free, return it */
2197 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2198 dump_drop_page(m->phys_addr);
2199 vm_page_unwire_noq(m);
2204 * Returns a new PV entry, allocating a new PV chunk from the system when
2205 * needed. If this PV chunk allocation fails and a PV list lock pointer was
2206 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
2209 * The given PV list lock may be released.
2212 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
2216 struct pv_chunk *pc;
2219 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2220 PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
2222 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2224 for (field = 0; field < _NPCM; field++) {
2225 if (pc->pc_map[field]) {
2226 bit = ffsl(pc->pc_map[field]) - 1;
2230 if (field < _NPCM) {
2231 pv = &pc->pc_pventry[field * 64 + bit];
2232 pc->pc_map[field] &= ~(1ul << bit);
2233 /* If this was the last item, move it to tail */
2234 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
2235 pc->pc_map[2] == 0) {
2236 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2237 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
2240 PV_STAT(atomic_add_long(&pv_entry_count, 1));
2241 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
2245 /* No free items, allocate another chunk */
2246 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2249 if (lockp == NULL) {
2250 PV_STAT(pc_chunk_tryfail++);
2253 m = reclaim_pv_chunk(pmap, lockp);
2257 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2258 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2259 dump_add_page(m->phys_addr);
2260 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
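/* Initialize the new chunk, reserving its first entry for the caller. */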
2262 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
2263 pc->pc_map[1] = PC_FREE1;
2264 pc->pc_map[2] = PC_FREE2;
2265 mtx_lock(&pv_chunks_mutex);
2266 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
2267 mtx_unlock(&pv_chunks_mutex);
2268 pv = &pc->pc_pventry[0];
2269 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2270 PV_STAT(atomic_add_long(&pv_entry_count, 1));
2271 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
2276 * Ensure that the number of spare PV entries in the specified pmap meets or
2277 * exceeds the given count, "needed".
2279 * The given PV list lock may be released.
2282 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
2284 struct pch new_tail;
2285 struct pv_chunk *pc;
2290 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2291 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
2294 * Newly allocated PV chunks must be stored in a private list until
2295 * the required number of PV chunks have been allocated. Otherwise,
2296 * reclaim_pv_chunk() could recycle one of these chunks. In
2297 * contrast, these chunks must be added to the pmap upon allocation.
2299 TAILQ_INIT(&new_tail);
2302 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
2303 bit_count((bitstr_t *)pc->pc_map, 0,
2304 sizeof(pc->pc_map) * NBBY, &free);
2308 if (avail >= needed)
2311 for (reclaimed = false; avail < needed; avail += _NPCPV) {
2312 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2315 m = reclaim_pv_chunk(pmap, lockp);
2320 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2321 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2322 dump_add_page(m->phys_addr);
2323 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2325 pc->pc_map[0] = PC_FREE0;
2326 pc->pc_map[1] = PC_FREE1;
2327 pc->pc_map[2] = PC_FREE2;
2328 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2329 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
2330 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
2333 * The reclaim might have freed a chunk from the current pmap.
2334 * If that chunk contained available entries, we need to
2335 * re-count the number of available entries.
2340 if (!TAILQ_EMPTY(&new_tail)) {
2341 mtx_lock(&pv_chunks_mutex);
2342 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
2343 mtx_unlock(&pv_chunks_mutex);
2348 * First find and then remove the pv entry for the specified pmap and virtual
2349 * address from the specified pv list. Returns the pv entry if found and NULL
2350 * otherwise. This operation can be performed on pv lists for either 4KB or
2351 * 2MB page mappings.
2353 static __inline pv_entry_t
2354 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2358 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
2359 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2360 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
2369 * After demotion from a 2MB page mapping to 512 4KB page mappings,
2370 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
2371 * entries for each of the 4KB page mappings.
2374 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2375 struct rwlock **lockp)
2377 struct md_page *pvh;
2378 struct pv_chunk *pc;
2380 vm_offset_t va_last;
2384 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2385 KASSERT((va & L2_OFFSET) == 0,
2386 ("pmap_pv_demote_l2: va is not 2mpage aligned"));
2387 KASSERT((pa & L2_OFFSET) == 0,
2388 ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
2389 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2392 * Transfer the 2mpage's pv entry for this mapping to the first
2393 * page's pv list. Once this transfer begins, the pv list lock
2394 * must not be released until the last pv entry is reinstantiated.
2396 pvh = pa_to_pvh(pa);
2397 pv = pmap_pvh_remove(pvh, pmap, va);
2398 KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
2399 m = PHYS_TO_VM_PAGE(pa);
2400 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2402 /* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
2403 PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
2404 va_last = va + L2_SIZE - PAGE_SIZE;
2406 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2407 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
2408 pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
2409 for (field = 0; field < _NPCM; field++) {
2410 while (pc->pc_map[field]) {
2411 bit = ffsl(pc->pc_map[field]) - 1;
2412 pc->pc_map[field] &= ~(1ul << bit);
2413 pv = &pc->pc_pventry[field * 64 + bit];
2417 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2418 ("pmap_pv_demote_l2: page %p is not managed", m));
2419 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2425 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2426 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2429 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
2430 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2431 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2433 PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
2434 PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
2438 * First find and then destroy the pv entry for the specified pmap and virtual
2439 * address. This operation can be performed on pv lists for either 4KB or 2MB
2443 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2447 pv = pmap_pvh_remove(pvh, pmap, va);
2448 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2449 free_pv_entry(pmap, pv);
2453 * Conditionally create the PV entry for a 4KB page mapping if the required
2454 * memory can be allocated without resorting to reclamation.
2457 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
2458 struct rwlock **lockp)
2462 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2463 /* Pass NULL instead of the lock pointer to disable reclamation. */
2464 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
2466 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2467 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2475 * Create the PV entry for a 2MB page mapping. Always returns true unless the
2476 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
2477 * false if the PV entry cannot be allocated without resorting to reclamation.
2480 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
2481 struct rwlock **lockp)
2483 struct md_page *pvh;
2487 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2488 /* Pass NULL instead of the lock pointer to disable reclamation. */
2489 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
2490 NULL : lockp)) == NULL)
2493 pa = l2e & ~ATTR_MASK;
2494 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2495 pvh = pa_to_pvh(pa);
2496 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
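/*
 * Replace a kernel L2 block mapping with a pointer to an L3 page table,
 * reusing the page table page that was saved when the 2MB mapping was
 * created by promotion.  The caller is expected to have already broken
 * the old mapping (see the break-before-make comment below).
 */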
2502 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
2504 pt_entry_t newl2, oldl2;
2508 KASSERT(!VIRT_IN_DMAP(va), ("removing direct mapping of %#lx", va));
2509 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
2510 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2512 ml3 = pmap_remove_pt_page(pmap, va);
2514 panic("pmap_remove_kernel_l2: Missing pt page");
2516 ml3pa = VM_PAGE_TO_PHYS(ml3);
2517 newl2 = ml3pa | L2_TABLE;
2520 * If this page table page was unmapped by a promotion, then it
2521 * contains valid mappings. Zero it to invalidate those mappings.
2523 if (ml3->valid != 0)
2524 pagezero((void *)PHYS_TO_DMAP(ml3pa));
2527 * Demote the mapping. The caller must have already invalidated the
2528 * mapping (i.e., the "break" in break-before-make).
2530 oldl2 = pmap_load_store(l2, newl2);
2531 KASSERT(oldl2 == 0, ("%s: found existing mapping at %p: %#lx",
2532 __func__, l2, oldl2));
2536 * pmap_remove_l2: Do the things to unmap a level 2 superpage.
2539 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
2540 pd_entry_t l1e, struct spglist *free, struct rwlock **lockp)
2542 struct md_page *pvh;
2544 vm_offset_t eva, va;
2547 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2548 KASSERT((sva & L2_OFFSET) == 0, ("pmap_remove_l2: sva is not aligned"));
2549 old_l2 = pmap_load_clear(l2);
2550 KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
2551 ("pmap_remove_l2: L2e %lx is not a block mapping", old_l2));
2554 * Since a promotion must break the 4KB page mappings before making
2555 * the 2MB page mapping, a pmap_invalidate_page() suffices.
2557 pmap_invalidate_page(pmap, sva);
2559 if (old_l2 & ATTR_SW_WIRED)
2560 pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
2561 pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
2562 if (old_l2 & ATTR_SW_MANAGED) {
2563 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, old_l2 & ~ATTR_MASK);
2564 pvh = pa_to_pvh(old_l2 & ~ATTR_MASK);
2565 pmap_pvh_free(pvh, pmap, sva);
2566 eva = sva + L2_SIZE;
2567 for (va = sva, m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
2568 va < eva; va += PAGE_SIZE, m++) {
2569 if (pmap_pte_dirty(old_l2))
2571 if (old_l2 & ATTR_AF)
2572 vm_page_aflag_set(m, PGA_REFERENCED);
2573 if (TAILQ_EMPTY(&m->md.pv_list) &&
2574 TAILQ_EMPTY(&pvh->pv_list))
2575 vm_page_aflag_clear(m, PGA_WRITEABLE);
2578 if (pmap == kernel_pmap) {
2579 pmap_remove_kernel_l2(pmap, l2, sva);
2581 ml3 = pmap_remove_pt_page(pmap, sva);
2583 KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
2584 ("pmap_remove_l2: l3 page not promoted"));
2585 pmap_resident_count_dec(pmap, 1);
2586 KASSERT(ml3->ref_count == NL3PG,
2587 ("pmap_remove_l2: l3 page ref count error"));
2589 pmap_add_delayed_free_list(ml3, free, FALSE);
2592 return (pmap_unuse_pt(pmap, sva, l1e, free));
2596 * pmap_remove_l3: do the things to unmap a page in a process
2599 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2600 pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
2602 struct md_page *pvh;
2606 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2607 old_l3 = pmap_load_clear(l3);
2608 pmap_invalidate_page(pmap, va);
2609 if (old_l3 & ATTR_SW_WIRED)
2610 pmap->pm_stats.wired_count -= 1;
2611 pmap_resident_count_dec(pmap, 1);
2612 if (old_l3 & ATTR_SW_MANAGED) {
2613 m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2614 if (pmap_pte_dirty(old_l3))
2616 if (old_l3 & ATTR_AF)
2617 vm_page_aflag_set(m, PGA_REFERENCED);
2618 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2619 pmap_pvh_free(&m->md, pmap, va);
2620 if (TAILQ_EMPTY(&m->md.pv_list) &&
2621 (m->flags & PG_FICTITIOUS) == 0) {
2622 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2623 if (TAILQ_EMPTY(&pvh->pv_list))
2624 vm_page_aflag_clear(m, PGA_WRITEABLE);
2627 return (pmap_unuse_pt(pmap, va, l2e, free));
2631 * Remove the specified range of addresses from the L3 page table that is
2632 * identified by the given L2 entry.
2635 pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
2636 vm_offset_t eva, struct spglist *free, struct rwlock **lockp)
2638 struct md_page *pvh;
2639 struct rwlock *new_lock;
2640 pt_entry_t *l3, old_l3;
2644 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2645 KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
2646 ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
2648 for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
2649 if (!pmap_l3_valid(pmap_load(l3))) {
2651 pmap_invalidate_range(pmap, va, sva);
2656 old_l3 = pmap_load_clear(l3);
2657 if ((old_l3 & ATTR_SW_WIRED) != 0)
2658 pmap->pm_stats.wired_count--;
2659 pmap_resident_count_dec(pmap, 1);
2660 if ((old_l3 & ATTR_SW_MANAGED) != 0) {
2661 m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2662 if (pmap_pte_dirty(old_l3))
2664 if ((old_l3 & ATTR_AF) != 0)
2665 vm_page_aflag_set(m, PGA_REFERENCED);
2666 new_lock = PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m));
2667 if (new_lock != *lockp) {
2668 if (*lockp != NULL) {
2670 * Pending TLB invalidations must be
2671 * performed before the PV list lock is
2672 * released. Otherwise, a concurrent
2673 * pmap_remove_all() on a physical page
2674 * could return while a stale TLB entry
2675 * still provides access to that page.
2678 pmap_invalidate_range(pmap, va,
2687 pmap_pvh_free(&m->md, pmap, sva);
2688 if (TAILQ_EMPTY(&m->md.pv_list) &&
2689 (m->flags & PG_FICTITIOUS) == 0) {
2690 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2691 if (TAILQ_EMPTY(&pvh->pv_list))
2692 vm_page_aflag_clear(m, PGA_WRITEABLE);
2697 if (pmap_unuse_pt(pmap, sva, l2e, free)) {
2703 pmap_invalidate_range(pmap, va, sva);
2707 * Remove the given range of addresses from the specified map.
2709 * It is assumed that the start and end are properly
2710 * rounded to the page size.
2713 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2715 struct rwlock *lock;
2716 vm_offset_t va_next;
2717 pd_entry_t *l0, *l1, *l2;
2718 pt_entry_t l3_paddr;
2719 struct spglist free;
2722 * Perform an unsynchronized read. This is, however, safe.
2724 if (pmap->pm_stats.resident_count == 0)
2732 for (; sva < eva; sva = va_next) {
2734 if (pmap->pm_stats.resident_count == 0)
2737 l0 = pmap_l0(pmap, sva);
2738 if (pmap_load(l0) == 0) {
2739 va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2745 l1 = pmap_l0_to_l1(l0, sva);
2746 if (pmap_load(l1) == 0) {
2747 va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2754 * Calculate index for next page table.
2756 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2760 l2 = pmap_l1_to_l2(l1, sva);
2764 l3_paddr = pmap_load(l2);
2766 if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
2767 if (sva + L2_SIZE == va_next && eva >= va_next) {
2768 pmap_remove_l2(pmap, l2, sva, pmap_load(l1),
2771 } else if (pmap_demote_l2_locked(pmap, l2, sva,
2774 l3_paddr = pmap_load(l2);
2778 * Weed out invalid mappings.
2780 if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
2784 * Limit our scan to either the end of the va represented
2785 * by the current page table page, or to the end of the
2786 * range being removed.
2791 pmap_remove_l3_range(pmap, l3_paddr, sva, va_next, &free,
2797 vm_page_free_pages_toq(&free, true);
2801 * Routine: pmap_remove_all
2803 * Removes this physical page from
2804 * all physical maps in which it resides.
2805 * Reflects back modify bits to the pager.
2808 * Original versions of this routine were very
2809 * inefficient because they iteratively called
2810 * pmap_remove (slow...)
2814 pmap_remove_all(vm_page_t m)
2816 struct md_page *pvh;
2819 struct rwlock *lock;
2820 pd_entry_t *pde, tpde;
2821 pt_entry_t *pte, tpte;
2823 struct spglist free;
2824 int lvl, pvh_gen, md_gen;
2826 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2827 ("pmap_remove_all: page %p is not managed", m));
2829 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2830 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2831 pa_to_pvh(VM_PAGE_TO_PHYS(m));
2834 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2836 if (!PMAP_TRYLOCK(pmap)) {
2837 pvh_gen = pvh->pv_gen;
2841 if (pvh_gen != pvh->pv_gen) {
2848 pte = pmap_pte(pmap, va, &lvl);
2849 KASSERT(pte != NULL,
2850 ("pmap_remove_all: no page table entry found"));
2852 ("pmap_remove_all: invalid pte level %d", lvl));
2854 pmap_demote_l2_locked(pmap, pte, va, &lock);
2857 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2859 if (!PMAP_TRYLOCK(pmap)) {
2860 pvh_gen = pvh->pv_gen;
2861 md_gen = m->md.pv_gen;
2865 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2871 pmap_resident_count_dec(pmap, 1);
2873 pde = pmap_pde(pmap, pv->pv_va, &lvl);
2874 KASSERT(pde != NULL,
2875 ("pmap_remove_all: no page directory entry found"));
2877 ("pmap_remove_all: invalid pde level %d", lvl));
2878 tpde = pmap_load(pde);
2880 pte = pmap_l2_to_l3(pde, pv->pv_va);
2881 tpte = pmap_load_clear(pte);
2882 if (tpte & ATTR_SW_WIRED)
2883 pmap->pm_stats.wired_count--;
2884 if ((tpte & ATTR_AF) != 0) {
2885 pmap_invalidate_page(pmap, pv->pv_va);
2886 vm_page_aflag_set(m, PGA_REFERENCED);
2890 * Update the vm_page_t clean and reference bits.
2892 if (pmap_pte_dirty(tpte))
2894 pmap_unuse_pt(pmap, pv->pv_va, tpde, &free);
2895 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2897 free_pv_entry(pmap, pv);
2900 vm_page_aflag_clear(m, PGA_WRITEABLE);
2902 vm_page_free_pages_toq(&free, true);
2906 * pmap_protect_l2: do the things to protect a 2MB page in a pmap
2909 pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
2915 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2916 KASSERT((sva & L2_OFFSET) == 0,
2917 ("pmap_protect_l2: sva is not 2mpage aligned"));
2918 old_l2 = pmap_load(l2);
2919 KASSERT((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK,
2920 ("pmap_protect_l2: L2e %lx is not a block mapping", old_l2));
2923 * Return if the L2 entry already has the desired access restrictions
2927 if ((old_l2 & mask) == nbits)
2931 * When a dirty read/write superpage mapping is write protected,
2932 * update the dirty field of each of the superpage's constituent 4KB
2935 if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
2936 (nbits & ATTR_AP(ATTR_AP_RO)) != 0 && pmap_pte_dirty(old_l2)) {
2937 m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
2938 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
2942 if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
2946 * Since a promotion must break the 4KB page mappings before making
2947 * the 2MB page mapping, a pmap_invalidate_page() suffices.
2949 pmap_invalidate_page(pmap, sva);
2953 * Set the physical protection on the
2954 * specified range of this map as requested.
2957 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2959 vm_offset_t va, va_next;
2960 pd_entry_t *l0, *l1, *l2;
2961 pt_entry_t *l3p, l3, mask, nbits;
2963 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
2964 if (prot == VM_PROT_NONE) {
2965 pmap_remove(pmap, sva, eva);
2970 if ((prot & VM_PROT_WRITE) == 0) {
2971 mask |= ATTR_AP_RW_BIT | ATTR_SW_DBM;
2972 nbits |= ATTR_AP(ATTR_AP_RO);
2974 if ((prot & VM_PROT_EXECUTE) == 0) {
2982 for (; sva < eva; sva = va_next) {
2984 l0 = pmap_l0(pmap, sva);
2985 if (pmap_load(l0) == 0) {
2986 va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2992 l1 = pmap_l0_to_l1(l0, sva);
2993 if (pmap_load(l1) == 0) {
2994 va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3000 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3004 l2 = pmap_l1_to_l2(l1, sva);
3005 if (pmap_load(l2) == 0)
3008 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
3009 if (sva + L2_SIZE == va_next && eva >= va_next) {
3010 pmap_protect_l2(pmap, l2, sva, mask, nbits);
3012 } else if (pmap_demote_l2(pmap, l2, sva) == NULL)
3015 KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
3016 ("pmap_protect: Invalid L2 entry after demotion"));
3022 for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
3024 l3 = pmap_load(l3p);
3027 * Go to the next L3 entry if the current one is
3028 * invalid or already has the desired access
3029 * restrictions in place. (The latter case occurs
3030 * frequently. For example, in a "buildworld"
3031 * workload, almost 1 out of 4 L3 entries already
3032 * have the desired restrictions.)
3034 if (!pmap_l3_valid(l3) || (l3 & mask) == nbits) {
3035 if (va != va_next) {
3036 pmap_invalidate_range(pmap, va, sva);
3043 * When a dirty read/write mapping is write protected,
3044 * update the page's dirty field.
3046 if ((l3 & ATTR_SW_MANAGED) != 0 &&
3047 (nbits & ATTR_AP(ATTR_AP_RO)) != 0 &&
3049 vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
3051 if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
3057 pmap_invalidate_range(pmap, va, sva);
3063 * Inserts the specified page table page into the specified pmap's collection
3064 * of idle page table pages. Each of a pmap's page table pages is responsible
3065 * for mapping a distinct range of virtual addresses. The pmap's collection is
3066 * ordered by this virtual address range.
3068 * If "promoted" is false, then the page table page "mpte" must be zero filled.
3071 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted)
3074 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3075 mpte->valid = promoted ? VM_PAGE_BITS_ALL : 0;
3076 return (vm_radix_insert(&pmap->pm_root, mpte));
3080 * Removes the page table page mapping the specified virtual address from the
3081 * specified pmap's collection of idle page table pages, and returns it.
3082 * Returns NULL if there is no page table page corresponding to the
3083 * specified virtual address.
3085 static __inline vm_page_t
3086 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
3089 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3090 return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
3094 * Performs a break-before-make update of a pmap entry. This is needed when
3095 * either promoting or demoting pages to ensure the TLB doesn't get into an
3096 * inconsistent state.
3099 pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
3100 vm_offset_t va, vm_size_t size)
3104 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3107 * Ensure we don't get switched out with the page table in an
3108 * inconsistent state. We also need to ensure no interrupts fire
3109 * as they may make use of an address we are about to invalidate.
3111 intr = intr_disable();
3114 * Clear the old mapping's valid bit, but leave the rest of the entry
3115 * unchanged, so that a lockless, concurrent pmap_kextract() can still
3116 * look up the physical address.
3118 pmap_clear_bits(pte, ATTR_DESCR_VALID);
3119 pmap_invalidate_range_nopin(pmap, va, va + size);
3121 /* Create the new mapping */
3122 pmap_store(pte, newpte);
3128 #if VM_NRESERVLEVEL > 0
3130 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
3131 * replace the many pv entries for the 4KB page mappings by a single pv entry
3132 * for the 2MB page mapping.
3135 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
3136 struct rwlock **lockp)
3138 struct md_page *pvh;
3140 vm_offset_t va_last;
3143 KASSERT((pa & L2_OFFSET) == 0,
3144 ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
3145 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
3148 * Transfer the first page's pv entry for this mapping to the 2mpage's
3149 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
3150 * a transfer avoids the possibility that get_pv_entry() calls
3151 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
3152 * mappings that is being promoted.
3154 m = PHYS_TO_VM_PAGE(pa);
3155 va = va & ~L2_OFFSET;
3156 pv = pmap_pvh_remove(&m->md, pmap, va);
3157 KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
3158 pvh = pa_to_pvh(pa);
3159 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
3161 /* Free the remaining NPTEPG - 1 pv entries. */
3162 va_last = va + L2_SIZE - PAGE_SIZE;
3166 pmap_pvh_free(&m->md, pmap, va);
3167 } while (va < va_last);
3171 * Tries to promote the 512, contiguous 4KB page mappings that are within a
3172 * single level 2 table entry to a single 2MB page mapping. For promotion
3173 * to occur, two conditions must be met: (1) the 4KB page mappings must map
3174 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
3175 * identical characteristics.
3178 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
3179 struct rwlock **lockp)
3181 pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
3185 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3187 sva = va & ~L2_OFFSET;
3188 firstl3 = pmap_l2_to_l3(l2, sva);
3189 newl2 = pmap_load(firstl3);
3192 if (((newl2 & (~ATTR_MASK | ATTR_AF)) & L2_OFFSET) != ATTR_AF) {
3193 atomic_add_long(&pmap_l2_p_failures, 1);
3194 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3195 " in pmap %p", va, pmap);
3199 if ((newl2 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
3200 (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
3201 if (!atomic_fcmpset_64(l2, &newl2, newl2 & ~ATTR_SW_DBM))
3203 newl2 &= ~ATTR_SW_DBM;
3206 pa = newl2 + L2_SIZE - PAGE_SIZE;
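/*
 * Walk the remaining L3 entries from the last to the first, verifying
 * that each one maps the expected physical page and has attributes
 * compatible with the first entry; otherwise the promotion fails.
 */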
3207 for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
3208 oldl3 = pmap_load(l3);
3210 if ((oldl3 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
3211 (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
3212 if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
3215 oldl3 &= ~ATTR_SW_DBM;
3218 atomic_add_long(&pmap_l2_p_failures, 1);
3219 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
3220 " in pmap %p", va, pmap);
3227 * Save the page table page in its current state until the L2
3228 * mapping the superpage is demoted by pmap_demote_l2() or
3229 * destroyed by pmap_remove_l3().
3231 mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3232 KASSERT(mpte >= vm_page_array &&
3233 mpte < &vm_page_array[vm_page_array_size],
3234 ("pmap_promote_l2: page table page is out of range"));
3235 KASSERT(mpte->pindex == pmap_l2_pindex(va),
3236 ("pmap_promote_l2: page table page's pindex is wrong"));
3237 if (pmap_insert_pt_page(pmap, mpte, true)) {
3238 atomic_add_long(&pmap_l2_p_failures, 1);
3240 "pmap_promote_l2: failure for va %#lx in pmap %p", va,
3245 if ((newl2 & ATTR_SW_MANAGED) != 0)
3246 pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
3248 newl2 &= ~ATTR_DESCR_MASK;
3251 pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
3253 atomic_add_long(&pmap_l2_promotions, 1);
3254 CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
3257 #endif /* VM_NRESERVLEVEL > 0 */
3260 * Insert the given physical page (p) at
3261 * the specified virtual address (v) in the
3262 * target physical map with the protection requested.
3264 * If specified, the page will be wired down, meaning
3265 * that the related pte can not be reclaimed.
3267 * NB: This is the only routine which MAY NOT lazy-evaluate
3268 * or lose information. That is, this routine must actually
3269 * insert this page into the given map NOW.
3272 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3273 u_int flags, int8_t psind)
3275 struct rwlock *lock;
3277 pt_entry_t new_l3, orig_l3;
3278 pt_entry_t *l2, *l3;
3285 va = trunc_page(va);
3286 if ((m->oflags & VPO_UNMANAGED) == 0)
3287 VM_PAGE_OBJECT_BUSY_ASSERT(m);
3288 pa = VM_PAGE_TO_PHYS(m);
3289 new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
3291 if ((prot & VM_PROT_WRITE) == 0)
3292 new_l3 |= ATTR_AP(ATTR_AP_RO);
3293 if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
3295 if ((flags & PMAP_ENTER_WIRED) != 0)
3296 new_l3 |= ATTR_SW_WIRED;
3297 if (va < VM_MAXUSER_ADDRESS)
3298 new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
3301 if (pmap != kernel_pmap)
3303 if ((m->oflags & VPO_UNMANAGED) == 0) {
3304 new_l3 |= ATTR_SW_MANAGED;
3305 if ((prot & VM_PROT_WRITE) != 0) {
3306 new_l3 |= ATTR_SW_DBM;
3307 if ((flags & VM_PROT_WRITE) == 0)
3308 new_l3 |= ATTR_AP(ATTR_AP_RO);
3312 CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
3317 /* Assert the required virtual and physical alignment. */
3318 KASSERT((va & L2_OFFSET) == 0, ("pmap_enter: va unaligned"));
3319 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
3320 rv = pmap_enter_l2(pmap, va, (new_l3 & ~L3_PAGE) | L2_BLOCK,
3327 * In the case that a page table page is not
3328 * resident, we are creating it here.
3331 pde = pmap_pde(pmap, va, &lvl);
3332 if (pde != NULL && lvl == 2) {
3333 l3 = pmap_l2_to_l3(pde, va);
3334 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
3335 mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3339 } else if (pde != NULL && lvl == 1) {
3340 l2 = pmap_l1_to_l2(pde, va);
3341 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
3342 (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
3343 l3 = &l3[pmap_l3_index(va)];
3344 if (va < VM_MAXUSER_ADDRESS) {
3345 mpte = PHYS_TO_VM_PAGE(
3346 pmap_load(l2) & ~ATTR_MASK);
3351 /* We need to allocate an L3 table. */
3353 if (va < VM_MAXUSER_ADDRESS) {
3354 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
3357 * We use _pmap_alloc_l3() instead of pmap_alloc_l3() in order
3358 * to handle the possibility that a superpage mapping for "va"
3359 * was created while we slept.
3361 mpte = _pmap_alloc_l3(pmap, pmap_l2_pindex(va),
3362 nosleep ? NULL : &lock);
3363 if (mpte == NULL && nosleep) {
3364 CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
3365 rv = KERN_RESOURCE_SHORTAGE;
3370 panic("pmap_enter: missing L3 table for kernel va %#lx", va);
3373 orig_l3 = pmap_load(l3);
3374 opa = orig_l3 & ~ATTR_MASK;
3378 * Is the specified virtual address already mapped?
3380 if (pmap_l3_valid(orig_l3)) {
3382 * Wiring change, just update stats. We don't worry about
3383 * wiring PT pages as they remain resident as long as there
3384 * are valid mappings in them. Hence, if a user page is wired,
3385 * the PT page will be also.
3387 if ((flags & PMAP_ENTER_WIRED) != 0 &&
3388 (orig_l3 & ATTR_SW_WIRED) == 0)
3389 pmap->pm_stats.wired_count++;
3390 else if ((flags & PMAP_ENTER_WIRED) == 0 &&
3391 (orig_l3 & ATTR_SW_WIRED) != 0)
3392 pmap->pm_stats.wired_count--;
3395 * Remove the extra PT page reference.
3399 KASSERT(mpte->ref_count > 0,
3400 ("pmap_enter: missing reference to page table page,"
3405 * Has the physical page changed?
3409 * No, might be a protection or wiring change.
3411 if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3412 (new_l3 & ATTR_SW_DBM) != 0)
3413 vm_page_aflag_set(m, PGA_WRITEABLE);
3418 * The physical page has changed. Temporarily invalidate
3421 orig_l3 = pmap_load_clear(l3);
3422 KASSERT((orig_l3 & ~ATTR_MASK) == opa,
3423 ("pmap_enter: unexpected pa update for %#lx", va));
3424 if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
3425 om = PHYS_TO_VM_PAGE(opa);
3428 * The pmap lock is sufficient to synchronize with
3429 * concurrent calls to pmap_page_test_mappings() and
3430 * pmap_ts_referenced().
3432 if (pmap_pte_dirty(orig_l3))
3434 if ((orig_l3 & ATTR_AF) != 0)
3435 vm_page_aflag_set(om, PGA_REFERENCED);
3436 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3437 pv = pmap_pvh_remove(&om->md, pmap, va);
3438 if ((m->oflags & VPO_UNMANAGED) != 0)
3439 free_pv_entry(pmap, pv);
3440 if ((om->aflags & PGA_WRITEABLE) != 0 &&
3441 TAILQ_EMPTY(&om->md.pv_list) &&
3442 ((om->flags & PG_FICTITIOUS) != 0 ||
3443 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3444 vm_page_aflag_clear(om, PGA_WRITEABLE);
3446 pmap_invalidate_page(pmap, va);
3450 * Increment the counters.
3452 if ((new_l3 & ATTR_SW_WIRED) != 0)
3453 pmap->pm_stats.wired_count++;
3454 pmap_resident_count_inc(pmap, 1);
3457 * Enter on the PV list if part of our managed memory.
3459 if ((m->oflags & VPO_UNMANAGED) == 0) {
3461 pv = get_pv_entry(pmap, &lock);
3464 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3465 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3467 if ((new_l3 & ATTR_SW_DBM) != 0)
3468 vm_page_aflag_set(m, PGA_WRITEABLE);
3473 * Sync icache if exec permission and attribute VM_MEMATTR_WRITE_BACK
3474 * is set. Do it now, before the mapping is stored and made
3475 * valid for hardware table walk. If done later, others could
3476 * access this page before the caches are properly synced.
3477 * Don't do it for kernel memory which is mapped with exec
3478 * permission even if the memory isn't going to hold executable
3479 * code. The only time an icache sync is needed is after a
3480 * kernel module is loaded and the relocation info is processed.
3481 * And it's done in elf_cpu_load_file().
3483 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3484 m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
3485 (opa != pa || (orig_l3 & ATTR_XN)))
3486 cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3489 * Update the L3 entry
3491 if (pmap_l3_valid(orig_l3)) {
3492 KASSERT(opa == pa, ("pmap_enter: invalid update"));
3493 if ((orig_l3 & ~ATTR_AF) != (new_l3 & ~ATTR_AF)) {
3494 /* same PA, different attributes */
3495 /* XXXMJ need to reload orig_l3 for hardware DBM. */
3496 pmap_load_store(l3, new_l3);
3497 pmap_invalidate_page(pmap, va);
3498 if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
3499 pmap_pte_dirty(orig_l3))
3504 * This can happen if multiple threads simultaneously
3505 * access a not yet mapped page. This is bad for performance
3506 * since it can cause a full demotion-NOP-promotion cycle.
3508 * Other possible reasons are:
3509 * - the VM and pmap memory layouts have diverged
3510 * - a TLB flush is missing somewhere and the CPU doesn't see
3511 * the actual VA mapping
3513 CTR4(KTR_PMAP, "%s: already mapped page - "
3514 "pmap %p va 0x%#lx pte 0x%lx",
3515 __func__, pmap, va, new_l3);
3519 pmap_store(l3, new_l3);
3523 #if VM_NRESERVLEVEL > 0
3524 if ((mpte == NULL || mpte->ref_count == NL3PG) &&
3525 pmap_ps_enabled(pmap) &&
3526 (m->flags & PG_FICTITIOUS) == 0 &&
3527 vm_reserv_level_iffullpop(m) == 0) {
3528 pmap_promote_l2(pmap, pde, va, &lock);
3541 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
3542 * if successful. Returns false if (1) a page table page cannot be allocated
3543 * without sleeping, (2) a mapping already exists at the specified virtual
3544 * address, or (3) a PV entry cannot be allocated without reclaiming another
3548 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3549 struct rwlock **lockp)
3553 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3555 new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
3556 ATTR_IDX(m->md.pv_memattr) | ATTR_AP(ATTR_AP_RO) | L2_BLOCK);
3557 if ((m->oflags & VPO_UNMANAGED) == 0) {
3558 new_l2 |= ATTR_SW_MANAGED;
3561 if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
3563 if (va < VM_MAXUSER_ADDRESS)
3564 new_l2 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
3567 if (pmap != kernel_pmap)
3569 return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
3570 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3575 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
3576 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3577 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3578 * a mapping already exists at the specified virtual address. Returns
3579 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3580 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
3581 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3583 * The parameter "m" is only used when creating a managed, writeable mapping.
3586 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
3587 vm_page_t m, struct rwlock **lockp)
3589 struct spglist free;
3590 pd_entry_t *l2, old_l2;
3593 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3595 if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
3596 NULL : lockp)) == NULL) {
3597 CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
3599 return (KERN_RESOURCE_SHORTAGE);
3602 l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
3603 l2 = &l2[pmap_l2_index(va)];
3604 if ((old_l2 = pmap_load(l2)) != 0) {
3605 KASSERT(l2pg->ref_count > 1,
3606 ("pmap_enter_l2: l2pg's ref count is too low"));
3607 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
3610 "pmap_enter_l2: failure for va %#lx in pmap %p",
3612 return (KERN_FAILURE);
3615 if ((old_l2 & ATTR_DESCR_MASK) == L2_BLOCK)
3616 (void)pmap_remove_l2(pmap, l2, va,
3617 pmap_load(pmap_l1(pmap, va)), &free, lockp);
3619 pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
3621 vm_page_free_pages_toq(&free, true);
3622 if (va >= VM_MAXUSER_ADDRESS) {
3624 * Both pmap_remove_l2() and pmap_remove_l3_range()
3625 * will leave the kernel page table page zero filled.
3626 * Nonetheless, the TLB could have an intermediate
3627 * entry for the kernel page table page.
3629 mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
3630 if (pmap_insert_pt_page(pmap, mt, false))
3631 panic("pmap_enter_l2: trie insert failed");
3633 pmap_invalidate_page(pmap, va);
3635 KASSERT(pmap_load(l2) == 0,
3636 ("pmap_enter_l2: non-zero L2 entry %p", l2));
3639 if ((new_l2 & ATTR_SW_MANAGED) != 0) {
3641 * Abort this mapping if its PV entry could not be created.
3643 if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
3645 if (pmap_unwire_l3(pmap, va, l2pg, &free)) {
3647 * Although "va" is not mapped, the TLB could
3648 * nonetheless have intermediate entries that
3649 * refer to the freed page table pages.
3650 * Invalidate those entries.
3652 * XXX redundant invalidation (See
3653 * _pmap_unwire_l3().)
3655 pmap_invalidate_page(pmap, va);
3656 vm_page_free_pages_toq(&free, true);
3659 "pmap_enter_l2: failure for va %#lx in pmap %p",
3661 return (KERN_RESOURCE_SHORTAGE);
3663 if ((new_l2 & ATTR_SW_DBM) != 0)
3664 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3665 vm_page_aflag_set(mt, PGA_WRITEABLE);
3669 * Increment counters.
3671 if ((new_l2 & ATTR_SW_WIRED) != 0)
3672 pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
3673 pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
3676 * Map the superpage.
3678 pmap_store(l2, new_l2);
3681 atomic_add_long(&pmap_l2_mappings, 1);
3682 CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
3685 return (KERN_SUCCESS);
3689 * Maps a sequence of resident pages belonging to the same object.
3690 * The sequence begins with the given page m_start. This page is
3691 * mapped at the given virtual address start. Each subsequent page is
3692 * mapped at a virtual address that is offset from start by the same
3693 * amount as the page is offset from m_start within the object. The
3694 * last page in the sequence is the page with the largest offset from
3695 * m_start that can be mapped at a virtual address less than the given
3696 * virtual address end. Not every virtual page between start and end
3697 * is mapped; only those for which a resident page exists with the
3698 * corresponding offset from m_start are mapped.
3701 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3702 vm_page_t m_start, vm_prot_t prot)
3704 struct rwlock *lock;
3707 vm_pindex_t diff, psize;
3709 VM_OBJECT_ASSERT_LOCKED(m_start->object);
3711 psize = atop(end - start);
3716 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3717 va = start + ptoa(diff);
3718 if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end &&
3719 m->psind == 1 && pmap_ps_enabled(pmap) &&
3720 pmap_enter_2mpage(pmap, va, m, prot, &lock))
3721 m = &m[L2_SIZE / PAGE_SIZE - 1];
3723 mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
3725 m = TAILQ_NEXT(m, listq);
3733 * this code makes some *MAJOR* assumptions:
3734 * 1. Current pmap & pmap exists.
3735 * 2. Not wired.
3736 * 3. Read access.
3737 * 4. No page table pages.
3738 * but is *MUCH* faster than pmap_enter...
3742 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3744 struct rwlock *lock;
3748 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
3755 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3756 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
3758 struct spglist free;
3760 pt_entry_t *l2, *l3, l3_val;
3764 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3765 (m->oflags & VPO_UNMANAGED) != 0,
3766 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3767 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3769 CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
3771 * In the case that a page table page is not
3772 * resident, we are creating it here.
3774 if (va < VM_MAXUSER_ADDRESS) {
3775 vm_pindex_t l2pindex;
3778 * Calculate pagetable page index
3780 l2pindex = pmap_l2_pindex(va);
3781 if (mpte && (mpte->pindex == l2pindex)) {
3787 pde = pmap_pde(pmap, va, &lvl);
3790 * If the page table page is mapped, we just increment
3791 * the hold count, and activate it. Otherwise, we
3792 * attempt to allocate a page table page. If this
3793 * attempt fails, we don't retry. Instead, we give up.
3796 l2 = pmap_l1_to_l2(pde, va);
3797 if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
3801 if (lvl == 2 && pmap_load(pde) != 0) {
3803 PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3807 * Pass NULL instead of the PV list lock
3808 * pointer, because we don't intend to sleep.
3810 mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
3815 l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3816 l3 = &l3[pmap_l3_index(va)];
3819 pde = pmap_pde(kernel_pmap, va, &lvl);
3820 KASSERT(pde != NULL,
3821 ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
3824 ("pmap_enter_quick_locked: Invalid level %d", lvl));
3825 l3 = pmap_l2_to_l3(pde, va);
3829 * Abort if a mapping already exists.
3831 if (pmap_load(l3) != 0) {
3840 * Enter on the PV list if part of our managed memory.
3842 if ((m->oflags & VPO_UNMANAGED) == 0 &&
3843 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3846 if (pmap_unwire_l3(pmap, va, mpte, &free)) {
3847 pmap_invalidate_page(pmap, va);
3848 vm_page_free_pages_toq(&free, true);
3856 * Increment counters
3858 pmap_resident_count_inc(pmap, 1);
3860 pa = VM_PAGE_TO_PHYS(m);
3861 l3_val = pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
3862 ATTR_AP(ATTR_AP_RO) | L3_PAGE;
3863 if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
3865 if (va < VM_MAXUSER_ADDRESS)
3866 l3_val |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
3869 if (pmap != kernel_pmap)
3873 * Now validate mapping with RO protection
3875 if ((m->oflags & VPO_UNMANAGED) == 0) {
3876 l3_val |= ATTR_SW_MANAGED;
3880 /* Sync icache before the mapping is stored to PTE */
3881 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3882 m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
3883 cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
3885 pmap_store(l3, l3_val);
3892 * This code maps large physical mmap regions into the
3893 * processor address space. Note that some shortcuts
3894 * are taken, but the code works.
3897 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3898 vm_pindex_t pindex, vm_size_t size)
3901 VM_OBJECT_ASSERT_WLOCKED(object);
3902 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3903 ("pmap_object_init_pt: non-device object"));
3907 * Clear the wired attribute from the mappings for the specified range of
3908 * addresses in the given pmap. Every valid mapping within that range
3909 * must have the wired attribute set. In contrast, invalid mappings
3910 * cannot have the wired attribute set, so they are ignored.
3912 * The wired attribute of the page table entry is not a hardware feature,
3913 * so there is no need to invalidate any TLB entries.
3916 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3918 vm_offset_t va_next;
3919 pd_entry_t *l0, *l1, *l2;
3923 for (; sva < eva; sva = va_next) {
3924 l0 = pmap_l0(pmap, sva);
3925 if (pmap_load(l0) == 0) {
3926 va_next = (sva + L0_SIZE) & ~L0_OFFSET;
3932 l1 = pmap_l0_to_l1(l0, sva);
3933 if (pmap_load(l1) == 0) {
3934 va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3940 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3944 l2 = pmap_l1_to_l2(l1, sva);
3945 if (pmap_load(l2) == 0)
3948 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
3949 if ((pmap_load(l2) & ATTR_SW_WIRED) == 0)
3950 panic("pmap_unwire: l2 %#jx is missing "
3951 "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2));
3954 * Are we unwiring the entire large page? If not,
3955 * demote the mapping and fall through.
3957 if (sva + L2_SIZE == va_next && eva >= va_next) {
3958 pmap_clear_bits(l2, ATTR_SW_WIRED);
3959 pmap->pm_stats.wired_count -= L2_SIZE /
3962 } else if (pmap_demote_l2(pmap, l2, sva) == NULL)
3963 panic("pmap_unwire: demotion failed");
3965 KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
3966 ("pmap_unwire: Invalid l2 entry after demotion"));
3970 for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
3972 if (pmap_load(l3) == 0)
3974 if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
3975 panic("pmap_unwire: l3 %#jx is missing "
3976 "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
3979 * ATTR_SW_WIRED must be cleared atomically. Although
3980 * the pmap lock synchronizes access to ATTR_SW_WIRED,
3981 * the System MMU may write to the entry concurrently.
3983 pmap_clear_bits(l3, ATTR_SW_WIRED);
3984 pmap->pm_stats.wired_count--;
3991 * Copy the range specified by src_addr/len
3992 * from the source map to the range dst_addr/len
3993 * in the destination map.
3995 * This routine is only advisory and need not do anything.
3997 * Because the executable mappings created by this routine are copied,
3998 * it should not have to flush the instruction cache.
4001 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
4002 vm_offset_t src_addr)
4004 struct rwlock *lock;
4005 struct spglist free;
4006 pd_entry_t *l0, *l1, *l2, srcptepaddr;
4007 pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte;
4008 vm_offset_t addr, end_addr, va_next;
4009 vm_page_t dst_l2pg, dstmpte, srcmpte;
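/* Nothing is copied unless the source and destination ranges share the same virtual addresses. */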
4011 if (dst_addr != src_addr)
4013 end_addr = src_addr + len;
4015 if (dst_pmap < src_pmap) {
4016 PMAP_LOCK(dst_pmap);
4017 PMAP_LOCK(src_pmap);
4019 PMAP_LOCK(src_pmap);
4020 PMAP_LOCK(dst_pmap);
4022 for (addr = src_addr; addr < end_addr; addr = va_next) {
4023 l0 = pmap_l0(src_pmap, addr);
4024 if (pmap_load(l0) == 0) {
4025 va_next = (addr + L0_SIZE) & ~L0_OFFSET;
4030 l1 = pmap_l0_to_l1(l0, addr);
4031 if (pmap_load(l1) == 0) {
4032 va_next = (addr + L1_SIZE) & ~L1_OFFSET;
4037 va_next = (addr + L2_SIZE) & ~L2_OFFSET;
4040 l2 = pmap_l1_to_l2(l1, addr);
4041 srcptepaddr = pmap_load(l2);
4042 if (srcptepaddr == 0)
4044 if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
4045 if ((addr & L2_OFFSET) != 0 ||
4046 addr + L2_SIZE > end_addr)
4048 dst_l2pg = pmap_alloc_l2(dst_pmap, addr, NULL);
4049 if (dst_l2pg == NULL)
4052 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_l2pg));
4053 l2 = &l2[pmap_l2_index(addr)];
4054 if (pmap_load(l2) == 0 &&
4055 ((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
4056 pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
4057 PMAP_ENTER_NORECLAIM, &lock))) {
4058 mask = ATTR_AF | ATTR_SW_WIRED;
4060 if ((srcptepaddr & ATTR_SW_DBM) != 0)
4061 nbits |= ATTR_AP_RW_BIT;
4062 pmap_store(l2, (srcptepaddr & ~mask) | nbits);
4063 pmap_resident_count_inc(dst_pmap, L2_SIZE /
4065 atomic_add_long(&pmap_l2_mappings, 1);
4067 dst_l2pg->ref_count--;
4070 KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
4071 ("pmap_copy: invalid L2 entry"));
4072 srcptepaddr &= ~ATTR_MASK;
4073 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
4074 KASSERT(srcmpte->ref_count > 0,
4075 ("pmap_copy: source page table page is unused"));
4076 if (va_next > end_addr)
4078 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
4079 src_pte = &src_pte[pmap_l3_index(addr)];
4081 for (; addr < va_next; addr += PAGE_SIZE, src_pte++) {
4082 ptetemp = pmap_load(src_pte);
4085 * We only virtual copy managed pages.
4087 if ((ptetemp & ATTR_SW_MANAGED) == 0)
4090 if (dstmpte != NULL) {
4091 KASSERT(dstmpte->pindex == pmap_l2_pindex(addr),
4092 ("dstmpte pindex/addr mismatch"));
4093 dstmpte->ref_count++;
4094 } else if ((dstmpte = pmap_alloc_l3(dst_pmap, addr,
4097 dst_pte = (pt_entry_t *)
4098 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
4099 dst_pte = &dst_pte[pmap_l3_index(addr)];
4100 if (pmap_load(dst_pte) == 0 &&
4101 pmap_try_insert_pv_entry(dst_pmap, addr,
4102 PHYS_TO_VM_PAGE(ptetemp & ~ATTR_MASK), &lock)) {
4104 * Clear the wired, modified, and accessed
4105 * (referenced) bits during the copy.
4107 mask = ATTR_AF | ATTR_SW_WIRED;
4109 if ((ptetemp & ATTR_SW_DBM) != 0)
4110 nbits |= ATTR_AP_RW_BIT;
4111 pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
4112 pmap_resident_count_inc(dst_pmap, 1);
4115 if (pmap_unwire_l3(dst_pmap, addr, dstmpte,
4118 * Although "addr" is not mapped,
4119 * the TLB could nonetheless have
4120 * intermediate entries that refer
4121 * to the freed page table pages.
4122 * Invalidate those entries.
4124 * XXX redundant invalidation
4126 pmap_invalidate_page(dst_pmap, addr);
4127 vm_page_free_pages_toq(&free, true);
4131 /* Have we copied all of the valid mappings? */
4132 if (dstmpte->ref_count >= srcmpte->ref_count)
4138 * XXX This barrier may not be needed because the destination pmap is
4145 PMAP_UNLOCK(src_pmap);
4146 PMAP_UNLOCK(dst_pmap);
4150 * pmap_zero_page zeros the specified hardware page by mapping
4151 * the page into KVM and using bzero to clear its contents.
4154 pmap_zero_page(vm_page_t m)
4156 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4158 pagezero((void *)va);
4162 * pmap_zero_page_area zeros the specified hardware page by mapping
4163 * the page into KVM and using bzero to clear its contents.
4165 * off and size may not cover an area beyond a single hardware page.
4168 pmap_zero_page_area(vm_page_t m, int off, int size)
4170 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
4172 if (off == 0 && size == PAGE_SIZE)
4173 pagezero((void *)va);
4175 bzero((char *)va + off, size);
4179 * pmap_copy_page copies the specified (machine independent)
4180 * page by mapping the page into virtual memory and using
4181 * bcopy to copy the page, one machine dependent page at a
4185 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
4187 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
4188 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
4190 pagecopy((void *)src, (void *)dst);
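/*
 * Unmapped buffer I/O is supported: any physical page can be reached
 * through the direct map (see pmap_copy_pages() below).
 */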
4193 int unmapped_buf_allowed = 1;
4196 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
4197 vm_offset_t b_offset, int xfersize)
4201 vm_paddr_t p_a, p_b;
4202 vm_offset_t a_pg_offset, b_pg_offset;
4205 while (xfersize > 0) {
4206 a_pg_offset = a_offset & PAGE_MASK;
4207 m_a = ma[a_offset >> PAGE_SHIFT];
4208 p_a = m_a->phys_addr;
4209 b_pg_offset = b_offset & PAGE_MASK;
4210 m_b = mb[b_offset >> PAGE_SHIFT];
4211 p_b = m_b->phys_addr;
4212 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4213 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4214 if (__predict_false(!PHYS_IN_DMAP(p_a))) {
4215 panic("!DMAP a %lx", p_a);
4217 a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
4219 if (__predict_false(!PHYS_IN_DMAP(p_b))) {
4220 panic("!DMAP b %lx", p_b);
4222 b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
4224 bcopy(a_cp, b_cp, cnt);
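/*
 * A transient per-CPU mapping is unnecessary because every physical page
 * is reachable through the direct map; simply return its direct map address.
 */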
4232 pmap_quick_enter_page(vm_page_t m)
4235 return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
4239 pmap_quick_remove_page(vm_offset_t addr)
4244 * Returns true if the pmap's pv is one of the first
4245 * 16 pvs linked to from this page. This count may
4246 * be changed upwards or downwards in the future; it
4247 * is only necessary that true be returned for a small
4248 * subset of pmaps for proper page aging.
4251 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4253 struct md_page *pvh;
4254 struct rwlock *lock;
4259 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4260 ("pmap_page_exists_quick: page %p is not managed", m));
4262 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4264 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4265 if (PV_PMAP(pv) == pmap) {
4273 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4274 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4275 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4276 if (PV_PMAP(pv) == pmap) {
4290 * pmap_page_wired_mappings:
4292 * Return the number of managed mappings to the given physical page
4296 pmap_page_wired_mappings(vm_page_t m)
4298 struct rwlock *lock;
4299 struct md_page *pvh;
4303 int count, lvl, md_gen, pvh_gen;
4305 if ((m->oflags & VPO_UNMANAGED) != 0)
4307 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4311 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4313 if (!PMAP_TRYLOCK(pmap)) {
4314 md_gen = m->md.pv_gen;
4318 if (md_gen != m->md.pv_gen) {
4323 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4324 if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4328 if ((m->flags & PG_FICTITIOUS) == 0) {
4329 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4330 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4332 if (!PMAP_TRYLOCK(pmap)) {
4333 md_gen = m->md.pv_gen;
4334 pvh_gen = pvh->pv_gen;
4338 if (md_gen != m->md.pv_gen ||
4339 pvh_gen != pvh->pv_gen) {
4344 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4346 (pmap_load(pte) & ATTR_SW_WIRED) != 0)
4356 * Returns true if the given page is mapped individually or as part of
4357 * a 2mpage. Otherwise, returns false.
4360 pmap_page_is_mapped(vm_page_t m)
4362 struct rwlock *lock;
4365 if ((m->oflags & VPO_UNMANAGED) != 0)
4367 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4369 rv = !TAILQ_EMPTY(&m->md.pv_list) ||
4370 ((m->flags & PG_FICTITIOUS) == 0 &&
4371 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
4377 * Destroy all managed, non-wired mappings in the given user-space
4378 * pmap. This pmap cannot be active on any processor besides the
4381 * This function cannot be applied to the kernel pmap. Moreover, it
4382 * is not intended for general use. It is only to be used during
4383 * process termination. Consequently, it can be implemented in ways
4384 * that make it faster than pmap_remove(). First, it can more quickly
4385 * destroy mappings by iterating over the pmap's collection of PV
4386 * entries, rather than searching the page table. Second, it doesn't
4387 * have to test and clear the page table entries atomically, because
4388 * no processor is currently accessing the user address space. In
4389 * particular, a page table entry's dirty bit won't change state once
4390 * this function starts.
4393 pmap_remove_pages(pmap_t pmap)
4396 pt_entry_t *pte, tpte;
4397 struct spglist free;
4398 vm_page_t m, ml3, mt;
4400 struct md_page *pvh;
4401 struct pv_chunk *pc, *npc;
4402 struct rwlock *lock;
4404 uint64_t inuse, bitmask;
4405 int allfree, field, freed, idx, lvl;
4408 KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
4414 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4417 for (field = 0; field < _NPCM; field++) {
4418 inuse = ~pc->pc_map[field] & pc_freemask[field];
4419 while (inuse != 0) {
4420 bit = ffsl(inuse) - 1;
4421 bitmask = 1UL << bit;
4422 idx = field * 64 + bit;
4423 pv = &pc->pc_pventry[idx];
4426 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4427 KASSERT(pde != NULL,
4428 ("Attempting to remove an unmapped page"));
4432 pte = pmap_l1_to_l2(pde, pv->pv_va);
4433 tpte = pmap_load(pte);
4434 KASSERT((tpte & ATTR_DESCR_MASK) ==
4436 ("Attempting to remove an invalid "
4437 "block: %lx", tpte));
4440 pte = pmap_l2_to_l3(pde, pv->pv_va);
4441 tpte = pmap_load(pte);
4442 KASSERT((tpte & ATTR_DESCR_MASK) ==
4444 ("Attempting to remove an invalid "
4445 "page: %lx", tpte));
4449 "Invalid page directory level: %d",
4454 * We cannot remove wired pages from a process' mapping at this time
4456 if (tpte & ATTR_SW_WIRED) {
4461 pa = tpte & ~ATTR_MASK;
4463 m = PHYS_TO_VM_PAGE(pa);
4464 KASSERT(m->phys_addr == pa,
4465 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4466 m, (uintmax_t)m->phys_addr,
4469 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4470 m < &vm_page_array[vm_page_array_size],
4471 ("pmap_remove_pages: bad pte %#jx",
4475 * Because this pmap is not active on other
4476 * processors, the dirty bit cannot have
4477 * changed state since we last loaded pte.
4482 * Update the vm_page_t clean/reference bits.
4484 if (pmap_pte_dirty(tpte)) {
4487 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4496 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
4499 pc->pc_map[field] |= bitmask;
4502 pmap_resident_count_dec(pmap,
4503 L2_SIZE / PAGE_SIZE);
4504 pvh = pa_to_pvh(tpte & ~ATTR_MASK);
4505 TAILQ_REMOVE(&pvh->pv_list, pv,pv_next);
4507 if (TAILQ_EMPTY(&pvh->pv_list)) {
4508 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
4509 if ((mt->aflags & PGA_WRITEABLE) != 0 &&
4510 TAILQ_EMPTY(&mt->md.pv_list))
4511 vm_page_aflag_clear(mt, PGA_WRITEABLE);
4513 ml3 = pmap_remove_pt_page(pmap,
4516 KASSERT(ml3->valid == VM_PAGE_BITS_ALL,
4517 ("pmap_remove_pages: l3 page not promoted"));
4518 pmap_resident_count_dec(pmap,1);
4519 KASSERT(ml3->ref_count == NL3PG,
4520 ("pmap_remove_pages: l3 page ref count error"));
4522 pmap_add_delayed_free_list(ml3,
4527 pmap_resident_count_dec(pmap, 1);
4528 TAILQ_REMOVE(&m->md.pv_list, pv,
4531 if ((m->aflags & PGA_WRITEABLE) != 0 &&
4532 TAILQ_EMPTY(&m->md.pv_list) &&
4533 (m->flags & PG_FICTITIOUS) == 0) {
4535 VM_PAGE_TO_PHYS(m));
4536 if (TAILQ_EMPTY(&pvh->pv_list))
4537 vm_page_aflag_clear(m,
4542 pmap_unuse_pt(pmap, pv->pv_va, pmap_load(pde),
4547 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
4548 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
4549 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
4551 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
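/*
 * A single, final TLB invalidation suffices because this pmap is not
 * active on any other processor.
 */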
4557 pmap_invalidate_all(pmap);
4559 vm_page_free_pages_toq(&free, true);
4563 * This is used to check if a page has been accessed or modified.
4566 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
4568 struct rwlock *lock;
4570 struct md_page *pvh;
4571 pt_entry_t *pte, mask, value;
4573 int lvl, md_gen, pvh_gen;
4577 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4580 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4582 if (!PMAP_TRYLOCK(pmap)) {
4583 md_gen = m->md.pv_gen;
4587 if (md_gen != m->md.pv_gen) {
4592 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4594 ("pmap_page_test_mappings: Invalid level %d", lvl));
4598 mask |= ATTR_AP_RW_BIT;
4599 value |= ATTR_AP(ATTR_AP_RW);
4602 mask |= ATTR_AF | ATTR_DESCR_MASK;
4603 value |= ATTR_AF | L3_PAGE;
4605 rv = (pmap_load(pte) & mask) == value;
4610 if ((m->flags & PG_FICTITIOUS) == 0) {
4611 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4612 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
4614 if (!PMAP_TRYLOCK(pmap)) {
4615 md_gen = m->md.pv_gen;
4616 pvh_gen = pvh->pv_gen;
4620 if (md_gen != m->md.pv_gen ||
4621 pvh_gen != pvh->pv_gen) {
4626 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4628 ("pmap_page_test_mappings: Invalid level %d", lvl));
4632 mask |= ATTR_AP_RW_BIT;
4633 value |= ATTR_AP(ATTR_AP_RW);
4636 mask |= ATTR_AF | ATTR_DESCR_MASK;
4637 value |= ATTR_AF | L2_BLOCK;
4639 rv = (pmap_load(pte) & mask) == value;
4653 * Return whether or not the specified physical page was modified
4654 * in any physical maps.
4657 pmap_is_modified(vm_page_t m)
4660 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4661 ("pmap_is_modified: page %p is not managed", m));
4664 * If the page is not busied then this check is racy.
4666 if (!pmap_page_is_write_mapped(m))
4668 return (pmap_page_test_mappings(m, FALSE, TRUE));
4672 * pmap_is_prefaultable:
4674 * Return whether or not the specified virtual address is eligible for prefaulting.
4678 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
4686 pte = pmap_pte(pmap, addr, &lvl);
4687 if (pte != NULL && pmap_load(pte) != 0) {
4695 * pmap_is_referenced:
4697 * Return whether or not the specified physical page was referenced
4698 * in any physical maps.
4701 pmap_is_referenced(vm_page_t m)
4704 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4705 ("pmap_is_referenced: page %p is not managed", m));
4706 return (pmap_page_test_mappings(m, TRUE, FALSE));
4710 * Clear the write and modified bits in each of the given page's mappings.
4713 pmap_remove_write(vm_page_t m)
4715 struct md_page *pvh;
4717 struct rwlock *lock;
4718 pv_entry_t next_pv, pv;
4719 pt_entry_t oldpte, *pte;
4721 int lvl, md_gen, pvh_gen;
4723 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4724 ("pmap_remove_write: page %p is not managed", m));
4725 vm_page_assert_busied(m);
4727 if (!pmap_page_is_write_mapped(m))
4729 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4730 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
4731 pa_to_pvh(VM_PAGE_TO_PHYS(m));
4734 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
4736 if (!PMAP_TRYLOCK(pmap)) {
4737 pvh_gen = pvh->pv_gen;
4741 if (pvh_gen != pvh->pv_gen) {
4748 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4749 if ((pmap_load(pte) & ATTR_SW_DBM) != 0)
4750 (void)pmap_demote_l2_locked(pmap, pte, va, &lock);
4751 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4752 ("inconsistent pv lock %p %p for page %p",
4753 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4756 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
4758 if (!PMAP_TRYLOCK(pmap)) {
4759 pvh_gen = pvh->pv_gen;
4760 md_gen = m->md.pv_gen;
4764 if (pvh_gen != pvh->pv_gen ||
4765 md_gen != m->md.pv_gen) {
4771 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4772 oldpte = pmap_load(pte);
4774 if ((oldpte & ATTR_SW_DBM) != 0) {
4775 if (!atomic_fcmpset_long(pte, &oldpte,
4776 (oldpte | ATTR_AP_RW_BIT) & ~ATTR_SW_DBM))
4778 if ((oldpte & ATTR_AP_RW_BIT) ==
4779 ATTR_AP(ATTR_AP_RW))
4781 pmap_invalidate_page(pmap, pv->pv_va);
4786 vm_page_aflag_clear(m, PGA_WRITEABLE);
4790 * pmap_ts_referenced:
4792 * Return a count of reference bits for a page, clearing those bits.
4793 * It is not necessary for every reference bit to be cleared, but it
4794 * is necessary that 0 only be returned when there are truly no
4795 * reference bits set.
4797 * As an optimization, update the page's dirty field if a modified bit is
4798 * found while counting reference bits. This opportunistic update can be
4799 * performed at low cost and can eliminate the need for some future calls
4800 * to pmap_is_modified(). However, since this function stops after
4801 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
4802 * dirty pages. Those dirty pages will only be detected by a future call
4803 * to pmap_is_modified().
4806 pmap_ts_referenced(vm_page_t m)
4808 struct md_page *pvh;
4811 struct rwlock *lock;
4812 pd_entry_t *pde, tpde;
4813 pt_entry_t *pte, tpte;
4816 int cleared, lvl, md_gen, not_cleared, pvh_gen;
4817 struct spglist free;
4819 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4820 ("pmap_ts_referenced: page %p is not managed", m));
4823 pa = VM_PAGE_TO_PHYS(m);
4824 lock = PHYS_TO_PV_LIST_LOCK(pa);
4825 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
4829 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
4830 goto small_mappings;
4836 if (!PMAP_TRYLOCK(pmap)) {
4837 pvh_gen = pvh->pv_gen;
4841 if (pvh_gen != pvh->pv_gen) {
4847 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4848 KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found"));
4850 ("pmap_ts_referenced: invalid pde level %d", lvl));
4851 tpde = pmap_load(pde);
4852 KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE,
4853 ("pmap_ts_referenced: found an invalid l1 table"));
4854 pte = pmap_l1_to_l2(pde, pv->pv_va);
4855 tpte = pmap_load(pte);
4856 if (pmap_pte_dirty(tpte)) {
4858 * Although "tpte" is mapping a 2MB page, because
4859 * this function is called at a 4KB page granularity,
4860 * we only update the 4KB page under test.
4865 if ((tpte & ATTR_AF) != 0) {
4867 * Since this reference bit is shared by 512 4KB pages,
4868 * it should not be cleared every time it is tested.
4869 * Apply a simple "hash" function on the physical page
4870 * number, the virtual superpage number, and the pmap
4871 * address to select one 4KB page out of the 512 on
4872 * which testing the reference bit will result in
4873 * clearing that reference bit. This function is
4874 * designed to avoid the selection of the same 4KB page
4875 * for every 2MB page mapping.
4877 * On demotion, a mapping that hasn't been referenced
4878 * is simply destroyed. To avoid the possibility of a
4879 * subsequent page fault on a demoted wired mapping,
4880 * always leave its reference bit set. Moreover,
4881 * since the superpage is wired, the current state of
4882 * its reference bit won't affect page replacement.
4884 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
4885 (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
4886 (tpte & ATTR_SW_WIRED) == 0) {
4887 pmap_clear_bits(pte, ATTR_AF);
4888 pmap_invalidate_page(pmap, pv->pv_va);
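/*
 * Sketch (illustrative, not from the original file): the predicate
 * above can be read as a stand-alone function of the physical page
 * number, the virtual superpage number, and the pmap address, e.g.
 *
 *	static bool
 *	ts_ref_should_clear(vm_paddr_t pa, vm_offset_t va, uintptr_t pm)
 *	{
 *		return ((((pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^ pm) &
 *		    (Ln_ENTRIES - 1)) == 0);
 *	}
 *
 * With Ln_ENTRIES == 512, roughly one PV entry in 512 satisfies it for
 * a given pmap, so the shared ATTR_AF bit of a 2MB mapping is cleared
 * only occasionally rather than on every scan.
 */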
4894 /* Rotate the PV list if it has more than one entry. */
4895 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4896 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4897 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4900 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
4902 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
4904 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
4911 if (!PMAP_TRYLOCK(pmap)) {
4912 pvh_gen = pvh->pv_gen;
4913 md_gen = m->md.pv_gen;
4917 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4922 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4923 KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
4925 ("pmap_ts_referenced: invalid pde level %d", lvl));
4926 tpde = pmap_load(pde);
4927 KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE,
4928 ("pmap_ts_referenced: found an invalid l2 table"));
4929 pte = pmap_l2_to_l3(pde, pv->pv_va);
4930 tpte = pmap_load(pte);
4931 if (pmap_pte_dirty(tpte))
4933 if ((tpte & ATTR_AF) != 0) {
4934 if ((tpte & ATTR_SW_WIRED) == 0) {
4935 pmap_clear_bits(pte, ATTR_AF);
4936 pmap_invalidate_page(pmap, pv->pv_va);
4942 /* Rotate the PV list if it has more than one entry. */
4943 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4944 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4945 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4948 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
4949 not_cleared < PMAP_TS_REFERENCED_MAX);
4952 vm_page_free_pages_toq(&free, true);
4953 return (cleared + not_cleared);
4957 * Apply the given advice to the specified range of addresses within the
4958 * given pmap. Depending on the advice, clear the referenced and/or
4959 * modified flags in each mapping and set the mapped page's dirty field.
4962 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
4964 struct rwlock *lock;
4965 vm_offset_t va, va_next;
4967 pd_entry_t *l0, *l1, *l2, oldl2;
4968 pt_entry_t *l3, oldl3;
4970 if (advice != MADV_DONTNEED && advice != MADV_FREE)
4974 for (; sva < eva; sva = va_next) {
4975 l0 = pmap_l0(pmap, sva);
4976 if (pmap_load(l0) == 0) {
4977 va_next = (sva + L0_SIZE) & ~L0_OFFSET;
4982 l1 = pmap_l0_to_l1(l0, sva);
4983 if (pmap_load(l1) == 0) {
4984 va_next = (sva + L1_SIZE) & ~L1_OFFSET;
4989 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
4992 l2 = pmap_l1_to_l2(l1, sva);
4993 oldl2 = pmap_load(l2);
4996 if ((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK) {
4997 if ((oldl2 & ATTR_SW_MANAGED) == 0)
5000 if (!pmap_demote_l2_locked(pmap, l2, sva, &lock)) {
5005 * The 2MB page mapping was destroyed.
5011 * Unless the page mappings are wired, remove the
5012 * mapping to a single page so that a subsequent
5013 * access may repromote. Choosing the last page
5014 * within the address range [sva, min(va_next, eva))
5015 * generally results in more repromotions. Since the
5016 * underlying page table page is fully populated, this
5017 * removal never frees a page table page.
5019 if ((oldl2 & ATTR_SW_WIRED) == 0) {
5025 ("pmap_advise: no address gap"));
5026 l3 = pmap_l2_to_l3(l2, va);
5027 KASSERT(pmap_load(l3) != 0,
5028 ("pmap_advise: invalid PTE"));
5029 pmap_remove_l3(pmap, l3, va, pmap_load(l2),
5035 KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
5036 ("pmap_advise: invalid L2 entry after demotion"));
5040 for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
5042 oldl3 = pmap_load(l3);
5043 if ((oldl3 & (ATTR_SW_MANAGED | ATTR_DESCR_MASK)) !=
5044 (ATTR_SW_MANAGED | L3_PAGE))
5046 else if (pmap_pte_dirty(oldl3)) {
5047 if (advice == MADV_DONTNEED) {
5049 * Future calls to pmap_is_modified()
5050 * can be avoided by making the page dirty now.
5053 m = PHYS_TO_VM_PAGE(oldl3 & ~ATTR_MASK);
5056 while (!atomic_fcmpset_long(l3, &oldl3,
5057 (oldl3 & ~ATTR_AF) | ATTR_AP(ATTR_AP_RO)))
5059 } else if ((oldl3 & ATTR_AF) != 0)
5060 pmap_clear_bits(l3, ATTR_AF);
5067 if (va != va_next) {
5068 pmap_invalidate_range(pmap, va, sva);
5073 pmap_invalidate_range(pmap, va, sva);
5079 * Clear the modify bits on the specified physical page.
5082 pmap_clear_modify(vm_page_t m)
5084 struct md_page *pvh;
5085 struct rwlock *lock;
5087 pv_entry_t next_pv, pv;
5088 pd_entry_t *l2, oldl2;
5089 pt_entry_t *l3, oldl3;
5091 int md_gen, pvh_gen;
5093 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5094 ("pmap_clear_modify: page %p is not managed", m));
5095 vm_page_assert_busied(m);
5097 if (!pmap_page_is_write_mapped(m))
5099 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5100 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5101 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5104 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5106 if (!PMAP_TRYLOCK(pmap)) {
5107 pvh_gen = pvh->pv_gen;
5111 if (pvh_gen != pvh->pv_gen) {
5117 l2 = pmap_l2(pmap, va);
5118 oldl2 = pmap_load(l2);
5119 /* If oldl2 has ATTR_SW_DBM set, then it is also dirty. */
5120 if ((oldl2 & ATTR_SW_DBM) != 0 &&
5121 pmap_demote_l2_locked(pmap, l2, va, &lock) &&
5122 (oldl2 & ATTR_SW_WIRED) == 0) {
5124 * Write protect the mapping to a single page so that
5125 * a subsequent write access may repromote.
5127 va += VM_PAGE_TO_PHYS(m) - (oldl2 & ~ATTR_MASK);
5128 l3 = pmap_l2_to_l3(l2, va);
5129 oldl3 = pmap_load(l3);
5130 while (!atomic_fcmpset_long(l3, &oldl3,
5131 (oldl3 & ~ATTR_SW_DBM) | ATTR_AP(ATTR_AP_RO)))
5134 pmap_invalidate_page(pmap, va);
5138 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5140 if (!PMAP_TRYLOCK(pmap)) {
5141 md_gen = m->md.pv_gen;
5142 pvh_gen = pvh->pv_gen;
5146 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5151 l2 = pmap_l2(pmap, pv->pv_va);
5152 l3 = pmap_l2_to_l3(l2, pv->pv_va);
5153 oldl3 = pmap_load(l3);
5154 if (pmap_l3_valid(oldl3) &&
5155 (oldl3 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM) {
5156 pmap_set_bits(l3, ATTR_AP(ATTR_AP_RO));
5157 pmap_invalidate_page(pmap, pv->pv_va);
5165 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
5167 struct pmap_preinit_mapping *ppim;
5168 vm_offset_t va, offset;
5171 int i, lvl, l2_blocks, free_l2_count, start_idx;
5173 if (!vm_initialized) {
5175 * No L3 page tables exist yet, so map entire L2 blocks, where the start VA is:
5176 * preinit_map_va + start_idx * L2_SIZE
5177 * There may be duplicate mappings (multiple VA -> same PA), but the
5178 * arm64 dcache is always PIPT, so that's acceptable.
5183 /* Calculate how many L2 blocks are needed for the mapping */
5184 l2_blocks = (roundup2(pa + size, L2_SIZE) -
5185 rounddown2(pa, L2_SIZE)) >> L2_SHIFT;
5187 offset = pa & L2_OFFSET;
5189 if (preinit_map_va == 0)
5192 /* Map 2MiB L2 blocks from reserved VA space */
5196 /* Find enough free contiguous VA space */
5197 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5198 ppim = pmap_preinit_mapping + i;
5199 if (free_l2_count > 0 && ppim->pa != 0) {
5200 /* Not enough space here */
5206 if (ppim->pa == 0) {
5208 if (start_idx == -1)
5211 if (free_l2_count == l2_blocks)
5215 if (free_l2_count != l2_blocks)
5216 panic("%s: too many preinit mappings", __func__);
5218 va = preinit_map_va + (start_idx * L2_SIZE);
5219 for (i = start_idx; i < start_idx + l2_blocks; i++) {
5220 /* Mark entries as allocated */
5221 ppim = pmap_preinit_mapping + i;
5223 ppim->va = va + offset;
5228 pa = rounddown2(pa, L2_SIZE);
5229 for (i = 0; i < l2_blocks; i++) {
5230 pde = pmap_pde(kernel_pmap, va, &lvl);
5231 KASSERT(pde != NULL,
5232 ("pmap_mapbios: Invalid page entry, va: 0x%lx",
5235 ("pmap_mapbios: Invalid level %d", lvl));
5237 /* Insert L2_BLOCK */
5238 l2 = pmap_l1_to_l2(pde, va);
5240 pa | ATTR_DEFAULT | ATTR_XN |
5241 ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
5246 pmap_invalidate_all(kernel_pmap);
5248 va = preinit_map_va + (start_idx * L2_SIZE);
5251 /* kva_alloc may be used to map the pages */
5252 offset = pa & PAGE_MASK;
5253 size = round_page(offset + size);
5255 va = kva_alloc(size);
5257 panic("%s: Couldn't allocate KVA", __func__);
5259 pde = pmap_pde(kernel_pmap, va, &lvl);
5260 KASSERT(lvl == 2, ("pmap_mapbios: Invalid level %d", lvl));
5262 /* L3 table is linked */
5263 va = trunc_page(va);
5264 pa = trunc_page(pa);
5265 pmap_kenter(va, size, pa, CACHED_MEMORY);
5268 return ((void *)(va + offset));
5272 pmap_unmapbios(vm_offset_t va, vm_size_t size)
5274 struct pmap_preinit_mapping *ppim;
5275 vm_offset_t offset, tmpsize, va_trunc;
5278 int i, lvl, l2_blocks, block;
5282 (roundup2(va + size, L2_SIZE) - rounddown2(va, L2_SIZE)) >> L2_SHIFT;
5283 KASSERT(l2_blocks > 0, ("pmap_unmapbios: invalid size %lx", size));
5285 /* Remove preinit mapping */
5286 preinit_map = false;
5288 for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
5289 ppim = pmap_preinit_mapping + i;
5290 if (ppim->va == va) {
5291 KASSERT(ppim->size == size,
5292 ("pmap_unmapbios: size mismatch"));
5297 offset = block * L2_SIZE;
5298 va_trunc = rounddown2(va, L2_SIZE) + offset;
5300 /* Remove L2_BLOCK */
5301 pde = pmap_pde(kernel_pmap, va_trunc, &lvl);
5302 KASSERT(pde != NULL,
5303 ("pmap_unmapbios: Invalid page entry, va: 0x%lx",
5305 l2 = pmap_l1_to_l2(pde, va_trunc);
5308 if (block == (l2_blocks - 1))
5314 pmap_invalidate_all(kernel_pmap);
5318 /* Unmap the pages reserved with kva_alloc. */
5319 if (vm_initialized) {
5320 offset = va & PAGE_MASK;
5321 size = round_page(offset + size);
5322 va = trunc_page(va);
5324 pde = pmap_pde(kernel_pmap, va, &lvl);
5325 KASSERT(pde != NULL,
5326 ("pmap_unmapbios: Invalid page entry, va: 0x%lx", va));
5327 KASSERT(lvl == 2, ("pmap_unmapbios: Invalid level %d", lvl));
5329 /* Unmap and invalidate the pages */
5330 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
5331 pmap_kremove(va + tmpsize);
5338 * Sets the memory attribute for the specified page.
5341 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5344 m->md.pv_memattr = ma;
5347 * If "m" is a normal page, update its direct mapping. This update
5348 * can be relied upon to perform any cache operations that are
5349 * required for data coherence.
5351 if ((m->flags & PG_FICTITIOUS) == 0 &&
5352 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
5353 m->md.pv_memattr) != 0)
5354 panic("memory attribute change on the direct map failed");
5358 * Changes the specified virtual address range's memory type to that given by
5359 * the parameter "mode". The specified virtual address range must be
5360 * completely contained within either the direct map or the kernel map. If
5361 * the virtual address range is contained within the kernel map, then the
5362 * memory type for each of the corresponding ranges of the direct map is also
5363 * changed. (The corresponding ranges of the direct map are those ranges that
5364 * map the same physical pages as the specified virtual address range.) These
5365 * changes to the direct map are necessary because Intel describes the
5366 * behavior of their processors as "undefined" if two or more mappings to the
5367 * same physical page have different memory types.
5369 * Returns zero if the change completed successfully, and either EINVAL or
5370 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
5371 * of the virtual address range was not mapped, and ENOMEM is returned if
5372 * there was insufficient memory available to complete the change. In the
5373 * latter case, the memory type may have been changed on some part of the
5374 * virtual address range or the direct map.
5377 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
5381 PMAP_LOCK(kernel_pmap);
5382 error = pmap_change_attr_locked(va, size, mode);
5383 PMAP_UNLOCK(kernel_pmap);
5388 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
5390 vm_offset_t base, offset, tmpva;
5391 pt_entry_t l3, *pte, *newpte;
5394 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
5395 base = trunc_page(va);
5396 offset = va & PAGE_MASK;
5397 size = round_page(offset + size);
5399 if (!VIRT_IN_DMAP(base) &&
5400 !(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
5403 for (tmpva = base; tmpva < base + size; ) {
5404 pte = pmap_pte(kernel_pmap, tmpva, &lvl);
5408 if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
5410 * We already have the correct attribute,
5411 * ignore this entry.
5415 panic("Invalid DMAP table level: %d\n", lvl);
5417 tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
5420 tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
5428 * Split the entry to a level 3 table, then
5429 * set the new attribute.
5433 panic("Invalid DMAP table level: %d\n", lvl);
5435 newpte = pmap_demote_l1(kernel_pmap, pte,
5436 tmpva & ~L1_OFFSET);
5439 pte = pmap_l1_to_l2(pte, tmpva);
5441 newpte = pmap_demote_l2(kernel_pmap, pte,
5445 pte = pmap_l2_to_l3(pte, tmpva);
5447 /* Update the entry */
5448 l3 = pmap_load(pte);
5449 l3 &= ~ATTR_IDX_MASK;
5450 l3 |= ATTR_IDX(mode);
5451 if (mode == DEVICE_MEMORY)
5454 pmap_update_entry(kernel_pmap, pte, l3, tmpva,
5458 * If moving to a non-cacheable entry, flush the data cache.
5461 if (mode == VM_MEMATTR_UNCACHEABLE)
5462 cpu_dcache_wbinv_range(tmpva, L3_SIZE);
5474 * Create an L2 table to map all addresses within an L1 mapping.
5477 pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
5479 pt_entry_t *l2, newl2, oldl1;
5481 vm_paddr_t l2phys, phys;
5485 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5486 oldl1 = pmap_load(l1);
5487 KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
5488 ("pmap_demote_l1: Demoting a non-block entry"));
5489 KASSERT((va & L1_OFFSET) == 0,
5490 ("pmap_demote_l1: Invalid virtual address %#lx", va));
5491 KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
5492 ("pmap_demote_l1: Level 1 table shouldn't be managed"));
5495 if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
5496 tmpl1 = kva_alloc(PAGE_SIZE);
5501 if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
5502 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
5503 CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
5504 " in pmap %p", va, pmap);
5508 l2phys = VM_PAGE_TO_PHYS(ml2);
5509 l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
5511 /* The physical address that the range points at */
5512 phys = oldl1 & ~ATTR_MASK;
5513 /* The attributes from the old l1 table to be copied */
5514 newl2 = oldl1 & ATTR_MASK;
5516 /* Create the new entries */
5517 for (i = 0; i < Ln_ENTRIES; i++) {
5518 l2[i] = newl2 | phys;
5521 KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
5522 ("Invalid l2 page (%lx != %lx)", l2[0],
5523 (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
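/*
 * Illustrative sketch (assumes the loop above advances "phys" by
 * L2_SIZE per entry): a 1GB L1 block that maps physical address P
 * becomes Ln_ENTRIES (512) L2 block entries of the form
 *
 *	l2[i] = (oldl1 & ATTR_MASK) | (P + i * L2_SIZE)
 *
 * Each 2MB entry inherits the attributes of the original mapping, and
 * because L1_BLOCK and L2_BLOCK share the same descriptor encoding the
 * KASSERT above holds for l2[0].
 */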
5526 pmap_kenter(tmpl1, PAGE_SIZE,
5527 DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, CACHED_MEMORY);
5528 l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
5531 pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
5534 pmap_kremove(tmpl1);
5535 kva_free(tmpl1, PAGE_SIZE);
5542 pmap_fill_l3(pt_entry_t *firstl3, pt_entry_t newl3)
5546 for (l3 = firstl3; l3 - firstl3 < Ln_ENTRIES; l3++) {
5553 pmap_demote_l2_abort(pmap_t pmap, vm_offset_t va, pt_entry_t *l2,
5554 struct rwlock **lockp)
5556 struct spglist free;
5559 (void)pmap_remove_l2(pmap, l2, va, pmap_load(pmap_l1(pmap, va)), &free,
5561 vm_page_free_pages_toq(&free, true);
5565 * Create an L3 table to map all addresses within an L2 mapping.
5568 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
5569 struct rwlock **lockp)
5571 pt_entry_t *l3, newl3, oldl2;
5576 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5578 oldl2 = pmap_load(l2);
5579 KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
5580 ("pmap_demote_l2: Demoting a non-block entry"));
5584 if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
5585 tmpl2 = kva_alloc(PAGE_SIZE);
5591 * Invalidate the 2MB page mapping and return "failure" if the
5592 * mapping was never accessed.
5594 if ((oldl2 & ATTR_AF) == 0) {
5595 KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5596 ("pmap_demote_l2: a wired mapping is missing ATTR_AF"));
5597 pmap_demote_l2_abort(pmap, va, l2, lockp);
5598 CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx in pmap %p",
5603 if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
5604 KASSERT((oldl2 & ATTR_SW_WIRED) == 0,
5605 ("pmap_demote_l2: page table page for a wired mapping"
5609 * If the page table page is missing and the mapping
5610 * is for a kernel address, the mapping must belong to
5611 * the direct map. Page table pages are preallocated
5612 * for every other part of the kernel address space,
5613 * so the direct map region is the only part of the
5614 * kernel address space that must be handled here.
5616 KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
5617 ("pmap_demote_l2: No saved mpte for va %#lx", va));
5620 * If the 2MB page mapping belongs to the direct map
5621 * region of the kernel's address space, then the page
5622 * allocation request specifies the highest possible
5623 * priority (VM_ALLOC_INTERRUPT). Otherwise, the
5624 * priority is normal.
5626 ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
5627 (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
5628 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
5631 * If the allocation of the new page table page fails,
5632 * invalidate the 2MB page mapping and return "failure".
5635 pmap_demote_l2_abort(pmap, va, l2, lockp);
5636 CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
5637 " in pmap %p", va, pmap);
5641 if (va < VM_MAXUSER_ADDRESS) {
5642 ml3->ref_count = NL3PG;
5643 pmap_resident_count_inc(pmap, 1);
5646 l3phys = VM_PAGE_TO_PHYS(ml3);
5647 l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
5648 newl3 = (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
5649 KASSERT((oldl2 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) !=
5650 (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM),
5651 ("pmap_demote_l2: L2 entry is writeable but not dirty"));
5654 * If the page table page is not leftover from an earlier promotion,
5655 * or the mapping attributes have changed, (re)initialize the L3 table.
5657 * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
5658 * performs a dsb(). That dsb() ensures that the stores for filling
5659 * "l3" are visible before "l3" is added to the page table.
5661 if (ml3->valid == 0 || (l3[0] & ATTR_MASK) != (newl3 & ATTR_MASK))
5662 pmap_fill_l3(l3, newl3);
5665 * Map the temporary page so we don't lose access to the l2 table.
5668 pmap_kenter(tmpl2, PAGE_SIZE,
5669 DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, CACHED_MEMORY);
5670 l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
5674 * The spare PV entries must be reserved prior to demoting the
5675 * mapping, that is, prior to changing the PDE. Otherwise, the state
5676 * of the L2 and the PV lists will be inconsistent, which can result
5677 * in reclaim_pv_chunk() attempting to remove a PV entry from the
5678 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
5679 * PV entry for the 2MB page mapping that is being demoted.
5681 if ((oldl2 & ATTR_SW_MANAGED) != 0)
5682 reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
5685 * Pass PAGE_SIZE so that a single TLB invalidation is performed on
5686 * the 2MB page mapping.
5688 pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
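/*
 * Note (illustrative, not part of the original): conceptually this is
 * the break-before-make step of the demotion.  The old 2MB block entry
 * is replaced by an L2_TABLE entry pointing at "l3"; pmap_update_entry()
 * is expected to clear the old entry and invalidate the TLB before
 * storing the new one, so no CPU can ever observe two conflicting
 * translations for an address in [va, va + L2_SIZE).
 */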
5691 * Demote the PV entry.
5693 if ((oldl2 & ATTR_SW_MANAGED) != 0)
5694 pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
5696 atomic_add_long(&pmap_l2_demotions, 1);
5697 CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
5698 " in pmap %p %lx", va, pmap, l3[0]);
5702 pmap_kremove(tmpl2);
5703 kva_free(tmpl2, PAGE_SIZE);
5711 pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
5713 struct rwlock *lock;
5717 l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
5724 * Perform the pmap work for mincore(2). If the page is not both referenced and
5725 * modified by this pmap, returns its physical address so that the caller can
5726 * find other mappings.
5729 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
5731 pt_entry_t *pte, tpte;
5732 vm_paddr_t mask, pa;
5737 pte = pmap_pte(pmap, addr, &lvl);
5739 tpte = pmap_load(pte);
5752 panic("pmap_mincore: invalid level %d", lvl);
5755 managed = (tpte & ATTR_SW_MANAGED) != 0;
5756 val = MINCORE_INCORE;
5758 val |= MINCORE_SUPER;
5759 if ((managed && pmap_pte_dirty(tpte)) || (!managed &&
5760 (tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)))
5761 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5762 if ((tpte & ATTR_AF) == ATTR_AF)
5763 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5765 pa = (tpte & ~ATTR_MASK) | (addr & mask);
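/*
 * Worked example (illustrative, not in the original): for an L2 block
 * mapping, "mask" is L2_OFFSET (0x1fffff).  If addr == 0x40203000 and
 * the block maps physical address 0x80000000, then
 *
 *	pa = 0x80000000 | (0x40203000 & 0x1fffff) = 0x80003000
 *
 * i.e. the physical address of the exact 4KB page backing "addr",
 * which is what the caller uses to find other mappings of that page.
 */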
5771 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5772 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
5780 * Garbage collect every ASID that is neither active on a processor nor reserved.
5784 pmap_reset_asid_set(void)
5787 int asid, cpuid, epoch;
5789 mtx_assert(&asid_set_mutex, MA_OWNED);
5792 * Ensure that the store to asid_epoch is globally visible before the
5793 * loads from pc_curpmap are performed.
5795 epoch = asid_epoch + 1;
5796 if (epoch == INT_MAX)
5800 __asm __volatile("tlbi vmalle1is");
5802 bit_nclear(asid_set, ASID_FIRST_AVAILABLE, asid_set_size - 1);
5803 CPU_FOREACH(cpuid) {
5804 if (cpuid == curcpu)
5806 pmap = pcpu_find(cpuid)->pc_curpmap;
5807 asid = COOKIE_TO_ASID(pmap->pm_cookie);
5810 bit_set(asid_set, asid);
5811 pmap->pm_cookie = COOKIE_FROM(asid, epoch);
5816 * Allocate a new ASID for the specified pmap.
5819 pmap_alloc_asid(pmap_t pmap)
5823 mtx_lock_spin(&asid_set_mutex);
5826 * While this processor was waiting to acquire the asid set mutex,
5827 * pmap_reset_asid_set() running on another processor might have
4828 * updated this pmap's cookie to the current epoch, in which case we
4829 * don't need to allocate a new ASID.
5831 if (COOKIE_TO_EPOCH(pmap->pm_cookie) == asid_epoch)
5834 bit_ffc_at(asid_set, asid_next, asid_set_size, &new_asid);
5835 if (new_asid == -1) {
5836 bit_ffc_at(asid_set, ASID_FIRST_AVAILABLE, asid_next,
5838 if (new_asid == -1) {
5839 pmap_reset_asid_set();
5840 bit_ffc_at(asid_set, ASID_FIRST_AVAILABLE,
5841 asid_set_size, &new_asid);
5842 KASSERT(new_asid != -1, ("ASID allocation failure"));
5845 bit_set(asid_set, new_asid);
5846 asid_next = new_asid + 1;
5847 pmap->pm_cookie = COOKIE_FROM(new_asid, asid_epoch);
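/*
 * Illustrative note (not from the original file): the cookie packs the
 * ASID and the epoch together, so pmap_activate_int() can detect a
 * recycled ASID with a single comparison, e.g.
 *
 *	cookie = COOKIE_FROM(asid, epoch);
 *	...
 *	if (COOKIE_TO_EPOCH(cookie) != asid_epoch)
 *		(reallocate the ASID before installing ttbr0)
 *
 * An epoch mismatch means pmap_reset_asid_set() has run since this
 * pmap last obtained its ASID, so that ASID may have been handed out
 * again.
 */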
5849 mtx_unlock_spin(&asid_set_mutex);
5853 * Compute the value that should be stored in ttbr0 to activate the specified
5854 * pmap. This value may change from time to time.
5857 pmap_to_ttbr0(pmap_t pmap)
5860 return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) |
5865 pmap_activate_int(pmap_t pmap)
5869 KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap"));
5870 KASSERT(pmap != kernel_pmap, ("kernel pmap activation"));
5871 if (pmap == PCPU_GET(curpmap))
5875 * Ensure that the store to curpmap is globally visible before the
5876 * load from asid_epoch is performed.
5878 PCPU_SET(curpmap, pmap);
5880 epoch = COOKIE_TO_EPOCH(pmap->pm_cookie);
5881 if (epoch >= 0 && epoch != asid_epoch)
5882 pmap_alloc_asid(pmap);
5884 set_ttbr0(pmap_to_ttbr0(pmap));
5885 if (PCPU_GET(bcast_tlbi_workaround) != 0)
5886 invalidate_local_icache();
5891 pmap_activate(struct thread *td)
5895 pmap = vmspace_pmap(td->td_proc->p_vmspace);
5897 (void)pmap_activate_int(pmap);
5902 * To eliminate the unused parameter "old", we would have to add an instruction
5906 pmap_switch(struct thread *old __unused, struct thread *new)
5908 pcpu_bp_harden bp_harden;
5911 /* Store the new curthread */
5912 PCPU_SET(curthread, new);
5914 /* And the new pcb */
5916 PCPU_SET(curpcb, pcb);
5919 * TODO: We may need to flush the cache here if switching
5920 * to a user process.
5923 if (pmap_activate_int(vmspace_pmap(new->td_proc->p_vmspace))) {
5925 * Stop userspace from training the branch predictor against
5926 * other processes. This will call into a CPU specific
5927 * function that clears the branch predictor state.
5929 bp_harden = PCPU_GET(bp_harden);
5930 if (bp_harden != NULL)
5938 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
5941 if (va >= VM_MIN_KERNEL_ADDRESS) {
5942 cpu_icache_sync_range(va, sz);
5947 /* Find the length of data in this page to flush */
5948 offset = va & PAGE_MASK;
5949 len = imin(PAGE_SIZE - offset, sz);
5952 /* Extract the physical address & find it in the DMAP */
5953 pa = pmap_extract(pmap, va);
5955 cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
5957 /* Move to the next page */
5960 /* Set the length for the next iteration */
5961 len = imin(PAGE_SIZE, sz);
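/*
 * Worked example (illustrative, not part of the original): for a
 * non-kernel range with va ending in 0xf800 and sz = 0x1000, the loop
 * issues two syncs of 0x800 bytes each, resolving each page separately
 * through pmap_extract() and the DMAP, because virtually contiguous
 * user pages need not be physically contiguous.
 */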
5967 pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
5969 pt_entry_t pte, *ptep;
5976 ec = ESR_ELx_EXCEPTION(esr);
5978 case EXCP_INSN_ABORT_L:
5979 case EXCP_INSN_ABORT:
5980 case EXCP_DATA_ABORT_L:
5981 case EXCP_DATA_ABORT:
5987 /* Data and insn aborts use same encoding for FSC field. */
5988 switch (esr & ISS_DATA_DFSC_MASK) {
5989 case ISS_DATA_DFSC_AFF_L1:
5990 case ISS_DATA_DFSC_AFF_L2:
5991 case ISS_DATA_DFSC_AFF_L3:
5993 ptep = pmap_pte(pmap, far, &lvl);
5995 pmap_set_bits(ptep, ATTR_AF);
5998 * XXXMJ as an optimization we could mark the entry
5999 * dirty if this is a write fault.
6004 case ISS_DATA_DFSC_PF_L1:
6005 case ISS_DATA_DFSC_PF_L2:
6006 case ISS_DATA_DFSC_PF_L3:
6007 if ((ec != EXCP_DATA_ABORT_L && ec != EXCP_DATA_ABORT) ||
6008 (esr & ISS_DATA_WnR) == 0)
6011 ptep = pmap_pte(pmap, far, &lvl);
6013 ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
6014 if ((pte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RO)) {
6015 pmap_clear_bits(ptep, ATTR_AP_RW_BIT);
6016 pmap_invalidate_page(pmap, far);
6022 case ISS_DATA_DFSC_TF_L0:
6023 case ISS_DATA_DFSC_TF_L1:
6024 case ISS_DATA_DFSC_TF_L2:
6025 case ISS_DATA_DFSC_TF_L3:
6027 * Retry the translation. A break-before-make sequence can
6028 * produce a transient fault.
6030 if (pmap == kernel_pmap) {
6032 * The translation fault may have occurred within a
6033 * critical section. Therefore, we must check the
6034 * address without acquiring the kernel pmap's lock.
6036 if (pmap_kextract(far) != 0)
6040 /* Ask the MMU to check the address. */
6041 intr = intr_disable();
6042 par = arm64_address_translate_s1e0r(far);
6047 * If the translation was successful, then we can
6048 * return success to the trap handler.
6050 if (PAR_SUCCESS(par))
6060 * Increase the starting virtual address of the given mapping if a
6061 * different alignment might result in more superpage mappings.
6064 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
6065 vm_offset_t *addr, vm_size_t size)
6067 vm_offset_t superpage_offset;
6071 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
6072 offset += ptoa(object->pg_color);
6073 superpage_offset = offset & L2_OFFSET;
6074 if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
6075 (*addr & L2_OFFSET) == superpage_offset)
6077 if ((*addr & L2_OFFSET) < superpage_offset)
6078 *addr = (*addr & ~L2_OFFSET) + superpage_offset;
6080 *addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
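/*
 * Worked example (illustrative, not in the original): with L2_SIZE =
 * 2MB, an object offset of 0x203000 gives superpage_offset = 0x3000.
 * For a sufficiently large request (roughly 4MB or more) with a
 * proposed *addr of 0x40005000, (*addr & L2_OFFSET) = 0x5000 differs
 * from 0x3000, so *addr is advanced to
 *
 *	((0x40005000 + 0x1fffff) & ~0x1fffff) + 0x3000 = 0x40203000
 *
 * which is 2MB-congruent with the object offset and therefore eligible
 * for superpage promotion.
 */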
6084 * Get the kernel virtual address of a set of physical pages. If there are
6085 * physical addresses not covered by the DMAP perform a transient mapping
6086 * that will be removed when calling pmap_unmap_io_transient.
6088 * \param page The pages the caller wishes to obtain the virtual
6089 * address on the kernel memory map.
6090 * \param vaddr On return contains the kernel virtual memory address
6091 * of the pages passed in the page parameter.
6092 * \param count Number of pages passed in.
6093 * \param can_fault TRUE if the thread using the mapped pages can take
6094 * page faults, FALSE otherwise.
6096 * \returns TRUE if the caller must call pmap_unmap_io_transient when
6097 * finished or FALSE otherwise.
6101 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
6102 boolean_t can_fault)
6105 boolean_t needs_mapping;
6109 * Allocate any KVA space that we need; this is done in a separate
6110 * loop to prevent calling vmem_alloc() while pinned.
6112 needs_mapping = FALSE;
6113 for (i = 0; i < count; i++) {
6114 paddr = VM_PAGE_TO_PHYS(page[i]);
6115 if (__predict_false(!PHYS_IN_DMAP(paddr))) {
6116 error = vmem_alloc(kernel_arena, PAGE_SIZE,
6117 M_BESTFIT | M_WAITOK, &vaddr[i]);
6118 KASSERT(error == 0, ("vmem_alloc failed: %d", error));
6119 needs_mapping = TRUE;
6121 vaddr[i] = PHYS_TO_DMAP(paddr);
6125 /* Exit early if everything is covered by the DMAP */
6131 for (i = 0; i < count; i++) {
6132 paddr = VM_PAGE_TO_PHYS(page[i]);
6133 if (!PHYS_IN_DMAP(paddr)) {
6135 "pmap_map_io_transient: TODO: Map out of DMAP data");
6139 return (needs_mapping);
6143 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
6144 boolean_t can_fault)
6151 for (i = 0; i < count; i++) {
6152 paddr = VM_PAGE_TO_PHYS(page[i]);
6153 if (!PHYS_IN_DMAP(paddr)) {
6154 panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
6160 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
6163 return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);