2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2018 Matthew Macy
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "opt_platform.h"
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/systm.h>
37 #include <sys/bitstring.h>
38 #include <sys/queue.h>
39 #include <sys/cpuset.h>
40 #include <sys/endian.h>
41 #include <sys/kerneldump.h>
44 #include <sys/syslog.h>
45 #include <sys/msgbuf.h>
46 #include <sys/malloc.h>
48 #include <sys/mutex.h>
50 #include <sys/rwlock.h>
51 #include <sys/sched.h>
52 #include <sys/sysctl.h>
53 #include <sys/systm.h>
55 #include <sys/vmmeter.h>
60 #include <dev/ofw/openfirm.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_extern.h>
70 #include <vm/vm_pageout.h>
71 #include <vm/vm_phys.h>
72 #include <vm/vm_reserv.h>
73 #include <vm/vm_dumpset.h>
76 #include <machine/_inttypes.h>
77 #include <machine/cpu.h>
78 #include <machine/platform.h>
79 #include <machine/frame.h>
80 #include <machine/md_var.h>
81 #include <machine/psl.h>
82 #include <machine/bat.h>
83 #include <machine/hid.h>
84 #include <machine/pte.h>
85 #include <machine/sr.h>
86 #include <machine/trap.h>
87 #include <machine/mmuvar.h>
89 /* For pseries bit. */
90 #include <powerpc/pseries/phyp-hvcall.h>
93 #include <vm/uma_dbg.h>
96 #define PPC_BITLSHIFT(bit) (sizeof(long)*NBBY - 1 - (bit))
97 #define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
98 #define PPC_BITLSHIFT_VAL(val, bit) ((val) << PPC_BITLSHIFT(bit))
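/*
 * These helpers translate IBM-style MSB-0 bit numbers into conventional
 * shifts on a 64-bit long.  For example, PPC_BIT(0) expands to 1UL << 63,
 * PPC_BIT(63) to 1UL << 0, and PPC_BITLSHIFT_VAL(set, 51) places "set"
 * twelve bits up from the least significant bit.
 */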
103 static void pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va);
106 #define PG_W RPTE_WIRED
107 #define PG_V RPTE_VALID
108 #define PG_MANAGED RPTE_MANAGED
109 #define PG_PROMOTED RPTE_PROMOTED
112 #define PG_X RPTE_EAA_X
113 #define PG_RW RPTE_EAA_W
114 #define PG_PTE_CACHE RPTE_ATTR_MASK
117 #define NLS_MASK ((1UL<<5)-1)
118 #define RPTE_ENTRIES (1UL<<RPTE_SHIFT)
119 #define RPTE_MASK (RPTE_ENTRIES-1)
122 #define NLB_MASK (((1UL<<52)-1) << 8)
125 extern caddr_t crashdumpmap;
127 #define RIC_FLUSH_TLB 0
128 #define RIC_FLUSH_PWC 1
129 #define RIC_FLUSH_ALL 2
131 #define POWER9_TLB_SETS_RADIX 128 /* # sets in POWER9 TLB Radix mode */
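/*
 * tlbiel invalidates a single congruence class (set) per invocation, so
 * tlbiel_flush_isa3() below issues one tlbiel per set, looping over all
 * POWER9_TLB_SETS_RADIX sets when flushing locally.
 */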
133 #define PPC_INST_TLBIE 0x7c000264
134 #define PPC_INST_TLBIEL 0x7c000224
135 #define PPC_INST_SLBIA 0x7c0003e4
137 #define ___PPC_RA(a) (((a) & 0x1f) << 16)
138 #define ___PPC_RB(b) (((b) & 0x1f) << 11)
139 #define ___PPC_RS(s) (((s) & 0x1f) << 21)
140 #define ___PPC_RT(t) ___PPC_RS(t)
141 #define ___PPC_R(r) (((r) & 0x1) << 16)
142 #define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
143 #define ___PPC_RIC(ric) (((ric) & 0x3) << 18)
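/*
 * The ___PPC_* macros place register numbers and the RIC/PRS/R fields at
 * their bit positions within the tlbie/tlbiel instruction word, so the
 * instructions can be emitted as raw .long values (see PPC_TLBIE_5 and
 * PPC_TLBIEL below) even when the assembler lacks the extended mnemonics.
 */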
145 #define PPC_SLBIA(IH) __XSTRING(.long PPC_INST_SLBIA | \
147 #define PPC_TLBIE_5(rb,rs,ric,prs,r) \
148 __XSTRING(.long PPC_INST_TLBIE | \
149 ___PPC_RB(rb) | ___PPC_RS(rs) | \
150 ___PPC_RIC(ric) | ___PPC_PRS(prs) | \
153 #define PPC_TLBIEL(rb,rs,ric,prs,r) \
154 __XSTRING(.long PPC_INST_TLBIEL | \
155 ___PPC_RB(rb) | ___PPC_RS(rs) | \
156 ___PPC_RIC(ric) | ___PPC_PRS(prs) | \
159 #define PPC_INVALIDATE_ERAT PPC_SLBIA(7)
164 __asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
167 #define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */
168 #define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */
169 #define TLBIEL_INVAL_SET_PID 0x400 /* invalidate a set for the current PID */
170 #define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */
171 #define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */
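/*
 * The TLBIEL_INVAL_* values are the IS ("invalidation selector") field
 * pre-shifted to its position in RB, as selected by TLBIEL_INVAL_SEL_MASK.
 */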
173 #define TLBIE_ACTUAL_PAGE_MASK 0xe0
174 #define TLBIE_ACTUAL_PAGE_4K 0x00
175 #define TLBIE_ACTUAL_PAGE_64K 0xa0
176 #define TLBIE_ACTUAL_PAGE_2M 0x20
177 #define TLBIE_ACTUAL_PAGE_1G 0x40
179 #define TLBIE_PRS_PARTITION_SCOPE 0x0
180 #define TLBIE_PRS_PROCESS_SCOPE 0x1
182 #define TLBIE_RIC_INVALIDATE_TLB 0x0 /* Invalidate just TLB */
183 #define TLBIE_RIC_INVALIDATE_PWC 0x1 /* Invalidate just PWC */
184 #define TLBIE_RIC_INVALIDATE_ALL 0x2 /* Invalidate TLB, PWC,
185 * cached {proc, part}tab entries
187 #define TLBIE_RIC_INVALIDATE_SEQ 0x3 /* HPT - only:
188 * Invalidate a range of translations
191 static __always_inline void
192 radix_tlbie(uint8_t ric, uint8_t prs, uint16_t is, uint32_t pid, uint32_t lpid,
193 vm_offset_t va, uint16_t ap)
197 MPASS((va & PAGE_MASK) == 0);
199 rs = ((uint64_t)pid << 32) | lpid;
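/* RS: PID in the upper 32 bits, LPID in the lower 32 bits. */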
201 __asm __volatile(PPC_TLBIE_5(%0, %1, %2, %3, 1) : :
202 "r" (rb), "r" (rs), "i" (ric), "i" (prs) : "memory");
206 radix_tlbie_fixup(uint32_t pid, vm_offset_t va, int ap)
209 __asm __volatile("ptesync" ::: "memory");
210 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
211 TLBIEL_INVAL_PAGE, 0, 0, va, ap);
212 __asm __volatile("ptesync" ::: "memory");
213 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
214 TLBIEL_INVAL_PAGE, pid, 0, va, ap);
218 radix_tlbie_invlpg_user_4k(uint32_t pid, vm_offset_t va)
221 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
222 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_4K);
223 radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_4K);
227 radix_tlbie_invlpg_user_2m(uint32_t pid, vm_offset_t va)
230 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
231 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_2M);
232 radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_2M);
236 radix_tlbie_invlpwc_user(uint32_t pid)
239 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
240 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
244 radix_tlbie_flush_user(uint32_t pid)
247 radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
248 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
252 radix_tlbie_invlpg_kernel_4k(vm_offset_t va)
255 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
256 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_4K);
257 radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_4K);
261 radix_tlbie_invlpg_kernel_2m(vm_offset_t va)
264 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
265 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_2M);
266 radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_2M);
269 /* 1GB pages aren't currently supported. */
270 static __inline __unused void
271 radix_tlbie_invlpg_kernel_1g(vm_offset_t va)
274 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
275 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_1G);
276 radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_1G);
280 radix_tlbie_invlpwc_kernel(void)
283 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
284 TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
288 radix_tlbie_flush_kernel(void)
291 radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
292 TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
295 static __inline vm_pindex_t
296 pmap_l3e_pindex(vm_offset_t va)
298 return ((va & PG_FRAME) >> L3_PAGE_SIZE_SHIFT);
301 static __inline vm_pindex_t
302 pmap_pml3e_index(vm_offset_t va)
305 return ((va >> L3_PAGE_SIZE_SHIFT) & RPTE_MASK);
308 static __inline vm_pindex_t
309 pmap_pml2e_index(vm_offset_t va)
311 return ((va >> L2_PAGE_SIZE_SHIFT) & RPTE_MASK);
314 static __inline vm_pindex_t
315 pmap_pml1e_index(vm_offset_t va)
317 return ((va & PG_FRAME) >> L1_PAGE_SIZE_SHIFT);
320 /* Return various clipped indexes for a given VA */
321 static __inline vm_pindex_t
322 pmap_pte_index(vm_offset_t va)
325 return ((va >> PAGE_SHIFT) & RPTE_MASK);
328 /* Return a pointer to the PT slot that corresponds to a VA */
329 static __inline pt_entry_t *
330 pmap_l3e_to_pte(pt_entry_t *l3e, vm_offset_t va)
335 ptepa = (be64toh(*l3e) & NLB_MASK);
336 pte = (pt_entry_t *)PHYS_TO_DMAP(ptepa);
337 return (&pte[pmap_pte_index(va)]);
340 /* Return a pointer to the PD slot that corresponds to a VA */
341 static __inline pt_entry_t *
342 pmap_l2e_to_l3e(pt_entry_t *l2e, vm_offset_t va)
347 l3pa = (be64toh(*l2e) & NLB_MASK);
348 l3e = (pml3_entry_t *)PHYS_TO_DMAP(l3pa);
349 return (&l3e[pmap_pml3e_index(va)]);
352 /* Return a pointer to the L2 (page directory pointer) slot that corresponds to a VA */
353 static __inline pt_entry_t *
354 pmap_l1e_to_l2e(pt_entry_t *l1e, vm_offset_t va)
359 l2pa = (be64toh(*l1e) & NLB_MASK);
361 l2e = (pml2_entry_t *)PHYS_TO_DMAP(l2pa);
362 return (&l2e[pmap_pml2e_index(va)]);
365 static __inline pml1_entry_t *
366 pmap_pml1e(pmap_t pmap, vm_offset_t va)
369 return (&pmap->pm_pml1[pmap_pml1e_index(va)]);
373 pmap_pml2e(pmap_t pmap, vm_offset_t va)
377 l1e = pmap_pml1e(pmap, va);
378 if (l1e == NULL || (be64toh(*l1e) & RPTE_VALID) == 0)
380 return (pmap_l1e_to_l2e(l1e, va));
383 static __inline pt_entry_t *
384 pmap_pml3e(pmap_t pmap, vm_offset_t va)
388 l2e = pmap_pml2e(pmap, va);
389 if (l2e == NULL || (be64toh(*l2e) & RPTE_VALID) == 0)
391 return (pmap_l2e_to_l3e(l2e, va));
394 static __inline pt_entry_t *
395 pmap_pte(pmap_t pmap, vm_offset_t va)
399 l3e = pmap_pml3e(pmap, va);
400 if (l3e == NULL || (be64toh(*l3e) & RPTE_VALID) == 0)
402 return (pmap_l3e_to_pte(l3e, va));
406 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
407 "Number of kernel page table pages allocated on bootup");
409 vm_paddr_t dmaplimit;
411 SYSCTL_DECL(_vm_pmap);
414 #define VERBOSE_PMAP 0
415 #define VERBOSE_PROTECT 0
416 static int pmap_logging;
417 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_logging, CTLFLAG_RWTUN,
418 &pmap_logging, 0, "verbose debug logging");
421 static u_int64_t KPTphys; /* phys addr of kernel level 1 */
423 //static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */
425 static vm_offset_t qframe = 0;
426 static struct mtx qframe_mtx;
428 void mmu_radix_activate(struct thread *);
429 void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int);
430 void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
432 void mmu_radix_clear_modify(vm_page_t);
433 void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
434 int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *);
435 int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
436 void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
438 void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
439 vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va);
440 vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
441 void mmu_radix_kenter(vm_offset_t, vm_paddr_t);
442 vm_paddr_t mmu_radix_kextract(vm_offset_t);
443 void mmu_radix_kremove(vm_offset_t);
444 boolean_t mmu_radix_is_modified(vm_page_t);
445 boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
446 boolean_t mmu_radix_is_referenced(vm_page_t);
447 void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t,
448 vm_pindex_t, vm_size_t);
449 boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t);
450 void mmu_radix_page_init(vm_page_t);
451 boolean_t mmu_radix_page_is_mapped(vm_page_t m);
452 void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t);
453 int mmu_radix_page_wired_mappings(vm_page_t);
454 int mmu_radix_pinit(pmap_t);
455 void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
456 bool mmu_radix_ps_enabled(pmap_t);
457 void mmu_radix_qenter(vm_offset_t, vm_page_t *, int);
458 void mmu_radix_qremove(vm_offset_t, int);
459 vm_offset_t mmu_radix_quick_enter_page(vm_page_t);
460 void mmu_radix_quick_remove_page(vm_offset_t);
461 boolean_t mmu_radix_ts_referenced(vm_page_t);
462 void mmu_radix_release(pmap_t);
463 void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t);
464 void mmu_radix_remove_all(vm_page_t);
465 void mmu_radix_remove_pages(pmap_t);
466 void mmu_radix_remove_write(vm_page_t);
467 void mmu_radix_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz);
468 void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t);
469 void mmu_radix_zero_page(vm_page_t);
470 void mmu_radix_zero_page_area(vm_page_t, int, int);
471 int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
472 void mmu_radix_page_array_startup(long pages);
474 #include "mmu_oea64.h"
477 * Kernel MMU interface
480 static void mmu_radix_bootstrap(vm_offset_t, vm_offset_t);
482 static void mmu_radix_copy_page(vm_page_t, vm_page_t);
483 static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
484 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
485 static void mmu_radix_growkernel(vm_offset_t);
486 static void mmu_radix_init(void);
487 static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
488 static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
489 static void mmu_radix_pinit0(pmap_t);
491 static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t);
492 static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
493 static void mmu_radix_unmapdev(vm_offset_t, vm_size_t);
494 static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
495 static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t);
496 static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
497 static void mmu_radix_scan_init(void);
498 static void mmu_radix_cpu_bootstrap(int ap);
499 static void mmu_radix_tlbie_all(void);
501 static struct pmap_funcs mmu_radix_methods = {
502 .bootstrap = mmu_radix_bootstrap,
503 .copy_page = mmu_radix_copy_page,
504 .copy_pages = mmu_radix_copy_pages,
505 .cpu_bootstrap = mmu_radix_cpu_bootstrap,
506 .growkernel = mmu_radix_growkernel,
507 .init = mmu_radix_init,
508 .map = mmu_radix_map,
509 .mincore = mmu_radix_mincore,
510 .pinit = mmu_radix_pinit,
511 .pinit0 = mmu_radix_pinit0,
513 .mapdev = mmu_radix_mapdev,
514 .mapdev_attr = mmu_radix_mapdev_attr,
515 .unmapdev = mmu_radix_unmapdev,
516 .kenter_attr = mmu_radix_kenter_attr,
517 .dev_direct_mapped = mmu_radix_dev_direct_mapped,
518 .dumpsys_pa_init = mmu_radix_scan_init,
519 .dumpsys_map_chunk = mmu_radix_dumpsys_map,
520 .page_is_mapped = mmu_radix_page_is_mapped,
521 .ps_enabled = mmu_radix_ps_enabled,
522 .object_init_pt = mmu_radix_object_init_pt,
523 .protect = mmu_radix_protect,
524 /* pmap dispatcher interface */
525 .clear_modify = mmu_radix_clear_modify,
526 .copy = mmu_radix_copy,
527 .enter = mmu_radix_enter,
528 .enter_object = mmu_radix_enter_object,
529 .enter_quick = mmu_radix_enter_quick,
530 .extract = mmu_radix_extract,
531 .extract_and_hold = mmu_radix_extract_and_hold,
532 .is_modified = mmu_radix_is_modified,
533 .is_prefaultable = mmu_radix_is_prefaultable,
534 .is_referenced = mmu_radix_is_referenced,
535 .ts_referenced = mmu_radix_ts_referenced,
536 .page_exists_quick = mmu_radix_page_exists_quick,
537 .page_init = mmu_radix_page_init,
538 .page_wired_mappings = mmu_radix_page_wired_mappings,
539 .qenter = mmu_radix_qenter,
540 .qremove = mmu_radix_qremove,
541 .release = mmu_radix_release,
542 .remove = mmu_radix_remove,
543 .remove_all = mmu_radix_remove_all,
544 .remove_write = mmu_radix_remove_write,
545 .sync_icache = mmu_radix_sync_icache,
546 .unwire = mmu_radix_unwire,
547 .zero_page = mmu_radix_zero_page,
548 .zero_page_area = mmu_radix_zero_page_area,
549 .activate = mmu_radix_activate,
550 .quick_enter_page = mmu_radix_quick_enter_page,
551 .quick_remove_page = mmu_radix_quick_remove_page,
552 .page_set_memattr = mmu_radix_page_set_memattr,
553 .page_array_startup = mmu_radix_page_array_startup,
555 /* Internal interfaces */
556 .kenter = mmu_radix_kenter,
557 .kextract = mmu_radix_kextract,
558 .kremove = mmu_radix_kremove,
559 .change_attr = mmu_radix_change_attr,
560 .decode_kernel_ptr = mmu_radix_decode_kernel_ptr,
562 .tlbie_all = mmu_radix_tlbie_all,
565 MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
567 static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
568 struct rwlock **lockp);
569 static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va);
570 static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *);
571 static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
572 struct spglist *free, struct rwlock **lockp);
573 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
574 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
575 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
576 static bool pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *pde,
577 struct spglist *free);
578 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
579 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp);
581 static bool pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e,
582 u_int flags, struct rwlock **lockp);
583 #if VM_NRESERVLEVEL > 0
584 static void pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
585 struct rwlock **lockp);
587 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
588 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
589 static vm_page_t mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
590 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate);
592 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
593 vm_prot_t prot, struct rwlock **lockp);
594 static int pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde,
595 u_int flags, vm_page_t m, struct rwlock **lockp);
597 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
598 static void free_pv_chunk(struct pv_chunk *pc);
599 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp);
600 static vm_page_t pmap_allocl3e(pmap_t pmap, vm_offset_t va,
601 struct rwlock **lockp);
602 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
603 struct rwlock **lockp);
604 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
605 struct spglist *free);
606 static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free);
608 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start);
609 static void pmap_invalidate_all(pmap_t pmap);
610 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush);
613 * Internal flags for pmap_enter()'s helper functions.
615 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
616 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
618 #define UNIMPLEMENTED() panic("%s not implemented", __func__)
619 #define UNTESTED() panic("%s not yet tested", __func__)
621 /* Number of supported PID bits */
622 static unsigned int isa3_pid_bits;
624 /* PID to start allocating from */
625 static unsigned int isa3_base_pid;
627 #define PROCTAB_SIZE_SHIFT (isa3_pid_bits + 4)
628 #define PROCTAB_ENTRIES (1ul << isa3_pid_bits)
631 * Map of physical memory regions.
633 static struct mem_region *regions, *pregions;
634 static struct numa_mem_region *numa_pregions;
635 static u_int phys_avail_count;
636 static int regions_sz, pregions_sz, numa_pregions_sz;
637 static struct pate *isa3_parttab;
638 static struct prte *isa3_proctab;
639 static vmem_t *asid_arena;
641 extern void bs_remap_earlyboot(void);
643 #define RADIX_PGD_SIZE_SHIFT 16
644 #define RADIX_PGD_SIZE (1UL << RADIX_PGD_SIZE_SHIFT)
646 #define RADIX_PGD_INDEX_SHIFT (RADIX_PGD_SIZE_SHIFT-3)
647 #define NL2EPG (PAGE_SIZE/sizeof(pml2_entry_t))
648 #define NL3EPG (PAGE_SIZE/sizeof(pml3_entry_t))
650 #define NUPML1E (RADIX_PGD_SIZE/sizeof(uint64_t)) /* number of userland PML1 pages */
651 #define NUPDPE (NUPML1E * NL2EPG)/* number of userland PDP pages */
652 #define NUPDE (NUPDPE * NL3EPG) /* number of userland PD entries */
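/*
 * Radix tree geometry used here: the 64KB root (L1/PGD) holds 8192 8-byte
 * entries (13 bits of VA), and each lower level (L2, L3, PTE) is a 4KB page
 * of 512 entries (9 bits each).  Together with the 12-bit page offset this
 * covers 13 + 9 + 9 + 9 + 12 = 52 bits of virtual address.
 */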
654 /* POWER9 only permits a 64k partition table size. */
655 #define PARTTAB_SIZE_SHIFT 16
656 #define PARTTAB_SIZE (1UL << PARTTAB_SIZE_SHIFT)
658 #define PARTTAB_HR (1UL << 63) /* host uses radix */
659 #define PARTTAB_GR (1UL << 63) /* guest uses radix must match host */
661 /* TLB flush actions. Used as argument to tlbiel_flush() */
663 TLB_INVAL_SCOPE_LPID = 2, /* invalidate TLBs for current LPID */
664 TLB_INVAL_SCOPE_GLOBAL = 3, /* invalidate all TLBs */
667 #define NPV_LIST_LOCKS MAXCPU
668 static int pmap_initialized;
669 static vm_paddr_t proctab0pa;
670 static vm_paddr_t parttab_phys;
671 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
674 * Data for the pv entry allocation mechanism.
675 * Updates to pv_invl_gen are protected by the pv_list_locks[]
676 * elements, but reads are not.
678 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
679 static struct mtx __exclusive_cache_line pv_chunks_mutex;
680 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
681 static struct md_page *pv_table;
682 static struct md_page pv_dummy;
685 #define PV_STAT(x) do { x ; } while (0)
687 #define PV_STAT(x) do { } while (0)
690 #define pa_radix_index(pa) ((pa) >> L3_PAGE_SIZE_SHIFT)
691 #define pa_to_pvh(pa) (&pv_table[pa_radix_index(pa)])
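/*
 * pv_table is indexed by 2MB (L3 superpage) frame, so each md_page here
 * tracks the pv entries for one potential superpage mapping; pv entries for
 * individual 4KB mappings hang off the vm_page itself.
 */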
693 #define PHYS_TO_PV_LIST_LOCK(pa) \
694 (&pv_list_locks[pa_radix_index(pa) % NPV_LIST_LOCKS])
696 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
697 struct rwlock **_lockp = (lockp); \
698 struct rwlock *_new_lock; \
700 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
701 if (_new_lock != *_lockp) { \
702 if (*_lockp != NULL) \
703 rw_wunlock(*_lockp); \
704 *_lockp = _new_lock; \
709 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
710 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
712 #define RELEASE_PV_LIST_LOCK(lockp) do { \
713 struct rwlock **_lockp = (lockp); \
715 if (*_lockp != NULL) { \
716 rw_wunlock(*_lockp); \
721 #define VM_PAGE_TO_PV_LIST_LOCK(m) \
722 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
725 * We support 52 bits, hence:
726 * bits 52 - 31 = 21, 0b10101
727 * RTS encoding details
728 * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
729 * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
731 #define RTS_SIZE ((0x2UL << 61) | (0x5UL << 5))
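/*
 * Worked out: 52 - 31 = 21 = 0b10101.  The low three bits (0b101 == 0x5)
 * are shifted to bit 5 and the high two bits (0b10 == 0x2) to bit 61,
 * which is exactly the value RTS_SIZE encodes above.
 */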
733 static int powernv_enabled = 1;
735 static __always_inline void
736 tlbiel_radix_set_isa300(uint32_t set, uint32_t is,
737 uint32_t pid, uint32_t ric, uint32_t prs)
742 rb = PPC_BITLSHIFT_VAL(set, 51) | PPC_BITLSHIFT_VAL(is, 53);
743 rs = PPC_BITLSHIFT_VAL((uint64_t)pid, 31);
745 __asm __volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
746 : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
751 tlbiel_flush_isa3(uint32_t num_sets, uint32_t is)
755 __asm __volatile("ptesync": : :"memory");
758 * Flush the first set of the TLB, and the entire Page Walk Cache
759 * and partition table entries. Then flush the remaining sets of the
762 if (is == TLB_INVAL_SCOPE_GLOBAL) {
763 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
764 for (set = 1; set < num_sets; set++)
765 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
768 /* Do the same for process scoped entries. */
769 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
770 for (set = 1; set < num_sets; set++)
771 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
773 __asm __volatile("ptesync": : :"memory");
777 mmu_radix_tlbiel_flush(int scope)
779 MPASS(scope == TLB_INVAL_SCOPE_LPID ||
780 scope == TLB_INVAL_SCOPE_GLOBAL);
782 tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, scope);
783 __asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
787 mmu_radix_tlbie_all(void)
790 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
792 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
796 mmu_radix_init_amor(void)
799 * In HV mode, we init AMOR (Authority Mask Override Register) so that
800 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
801 * Register), enable key 0 and set it to 1.
803 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
805 mtspr(SPR_AMOR, (3ul << 62));
809 mmu_radix_init_iamr(void)
812 * Radix always uses key0 of the IAMR to determine if an access is
813 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
816 mtspr(SPR_IAMR, (1ul << 62));
820 mmu_radix_pid_set(pmap_t pmap)
823 mtspr(SPR_PID, pmap->pm_pid);
827 /* Quick sort callout for comparing physical addresses. */
829 pa_cmp(const void *a, const void *b)
831 const vm_paddr_t *pa = a, *pb = b;
841 #define pte_load_store(ptep, pte) atomic_swap_long(ptep, pte)
842 #define pte_load_clear(ptep) atomic_swap_long(ptep, 0)
843 #define pte_store(ptep, pte) do { \
844 MPASS((pte) & (RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_X)); \
845 *(u_long *)(ptep) = htobe64((u_long)((pte) | PG_V | RPTE_LEAF)); \
848 * NB: should only be used for adding directories - not for direct mappings
850 #define pde_store(ptep, pa) do { \
851 *(u_long *)(ptep) = htobe64((u_long)(pa|RPTE_VALID|RPTE_SHIFT)); \
854 #define pte_clear(ptep) do { \
855 *(u_long *)(ptep) = (u_long)(0); \
858 #define PMAP_PDE_SUPERPAGE (1 << 8) /* supports 2MB superpages */
861 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
862 * (PTE) page mappings have identical settings for the following fields:
864 #define PG_PTE_PROMOTE (PG_X | PG_MANAGED | PG_W | PG_PTE_CACHE | \
865 PG_M | PG_A | RPTE_EAA_MASK | PG_V)
868 pmap_resident_count_inc(pmap_t pmap, int count)
871 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
872 pmap->pm_stats.resident_count += count;
876 pmap_resident_count_dec(pmap_t pmap, int count)
879 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
880 KASSERT(pmap->pm_stats.resident_count >= count,
881 ("pmap %p resident count underflow %ld %d", pmap,
882 pmap->pm_stats.resident_count, count));
883 pmap->pm_stats.resident_count -= count;
887 pagezero(vm_offset_t va)
891 bzero((void *)va, PAGE_SIZE);
899 ret = moea64_bootstrap_alloc(n * PAGE_SIZE, PAGE_SIZE);
900 for (int i = 0; i < n; i++)
901 pagezero(PHYS_TO_DMAP(ret + i * PAGE_SIZE));
906 kvtopte(vm_offset_t va)
910 l3e = pmap_pml3e(kernel_pmap, va);
911 if (l3e == NULL || (be64toh(*l3e) & RPTE_VALID) == 0)
913 return (pmap_l3e_to_pte(l3e, va));
917 mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa)
923 *pte = htobe64(pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | \
924 RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A);
928 mmu_radix_ps_enabled(pmap_t pmap)
930 return (superpages_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
934 pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e)
940 l3e = pmap_pml3e(pmap, va);
941 if (l3e == NULL || (be64toh(*l3e) & PG_V) == 0)
944 if (be64toh(*l3e) & RPTE_LEAF) {
950 pte = pmap_l3e_to_pte(l3e, va);
951 if (pte == NULL || (be64toh(*pte) & PG_V) == 0)
957 pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags)
960 pt_entry_t startpte, origpte, newpte;
966 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL)
967 return (KERN_INVALID_ADDRESS);
968 origpte = newpte = be64toh(*pte);
971 if (((flags & VM_PROT_WRITE) && (startpte & PG_M)) ||
972 ((flags & VM_PROT_READ) && (startpte & PG_A))) {
973 pmap_invalidate_all(pmap);
975 if (VERBOSE_PMAP || pmap_logging)
976 printf("%s(%p, %#lx, %#x) (%#lx) -- invalidate all\n",
977 __func__, pmap, va, flags, origpte);
979 return (KERN_FAILURE);
983 if (VERBOSE_PMAP || pmap_logging)
984 printf("%s(%p, %#lx, %#x) (%#lx)\n", __func__, pmap, va,
988 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL ||
989 be64toh(*pte) != origpte) {
991 return (KERN_FAILURE);
993 m = PHYS_TO_VM_PAGE(newpte & PG_FRAME);
997 if ((newpte & (RPTE_EAA_R|RPTE_EAA_X)) == 0)
1000 vm_page_aflag_set(m, PGA_REFERENCED);
1003 if ((newpte & RPTE_EAA_W) == 0)
1010 case VM_PROT_EXECUTE:
1011 if ((newpte & RPTE_EAA_X) == 0)
1014 vm_page_aflag_set(m, PGA_REFERENCED);
1018 if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte)))
1022 if (startpte == newpte)
1023 return (KERN_FAILURE);
1027 return (KERN_PROTECTION_FAILURE);
1031 * Returns TRUE if the given page is mapped individually or as part of
1032 * a 2mpage. Otherwise, returns FALSE.
1035 mmu_radix_page_is_mapped(vm_page_t m)
1037 struct rwlock *lock;
1040 if ((m->oflags & VPO_UNMANAGED) != 0)
1042 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
1044 rv = !TAILQ_EMPTY(&m->md.pv_list) ||
1045 ((m->flags & PG_FICTITIOUS) == 0 &&
1046 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
1052 * Determine the appropriate bits to set in a PTE or PDE for a specified
1056 pmap_cache_bits(vm_memattr_t ma)
1058 if (ma != VM_MEMATTR_DEFAULT) {
1060 case VM_MEMATTR_UNCACHEABLE:
1061 return (RPTE_ATTR_GUARDEDIO);
1062 case VM_MEMATTR_CACHEABLE:
1063 return (RPTE_ATTR_MEM);
1064 case VM_MEMATTR_WRITE_BACK:
1065 case VM_MEMATTR_PREFETCHABLE:
1066 case VM_MEMATTR_WRITE_COMBINING:
1067 return (RPTE_ATTR_UNGUARDEDIO);
1074 pmap_invalidate_page(pmap_t pmap, vm_offset_t start)
1077 if (pmap == kernel_pmap)
1078 radix_tlbie_invlpg_kernel_4k(start);
1080 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1085 pmap_invalidate_page_2m(pmap_t pmap, vm_offset_t start)
1088 if (pmap == kernel_pmap)
1089 radix_tlbie_invlpg_kernel_2m(start);
1091 radix_tlbie_invlpg_user_2m(pmap->pm_pid, start);
1096 pmap_invalidate_pwc(pmap_t pmap)
1099 if (pmap == kernel_pmap)
1100 radix_tlbie_invlpwc_kernel();
1102 radix_tlbie_invlpwc_user(pmap->pm_pid);
1107 pmap_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
1109 if (((end - start) >> PAGE_SHIFT) > 8) {
1110 pmap_invalidate_all(pmap);
1114 if (pmap == kernel_pmap) {
1115 while (start < end) {
1116 radix_tlbie_invlpg_kernel_4k(start);
1120 while (start < end) {
1121 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1129 pmap_invalidate_all(pmap_t pmap)
1132 if (pmap == kernel_pmap)
1133 radix_tlbie_flush_kernel();
1135 radix_tlbie_flush_user(pmap->pm_pid);
1140 pmap_invalidate_l3e_page(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e)
1144 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
1145 * by a promotion that did not invalidate the 512 4KB page mappings
1146 * that might exist in the TLB. Consequently, at this point, the TLB
1147 * may hold both 4KB and 2MB page mappings for the address range [va,
1148 * va + L3_PAGE_SIZE). Therefore, the entire range must be invalidated here.
1149 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
1150 * 4KB page mappings for the address range [va, va + L3_PAGE_SIZE), and so a
1151 * single INVLPG suffices to invalidate the 2MB page mapping from the
1155 if ((l3e & PG_PROMOTED) != 0)
1156 pmap_invalidate_range(pmap, va, va + L3_PAGE_SIZE - 1);
1158 pmap_invalidate_page_2m(pmap, va);
1160 pmap_invalidate_pwc(pmap);
1163 static __inline struct pv_chunk *
1164 pv_to_chunk(pv_entry_t pv)
1167 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1170 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1172 #define PC_FREE0 0xfffffffffffffffful
1173 #define PC_FREE1 0x3ffffffffffffffful
1175 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1 };
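/*
 * PC_FREE0/PC_FREE1 are the pc_map[] values of a chunk with every pv entry
 * free: the first word is fully populated while the second covers only its
 * low 62 bits, so one chunk supplies 64 + 62 pv entries.
 */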
1178 * Ensure that the number of spare PV entries in the specified pmap meets or
1179 * exceeds the given count, "needed".
1181 * The given PV list lock may be released.
1184 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1186 struct pch new_tail;
1187 struct pv_chunk *pc;
1192 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1193 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
1196 * Newly allocated PV chunks must be stored in a private list until
1197 * the required number of PV chunks have been allocated. Otherwise,
1198 * reclaim_pv_chunk() could recycle one of these chunks. In
1199 * contrast, these chunks must be added to the pmap upon allocation.
1201 TAILQ_INIT(&new_tail);
1204 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1205 // if ((cpu_feature2 & CPUID2_POPCNT) == 0)
1206 bit_count((bitstr_t *)pc->pc_map, 0,
1207 sizeof(pc->pc_map) * NBBY, &free);
1209 free = popcnt_pc_map_pq(pc->pc_map);
1214 if (avail >= needed)
1217 for (reclaimed = false; avail < needed; avail += _NPCPV) {
1218 m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
1220 m = reclaim_pv_chunk(pmap, lockp);
1225 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1226 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1227 dump_add_page(m->phys_addr);
1228 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1230 pc->pc_map[0] = PC_FREE0;
1231 pc->pc_map[1] = PC_FREE1;
1232 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1233 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
1234 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
1237 * The reclaim might have freed a chunk from the current pmap.
1238 * If that chunk contained available entries, we need to
1239 * re-count the number of available entries.
1244 if (!TAILQ_EMPTY(&new_tail)) {
1245 mtx_lock(&pv_chunks_mutex);
1246 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
1247 mtx_unlock(&pv_chunks_mutex);
1252 * First find and then remove the pv entry for the specified pmap and virtual
1253 * address from the specified pv list. Returns the pv entry if found and NULL
1254 * otherwise. This operation can be performed on pv lists for either 4KB or
1255 * 2MB page mappings.
1257 static __inline pv_entry_t
1258 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1262 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
1264 if (PV_PMAP(pv) == NULL) {
1265 printf("corrupted pv_chunk/pv %p\n", pv);
1266 printf("pv_chunk: %64D\n", pv_to_chunk(pv), ":");
1268 MPASS(PV_PMAP(pv) != NULL);
1269 MPASS(pv->pv_va != 0);
1271 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1272 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
1281 * After demotion from a 2MB page mapping to 512 4KB page mappings,
1282 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
1283 * entries for each of the 4KB page mappings.
1286 pmap_pv_demote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1287 struct rwlock **lockp)
1289 struct md_page *pvh;
1290 struct pv_chunk *pc;
1292 vm_offset_t va_last;
1296 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1297 KASSERT((pa & L3_PAGE_MASK) == 0,
1298 ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
1299 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1302 * Transfer the 2mpage's pv entry for this mapping to the first
1303 * page's pv list. Once this transfer begins, the pv list lock
1304 * must not be released until the last pv entry is reinstantiated.
1306 pvh = pa_to_pvh(pa);
1307 va = trunc_2mpage(va);
1308 pv = pmap_pvh_remove(pvh, pmap, va);
1309 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
1310 m = PHYS_TO_VM_PAGE(pa);
1311 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1314 /* Instantiate the remaining NPTEPG - 1 pv entries. */
1315 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
1316 va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1318 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1319 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0
1320 , ("pmap_pv_demote_pde: missing spare"));
1321 for (field = 0; field < _NPCM; field++) {
1322 while (pc->pc_map[field]) {
1323 bit = cnttzd(pc->pc_map[field]);
1324 pc->pc_map[field] &= ~(1ul << bit);
1325 pv = &pc->pc_pventry[field * 64 + bit];
1329 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1330 ("pmap_pv_demote_pde: page %p is not managed", m));
1331 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1338 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1339 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1342 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1343 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1344 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1346 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
1347 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
1351 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap)
1356 pmap_invalidate_all(pmap);
1357 if (pmap != locked_pmap)
1362 * We are in a serious low memory condition. Resort to
1363 * drastic measures to free some pages so we can allocate
1364 * another pv entry chunk.
1366 * Returns NULL if PV entries were reclaimed from the specified pmap.
1368 * We do not, however, unmap 2mpages because subsequent accesses will
1369 * allocate per-page pv entries until repromotion occurs, thereby
1370 * exacerbating the shortage of free pv entries.
1372 static int active_reclaims = 0;
1374 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1376 struct pv_chunk *pc, *pc_marker, *pc_marker_end;
1377 struct pv_chunk_header pc_marker_b, pc_marker_end_b;
1378 struct md_page *pvh;
1380 pmap_t next_pmap, pmap;
1381 pt_entry_t *pte, tpte;
1385 struct spglist free;
1387 int bit, field, freed;
1389 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1390 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
1394 bzero(&pc_marker_b, sizeof(pc_marker_b));
1395 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
1396 pc_marker = (struct pv_chunk *)&pc_marker_b;
1397 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
1399 mtx_lock(&pv_chunks_mutex);
1401 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
1402 TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
1403 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
1404 SLIST_EMPTY(&free)) {
1405 next_pmap = pc->pc_pmap;
1406 if (next_pmap == NULL) {
1408 * The next chunk is a marker. However, it is
1409 * not our marker, so active_reclaims must be
1410 * > 1. Consequently, the next_chunk code
1411 * will not rotate the pv_chunks list.
1415 mtx_unlock(&pv_chunks_mutex);
1418 * A pv_chunk can only be removed from the pc_lru list
1419 * when both pv_chunks_mutex is owned and the
1420 * corresponding pmap is locked.
1422 if (pmap != next_pmap) {
1423 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1425 /* Avoid deadlock and lock recursion. */
1426 if (pmap > locked_pmap) {
1427 RELEASE_PV_LIST_LOCK(lockp);
1429 mtx_lock(&pv_chunks_mutex);
1431 } else if (pmap != locked_pmap) {
1432 if (PMAP_TRYLOCK(pmap)) {
1433 mtx_lock(&pv_chunks_mutex);
1436 pmap = NULL; /* pmap is not locked */
1437 mtx_lock(&pv_chunks_mutex);
1438 pc = TAILQ_NEXT(pc_marker, pc_lru);
1440 pc->pc_pmap != next_pmap)
1448 * Destroy every non-wired, 4 KB page mapping in the chunk.
1451 for (field = 0; field < _NPCM; field++) {
1452 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1453 inuse != 0; inuse &= ~(1UL << bit)) {
1454 bit = cnttzd(inuse);
1455 pv = &pc->pc_pventry[field * 64 + bit];
1457 l3e = pmap_pml3e(pmap, va);
1458 if ((be64toh(*l3e) & RPTE_LEAF) != 0)
1460 pte = pmap_l3e_to_pte(l3e, va);
1461 if ((be64toh(*pte) & PG_W) != 0)
1463 tpte = be64toh(pte_load_clear(pte));
1464 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
1465 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
1467 if ((tpte & PG_A) != 0)
1468 vm_page_aflag_set(m, PGA_REFERENCED);
1469 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1470 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
1473 if (TAILQ_EMPTY(&m->md.pv_list) &&
1474 (m->flags & PG_FICTITIOUS) == 0) {
1475 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
1476 if (TAILQ_EMPTY(&pvh->pv_list)) {
1477 vm_page_aflag_clear(m,
1481 pc->pc_map[field] |= 1UL << bit;
1482 pmap_unuse_pt(pmap, va, be64toh(*l3e), &free);
1487 mtx_lock(&pv_chunks_mutex);
1490 /* Every freed mapping is for a 4 KB page. */
1491 pmap_resident_count_dec(pmap, freed);
1492 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
1493 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
1494 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
1495 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1496 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1) {
1497 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1498 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1499 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1500 /* Entire chunk is free; return it. */
1501 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1502 dump_drop_page(m_pc->phys_addr);
1503 mtx_lock(&pv_chunks_mutex);
1504 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1507 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1508 mtx_lock(&pv_chunks_mutex);
1509 /* One freed pv entry in locked_pmap is sufficient. */
1510 if (pmap == locked_pmap)
1513 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1514 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
1515 if (active_reclaims == 1 && pmap != NULL) {
1517 * Rotate the pv chunks list so that we do not
1518 * scan the same pv chunks that could not be
1519 * freed (because they contained a wired
1520 * and/or superpage mapping) on every
1521 * invocation of reclaim_pv_chunk().
1523 while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
1524 MPASS(pc->pc_pmap != NULL);
1525 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1526 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1530 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1531 TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
1533 mtx_unlock(&pv_chunks_mutex);
1534 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1535 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
1536 m_pc = SLIST_FIRST(&free);
1537 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
1538 /* Recycle a freed page table page. */
1539 m_pc->ref_count = 1;
1541 vm_page_free_pages_toq(&free, true);
1546 * free the pv_entry back to the free list
1549 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1551 struct pv_chunk *pc;
1552 int idx, field, bit;
1555 if (pmap != kernel_pmap)
1556 printf("%s(%p, %p)\n", __func__, pmap, pv);
1558 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1559 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1560 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1561 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1562 pc = pv_to_chunk(pv);
1563 idx = pv - &pc->pc_pventry[0];
1566 pc->pc_map[field] |= 1ul << bit;
1567 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1) {
1568 /* 98% of the time, pc is already at the head of the list. */
1569 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1570 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1571 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1575 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1580 free_pv_chunk(struct pv_chunk *pc)
1584 mtx_lock(&pv_chunks_mutex);
1585 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1586 mtx_unlock(&pv_chunks_mutex);
1587 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1588 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1589 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1590 /* entire chunk is free, return it */
1591 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1592 dump_drop_page(m->phys_addr);
1593 vm_page_unwire_noq(m);
1598 * Returns a new PV entry, allocating a new PV chunk from the system when
1599 * needed. If this PV chunk allocation fails and a PV list lock pointer was
1600 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
1603 * The given PV list lock may be released.
1606 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1610 struct pv_chunk *pc;
1613 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1614 PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
1616 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1618 for (field = 0; field < _NPCM; field++) {
1619 if (pc->pc_map[field]) {
1620 bit = cnttzd(pc->pc_map[field]);
1624 if (field < _NPCM) {
1625 pv = &pc->pc_pventry[field * 64 + bit];
1626 pc->pc_map[field] &= ~(1ul << bit);
1627 /* If this was the last item, move it to tail */
1628 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1629 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1630 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1633 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1634 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
1635 MPASS(PV_PMAP(pv) != NULL);
1639 /* No free items, allocate another chunk */
1640 m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
1642 if (lockp == NULL) {
1643 PV_STAT(pc_chunk_tryfail++);
1646 m = reclaim_pv_chunk(pmap, lockp);
1650 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1651 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1652 dump_add_page(m->phys_addr);
1653 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1655 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
1656 pc->pc_map[1] = PC_FREE1;
1657 mtx_lock(&pv_chunks_mutex);
1658 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1659 mtx_unlock(&pv_chunks_mutex);
1660 pv = &pc->pc_pventry[0];
1661 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1662 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1663 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1664 MPASS(PV_PMAP(pv) != NULL);
1668 #if VM_NRESERVLEVEL > 0
1670 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
1671 * replace the many pv entries for the 4KB page mappings by a single pv entry
1672 * for the 2MB page mapping.
1675 pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1676 struct rwlock **lockp)
1678 struct md_page *pvh;
1680 vm_offset_t va_last;
1683 KASSERT((pa & L3_PAGE_MASK) == 0,
1684 ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
1685 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1688 * Transfer the first page's pv entry for this mapping to the 2mpage's
1689 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
1690 * a transfer avoids the possibility that get_pv_entry() calls
1691 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
1692 * mappings that is being promoted.
1694 m = PHYS_TO_VM_PAGE(pa);
1695 va = trunc_2mpage(va);
1696 pv = pmap_pvh_remove(&m->md, pmap, va);
1697 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
1698 pvh = pa_to_pvh(pa);
1699 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
1701 /* Free the remaining NPTEPG - 1 pv entries. */
1702 va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1706 pmap_pvh_free(&m->md, pmap, va);
1707 } while (va < va_last);
1709 #endif /* VM_NRESERVLEVEL > 0 */
1712 * First find and then destroy the pv entry for the specified pmap and virtual
1713 * address. This operation can be performed on pv lists for either 4KB or 2MB
1717 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1721 pv = pmap_pvh_remove(pvh, pmap, va);
1722 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
1723 free_pv_entry(pmap, pv);
1727 * Conditionally create the PV entry for a 4KB page mapping if the required
1728 * memory can be allocated without resorting to reclamation.
1731 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1732 struct rwlock **lockp)
1736 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1737 /* Pass NULL instead of the lock pointer to disable reclamation. */
1738 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1740 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1741 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1748 vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX];
1751 validate_addr(vm_paddr_t addr, vm_size_t size)
1753 vm_paddr_t end = addr + size;
1756 for (int i = 0; i < 2 * phys_avail_count; i += 2) {
1757 if (addr >= phys_avail_debug[i] &&
1758 end <= phys_avail_debug[i + 1]) {
1763 KASSERT(found, ("%#lx-%#lx outside of initial phys_avail array",
1767 static void validate_addr(vm_paddr_t addr, vm_size_t size) {}
1769 #define DMAP_PAGE_BITS (RPTE_VALID | RPTE_LEAF | RPTE_EAA_MASK | PG_M | PG_A)
1776 page = allocpages(1);
1777 pagezero(PHYS_TO_DMAP(page));
1782 mmu_radix_dmap_range(vm_paddr_t start, vm_paddr_t end)
1784 pt_entry_t *pte, pteval;
1788 printf("%s %lx -> %lx\n", __func__, start, end);
1789 while (start < end) {
1790 pteval = start | DMAP_PAGE_BITS;
1791 pte = pmap_pml1e(kernel_pmap, PHYS_TO_DMAP(start));
1792 if ((be64toh(*pte) & RPTE_VALID) == 0) {
1793 page = alloc_pt_page();
1794 pde_store(pte, page);
1796 pte = pmap_l1e_to_l2e(pte, PHYS_TO_DMAP(start));
1797 if ((start & L2_PAGE_MASK) == 0 &&
1798 end - start >= L2_PAGE_SIZE) {
1799 start += L2_PAGE_SIZE;
1801 } else if ((be64toh(*pte) & RPTE_VALID) == 0) {
1802 page = alloc_pt_page();
1803 pde_store(pte, page);
1806 pte = pmap_l2e_to_l3e(pte, PHYS_TO_DMAP(start));
1807 if ((start & L3_PAGE_MASK) == 0 &&
1808 end - start >= L3_PAGE_SIZE) {
1809 start += L3_PAGE_SIZE;
1811 } else if ((be64toh(*pte) & RPTE_VALID) == 0) {
1812 page = alloc_pt_page();
1813 pde_store(pte, page);
1815 pte = pmap_l3e_to_pte(pte, PHYS_TO_DMAP(start));
1818 pte_store(pte, pteval);
1823 mmu_radix_dmap_populate(vm_size_t hwphyssz)
1825 vm_paddr_t start, end;
1827 for (int i = 0; i < pregions_sz; i++) {
1828 start = pregions[i].mr_start;
1829 end = start + pregions[i].mr_size;
1830 if (hwphyssz && start >= hwphyssz)
1832 if (hwphyssz && hwphyssz < end)
1834 mmu_radix_dmap_range(start, end);
1839 mmu_radix_setup_pagetables(vm_size_t hwphyssz)
1841 vm_paddr_t ptpages, pages;
1845 bzero(kernel_pmap, sizeof(struct pmap));
1846 PMAP_LOCK_INIT(kernel_pmap);
1848 ptpages = allocpages(3);
1849 l1phys = moea64_bootstrap_alloc(RADIX_PGD_SIZE, RADIX_PGD_SIZE);
1850 validate_addr(l1phys, RADIX_PGD_SIZE);
1852 printf("l1phys=%lx\n", l1phys);
1853 MPASS((l1phys & (RADIX_PGD_SIZE-1)) == 0);
1854 for (int i = 0; i < RADIX_PGD_SIZE/PAGE_SIZE; i++)
1855 pagezero(PHYS_TO_DMAP(l1phys + i * PAGE_SIZE));
1856 kernel_pmap->pm_pml1 = (pml1_entry_t *)PHYS_TO_DMAP(l1phys);
1858 mmu_radix_dmap_populate(hwphyssz);
1861 * Create page tables for first 128MB of KVA
1864 pte = pmap_pml1e(kernel_pmap, VM_MIN_KERNEL_ADDRESS);
1865 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1867 pte = pmap_l1e_to_l2e(pte, VM_MIN_KERNEL_ADDRESS);
1868 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1870 pte = pmap_l2e_to_l3e(pte, VM_MIN_KERNEL_ADDRESS);
1872 * the kernel page table pages need to be preserved in
1873 * phys_avail and must not overlap with previous allocations
1875 pages = allocpages(nkpt);
1877 printf("phys_avail after dmap populate and nkpt allocation\n");
1878 for (int j = 0; j < 2 * phys_avail_count; j+=2)
1879 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
1880 j, phys_avail[j], j + 1, phys_avail[j + 1]);
1883 for (int i = 0; i < nkpt; i++, pte++, pages += PAGE_SIZE)
1884 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1885 kernel_vm_end = VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE;
1887 printf("kernel_pmap pml1 %p\n", kernel_pmap->pm_pml1);
1889 * Add a physical memory segment (vm_phys_seg) corresponding to the
1890 * preallocated kernel page table pages so that vm_page structures
1891 * representing these pages will be created. The vm_page structures
1892 * are required for promotion of the corresponding kernel virtual
1893 * addresses to superpage mappings.
1895 vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1899 mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end)
1901 vm_paddr_t kpstart, kpend;
1902 vm_size_t physsz, hwphyssz;
1904 int rm_pavail, proctab_size;
1907 kpstart = start & ~DMAP_BASE_ADDRESS;
1908 kpend = end & ~DMAP_BASE_ADDRESS;
1910 /* Get physical memory regions from firmware */
1911 mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
1912 CTR0(KTR_PMAP, "mmu_radix_early_bootstrap: physical memory");
1914 if (2 * VM_PHYSSEG_MAX < regions_sz)
1915 panic("mmu_radix_early_bootstrap: phys_avail too small");
1918 for (int i = 0; i < regions_sz; i++)
1919 printf("regions[%d].mr_start=%lx regions[%d].mr_size=%lx\n",
1920 i, regions[i].mr_start, i, regions[i].mr_size);
1922 * XXX workaround a simulator bug
1924 for (int i = 0; i < regions_sz; i++)
1925 if (regions[i].mr_start & PAGE_MASK) {
1926 regions[i].mr_start += PAGE_MASK;
1927 regions[i].mr_start &= ~PAGE_MASK;
1928 regions[i].mr_size &= ~PAGE_MASK;
1931 for (int i = 0; i < pregions_sz; i++)
1932 printf("pregions[%d].mr_start=%lx pregions[%d].mr_size=%lx\n",
1933 i, pregions[i].mr_start, i, pregions[i].mr_size);
1935 phys_avail_count = 0;
1938 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1939 for (i = 0, j = 0; i < regions_sz; i++) {
1941 printf("regions[%d].mr_start=%016lx regions[%d].mr_size=%016lx\n",
1942 i, regions[i].mr_start, i, regions[i].mr_size);
1944 if (regions[i].mr_size < PAGE_SIZE)
1947 if (hwphyssz != 0 &&
1948 (physsz + regions[i].mr_size) >= hwphyssz) {
1949 if (physsz < hwphyssz) {
1950 phys_avail[j] = regions[i].mr_start;
1951 phys_avail[j + 1] = regions[i].mr_start +
1952 (hwphyssz - physsz);
1955 dump_avail[j] = phys_avail[j];
1956 dump_avail[j + 1] = phys_avail[j + 1];
1960 phys_avail[j] = regions[i].mr_start;
1961 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
1962 dump_avail[j] = phys_avail[j];
1963 dump_avail[j + 1] = phys_avail[j + 1];
1966 physsz += regions[i].mr_size;
1970 /* Check for overlap with the kernel and exception vectors */
1972 for (j = 0; j < 2 * phys_avail_count; j+=2) {
1973 if (phys_avail[j] < EXC_LAST)
1974 phys_avail[j] += EXC_LAST;
1976 if (phys_avail[j] >= kpstart &&
1977 phys_avail[j + 1] <= kpend) {
1978 phys_avail[j] = phys_avail[j + 1] = ~0;
1983 if (kpstart >= phys_avail[j] &&
1984 kpstart < phys_avail[j + 1]) {
1985 if (kpend < phys_avail[j + 1]) {
1986 phys_avail[2 * phys_avail_count] =
1987 (kpend & ~PAGE_MASK) + PAGE_SIZE;
1988 phys_avail[2 * phys_avail_count + 1] =
1993 phys_avail[j + 1] = kpstart & ~PAGE_MASK;
1996 if (kpend >= phys_avail[j] &&
1997 kpend < phys_avail[j + 1]) {
1998 if (kpstart > phys_avail[j]) {
1999 phys_avail[2 * phys_avail_count] = phys_avail[j];
2000 phys_avail[2 * phys_avail_count + 1] =
2001 kpstart & ~PAGE_MASK;
2005 phys_avail[j] = (kpend & ~PAGE_MASK) +
2009 qsort(phys_avail, 2 * phys_avail_count, sizeof(phys_avail[0]), pa_cmp);
2010 for (i = 0; i < 2 * phys_avail_count; i++)
2011 phys_avail_debug[i] = phys_avail[i];
2013 /* Remove physical available regions marked for removal (~0) */
2015 phys_avail_count -= rm_pavail;
2016 for (i = 2 * phys_avail_count;
2017 i < 2*(phys_avail_count + rm_pavail); i+=2)
2018 phys_avail[i] = phys_avail[i + 1] = 0;
2021 printf("phys_avail ranges after filtering:\n");
2022 for (j = 0; j < 2 * phys_avail_count; j+=2)
2023 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
2024 j, phys_avail[j], j + 1, phys_avail[j + 1]);
2026 physmem = btoc(physsz);
2028 /* XXX assume we're running non-virtualized and
2029 * we don't support BHYVE
2031 if (isa3_pid_bits == 0)
2033 if (powernv_enabled) {
2035 moea64_bootstrap_alloc(PARTTAB_SIZE, PARTTAB_SIZE);
2036 validate_addr(parttab_phys, PARTTAB_SIZE);
2037 for (int i = 0; i < PARTTAB_SIZE/PAGE_SIZE; i++)
2038 pagezero(PHYS_TO_DMAP(parttab_phys + i * PAGE_SIZE));
2041 proctab_size = 1UL << PROCTAB_SIZE_SHIFT;
2042 proctab0pa = moea64_bootstrap_alloc(proctab_size, proctab_size);
2043 validate_addr(proctab0pa, proctab_size);
2044 for (int i = 0; i < proctab_size/PAGE_SIZE; i++)
2045 pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE));
2047 mmu_radix_setup_pagetables(hwphyssz);
2051 mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end)
2059 * Set up the Open Firmware pmap and add its mappings if not in real
2063 printf("%s enter\n", __func__);
2066 * Calculate the last available physical address, and reserve the
2067 * vm_page_array (upper bound).
2070 for (i = 0; phys_avail[i + 1] != 0; i += 2)
2071 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));
2074 * Remap any early IO mappings (console framebuffer, etc.)
2076 bs_remap_earlyboot();
2079 * Allocate a kernel stack with a guard page for thread0 and map it
2080 * into the kernel page map.
2082 pa = allocpages(kstack_pages);
2083 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2084 virtual_avail = va + kstack_pages * PAGE_SIZE;
2085 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
2086 thread0.td_kstack = va;
2087 for (i = 0; i < kstack_pages; i++) {
2088 mmu_radix_kenter(va, pa);
2092 thread0.td_kstack_pages = kstack_pages;
2095 * Allocate virtual address space for the message buffer.
2097 pa = msgbuf_phys = allocpages((msgbufsize + PAGE_MASK) >> PAGE_SHIFT);
2098 msgbufp = (struct msgbuf *)PHYS_TO_DMAP(pa);
2101 * Allocate virtual address space for the dynamic percpu area.
2103 pa = allocpages(DPCPU_SIZE >> PAGE_SHIFT);
2104 dpcpu = (void *)PHYS_TO_DMAP(pa);
2105 dpcpu_init(dpcpu, curcpu);
2107 crashdumpmap = (caddr_t)virtual_avail;
2108 virtual_avail += MAXDUMPPGS * PAGE_SIZE;
2111 * Reserve some special page table entries/VA space for temporary
2117 mmu_parttab_init(void)
2121 isa3_parttab = (struct pate *)PHYS_TO_DMAP(parttab_phys);
2124 printf("%s parttab: %p\n", __func__, isa3_parttab);
2125 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2127 printf("setting ptcr %lx\n", ptcr);
2128 mtspr(SPR_PTCR, ptcr);
2132 mmu_parttab_update(uint64_t lpid, uint64_t pagetab, uint64_t proctab)
2137 printf("%s isa3_parttab %p lpid %lx pagetab %lx proctab %lx\n", __func__, isa3_parttab,
2138 lpid, pagetab, proctab);
2139 prev = be64toh(isa3_parttab[lpid].pagetab);
2140 isa3_parttab[lpid].pagetab = htobe64(pagetab);
2141 isa3_parttab[lpid].proctab = htobe64(proctab);
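/*
 * Flush translations cached under the previous partition table entry;
 * which tlbie form is required depends on whether that entry described
 * a radix (HR) or a hash partition.
 */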
2143 if (prev & PARTTAB_HR) {
2144 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
2145 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2146 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2147 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2149 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
2150 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2156 mmu_radix_parttab_init(void)
2161 pagetab = RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) | \
2162 RADIX_PGD_INDEX_SHIFT | PARTTAB_HR;
2163 mmu_parttab_update(0, pagetab, 0);
2167 mmu_radix_proctab_register(vm_paddr_t proctabpa, uint64_t table_size)
2169 uint64_t pagetab, proctab;
2171 pagetab = be64toh(isa3_parttab[0].pagetab);
2172 proctab = proctabpa | table_size | PARTTAB_GR;
2173 mmu_parttab_update(0, pagetab, proctab);
2177 mmu_radix_proctab_init(void)
2182 isa3_proctab = (void*)PHYS_TO_DMAP(proctab0pa);
2183 isa3_proctab->proctab0 =
2184 htobe64(RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
2185 RADIX_PGD_INDEX_SHIFT);
2187 if (powernv_enabled) {
2188 mmu_radix_proctab_register(proctab0pa, PROCTAB_SIZE_SHIFT - 12);
2189 __asm __volatile("ptesync" : : : "memory");
2190 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2191 "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
2192 __asm __volatile("eieio; tlbsync; ptesync" : : : "memory");
2197 rc = phyp_hcall(H_REGISTER_PROC_TBL,
2198 PROC_TABLE_NEW | PROC_TABLE_RADIX | PROC_TABLE_GTSE,
2199 proctab0pa, 0, PROCTAB_SIZE_SHIFT - 12);
2200 if (rc != H_SUCCESS)
2201 panic("mmu_radix_proctab_init: "
2202 "failed to register process table: rc=%jd",
2208 printf("process table %p and kernel radix PDE: %p\n",
2209 isa3_proctab, kernel_pmap->pm_pml1);
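/*
 * Toggle data relocation; the two context-synchronizing mtmsr
 * operations ensure the newly registered process table takes effect
 * before the kernel PID is recorded below.
 */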
2210 mtmsr(mfmsr() | PSL_DR);
2211 mtmsr(mfmsr() & ~PSL_DR);
2212 kernel_pmap->pm_pid = isa3_base_pid;
2217 mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2220 struct rwlock *lock;
2223 pml3_entry_t oldl3e, *l3e;
2225 vm_offset_t va, va_next;
2227 boolean_t anychanged;
2229 if (advice != MADV_DONTNEED && advice != MADV_FREE)
2233 for (; sva < eva; sva = va_next) {
2234 l1e = pmap_pml1e(pmap, sva);
2235 if ((be64toh(*l1e) & PG_V) == 0) {
2236 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2241 l2e = pmap_l1e_to_l2e(l1e, sva);
2242 if ((be64toh(*l2e) & PG_V) == 0) {
2243 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2248 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2251 l3e = pmap_l2e_to_l3e(l2e, sva);
2252 oldl3e = be64toh(*l3e);
2253 if ((oldl3e & PG_V) == 0)
2255 else if ((oldl3e & RPTE_LEAF) != 0) {
2256 if ((oldl3e & PG_MANAGED) == 0)
2259 if (!pmap_demote_l3e_locked(pmap, l3e, sva, &lock)) {
2264 * The large page mapping was destroyed.
2270 * Unless the page mappings are wired, remove the
2271 * mapping to a single page so that a subsequent
2272 * access may repromote. Since the underlying page
2273 * table page is fully populated, this removal never
2274 * frees a page table page.
2276 if ((oldl3e & PG_W) == 0) {
2277 pte = pmap_l3e_to_pte(l3e, sva);
2278 KASSERT((be64toh(*pte) & PG_V) != 0,
2279 ("pmap_advise: invalid PTE"));
2280 pmap_remove_pte(pmap, pte, sva, be64toh(*l3e), NULL,
2290 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next;
2291 pte++, sva += PAGE_SIZE) {
2292 MPASS(pte == pmap_pte(pmap, sva));
2294 if ((be64toh(*pte) & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
2296 else if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2297 if (advice == MADV_DONTNEED) {
2299 * Future calls to pmap_is_modified()
2300 * can be avoided by making the page
 * dirty now.
 */
2303 m = PHYS_TO_VM_PAGE(be64toh(*pte) & PG_FRAME);
2306 atomic_clear_long(pte, htobe64(PG_M | PG_A));
2307 } else if ((be64toh(*pte) & PG_A) != 0)
2308 atomic_clear_long(pte, htobe64(PG_A));
2314 if (va != va_next) {
2323 pmap_invalidate_all(pmap);
2328 * Routines used in machine-dependent code
2331 mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end)
2336 printf("%s\n", __func__);
2338 powernv_enabled = (mfmsr() & PSL_HV) ? 1 : 0;
2339 mmu_radix_early_bootstrap(start, end);
2341 printf("early bootstrap complete\n");
2342 if (powernv_enabled) {
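/*
 * On bare metal, enable process-table translation (LPCR_UPRT) and
 * host radix mode (LPCR_HR) before installing the partition table.
 */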
2343 lpcr = mfspr(SPR_LPCR);
2344 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2345 mmu_radix_parttab_init();
2346 mmu_radix_init_amor();
2348 printf("powernv init complete\n");
2350 mmu_radix_init_iamr();
2351 mmu_radix_proctab_init();
2352 mmu_radix_pid_set(kernel_pmap);
2353 if (powernv_enabled)
2354 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2356 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
2358 mmu_radix_late_bootstrap(start, end);
2359 numa_mem_regions(&numa_pregions, &numa_pregions_sz);
2361 printf("%s done\n", __func__);
2362 pmap_bootstrapped = 1;
2363 dmaplimit = roundup2(powerpc_ptob(Maxmem), L2_PAGE_SIZE);
2364 PCPU_SET(flags, PCPU_GET(flags) | PC_FLAG_NOSRS);
2368 mmu_radix_cpu_bootstrap(int ap)
2373 if (powernv_enabled) {
2374 lpcr = mfspr(SPR_LPCR);
2375 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2377 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2378 mtspr(SPR_PTCR, ptcr);
2379 mmu_radix_init_amor();
2381 mmu_radix_init_iamr();
2382 mmu_radix_pid_set(kernel_pmap);
2383 if (powernv_enabled)
2384 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2386 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
2389 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3e, CTLFLAG_RD, 0,
2390 "2MB page mapping counters");
2392 static u_long pmap_l3e_demotions;
2393 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, demotions, CTLFLAG_RD,
2394 &pmap_l3e_demotions, 0, "2MB page demotions");
2396 static u_long pmap_l3e_mappings;
2397 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, mappings, CTLFLAG_RD,
2398 &pmap_l3e_mappings, 0, "2MB page mappings");
2400 static u_long pmap_l3e_p_failures;
2401 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, p_failures, CTLFLAG_RD,
2402 &pmap_l3e_p_failures, 0, "2MB page promotion failures");
2404 static u_long pmap_l3e_promotions;
2405 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, promotions, CTLFLAG_RD,
2406 &pmap_l3e_promotions, 0, "2MB page promotions");
2408 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0,
2409 "1GB page mapping counters");
2411 static u_long pmap_l2e_demotions;
2412 SYSCTL_ULONG(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD,
2413 &pmap_l2e_demotions, 0, "1GB page demotions");
2416 mmu_radix_clear_modify(vm_page_t m)
2418 struct md_page *pvh;
2420 pv_entry_t next_pv, pv;
2421 pml3_entry_t oldl3e, *l3e;
2422 pt_entry_t oldpte, *pte;
2423 struct rwlock *lock;
2425 int md_gen, pvh_gen;
2427 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2428 ("pmap_clear_modify: page %p is not managed", m));
2429 vm_page_assert_busied(m);
2430 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
2433 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
2434 * If the object containing the page is locked and the page is not
2435 * exclusively busied, then PGA_WRITEABLE cannot be concurrently set.
2437 if ((m->a.flags & PGA_WRITEABLE) == 0)
2439 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2440 pa_to_pvh(VM_PAGE_TO_PHYS(m));
2441 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2444 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
2446 if (!PMAP_TRYLOCK(pmap)) {
2447 pvh_gen = pvh->pv_gen;
2451 if (pvh_gen != pvh->pv_gen) {
2457 l3e = pmap_pml3e(pmap, va);
2458 oldl3e = be64toh(*l3e);
2459 if ((oldl3e & PG_RW) != 0) {
2460 if (pmap_demote_l3e_locked(pmap, l3e, va, &lock)) {
2461 if ((oldl3e & PG_W) == 0) {
2463 * Write protect the mapping to a
2464 * single page so that a subsequent
2465 * write access may repromote.
2467 va += VM_PAGE_TO_PHYS(m) - (oldl3e &
2469 pte = pmap_l3e_to_pte(l3e, va);
2470 oldpte = be64toh(*pte);
2471 if ((oldpte & PG_V) != 0) {
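/*
 * Retry until the PTE has been atomically downgraded to
 * read-only with the modified bit cleared.
 */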
2472 while (!atomic_cmpset_long(pte,
2474 htobe64((oldpte | RPTE_EAA_R) & ~(PG_M | PG_RW))))
2475 oldpte = be64toh(*pte);
2477 pmap_invalidate_page(pmap, va);
2484 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2486 if (!PMAP_TRYLOCK(pmap)) {
2487 md_gen = m->md.pv_gen;
2488 pvh_gen = pvh->pv_gen;
2492 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2497 l3e = pmap_pml3e(pmap, pv->pv_va);
2498 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_clear_modify: found"
2499 " a 2mpage in page %p's pv list", m));
2500 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
2501 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2502 atomic_clear_long(pte, htobe64(PG_M));
2503 pmap_invalidate_page(pmap, pv->pv_va);
2511 mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2512 vm_size_t len, vm_offset_t src_addr)
2514 struct rwlock *lock;
2515 struct spglist free;
2517 vm_offset_t end_addr = src_addr + len;
2518 vm_offset_t va_next;
2519 vm_page_t dst_pdpg, dstmpte, srcmpte;
2520 bool invalidate_all;
2523 "%s(dst_pmap=%p, src_pmap=%p, dst_addr=%lx, len=%lu, src_addr=%lx)\n",
2524 __func__, dst_pmap, src_pmap, dst_addr, len, src_addr);
2526 if (dst_addr != src_addr)
2529 invalidate_all = false;
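/*
 * Lock the source and destination pmaps in a consistent (pointer)
 * order so that two concurrent pmap_copy() calls cannot deadlock.
 */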
2530 if (dst_pmap < src_pmap) {
2531 PMAP_LOCK(dst_pmap);
2532 PMAP_LOCK(src_pmap);
2534 PMAP_LOCK(src_pmap);
2535 PMAP_LOCK(dst_pmap);
2538 for (addr = src_addr; addr < end_addr; addr = va_next) {
2541 pml3_entry_t srcptepaddr, *l3e;
2542 pt_entry_t *src_pte, *dst_pte;
2544 l1e = pmap_pml1e(src_pmap, addr);
2545 if ((be64toh(*l1e) & PG_V) == 0) {
2546 va_next = (addr + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2552 l2e = pmap_l1e_to_l2e(l1e, addr);
2553 if ((be64toh(*l2e) & PG_V) == 0) {
2554 va_next = (addr + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2560 va_next = (addr + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2564 l3e = pmap_l2e_to_l3e(l2e, addr);
2565 srcptepaddr = be64toh(*l3e);
2566 if (srcptepaddr == 0)
2569 if (srcptepaddr & RPTE_LEAF) {
2570 if ((addr & L3_PAGE_MASK) != 0 ||
2571 addr + L3_PAGE_SIZE > end_addr)
2573 dst_pdpg = pmap_allocl3e(dst_pmap, addr, NULL);
2574 if (dst_pdpg == NULL)
2576 l3e = (pml3_entry_t *)
2577 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
2578 l3e = &l3e[pmap_pml3e_index(addr)];
2579 if (be64toh(*l3e) == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
2580 pmap_pv_insert_l3e(dst_pmap, addr, srcptepaddr,
2581 PMAP_ENTER_NORECLAIM, &lock))) {
2582 *l3e = htobe64(srcptepaddr & ~PG_W);
2583 pmap_resident_count_inc(dst_pmap,
2584 L3_PAGE_SIZE / PAGE_SIZE);
2585 atomic_add_long(&pmap_l3e_mappings, 1);
2587 dst_pdpg->ref_count--;
2591 srcptepaddr &= PG_FRAME;
2592 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
2593 KASSERT(srcmpte->ref_count > 0,
2594 ("pmap_copy: source page table page is unused"));
2596 if (va_next > end_addr)
2599 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
2600 src_pte = &src_pte[pmap_pte_index(addr)];
2602 while (addr < va_next) {
2604 ptetemp = be64toh(*src_pte);
2606 * We only virtual-copy managed pages.
2608 if ((ptetemp & PG_MANAGED) != 0) {
2609 if (dstmpte != NULL &&
2610 dstmpte->pindex == pmap_l3e_pindex(addr))
2611 dstmpte->ref_count++;
2612 else if ((dstmpte = pmap_allocpte(dst_pmap,
2613 addr, NULL)) == NULL)
2615 dst_pte = (pt_entry_t *)
2616 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
2617 dst_pte = &dst_pte[pmap_pte_index(addr)];
2618 if (be64toh(*dst_pte) == 0 &&
2619 pmap_try_insert_pv_entry(dst_pmap, addr,
2620 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
2623 * Clear the wired, modified, and
2624 * accessed (referenced) bits
2627 *dst_pte = htobe64(ptetemp & ~(PG_W | PG_M |
2629 pmap_resident_count_inc(dst_pmap, 1);
2632 if (pmap_unwire_ptp(dst_pmap, addr,
2635 * Although "addr" is not
2636 * mapped, paging-structure
2637 * caches could nonetheless
2638 * have entries that refer to
2639 * the freed page table pages.
2640 * Invalidate those entries.
2642 invalidate_all = true;
2643 vm_page_free_pages_toq(&free,
2648 if (dstmpte->ref_count >= srcmpte->ref_count)
2652 if (__predict_false((addr & L3_PAGE_MASK) == 0))
2653 src_pte = pmap_pte(src_pmap, addr);
2660 pmap_invalidate_all(dst_pmap);
2663 PMAP_UNLOCK(src_pmap);
2664 PMAP_UNLOCK(dst_pmap);
2668 mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst)
2670 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2671 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2673 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
2677 bcopy((void *)src, (void *)dst, PAGE_SIZE);
2681 mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2682 vm_offset_t b_offset, int xfersize)
2685 vm_offset_t a_pg_offset, b_pg_offset;
2688 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
2689 a_offset, mb, b_offset, xfersize);
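/*
 * Copy xfersize bytes between the two page arrays through the DMAP,
 * one chunk at a time; each chunk is bounded by the nearer of the
 * source and destination page boundaries.
 */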
2691 while (xfersize > 0) {
2692 a_pg_offset = a_offset & PAGE_MASK;
2693 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2694 a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
2695 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
2697 b_pg_offset = b_offset & PAGE_MASK;
2698 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2699 b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
2700 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
2702 bcopy(a_cp, b_cp, cnt);
2709 #if VM_NRESERVLEVEL > 0
2711 * Tries to promote the 512 contiguous 4KB page mappings that are within a
2712 * single page table page (PTP) to a single 2MB page mapping. For promotion
2713 * to occur, two conditions must be met: (1) the 4KB page mappings must map
2714 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
2715 * identical characteristics.
2718 pmap_promote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va,
2719 struct rwlock **lockp)
2721 pml3_entry_t newpde;
2722 pt_entry_t *firstpte, oldpte, pa, *pte;
2725 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2728 * Examine the first PTE in the specified PTP. Abort if this PTE is
2729 * either invalid, unused, or does not map the first 4KB physical page
2730 * within a 2MB page.
2732 firstpte = (pt_entry_t *)PHYS_TO_DMAP(be64toh(*pde) & PG_FRAME);
2735 if ((newpde & ((PG_FRAME & L3_PAGE_MASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
2736 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2737 " in pmap %p", va, pmap);
2740 if ((newpde & (PG_M | PG_RW)) == PG_RW) {
2742 * When PG_M is already clear, PG_RW can be cleared without
2743 * a TLB invalidation.
2745 if (!atomic_cmpset_long(firstpte, htobe64(newpde), htobe64((newpde | RPTE_EAA_R) & ~RPTE_EAA_W)))
2747 newpde &= ~RPTE_EAA_W;
2751 * Examine each of the other PTEs in the specified PTP. Abort if this
2752 * PTE maps an unexpected 4KB physical page or does not have identical
2753 * characteristics to the first PTE.
2755 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + L3_PAGE_SIZE - PAGE_SIZE;
2756 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
2758 oldpte = be64toh(*pte);
2759 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
2760 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2761 " in pmap %p", va, pmap);
2764 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
2766 * When PG_M is already clear, PG_RW can be cleared
2767 * without a TLB invalidation.
2769 if (!atomic_cmpset_long(pte, htobe64(oldpte), htobe64((oldpte | RPTE_EAA_R) & ~RPTE_EAA_W)))
2771 oldpte &= ~RPTE_EAA_W;
2772 CTR2(KTR_PMAP, "pmap_promote_l3e: protect for va %#lx"
2773 " in pmap %p", (oldpte & PG_FRAME & L3_PAGE_MASK) |
2774 (va & ~L3_PAGE_MASK), pmap);
2776 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
2777 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2778 " in pmap %p", va, pmap);
2785 * Save the page table page in its current state until the PDE
2786 * mapping the superpage is demoted by pmap_demote_pde() or
2787 * destroyed by pmap_remove_pde().
2789 mpte = PHYS_TO_VM_PAGE(be64toh(*pde) & PG_FRAME);
2790 KASSERT(mpte >= vm_page_array &&
2791 mpte < &vm_page_array[vm_page_array_size],
2792 ("pmap_promote_l3e: page table page is out of range"));
2793 KASSERT(mpte->pindex == pmap_l3e_pindex(va),
2794 ("pmap_promote_l3e: page table page's pindex is wrong"));
2795 if (pmap_insert_pt_page(pmap, mpte)) {
2797 "pmap_promote_l3e: failure for va %#lx in pmap %p", va,
2803 * Promote the pv entries.
2805 if ((newpde & PG_MANAGED) != 0)
2806 pmap_pv_promote_l3e(pmap, va, newpde & PG_PS_FRAME, lockp);
2808 pte_store(pde, PG_PROMOTED | newpde);
2810 atomic_add_long(&pmap_l3e_promotions, 1);
2811 CTR2(KTR_PMAP, "pmap_promote_l3e: success for va %#lx"
2812 " in pmap %p", va, pmap);
2815 atomic_add_long(&pmap_l3e_p_failures, 1);
2816 return (KERN_FAILURE);
2818 #endif /* VM_NRESERVLEVEL > 0 */
2821 mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
2822 vm_prot_t prot, u_int flags, int8_t psind)
2824 struct rwlock *lock;
2827 pt_entry_t newpte, origpte;
2832 boolean_t nosleep, invalidate_all, invalidate_page;
2834 va = trunc_page(va);
2836 invalidate_page = invalidate_all = false;
2837 CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va,
2838 m, prot, flags, psind);
2839 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
2840 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
2841 ("pmap_enter: managed mapping within the clean submap"));
2842 if ((m->oflags & VPO_UNMANAGED) == 0)
2843 VM_PAGE_OBJECT_BUSY_ASSERT(m);
2845 KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
2846 ("pmap_enter: flags %u has reserved bits set", flags));
2847 pa = VM_PAGE_TO_PHYS(m);
2848 newpte = (pt_entry_t)(pa | PG_A | PG_V | RPTE_LEAF);
2849 if ((flags & VM_PROT_WRITE) != 0)
2851 if ((flags & VM_PROT_READ) != 0)
2853 if (prot & VM_PROT_READ)
2854 newpte |= RPTE_EAA_R;
2855 if ((prot & VM_PROT_WRITE) != 0)
2856 newpte |= RPTE_EAA_W;
2857 KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
2858 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
2860 if (prot & VM_PROT_EXECUTE)
2862 if ((flags & PMAP_ENTER_WIRED) != 0)
2864 if (va >= DMAP_MIN_ADDRESS)
2865 newpte |= RPTE_EAA_P;
2866 newpte |= pmap_cache_bits(m->md.mdpg_cache_attrs);
2868 * Set modified bit gratuitously for writeable mappings if
2869 * the page is unmanaged. We do not want to take a fault
2870 * to do the dirty bit accounting for these mappings.
2872 if ((m->oflags & VPO_UNMANAGED) != 0) {
2873 if ((newpte & PG_RW) != 0)
2876 newpte |= PG_MANAGED;
2881 /* Assert the required virtual and physical alignment. */
2882 KASSERT((va & L3_PAGE_MASK) == 0, ("pmap_enter: va unaligned"));
2883 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
2884 rv = pmap_enter_l3e(pmap, va, newpte | RPTE_LEAF, flags, m, &lock);
2890 * In the case that a page table page is not
2891 * resident, we are creating it here.
2894 l3e = pmap_pml3e(pmap, va);
2895 if (l3e != NULL && (be64toh(*l3e) & PG_V) != 0 && ((be64toh(*l3e) & RPTE_LEAF) == 0 ||
2896 pmap_demote_l3e_locked(pmap, l3e, va, &lock))) {
2897 pte = pmap_l3e_to_pte(l3e, va);
2898 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
2899 mpte = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME);
2902 } else if (va < VM_MAXUSER_ADDRESS) {
2904 * Here if the pte page isn't mapped, or if it has been
2907 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2908 mpte = _pmap_allocpte(pmap, pmap_l3e_pindex(va),
2909 nosleep ? NULL : &lock);
2910 if (mpte == NULL && nosleep) {
2911 rv = KERN_RESOURCE_SHORTAGE;
2914 if (__predict_false(retrycount++ == 6))
2915 panic("too many retries");
2916 invalidate_all = true;
2919 panic("pmap_enter: invalid page directory va=%#lx", va);
2921 origpte = be64toh(*pte);
2925 * Is the specified virtual address already mapped?
2927 if ((origpte & PG_V) != 0) {
2929 if (VERBOSE_PMAP || pmap_logging) {
2930 printf("cow fault pmap_enter(%p, %#lx, %p, %#x, %x, %d) --"
2931 " asid=%lu curpid=%d name=%s origpte0x%lx\n",
2932 pmap, va, m, prot, flags, psind, pmap->pm_pid,
2933 curproc->p_pid, curproc->p_comm, origpte);
2934 pmap_pte_walk(pmap->pm_pml1, va);
2938 * Wiring change, just update stats. We don't worry about
2939 * wiring PT pages as they remain resident as long as there
2940 * are valid mappings in them. Hence, if a user page is wired,
2941 * the PT page will be also.
2943 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
2944 pmap->pm_stats.wired_count++;
2945 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
2946 pmap->pm_stats.wired_count--;
2949 * Remove the extra PT page reference.
2953 KASSERT(mpte->ref_count > 0,
2954 ("pmap_enter: missing reference to page table page,"
2959 * Has the physical page changed?
2961 opa = origpte & PG_FRAME;
2964 * No, might be a protection or wiring change.
2966 if ((origpte & PG_MANAGED) != 0 &&
2967 (newpte & PG_RW) != 0)
2968 vm_page_aflag_set(m, PGA_WRITEABLE);
2969 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) {
2970 if ((newpte & (PG_A|PG_M)) != (origpte & (PG_A|PG_M))) {
2971 if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte)))
2973 if ((newpte & PG_M) != (origpte & PG_M))
2975 if ((newpte & PG_A) != (origpte & PG_A))
2976 vm_page_aflag_set(m, PGA_REFERENCED);
2979 invalidate_all = true;
2980 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
2987 * The physical page has changed. Temporarily invalidate
2988 * the mapping. This ensures that all threads sharing the
2989 * pmap keep a consistent view of the mapping, which is
2990 * necessary for the correct handling of COW faults. It
2991 * also permits reuse of the old mapping's PV entry,
2992 * avoiding an allocation.
2994 * For consistency, handle unmanaged mappings the same way.
2996 origpte = be64toh(pte_load_clear(pte));
2997 KASSERT((origpte & PG_FRAME) == opa,
2998 ("pmap_enter: unexpected pa update for %#lx", va));
2999 if ((origpte & PG_MANAGED) != 0) {
3000 om = PHYS_TO_VM_PAGE(opa);
3003 * The pmap lock is sufficient to synchronize with
3004 * concurrent calls to pmap_page_test_mappings() and
3005 * pmap_ts_referenced().
3007 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3009 if ((origpte & PG_A) != 0)
3010 vm_page_aflag_set(om, PGA_REFERENCED);
3011 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3012 pv = pmap_pvh_remove(&om->md, pmap, va);
3013 if ((newpte & PG_MANAGED) == 0)
3014 free_pv_entry(pmap, pv);
3016 else if (origpte & PG_MANAGED) {
3018 pmap_page_print_mappings(om);
3023 if ((om->a.flags & PGA_WRITEABLE) != 0 &&
3024 TAILQ_EMPTY(&om->md.pv_list) &&
3025 ((om->flags & PG_FICTITIOUS) != 0 ||
3026 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3027 vm_page_aflag_clear(om, PGA_WRITEABLE);
3029 if ((origpte & PG_A) != 0)
3030 invalidate_page = true;
3033 if (pmap != kernel_pmap) {
3035 if (VERBOSE_PMAP || pmap_logging)
3036 printf("pmap_enter(%p, %#lx, %p, %#x, %x, %d) -- asid=%lu curpid=%d name=%s\n",
3037 pmap, va, m, prot, flags, psind,
3038 pmap->pm_pid, curproc->p_pid,
3044 * Increment the counters.
3046 if ((newpte & PG_W) != 0)
3047 pmap->pm_stats.wired_count++;
3048 pmap_resident_count_inc(pmap, 1);
3052 * Enter on the PV list if part of our managed memory.
3054 if ((newpte & PG_MANAGED) != 0) {
3056 pv = get_pv_entry(pmap, &lock);
3061 printf("reassigning pv: %p to pmap: %p\n",
3064 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3065 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3067 if ((newpte & PG_RW) != 0)
3068 vm_page_aflag_set(m, PGA_WRITEABLE);
3074 if ((origpte & PG_V) != 0) {
3076 origpte = be64toh(pte_load_store(pte, htobe64(newpte)));
3077 KASSERT((origpte & PG_FRAME) == pa,
3078 ("pmap_enter: unexpected pa update for %#lx", va));
3079 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
3081 if ((origpte & PG_MANAGED) != 0)
3083 invalidate_page = true;
3086 * Although the PTE may still have PG_RW set, TLB
3087 * invalidation may nonetheless be required because
3088 * the PTE no longer has PG_M set.
3090 } else if ((origpte & PG_X) != 0 || (newpte & PG_X) == 0) {
3092 * Removing capabilities requires invalidation on POWER
3094 invalidate_page = true;
3097 if ((origpte & PG_A) != 0)
3098 invalidate_page = true;
3100 pte_store(pte, newpte);
3105 #if VM_NRESERVLEVEL > 0
3107 * If both the page table page and the reservation are fully
3108 * populated, then attempt promotion.
3110 if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
3111 mmu_radix_ps_enabled(pmap) &&
3112 (m->flags & PG_FICTITIOUS) == 0 &&
3113 vm_reserv_level_iffullpop(m) == 0 &&
3114 pmap_promote_l3e(pmap, l3e, va, &lock) == 0)
3115 invalidate_all = true;
3118 pmap_invalidate_all(pmap);
3119 else if (invalidate_page)
3120 pmap_invalidate_page(pmap, va);
3132 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
3133 * if successful. Returns false if (1) a page table page cannot be allocated
3134 * without sleeping, (2) a mapping already exists at the specified virtual
3135 * address, or (3) a PV entry cannot be allocated without reclaiming another
3139 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3140 struct rwlock **lockp)
3142 pml3_entry_t newpde;
3144 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3145 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs) |
3147 if ((m->oflags & VPO_UNMANAGED) == 0)
3148 newpde |= PG_MANAGED;
3149 if (prot & VM_PROT_EXECUTE)
3151 if (prot & VM_PROT_READ)
3152 newpde |= RPTE_EAA_R;
3153 if (va >= DMAP_MIN_ADDRESS)
3154 newpde |= RPTE_EAA_P;
3155 return (pmap_enter_l3e(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
3156 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3161 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
3162 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3163 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3164 * a mapping already exists at the specified virtual address. Returns
3165 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3166 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
3167 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3169 * The parameter "m" is only used when creating a managed, writeable mapping.
3172 pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
3173 vm_page_t m, struct rwlock **lockp)
3175 struct spglist free;
3176 pml3_entry_t oldl3e, *l3e;
3179 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW,
3180 ("pmap_enter_pde: newpde is missing PG_M"));
3181 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3183 if ((pdpg = pmap_allocl3e(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
3184 NULL : lockp)) == NULL) {
3185 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3186 " in pmap %p", va, pmap);
3187 return (KERN_RESOURCE_SHORTAGE);
3189 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
3190 l3e = &l3e[pmap_pml3e_index(va)];
3191 oldl3e = be64toh(*l3e);
3192 if ((oldl3e & PG_V) != 0) {
3193 KASSERT(pdpg->ref_count > 1,
3194 ("pmap_enter_pde: pdpg's wire count is too low"));
3195 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
3197 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3198 " in pmap %p", va, pmap);
3199 return (KERN_FAILURE);
3201 /* Break the existing mapping(s). */
3203 if ((oldl3e & RPTE_LEAF) != 0) {
3205 * The reference to the PD page that was acquired by
3206 * pmap_allocl3e() ensures that it won't be freed.
3207 * However, if the PDE resulted from a promotion, then
3208 * a reserved PT page could be freed.
3210 (void)pmap_remove_l3e(pmap, l3e, va, &free, lockp);
3211 pmap_invalidate_l3e_page(pmap, va, oldl3e);
3213 if (pmap_remove_ptes(pmap, va, va + L3_PAGE_SIZE, l3e,
3215 pmap_invalidate_all(pmap);
3217 vm_page_free_pages_toq(&free, true);
3218 if (va >= VM_MAXUSER_ADDRESS) {
3219 mt = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME);
3220 if (pmap_insert_pt_page(pmap, mt)) {
3222 * XXX Currently, this can't happen because
3223 * we do not perform pmap_enter(psind == 1)
3224 * on the kernel pmap.
3226 panic("pmap_enter_pde: trie insert failed");
3229 KASSERT(be64toh(*l3e) == 0, ("pmap_enter_pde: non-zero pde %p",
3232 if ((newpde & PG_MANAGED) != 0) {
3234 * Abort this mapping if its PV entry could not be created.
3236 if (!pmap_pv_insert_l3e(pmap, va, newpde, flags, lockp)) {
3238 if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
3240 * Although "va" is not mapped, paging-
3241 * structure caches could nonetheless have
3242 * entries that refer to the freed page table
3243 * pages. Invalidate those entries.
3245 pmap_invalidate_page(pmap, va);
3246 vm_page_free_pages_toq(&free, true);
3248 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3249 " in pmap %p", va, pmap);
3250 return (KERN_RESOURCE_SHORTAGE);
3252 if ((newpde & PG_RW) != 0) {
3253 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
3254 vm_page_aflag_set(mt, PGA_WRITEABLE);
3259 * Increment counters.
3261 if ((newpde & PG_W) != 0)
3262 pmap->pm_stats.wired_count += L3_PAGE_SIZE / PAGE_SIZE;
3263 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
3266 * Map the superpage. (This is not a promoted mapping; there will not
3267 * be any lingering 4KB page mappings in the TLB.)
3269 pte_store(l3e, newpde);
3272 atomic_add_long(&pmap_l3e_mappings, 1);
3273 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
3274 " in pmap %p", va, pmap);
3275 return (KERN_SUCCESS);
3279 mmu_radix_enter_object(pmap_t pmap, vm_offset_t start,
3280 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
3283 struct rwlock *lock;
3286 vm_pindex_t diff, psize;
3288 VM_OBJECT_ASSERT_LOCKED(m_start->object);
3290 CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
3291 end, m_start, prot);
3294 psize = atop(end - start);
3299 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3300 va = start + ptoa(diff);
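/*
 * Prefer a 2MB page mapping when the virtual address and remaining
 * range are superpage aligned and the physical page begins a fully
 * populated superpage-sized run (psind == 1).
 */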
3301 if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end &&
3302 m->psind == 1 && mmu_radix_ps_enabled(pmap) &&
3303 pmap_enter_2mpage(pmap, va, m, prot, &lock))
3304 m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1];
3306 mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot,
3307 mpte, &lock, &invalidate);
3308 m = TAILQ_NEXT(m, listq);
3314 pmap_invalidate_all(pmap);
3319 mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3320 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate)
3322 struct spglist free;
3326 KASSERT(!VA_IS_CLEANMAP(va) ||
3327 (m->oflags & VPO_UNMANAGED) != 0,
3328 ("mmu_radix_enter_quick_locked: managed mapping within the clean submap"));
3329 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3332 * In the case that a page table page is not
3333 * resident, we are creating it here.
3335 if (va < VM_MAXUSER_ADDRESS) {
3336 vm_pindex_t ptepindex;
3337 pml3_entry_t *ptepa;
3340 * Calculate pagetable page index
3342 ptepindex = pmap_l3e_pindex(va);
3343 if (mpte && (mpte->pindex == ptepindex)) {
3347 * Get the page directory entry
3349 ptepa = pmap_pml3e(pmap, va);
3352 * If the page table page is mapped, we just increment
3353 * the hold count, and activate it. Otherwise, we
3354 * attempt to allocate a page table page. If this
3355 * attempt fails, we don't retry. Instead, we give up.
3357 if (ptepa && (be64toh(*ptepa) & PG_V) != 0) {
3358 if (be64toh(*ptepa) & RPTE_LEAF)
3360 mpte = PHYS_TO_VM_PAGE(be64toh(*ptepa) & PG_FRAME);
3364 * Pass NULL instead of the PV list lock
3365 * pointer, because we don't intend to sleep.
3367 mpte = _pmap_allocpte(pmap, ptepindex, NULL);
3372 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3373 pte = &pte[pmap_pte_index(va)];
3376 pte = pmap_pte(pmap, va);
3378 if (be64toh(*pte)) {
3387 * Enter on the PV list if part of our managed memory.
3389 if ((m->oflags & VPO_UNMANAGED) == 0 &&
3390 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3393 if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
3395 * Although "va" is not mapped, paging-
3396 * structure caches could nonetheless have
3397 * entries that refer to the freed page table
3398 * pages. Invalidate those entries.
3401 vm_page_free_pages_toq(&free, true);
3409 * Increment counters
3411 pmap_resident_count_inc(pmap, 1);
3413 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs);
3414 if (prot & VM_PROT_EXECUTE)
3418 if ((m->oflags & VPO_UNMANAGED) == 0)
3426 mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
3429 struct rwlock *lock;
3435 mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock,
3441 pmap_invalidate_all(pmap);
3446 mmu_radix_extract(pmap_t pmap, vm_offset_t va)
3452 l3e = pmap_pml3e(pmap, va);
3453 if (__predict_false(l3e == NULL))
3455 if (be64toh(*l3e) & RPTE_LEAF) {
3456 pa = (be64toh(*l3e) & PG_PS_FRAME) | (va & L3_PAGE_MASK);
3460 * Beware of a concurrent promotion that changes the
3461 * PDE at this point! For example, vtopte() must not
3462 * be used to access the PTE because it would use the
3463 * new PDE. It is, however, safe to use the old PDE
3464 * because the page table page is preserved by the
3467 pte = pmap_l3e_to_pte(l3e, va);
3468 if (__predict_false(pte == NULL))
3471 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
3478 mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3480 pml3_entry_t l3e, *l3ep;
3487 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
3489 l3ep = pmap_pml3e(pmap, va);
3490 if (l3ep != NULL && (l3e = be64toh(*l3ep))) {
3491 if (l3e & RPTE_LEAF) {
3492 if ((l3e & PG_RW) || (prot & VM_PROT_WRITE) == 0)
3493 m = PHYS_TO_VM_PAGE((l3e & PG_PS_FRAME) |
3494 (va & L3_PAGE_MASK));
3496 /* Native endian PTE, do not pass to pmap functions */
3497 pte = be64toh(*pmap_l3e_to_pte(l3ep, va));
3499 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0))
3500 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3502 if (m != NULL && !vm_page_wire_mapped(m))
3510 mmu_radix_growkernel(vm_offset_t addr)
3517 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
3518 if (VM_MIN_KERNEL_ADDRESS < addr &&
3519 addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE))
3522 addr = roundup2(addr, L3_PAGE_SIZE);
3523 if (addr - 1 >= vm_map_max(kernel_map))
3524 addr = vm_map_max(kernel_map);
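/*
 * Grow the kernel page tables in 2MB (L3) steps from the current end
 * of the kernel map, allocating missing page directory and page table
 * pages along the way.
 */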
3525 while (kernel_vm_end < addr) {
3526 l2e = pmap_pml2e(kernel_pmap, kernel_vm_end);
3527 if ((be64toh(*l2e) & PG_V) == 0) {
3528 /* We need a new PDP entry */
3529 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
3530 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3532 panic("pmap_growkernel: no memory to grow kernel");
3533 nkpg->pindex = kernel_vm_end >> L2_PAGE_SIZE_SHIFT;
3534 paddr = VM_PAGE_TO_PHYS(nkpg);
3535 pde_store(l2e, paddr);
3536 continue; /* try again */
3538 l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end);
3539 if ((be64toh(*l3e) & PG_V) != 0) {
3540 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3541 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3542 kernel_vm_end = vm_map_max(kernel_map);
3548 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
3551 panic("pmap_growkernel: no memory to grow kernel");
3552 nkpg->pindex = pmap_l3e_pindex(kernel_vm_end);
3553 paddr = VM_PAGE_TO_PHYS(nkpg);
3554 pde_store(l3e, paddr);
3556 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3557 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3558 kernel_vm_end = vm_map_max(kernel_map);
3565 static MALLOC_DEFINE(M_RADIX_PGD, "radix_pgd", "radix page table root directory");
3566 static uma_zone_t zone_radix_pgd;
3569 radix_pgd_import(void *arg __unused, void **store, int count, int domain __unused,
3574 req = VM_ALLOC_WIRED | malloc2vm_flags(flags);
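/*
 * Each radix root table (PGD) is a physically contiguous,
 * RADIX_PGD_SIZE-aligned run of pages accessed through the DMAP, so
 * back the UMA cache with contiguous page allocations.
 */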
3575 for (int i = 0; i < count; i++) {
3576 vm_page_t m = vm_page_alloc_noobj_contig(req,
3577 RADIX_PGD_SIZE / PAGE_SIZE,
3578 0, (vm_paddr_t)-1, RADIX_PGD_SIZE, L1_PAGE_SIZE,
3579 VM_MEMATTR_DEFAULT);
3580 store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3586 radix_pgd_release(void *arg __unused, void **store, int count)
3589 struct spglist free;
3593 page_count = RADIX_PGD_SIZE/PAGE_SIZE;
3595 for (int i = 0; i < count; i++) {
3597 * XXX selectively remove dmap and KVA entries so we don't
3600 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
3601 for (int j = page_count-1; j >= 0; j--) {
3602 vm_page_unwire_noq(&m[j]);
3603 SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss);
3605 vm_page_free_pages_toq(&free, false);
3610 mmu_radix_init(void)
3614 int error, i, pv_npg;
3616 /* XXX is this really needed for POWER? */
3617 /* L1TF, reserve page @0 unconditionally */
3618 vm_page_blacklist_add(0, bootverbose);
3620 zone_radix_pgd = uma_zcache_create("radix_pgd_cache",
3621 RADIX_PGD_SIZE, NULL, NULL,
3623 trash_init, trash_fini,
3627 radix_pgd_import, radix_pgd_release,
3628 NULL, UMA_ZONE_NOBUCKET);
3631 * Initialize the vm page array entries for the kernel pmap's
3634 PMAP_LOCK(kernel_pmap);
3635 for (i = 0; i < nkpt; i++) {
3636 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
3637 KASSERT(mpte >= vm_page_array &&
3638 mpte < &vm_page_array[vm_page_array_size],
3639 ("pmap_init: page table page is out of range size: %lu",
3640 vm_page_array_size));
3641 mpte->pindex = pmap_l3e_pindex(VM_MIN_KERNEL_ADDRESS) + i;
3642 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
3643 MPASS(PHYS_TO_VM_PAGE(mpte->phys_addr) == mpte);
3644 //pmap_insert_pt_page(kernel_pmap, mpte);
3645 mpte->ref_count = 1;
3647 PMAP_UNLOCK(kernel_pmap);
3650 CTR1(KTR_PMAP, "%s()", __func__);
3651 TAILQ_INIT(&pv_dummy.pv_list);
3654 * Are large page mappings enabled?
3656 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
3657 if (superpages_enabled) {
3658 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
3659 ("pmap_init: can't assign to pagesizes[1]"));
3660 pagesizes[1] = L3_PAGE_SIZE;
3664 * Initialize the pv chunk list mutex.
3666 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
3669 * Initialize the pool of pv list locks.
3671 for (i = 0; i < NPV_LIST_LOCKS; i++)
3672 rw_init(&pv_list_locks[i], "pmap pv list");
3675 * Calculate the size of the pv head table for superpages.
3677 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L3_PAGE_SIZE);
3680 * Allocate memory for the pv head table for superpages.
3682 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
3684 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
3685 for (i = 0; i < pv_npg; i++)
3686 TAILQ_INIT(&pv_table[i].pv_list);
3687 TAILQ_INIT(&pv_dummy.pv_list);
3689 pmap_initialized = 1;
3690 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
3691 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3692 (vmem_addr_t *)&qframe);
3695 panic("qframe allocation failed");
3696 asid_arena = vmem_create("ASID", isa3_base_pid + 1, (1<<isa3_pid_bits),
3701 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3703 struct rwlock *lock;
3705 struct md_page *pvh;
3706 pt_entry_t *pte, mask;
3708 int md_gen, pvh_gen;
3712 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3715 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3717 if (!PMAP_TRYLOCK(pmap)) {
3718 md_gen = m->md.pv_gen;
3722 if (md_gen != m->md.pv_gen) {
3727 pte = pmap_pte(pmap, pv->pv_va);
3730 mask |= PG_RW | PG_M;
3732 mask |= PG_V | PG_A;
3733 rv = (be64toh(*pte) & mask) == mask;
3738 if ((m->flags & PG_FICTITIOUS) == 0) {
3739 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3740 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
3742 if (!PMAP_TRYLOCK(pmap)) {
3743 md_gen = m->md.pv_gen;
3744 pvh_gen = pvh->pv_gen;
3748 if (md_gen != m->md.pv_gen ||
3749 pvh_gen != pvh->pv_gen) {
3754 pte = pmap_pml3e(pmap, pv->pv_va);
3757 mask |= PG_RW | PG_M;
3759 mask |= PG_V | PG_A;
3760 rv = (be64toh(*pte) & mask) == mask;
3774 * Return whether or not the specified physical page was modified
3775 * in any physical maps.
3778 mmu_radix_is_modified(vm_page_t m)
3781 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3782 ("pmap_is_modified: page %p is not managed", m));
3784 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3786 * If the page is not busied then this check is racy.
3788 if (!pmap_page_is_write_mapped(m))
3790 return (pmap_page_test_mappings(m, FALSE, TRUE));
3794 mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3800 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
3803 l3e = pmap_pml3e(pmap, addr);
3804 if (l3e != NULL && (be64toh(*l3e) & (RPTE_LEAF | PG_V)) == PG_V) {
3805 pte = pmap_l3e_to_pte(l3e, addr);
3806 rv = (be64toh(*pte) & PG_V) == 0;
3813 mmu_radix_is_referenced(vm_page_t m)
3815 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3816 ("pmap_is_referenced: page %p is not managed", m));
3817 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3818 return (pmap_page_test_mappings(m, TRUE, FALSE));
3822 * pmap_ts_referenced:
3824 * Return a count of reference bits for a page, clearing those bits.
3825 * It is not necessary for every reference bit to be cleared, but it
3826 * is necessary that 0 only be returned when there are truly no
3827 * reference bits set.
3829 * As an optimization, update the page's dirty field if a modified bit is
3830 * found while counting reference bits. This opportunistic update can be
3831 * performed at low cost and can eliminate the need for some future calls
3832 * to pmap_is_modified(). However, since this function stops after
3833 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3834 * dirty pages. Those dirty pages will only be detected by a future call
3835 * to pmap_is_modified().
3837 * A DI block is not needed within this function, because
3838 * invalidations are performed before the PV list lock is
3842 mmu_radix_ts_referenced(vm_page_t m)
3844 struct md_page *pvh;
3847 struct rwlock *lock;
3848 pml3_entry_t oldl3e, *l3e;
3851 int cleared, md_gen, not_cleared, pvh_gen;
3852 struct spglist free;
3854 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3855 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3856 ("pmap_ts_referenced: page %p is not managed", m));
3859 pa = VM_PAGE_TO_PHYS(m);
3860 lock = PHYS_TO_PV_LIST_LOCK(pa);
3861 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
3865 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
3866 goto small_mappings;
3872 if (!PMAP_TRYLOCK(pmap)) {
3873 pvh_gen = pvh->pv_gen;
3877 if (pvh_gen != pvh->pv_gen) {
3882 l3e = pmap_pml3e(pmap, pv->pv_va);
3883 oldl3e = be64toh(*l3e);
3884 if ((oldl3e & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
3886 * Although "oldpde" is mapping a 2MB page, because
3887 * this function is called at a 4KB page granularity,
3888 * we only update the 4KB page under test.
3892 if ((oldl3e & PG_A) != 0) {
3894 * Since this reference bit is shared by 512 4KB
3895 * pages, it should not be cleared every time it is
3896 * tested. Apply a simple "hash" function on the
3897 * physical page number, the virtual superpage number,
3898 * and the pmap address to select one 4KB page out of
3899 * the 512 on which testing the reference bit will
3900 * result in clearing that reference bit. This
3901 * function is designed to avoid the selection of the
3902 * same 4KB page for every 2MB page mapping.
3904 * On demotion, a mapping that hasn't been referenced
3905 * is simply destroyed. To avoid the possibility of a
3906 * subsequent page fault on a demoted wired mapping,
3907 * always leave its reference bit set. Moreover,
3908 * since the superpage is wired, the current state of
3909 * its reference bit won't affect page replacement.
3911 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L3_PAGE_SIZE_SHIFT) ^
3912 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
3913 (oldl3e & PG_W) == 0) {
3914 atomic_clear_long(l3e, htobe64(PG_A));
3915 pmap_invalidate_page(pmap, pv->pv_va);
3917 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3918 ("inconsistent pv lock %p %p for page %p",
3919 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3924 /* Rotate the PV list if it has more than one entry. */
3925 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3926 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
3927 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
3930 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
3932 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
3934 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
3941 if (!PMAP_TRYLOCK(pmap)) {
3942 pvh_gen = pvh->pv_gen;
3943 md_gen = m->md.pv_gen;
3947 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3952 l3e = pmap_pml3e(pmap, pv->pv_va);
3953 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0,
3954 ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
3956 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
3957 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW))
3959 if ((be64toh(*pte) & PG_A) != 0) {
3960 atomic_clear_long(pte, htobe64(PG_A));
3961 pmap_invalidate_page(pmap, pv->pv_va);
3965 /* Rotate the PV list if it has more than one entry. */
3966 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3967 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
3968 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3971 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
3972 not_cleared < PMAP_TS_REFERENCED_MAX);
3975 vm_page_free_pages_toq(&free, true);
3976 return (cleared + not_cleared);
3980 mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
3981 vm_paddr_t end, int prot __unused)
3984 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
3986 return (PHYS_TO_DMAP(start));
3990 mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
3991 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3994 vm_paddr_t pa, ptepa;
3998 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
3999 object, pindex, size);
4000 VM_OBJECT_ASSERT_WLOCKED(object);
4001 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4002 ("pmap_object_init_pt: non-device object"));
4003 /* NB: size can be logically ored with addr here */
4004 if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) {
4005 if (!mmu_radix_ps_enabled(pmap))
4007 if (!vm_object_populate(object, pindex, pindex + atop(size)))
4009 p = vm_page_lookup(object, pindex);
4010 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4011 ("pmap_object_init_pt: invalid page %p", p));
4012 ma = p->md.mdpg_cache_attrs;
4015 * Abort the mapping if the first page is not physically
4016 * aligned to a 2MB page boundary.
4018 ptepa = VM_PAGE_TO_PHYS(p);
4019 if (ptepa & L3_PAGE_MASK)
4023 * Skip the first page. Abort the mapping if the rest of
4024 * the pages are not physically contiguous or have differing
4025 * memory attributes.
4027 p = TAILQ_NEXT(p, listq);
4028 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
4030 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4031 ("pmap_object_init_pt: invalid page %p", p));
4032 if (pa != VM_PAGE_TO_PHYS(p) ||
4033 ma != p->md.mdpg_cache_attrs)
4035 p = TAILQ_NEXT(p, listq);
4039 for (pa = ptepa | pmap_cache_bits(ma);
4040 pa < ptepa + size; pa += L3_PAGE_SIZE) {
4041 pdpg = pmap_allocl3e(pmap, addr, NULL);
4044 * The creation of mappings below is only an
4045 * optimization. If a page directory page
4046 * cannot be allocated without blocking,
4047 * continue on to the next mapping rather than
4050 addr += L3_PAGE_SIZE;
4053 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4054 l3e = &l3e[pmap_pml3e_index(addr)];
4055 if ((be64toh(*l3e) & PG_V) == 0) {
4056 pa |= PG_M | PG_A | PG_RW;
4058 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
4059 atomic_add_long(&pmap_l3e_mappings, 1);
4061 /* Continue on if the PDE is already valid. */
4063 KASSERT(pdpg->ref_count > 0,
4064 ("pmap_object_init_pt: missing reference "
4065 "to page directory page, va: 0x%lx", addr));
4067 addr += L3_PAGE_SIZE;
4075 mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
4077 struct md_page *pvh;
4078 struct rwlock *lock;
4083 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4084 ("pmap_page_exists_quick: page %p is not managed", m));
4085 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
4087 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4089 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4090 if (PV_PMAP(pv) == pmap) {
4098 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4099 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4100 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4101 if (PV_PMAP(pv) == pmap) {
4115 mmu_radix_page_init(vm_page_t m)
4118 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4119 TAILQ_INIT(&m->md.pv_list);
4120 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
4124 mmu_radix_page_wired_mappings(vm_page_t m)
4126 struct rwlock *lock;
4127 struct md_page *pvh;
4131 int count, md_gen, pvh_gen;
4133 if ((m->oflags & VPO_UNMANAGED) != 0)
4135 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4136 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4140 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4142 if (!PMAP_TRYLOCK(pmap)) {
4143 md_gen = m->md.pv_gen;
4147 if (md_gen != m->md.pv_gen) {
4152 pte = pmap_pte(pmap, pv->pv_va);
4153 if ((be64toh(*pte) & PG_W) != 0)
4157 if ((m->flags & PG_FICTITIOUS) == 0) {
4158 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4159 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4161 if (!PMAP_TRYLOCK(pmap)) {
4162 md_gen = m->md.pv_gen;
4163 pvh_gen = pvh->pv_gen;
4167 if (md_gen != m->md.pv_gen ||
4168 pvh_gen != pvh->pv_gen) {
4173 pte = pmap_pml3e(pmap, pv->pv_va);
4174 if ((be64toh(*pte) & PG_W) != 0)
4184 mmu_radix_update_proctab(int pid, pml1_entry_t l1pa)
4186 isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT);
4190 mmu_radix_pinit(pmap_t pmap)
4195 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4198 * allocate the page directory page
4200 pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK);
4202 for (int j = 0; j < RADIX_PGD_SIZE_SHIFT; j++)
4203 pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE);
4204 vm_radix_init(&pmap->pm_radix);
4205 TAILQ_INIT(&pmap->pm_pvchunk);
4206 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4207 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
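/*
 * Allocate a hardware PID for the new address space and publish its
 * root table in the process table; the ptesync/isync pair below
 * orders the process table update before any translation that uses it.
 */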
4208 vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid);
4211 l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1);
4212 mmu_radix_update_proctab(pid, l1pa);
4213 __asm __volatile("ptesync;isync" : : : "memory");
4219 * This routine is called if the desired page table page does not exist.
4221 * If page table page allocation fails, this routine may sleep before
4222 * returning NULL. It sleeps only if a lock pointer was given.
4224 * Note: If a page allocation fails at page table level two or three,
4225 * one or two pages may be held during the wait, only to be released
4226 * afterwards. This conservative approach is easily argued to avoid
4230 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
4232 vm_page_t m, pdppg, pdpg;
4234 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4237 * Allocate a page table page.
4239 if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
4240 if (lockp != NULL) {
4241 RELEASE_PV_LIST_LOCK(lockp);
4247 * Indicate the need to retry. While waiting, the page table
4248 * page may have been allocated.
4252 m->pindex = ptepindex;
4255 * Map the pagetable page into the process address space, if
4256 * it isn't already there.
4259 if (ptepindex >= (NUPDE + NUPDPE)) {
4261 vm_pindex_t pml1index;
4263 /* Wire up a new PDPE page */
4264 pml1index = ptepindex - (NUPDE + NUPDPE);
4265 l1e = &pmap->pm_pml1[pml1index];
4266 KASSERT((be64toh(*l1e) & PG_V) == 0,
4267 ("%s: L1 entry %#lx is valid", __func__, *l1e));
4268 pde_store(l1e, VM_PAGE_TO_PHYS(m));
4269 } else if (ptepindex >= NUPDE) {
4270 vm_pindex_t pml1index;
4271 vm_pindex_t pdpindex;
4275 /* Wire up a new l2e page */
4276 pdpindex = ptepindex - NUPDE;
4277 pml1index = pdpindex >> RPTE_SHIFT;
4279 l1e = &pmap->pm_pml1[pml1index];
4280 if ((be64toh(*l1e) & PG_V) == 0) {
4281 /* Have to allocate a new pdp, recurse */
4282 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml1index,
4284 vm_page_unwire_noq(m);
4285 vm_page_free_zero(m);
4289 /* Add reference to l2e page */
4290 pdppg = PHYS_TO_VM_PAGE(be64toh(*l1e) & PG_FRAME);
4293 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4295 /* Now find the pdp page */
4296 l2e = &l2e[pdpindex & RPTE_MASK];
4297 KASSERT((be64toh(*l2e) & PG_V) == 0,
4298 ("%s: L2 entry %#lx is valid", __func__, *l2e));
4299 pde_store(l2e, VM_PAGE_TO_PHYS(m));
4301 vm_pindex_t pml1index;
4302 vm_pindex_t pdpindex;
4307 /* Wire up a new PTE page */
4308 pdpindex = ptepindex >> RPTE_SHIFT;
4309 pml1index = pdpindex >> RPTE_SHIFT;
4311 /* First, find the pdp and check that it's valid. */
4312 l1e = &pmap->pm_pml1[pml1index];
4313 if ((be64toh(*l1e) & PG_V) == 0) {
4314 /* Have to allocate a new pd, recurse */
4315 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4317 vm_page_unwire_noq(m);
4318 vm_page_free_zero(m);
4321 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4322 l2e = &l2e[pdpindex & RPTE_MASK];
4324 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4325 l2e = &l2e[pdpindex & RPTE_MASK];
4326 if ((be64toh(*l2e) & PG_V) == 0) {
4327 /* Have to allocate a new pd, recurse */
4328 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4330 vm_page_unwire_noq(m);
4331 vm_page_free_zero(m);
4335 /* Add reference to the pd page */
4336 pdpg = PHYS_TO_VM_PAGE(be64toh(*l2e) & PG_FRAME);
4340 l3e = (pml3_entry_t *)PHYS_TO_DMAP(be64toh(*l2e) & PG_FRAME);
4342 /* Now we know where the page directory page is */
4343 l3e = &l3e[ptepindex & RPTE_MASK];
4344 KASSERT((be64toh(*l3e) & PG_V) == 0,
4345 ("%s: L3 entry %#lx is valid", __func__, *l3e));
4346 pde_store(l3e, VM_PAGE_TO_PHYS(m));
4349 pmap_resident_count_inc(pmap, 1);
4353 pmap_allocl3e(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4355 vm_pindex_t pdpindex, ptepindex;
4360 pdpe = pmap_pml2e(pmap, va);
4361 if (pdpe != NULL && (be64toh(*pdpe) & PG_V) != 0) {
4362 /* Add a reference to the pd page. */
4363 pdpg = PHYS_TO_VM_PAGE(be64toh(*pdpe) & PG_FRAME);
4366 /* Allocate a pd page. */
4367 ptepindex = pmap_l3e_pindex(va);
4368 pdpindex = ptepindex >> RPTE_SHIFT;
4369 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
4370 if (pdpg == NULL && lockp != NULL)
4377 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4379 vm_pindex_t ptepindex;
4384 * Calculate pagetable page index
4386 ptepindex = pmap_l3e_pindex(va);
4389 * Get the page directory entry
4391 pd = pmap_pml3e(pmap, va);
4394 * This supports switching from a 2MB page to a
4397 if (pd != NULL && (be64toh(*pd) & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V)) {
4398 if (!pmap_demote_l3e_locked(pmap, pd, va, lockp)) {
4400 * Invalidation of the 2MB page mapping may have caused
4401 * the deallocation of the underlying PD page.
4408 * If the page table page is mapped, we just increment the
4409 * hold count, and activate it.
4411 if (pd != NULL && (be64toh(*pd) & PG_V) != 0) {
4412 m = PHYS_TO_VM_PAGE(be64toh(*pd) & PG_FRAME);
4416 * Here if the pte page isn't mapped, or if it has been
4419 m = _pmap_allocpte(pmap, ptepindex, lockp);
4420 if (m == NULL && lockp != NULL)
4427 mmu_radix_pinit0(pmap_t pmap)
4430 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4431 PMAP_LOCK_INIT(pmap);
4432 pmap->pm_pml1 = kernel_pmap->pm_pml1;
4433 pmap->pm_pid = kernel_pmap->pm_pid;
4435 vm_radix_init(&pmap->pm_radix);
4436 TAILQ_INIT(&pmap->pm_pvchunk);
4437 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4438 kernel_pmap->pm_flags =
4439 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4442 * pmap_protect_l3e: do the things to protect a 2mpage in a process
4445 pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot)
4447 pt_entry_t newpde, oldpde;
4448 vm_offset_t eva, va;
4450 boolean_t anychanged;
4452 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4453 KASSERT((sva & L3_PAGE_MASK) == 0,
4454 ("pmap_protect_l3e: sva is not 2mpage aligned"));
4457 oldpde = newpde = be64toh(*l3e);
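/*
 * Before write access is revoked, transfer the superpage's hardware
 * modified state to each constituent vm_page.
 */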
4458 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
4459 (PG_MANAGED | PG_M | PG_RW)) {
4460 eva = sva + L3_PAGE_SIZE;
4461 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4462 va < eva; va += PAGE_SIZE, m++)
4465 if ((prot & VM_PROT_WRITE) == 0) {
4466 newpde &= ~(PG_RW | PG_M);
4467 newpde |= RPTE_EAA_R;
4469 if (prot & VM_PROT_EXECUTE)
4471 if (newpde != oldpde) {
4473 * As an optimization to future operations on this PDE, clear
4474 * PG_PROMOTED. The impending invalidation will remove any
4475 * lingering 4KB page mappings from the TLB.
4477 if (!atomic_cmpset_long(l3e, htobe64(oldpde), htobe64(newpde & ~PG_PROMOTED)))
4481 return (anychanged);
4485 mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
4488 vm_offset_t va_next;
4491 pml3_entry_t ptpaddr, *l3e;
4493 boolean_t anychanged;
4495 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva,
4498 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4499 if (prot == VM_PROT_NONE) {
4500 mmu_radix_remove(pmap, sva, eva);
4504 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
4505 (VM_PROT_WRITE|VM_PROT_EXECUTE))
4509 if (VERBOSE_PROTECT || pmap_logging)
4510 printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n",
4511 pmap, sva, eva, prot, pmap->pm_pid);
4516 for (; sva < eva; sva = va_next) {
4517 l1e = pmap_pml1e(pmap, sva);
4518 if ((be64toh(*l1e) & PG_V) == 0) {
4519 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
4525 l2e = pmap_l1e_to_l2e(l1e, sva);
4526 if ((be64toh(*l2e) & PG_V) == 0) {
4527 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
4533 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
4537 l3e = pmap_l2e_to_l3e(l2e, sva);
4538 ptpaddr = be64toh(*l3e);
4541 * Weed out invalid mappings.
4547 * Check for large page.
4549 if ((ptpaddr & RPTE_LEAF) != 0) {
4551 * Are we protecting the entire large page? If not,
4552 * demote the mapping and fall through.
4554 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
4555 if (pmap_protect_l3e(pmap, l3e, sva, prot))
4558 } else if (!pmap_demote_l3e(pmap, l3e, sva)) {
4560 * The large page mapping was destroyed.
4569 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
4571 pt_entry_t obits, pbits;
4575 MPASS(pte == pmap_pte(pmap, sva));
4576 obits = pbits = be64toh(*pte);
4577 if ((pbits & PG_V) == 0)
4580 if ((prot & VM_PROT_WRITE) == 0) {
4581 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
4582 (PG_MANAGED | PG_M | PG_RW)) {
4583 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
4586 pbits &= ~(PG_RW | PG_M);
4587 pbits |= RPTE_EAA_R;
4589 if (prot & VM_PROT_EXECUTE)
4592 if (pbits != obits) {
4593 if (!atomic_cmpset_long(pte, htobe64(obits), htobe64(pbits)))
4595 if (obits & (PG_A|PG_M)) {
4598 if (VERBOSE_PROTECT || pmap_logging)
4599 printf("%#lx %#lx -> %#lx\n",
4607 pmap_invalidate_all(pmap);
4612 mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count)
4615 CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count);
4616 pt_entry_t oldpte, pa, *pte;
4618 uint64_t cache_bits, attr_bits;
4622 attr_bits = RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
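/*
 * Kernel qenter mappings are readable, writable and privileged-only, with
 * the referenced (PG_A) and changed (PG_M) bits preset up front.
 */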
4625 while (va < sva + PAGE_SIZE * count) {
4626 if (__predict_false((va & L3_PAGE_MASK) == 0))
4628 MPASS(pte == pmap_pte(kernel_pmap, va));
4631 * XXX there has to be a more efficient way than traversing
4632 * the page table every time - but go for correctness for now.
4637 cache_bits = pmap_cache_bits(m->md.mdpg_cache_attrs);
4638 pa = VM_PAGE_TO_PHYS(m) | cache_bits | attr_bits;
4639 if (be64toh(*pte) != pa) {
4640 oldpte |= be64toh(*pte);
4646 if (__predict_false((oldpte & RPTE_VALID) != 0))
4647 pmap_invalidate_range(kernel_pmap, sva, sva + count *
4654 mmu_radix_qremove(vm_offset_t sva, int count)
4659 CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count);
4660 KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva));
4664 while (va < sva + PAGE_SIZE * count) {
4665 if (__predict_false((va & L3_PAGE_MASK) == 0))
4671 pmap_invalidate_range(kernel_pmap, sva, va);
4674 /***************************************************
4675 * Page table page management routines.....
4676 ***************************************************/
4678 * Schedule the specified unused page table page to be freed. Specifically,
4679 * add the page to the specified list of pages that will be released to the
4680 * physical memory manager after the TLB has been updated.
4682 static __inline void
4683 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
4684 boolean_t set_PG_ZERO)
4688 m->flags |= PG_ZERO;
4690 m->flags &= ~PG_ZERO;
4691 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
4695 * Inserts the specified page table page into the specified pmap's collection
4696 * of idle page table pages. Each of a pmap's page table pages is responsible
4697 * for mapping a distinct range of virtual addresses. The pmap's collection is
4698 * ordered by this virtual address range.
4701 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
4704 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4705 return (vm_radix_insert(&pmap->pm_radix, mpte));
4709 * Removes the page table page mapping the specified virtual address from the
4710 * specified pmap's collection of idle page table pages, and returns it.
4711 * Otherwise, returns NULL if there is no page table page corresponding to the
4712 * specified virtual address.
4714 static __inline vm_page_t
4715 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4718 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4719 return (vm_radix_remove(&pmap->pm_radix, pmap_l3e_pindex(va)));
4723 * Decrements a page table page's wire count, which is used to record the
4724 * number of valid page table entries within the page. If the wire count
4725 * drops to zero, then the page table page is unmapped. Returns TRUE if the
4726 * page table page was unmapped and FALSE otherwise.
4728 static inline boolean_t
4729 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4733 if (m->ref_count == 0) {
4734 _pmap_unwire_ptp(pmap, va, m, free);
4741 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4744 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4746 * unmap the page table page
4748 if (m->pindex >= (NUPDE + NUPDPE)) {
4751 pml1 = pmap_pml1e(pmap, va);
4753 } else if (m->pindex >= NUPDE) {
4756 l2e = pmap_pml2e(pmap, va);
4761 l3e = pmap_pml3e(pmap, va);
4764 pmap_resident_count_dec(pmap, 1);
4765 if (m->pindex < NUPDE) {
4766 /* We just released a PT, unhold the matching PD */
4769 pdpg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml2e(pmap, va)) & PG_FRAME);
4770 pmap_unwire_ptp(pmap, va, pdpg, free);
4772 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
4773 /* We just released a PD, unhold the matching PDP */
4776 pdppg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml1e(pmap, va)) & PG_FRAME);
4777 pmap_unwire_ptp(pmap, va, pdppg, free);
4781 * Put page on a list so that it is released after
4782 * *ALL* TLB shootdown is done
4784 pmap_add_delayed_free_list(m, free, TRUE);
4788 * After removing a page table entry, this routine is used to
4789 * conditionally free the page and manage the hold/wire counts.
4792 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde,
4793 struct spglist *free)
4797 if (va >= VM_MAXUSER_ADDRESS)
4799 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4800 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4801 return (pmap_unwire_ptp(pmap, va, mpte, free));
4805 mmu_radix_release(pmap_t pmap)
4808 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4809 KASSERT(pmap->pm_stats.resident_count == 0,
4810 ("pmap_release: pmap resident count %ld != 0",
4811 pmap->pm_stats.resident_count));
4812 KASSERT(vm_radix_is_empty(&pmap->pm_radix),
4813 ("pmap_release: pmap has reserved page table page(s)"));
4815 pmap_invalidate_all(pmap);
4816 isa3_proctab[pmap->pm_pid].proctab0 = 0;
4817 uma_zfree(zone_radix_pgd, pmap->pm_pml1);
4818 vmem_free(asid_arena, pmap->pm_pid, 1);
4822 * Create the PV entry for a 2MB page mapping. Always returns true unless the
4823 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
4824 * false if the PV entry cannot be allocated without resorting to reclamation.
4827 pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t pde, u_int flags,
4828 struct rwlock **lockp)
4830 struct md_page *pvh;
4834 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4835 /* Pass NULL instead of the lock pointer to disable reclamation. */
4836 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
4837 NULL : lockp)) == NULL)
4840 pa = pde & PG_PS_FRAME;
4841 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4842 pvh = pa_to_pvh(pa);
4843 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
4849 * Fills a page table page with mappings to consecutive physical pages.
4852 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
4856 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
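		/*
		 * Each successive 4KB PTE maps the next page of the physical
		 * frame described by the original superpage entry.
		 */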
4857 *pte = htobe64(newpte);
4858 newpte += PAGE_SIZE;
4863 pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va)
4865 struct rwlock *lock;
4869 rv = pmap_demote_l3e_locked(pmap, pde, va, &lock);
4876 pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
4877 struct rwlock **lockp)
4879 pml3_entry_t oldpde;
4880 pt_entry_t *firstpte;
4883 struct spglist free;
4886 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4887 oldpde = be64toh(*l3e);
4888 KASSERT((oldpde & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
4889 ("pmap_demote_l3e: oldpde is missing RPTE_LEAF and/or PG_V %lx",
4891 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
4893 KASSERT((oldpde & PG_W) == 0,
4894 ("pmap_demote_l3e: page table page for a wired mapping"
4898 * Invalidate the 2MB page mapping and return "failure" if the
4899 * mapping was never accessed or the allocation of the new
4900 * page table page fails. If the 2MB page mapping belongs to
4901 * the direct map region of the kernel's address space, then
4902 * the page allocation request specifies the highest possible
4903 * priority (VM_ALLOC_INTERRUPT). Otherwise, the priority is
4904 * normal. Page table pages are preallocated for every other
4905 * part of the kernel address space, so the direct map region
4906 * is the only part of the kernel address space that must be handled here.
4909 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc_noobj(
4910 (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS ?
4911 VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED)) == NULL) {
4913 sva = trunc_2mpage(va);
4914 pmap_remove_l3e(pmap, l3e, sva, &free, lockp);
4915 pmap_invalidate_l3e_page(pmap, sva, oldpde);
4916 vm_page_free_pages_toq(&free, true);
4917 CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx"
4918 " in pmap %p", va, pmap);
4921 mpte->pindex = pmap_l3e_pindex(va);
4922 if (va < VM_MAXUSER_ADDRESS)
4923 pmap_resident_count_inc(pmap, 1);
4925 mptepa = VM_PAGE_TO_PHYS(mpte);
4926 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
4927 KASSERT((oldpde & PG_A) != 0,
4928 ("pmap_demote_l3e: oldpde is missing PG_A"));
4929 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
4930 ("pmap_demote_l3e: oldpde is missing PG_M"));
4933 * If the page table page is new, initialize it.
4935 if (mpte->ref_count == 1) {
4936 mpte->ref_count = NPTEPG;
4937 pmap_fill_ptp(firstpte, oldpde);
4940 KASSERT((be64toh(*firstpte) & PG_FRAME) == (oldpde & PG_FRAME),
4941 ("pmap_demote_l3e: firstpte and newpte map different physical"
4945 * If the mapping has changed attributes, update the page table
4948 if ((be64toh(*firstpte) & PG_PTE_PROMOTE) != (oldpde & PG_PTE_PROMOTE))
4949 pmap_fill_ptp(firstpte, oldpde);
4952 * The spare PV entries must be reserved prior to demoting the
4953 * mapping, that is, prior to changing the PDE. Otherwise, the state
4954 * of the PDE and the PV lists will be inconsistent, which can result
4955 * in reclaim_pv_chunk() attempting to remove a PV entry from the
4956 * wrong PV list and pmap_pv_demote_l3e() failing to find the expected
4957 * PV entry for the 2MB page mapping that is being demoted.
4959 if ((oldpde & PG_MANAGED) != 0)
4960 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
4963 * Demote the mapping. This pmap is locked. The old PDE has
4964 * PG_A set. If the old PDE has PG_RW set, it also has PG_M
4965 * set. Thus, there is no danger of a race with another
4966 * processor changing the setting of PG_A and/or PG_M between
4967 * the read above and the store below.
4969 pde_store(l3e, mptepa);
4970 pmap_invalidate_l3e_page(pmap, trunc_2mpage(va), oldpde);
4972 * Demote the PV entry.
4974 if ((oldpde & PG_MANAGED) != 0)
4975 pmap_pv_demote_l3e(pmap, va, oldpde & PG_PS_FRAME, lockp);
4977 atomic_add_long(&pmap_l3e_demotions, 1);
4978 CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx"
4979 " in pmap %p", va, pmap);
4984 * pmap_remove_kernel_l3e: Remove a kernel superpage mapping.
4987 pmap_remove_kernel_l3e(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va)
4992 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
4993 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4994 mpte = pmap_remove_pt_page(pmap, va);
4996 panic("pmap_remove_kernel_pde: Missing pt page.");
4998 mptepa = VM_PAGE_TO_PHYS(mpte);
5001 * Initialize the page table page.
5003 pagezero(PHYS_TO_DMAP(mptepa));
5006 * Demote the mapping.
5008 pde_store(l3e, mptepa);
5013 * pmap_remove_l3e: unmap a 2MB superpage mapping from a pmap
5016 pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
5017 struct spglist *free, struct rwlock **lockp)
5019 struct md_page *pvh;
5020 pml3_entry_t oldpde;
5021 vm_offset_t eva, va;
5024 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5025 KASSERT((sva & L3_PAGE_MASK) == 0,
5026 ("pmap_remove_l3e: sva is not 2mpage aligned"));
5027 oldpde = be64toh(pte_load_clear(pdq));
5029 pmap->pm_stats.wired_count -= (L3_PAGE_SIZE / PAGE_SIZE);
5030 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5031 if (oldpde & PG_MANAGED) {
5032 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
5033 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
5034 pmap_pvh_free(pvh, pmap, sva);
5035 eva = sva + L3_PAGE_SIZE;
5036 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
5037 va < eva; va += PAGE_SIZE, m++) {
5038 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
5041 vm_page_aflag_set(m, PGA_REFERENCED);
5042 if (TAILQ_EMPTY(&m->md.pv_list) &&
5043 TAILQ_EMPTY(&pvh->pv_list))
5044 vm_page_aflag_clear(m, PGA_WRITEABLE);
5047 if (pmap == kernel_pmap) {
5048 pmap_remove_kernel_l3e(pmap, pdq, sva);
5050 mpte = pmap_remove_pt_page(pmap, sva);
5052 pmap_resident_count_dec(pmap, 1);
5053 KASSERT(mpte->ref_count == NPTEPG,
5054 ("pmap_remove_l3e: pte page wire count error"));
5055 mpte->ref_count = 0;
5056 pmap_add_delayed_free_list(mpte, free, FALSE);
5059 return (pmap_unuse_pt(pmap, sva, be64toh(*pmap_pml2e(pmap, sva)), free));
5063 * pmap_remove_pte: unmap a single 4KB page mapping from a pmap
5066 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
5067 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
5069 struct md_page *pvh;
5073 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5074 oldpte = be64toh(pte_load_clear(ptq));
5075 if (oldpte & RPTE_WIRED)
5076 pmap->pm_stats.wired_count -= 1;
5077 pmap_resident_count_dec(pmap, 1);
5078 if (oldpte & RPTE_MANAGED) {
5079 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
5080 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5083 vm_page_aflag_set(m, PGA_REFERENCED);
5084 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5085 pmap_pvh_free(&m->md, pmap, va);
5086 if (TAILQ_EMPTY(&m->md.pv_list) &&
5087 (m->flags & PG_FICTITIOUS) == 0) {
5088 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5089 if (TAILQ_EMPTY(&pvh->pv_list))
5090 vm_page_aflag_clear(m, PGA_WRITEABLE);
5093 return (pmap_unuse_pt(pmap, va, ptepde, free));
5097 * Remove a single page from a process address space
5100 pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *l3e,
5101 struct spglist *free)
5103 struct rwlock *lock;
5105 bool invalidate_all;
5107 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5108 if ((be64toh(*l3e) & RPTE_VALID) == 0) {
5111 pte = pmap_l3e_to_pte(l3e, va);
5112 if ((be64toh(*pte) & RPTE_VALID) == 0) {
5117 invalidate_all = pmap_remove_pte(pmap, pte, va, be64toh(*l3e), free, &lock);
5120 if (!invalidate_all)
5121 pmap_invalidate_page(pmap, va);
5122 return (invalidate_all);
5126 * Removes the specified range of addresses from the page table page.
5129 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
5130 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp)
5136 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5139 for (pte = pmap_l3e_to_pte(l3e, sva); sva != eva; pte++,
5141 MPASS(pte == pmap_pte(pmap, sva));
5151 if (pmap_remove_pte(pmap, pte, sva, be64toh(*l3e), free, lockp)) {
5158 pmap_invalidate_all(pmap);
5160 pmap_invalidate_range(pmap, va, sva);
5165 mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5167 struct rwlock *lock;
5168 vm_offset_t va_next;
5171 pml3_entry_t ptpaddr, *l3e;
5172 struct spglist free;
5175 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5178 * Perform an unsynchronized read. This is, however, safe.
5180 if (pmap->pm_stats.resident_count == 0)
5186 /* XXX rounding sva up may skip a partially covered first page; trunc_page(sva) looks intended */
5187 sva = (sva + PAGE_MASK) & ~PAGE_MASK;
5188 eva = (eva + PAGE_MASK) & ~PAGE_MASK;
5193 * special handling of removing one page. a very
5194 * common operation and easy to short circuit some code.
5197 if (sva + PAGE_SIZE == eva) {
5198 l3e = pmap_pml3e(pmap, sva);
5199 if (l3e && (be64toh(*l3e) & RPTE_LEAF) == 0) {
5200 anyvalid = pmap_remove_page(pmap, sva, l3e, &free);
5206 for (; sva < eva; sva = va_next) {
5207 if (pmap->pm_stats.resident_count == 0)
5209 l1e = pmap_pml1e(pmap, sva);
5210 if (l1e == NULL || (be64toh(*l1e) & PG_V) == 0) {
5211 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5217 l2e = pmap_l1e_to_l2e(l1e, sva);
5218 if (l2e == NULL || (be64toh(*l2e) & PG_V) == 0) {
5219 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5226 * Calculate index for next page table.
5228 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5232 l3e = pmap_l2e_to_l3e(l2e, sva);
5233 ptpaddr = be64toh(*l3e);
5236 * Weed out invalid mappings.
5242 * Check for large page.
5244 if ((ptpaddr & RPTE_LEAF) != 0) {
5246 * Are we removing the entire large page? If not,
5247 * demote the mapping and fall through.
5249 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5250 pmap_remove_l3e(pmap, l3e, sva, &free, &lock);
5253 } else if (!pmap_demote_l3e_locked(pmap, l3e, sva,
5255 /* The large page mapping was destroyed. */
5258 ptpaddr = be64toh(*l3e);
5262 * Limit our scan to either the end of the va represented
5263 * by the current page table page, or to the end of the
5264 * range being removed.
5269 if (pmap_remove_ptes(pmap, sva, va_next, l3e, &free, &lock))
5276 pmap_invalidate_all(pmap);
5278 vm_page_free_pages_toq(&free, true);
5282 mmu_radix_remove_all(vm_page_t m)
5284 struct md_page *pvh;
5287 struct rwlock *lock;
5288 pt_entry_t *pte, tpte;
5291 struct spglist free;
5292 int pvh_gen, md_gen;
5294 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5295 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5296 ("pmap_remove_all: page %p is not managed", m));
5298 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5299 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5300 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5303 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
5305 if (!PMAP_TRYLOCK(pmap)) {
5306 pvh_gen = pvh->pv_gen;
5310 if (pvh_gen != pvh->pv_gen) {
5317 l3e = pmap_pml3e(pmap, va);
5318 (void)pmap_demote_l3e_locked(pmap, l3e, va, &lock);
5321 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
5323 if (!PMAP_TRYLOCK(pmap)) {
5324 pvh_gen = pvh->pv_gen;
5325 md_gen = m->md.pv_gen;
5329 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5335 pmap_resident_count_dec(pmap, 1);
5336 l3e = pmap_pml3e(pmap, pv->pv_va);
5337 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_remove_all: found"
5338 " a 2mpage in page %p's pv list", m));
5339 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5340 tpte = be64toh(pte_load_clear(pte));
5342 pmap->pm_stats.wired_count--;
5344 vm_page_aflag_set(m, PGA_REFERENCED);
5347 * Update the vm_page_t clean and reference bits.
5349 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5351 pmap_unuse_pt(pmap, pv->pv_va, be64toh(*l3e), &free);
5352 pmap_invalidate_page(pmap, pv->pv_va);
5353 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5355 free_pv_entry(pmap, pv);
5358 vm_page_aflag_clear(m, PGA_WRITEABLE);
5360 vm_page_free_pages_toq(&free, true);
5364 * Destroy all managed, non-wired mappings in the given user-space
5365 * pmap. This pmap cannot be active on any processor besides the current one.
5368 * This function cannot be applied to the kernel pmap. Moreover, it
5369 * is not intended for general use. It is only to be used during
5370 * process termination. Consequently, it can be implemented in ways
5371 * that make it faster than pmap_remove(). First, it can more quickly
5372 * destroy mappings by iterating over the pmap's collection of PV
5373 * entries, rather than searching the page table. Second, it doesn't
5374 * have to test and clear the page table entries atomically, because
5375 * no processor is currently accessing the user address space. In
5376 * particular, a page table entry's dirty bit won't change state once
5377 * this function starts.
5379 * Although this function destroys all of the pmap's managed,
5380 * non-wired mappings, it can delay and batch the invalidation of TLB
5381 * entries without calling pmap_delayed_invl_started() and
5382 * pmap_delayed_invl_finished(). Because the pmap is not active on
5383 * any other processor, none of these TLB entries will ever be used
5384 * before their eventual invalidation. Consequently, there is no need
5385 * for either pmap_remove_all() or pmap_remove_write() to wait for
5386 * that eventual TLB invalidation.
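 *
 * Concretely, the loop below only clears PTEs and batches the freed page
 * table pages on the local "free" list; a single pmap_invalidate_all() at
 * the end stands in for per-mapping TLB shootdowns.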
5390 mmu_radix_remove_pages(pmap_t pmap)
5393 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
5394 pml3_entry_t ptel3e;
5395 pt_entry_t *pte, tpte;
5396 struct spglist free;
5397 vm_page_t m, mpte, mt;
5399 struct md_page *pvh;
5400 struct pv_chunk *pc, *npc;
5401 struct rwlock *lock;
5403 uint64_t inuse, bitmask;
5404 int allfree, field, idx;
5408 boolean_t superpage;
5412 * Assert that the given pmap is only active on the current
5413 * CPU. Unfortunately, we cannot block another CPU from
5414 * activating the pmap while this function is executing.
5416 KASSERT(pmap->pm_pid == mfspr(SPR_PID),
5417 ("non-current asid %lu - expected %lu", pmap->pm_pid,
5424 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
5429 for (field = 0; field < _NPCM; field++) {
5430 inuse = ~pc->pc_map[field] & pc_freemask[field];
5431 while (inuse != 0) {
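				/*
				 * cnttzd() finds the lowest set bit, i.e. the
				 * next allocated PV entry in this chunk word.
				 */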
5432 bit = cnttzd(inuse);
5433 bitmask = 1UL << bit;
5434 idx = field * 64 + bit;
5435 pv = &pc->pc_pventry[idx];
5438 pte = pmap_pml2e(pmap, pv->pv_va);
5439 ptel3e = be64toh(*pte);
5440 pte = pmap_l2e_to_l3e(pte, pv->pv_va);
5441 tpte = be64toh(*pte);
5442 if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) {
5445 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
5447 pte = &pte[pmap_pte_index(pv->pv_va)];
5448 tpte = be64toh(*pte);
5451 * Keep track whether 'tpte' is a
5452 * superpage explicitly instead of
5453 * relying on RPTE_LEAF being set.
5455 * This is because RPTE_LEAF is numerically
5456 * identical to PG_PTE_PAT and thus a
5457 * regular page could be mistaken for a superpage.
5463 if ((tpte & PG_V) == 0) {
5464 panic("bad pte va %lx pte %lx",
5469 * We cannot remove wired pages from a process' mapping at this time
5477 pa = tpte & PG_PS_FRAME;
5479 pa = tpte & PG_FRAME;
5481 m = PHYS_TO_VM_PAGE(pa);
5482 KASSERT(m->phys_addr == pa,
5483 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
5484 m, (uintmax_t)m->phys_addr,
5487 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
5488 m < &vm_page_array[vm_page_array_size],
5489 ("pmap_remove_pages: bad tpte %#jx",
5495 * Update the vm_page_t clean/reference bits.
5497 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5499 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5505 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5508 pc->pc_map[field] |= bitmask;
5510 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5511 pvh = pa_to_pvh(tpte & PG_PS_FRAME);
5512 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
5514 if (TAILQ_EMPTY(&pvh->pv_list)) {
5515 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5516 if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
5517 TAILQ_EMPTY(&mt->md.pv_list))
5518 vm_page_aflag_clear(mt, PGA_WRITEABLE);
5520 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
5522 pmap_resident_count_dec(pmap, 1);
5523 KASSERT(mpte->ref_count == NPTEPG,
5524 ("pmap_remove_pages: pte page wire count error"));
5525 mpte->ref_count = 0;
5526 pmap_add_delayed_free_list(mpte, &free, FALSE);
5529 pmap_resident_count_dec(pmap, 1);
5531 printf("freeing pv (%p, %p)\n",
5534 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5536 if ((m->a.flags & PGA_WRITEABLE) != 0 &&
5537 TAILQ_EMPTY(&m->md.pv_list) &&
5538 (m->flags & PG_FICTITIOUS) == 0) {
5539 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5540 if (TAILQ_EMPTY(&pvh->pv_list))
5541 vm_page_aflag_clear(m, PGA_WRITEABLE);
5544 pmap_unuse_pt(pmap, pv->pv_va, ptel3e, &free);
5550 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5551 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5552 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5554 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5560 pmap_invalidate_all(pmap);
5562 vm_page_free_pages_toq(&free, true);
5566 mmu_radix_remove_write(vm_page_t m)
5568 struct md_page *pvh;
5570 struct rwlock *lock;
5571 pv_entry_t next_pv, pv;
5573 pt_entry_t oldpte, *pte;
5574 int pvh_gen, md_gen;
5576 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5577 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5578 ("pmap_remove_write: page %p is not managed", m));
5579 vm_page_assert_busied(m);
5581 if (!pmap_page_is_write_mapped(m))
5583 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5584 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5585 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5588 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
5590 if (!PMAP_TRYLOCK(pmap)) {
5591 pvh_gen = pvh->pv_gen;
5595 if (pvh_gen != pvh->pv_gen) {
5601 l3e = pmap_pml3e(pmap, pv->pv_va);
5602 if ((be64toh(*l3e) & PG_RW) != 0)
5603 (void)pmap_demote_l3e_locked(pmap, l3e, pv->pv_va, &lock);
5604 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
5605 ("inconsistent pv lock %p %p for page %p",
5606 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
5609 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
5611 if (!PMAP_TRYLOCK(pmap)) {
5612 pvh_gen = pvh->pv_gen;
5613 md_gen = m->md.pv_gen;
5617 if (pvh_gen != pvh->pv_gen ||
5618 md_gen != m->md.pv_gen) {
5624 l3e = pmap_pml3e(pmap, pv->pv_va);
5625 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0,
5626 ("pmap_remove_write: found a 2mpage in page %p's pv list",
5628 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5630 oldpte = be64toh(*pte);
5631 if (oldpte & PG_RW) {
5632 if (!atomic_cmpset_long(pte, htobe64(oldpte),
5633 htobe64((oldpte | RPTE_EAA_R) & ~(PG_RW | PG_M))))
5635 if ((oldpte & PG_M) != 0)
5637 pmap_invalidate_page(pmap, pv->pv_va);
5642 vm_page_aflag_clear(m, PGA_WRITEABLE);
5646 * Clear the wired attribute from the mappings for the specified range of
5647 * addresses in the given pmap. Every valid mapping within that range
5648 * must have the wired attribute set. In contrast, invalid mappings
5649 * cannot have the wired attribute set, so they are ignored.
5651 * The wired attribute of the page table entry is not a hardware
5652 * feature, so there is no need to invalidate any TLB entries.
5653 * Since pmap_demote_l3e() for the wired entry must never fail,
5654 * pmap_delayed_invl_started()/finished() calls around the
5655 * function are not needed.
5658 mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5660 vm_offset_t va_next;
5666 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5668 for (; sva < eva; sva = va_next) {
5669 l1e = pmap_pml1e(pmap, sva);
5670 if ((be64toh(*l1e) & PG_V) == 0) {
5671 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5676 l2e = pmap_l1e_to_l2e(l1e, sva);
5677 if ((be64toh(*l2e) & PG_V) == 0) {
5678 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5683 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5686 l3e = pmap_l2e_to_l3e(l2e, sva);
5687 if ((be64toh(*l3e) & PG_V) == 0)
5689 if ((be64toh(*l3e) & RPTE_LEAF) != 0) {
5690 if ((be64toh(*l3e) & PG_W) == 0)
5691 panic("pmap_unwire: pde %#jx is missing PG_W",
5692 (uintmax_t)(be64toh(*l3e)));
5695 * Are we unwiring the entire large page? If not,
5696 * demote the mapping and fall through.
5698 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5699 atomic_clear_long(l3e, htobe64(PG_W));
5700 pmap->pm_stats.wired_count -= L3_PAGE_SIZE /
5703 } else if (!pmap_demote_l3e(pmap, l3e, sva))
5704 panic("pmap_unwire: demotion failed");
5708 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
5710 MPASS(pte == pmap_pte(pmap, sva));
5711 if ((be64toh(*pte) & PG_V) == 0)
5713 if ((be64toh(*pte) & PG_W) == 0)
5714 panic("pmap_unwire: pte %#jx is missing PG_W",
5715 (uintmax_t)(be64toh(*pte)));
5718 * PG_W must be cleared atomically. Although the pmap
5719 * lock synchronizes access to PG_W, another processor
5720 * could be setting PG_M and/or PG_A concurrently.
5722 atomic_clear_long(pte, htobe64(PG_W));
5723 pmap->pm_stats.wired_count--;
5730 mmu_radix_zero_page(vm_page_t m)
5734 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5735 addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5740 mmu_radix_zero_page_area(vm_page_t m, int off, int size)
5744 CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
5745 MPASS(off + size <= PAGE_SIZE);
5746 addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5747 memset(addr + off, 0, size);
5751 mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5758 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
5761 l3ep = pmap_pml3e(pmap, addr);
5762 if (l3ep != NULL && (be64toh(*l3ep) & PG_V)) {
5763 if (be64toh(*l3ep) & RPTE_LEAF) {
5764 pte = be64toh(*l3ep);
5765 /* Compute the physical address of the 4KB page. */
5766 pa = ((be64toh(*l3ep) & PG_PS_FRAME) | (addr & L3_PAGE_MASK)) &
5768 val = MINCORE_PSIND(1);
5770 /* Native endian PTE, do not pass to functions */
5771 pte = be64toh(*pmap_l3e_to_pte(l3ep, addr));
5772 pa = pte & PG_FRAME;
5780 if ((pte & PG_V) != 0) {
5781 val |= MINCORE_INCORE;
5782 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5783 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5784 if ((pte & PG_A) != 0)
5785 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5787 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5788 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
5789 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
5797 mmu_radix_activate(struct thread *td)
5802 CTR2(KTR_PMAP, "%s(%p)", __func__, td);
5804 pmap = vmspace_pmap(td->td_proc->p_vmspace);
5805 curpid = mfspr(SPR_PID);
5806 if (pmap->pm_pid > isa3_base_pid &&
5807 curpid != pmap->pm_pid) {
5808 mmu_radix_pid_set(pmap);
5814 * Increase the starting virtual address of the given mapping if a
5815 * different alignment might result in more superpage mappings.
5818 mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset,
5819 vm_offset_t *addr, vm_size_t size)
5822 CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
5824 vm_offset_t superpage_offset;
5826 if (size < L3_PAGE_SIZE)
5828 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5829 offset += ptoa(object->pg_color);
5830 superpage_offset = offset & L3_PAGE_MASK;
5831 if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) < L3_PAGE_SIZE ||
5832 (*addr & L3_PAGE_MASK) == superpage_offset)
5834 if ((*addr & L3_PAGE_MASK) < superpage_offset)
5835 *addr = (*addr & ~L3_PAGE_MASK) + superpage_offset;
5837 *addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset;
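	/*
	 * Illustrative example (hypothetical values): with 2MB superpages, an
	 * object offset of 0x12345000 gives superpage_offset 0x145000, so a
	 * hint address of 0x70000000 is moved up to 0x70145000; the mapping
	 * then starts at the same offset within a 2MB frame as the object
	 * data, which allows later promotion to a superpage.
	 */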
5841 mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
5843 vm_offset_t va, tmpva, ppa, offset;
5845 ppa = trunc_page(pa);
5846 offset = pa & PAGE_MASK;
5847 size = roundup2(offset + size, PAGE_SIZE);
5848 if (pa < powerpc_ptob(Maxmem))
5849 panic("bad pa: %#lx less than Maxmem %#lx\n",
5850 pa, powerpc_ptob(Maxmem));
5851 va = kva_alloc(size);
5853 printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr);
5854 KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr));
5857 panic("%s: Couldn't alloc kernel virtual memory", __func__);
5859 for (tmpva = va; size > 0;) {
5860 mmu_radix_kenter_attr(tmpva, ppa, attr);
5867 return ((void *)(va + offset));
5871 mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
5874 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
5876 return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
5880 mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5883 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
5884 m->md.mdpg_cache_attrs = ma;
5887 * If "m" is a normal page, update its direct mapping. This update
5888 * can be relied upon to perform any cache operations that are
5889 * required for data coherence.
5891 if ((m->flags & PG_FICTITIOUS) == 0 &&
5892 mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
5893 PAGE_SIZE, m->md.mdpg_cache_attrs))
5894 panic("memory attribute change on the direct map failed");
5898 mmu_radix_unmapdev(vm_offset_t va, vm_size_t size)
5902 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
5903 /* If pmap_mapdev() handed out a direct map address, there is nothing to unmap */
5904 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
5907 offset = va & PAGE_MASK;
5908 size = round_page(offset + size);
5909 va = trunc_page(va);
5911 if (pmap_initialized) {
5912 mmu_radix_qremove(va, atop(size));
5918 mmu_radix_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
5924 pa = pmap_extract(pm, va);
5925 sync_sz = PAGE_SIZE - (va & PAGE_MASK);
5926 sync_sz = min(sync_sz, sz);
5928 pa += (va & PAGE_MASK);
5929 __syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
5936 static __inline void
5937 pmap_pte_attr(pt_entry_t *pte, uint64_t cache_bits, uint64_t mask)
5939 uint64_t opte, npte;
5942 * The cache mode bits are all in the low 32-bits of the
5943 * PTE, so we can just spin on updating the low 32-bits.
5946 opte = be64toh(*pte);
5947 npte = opte & ~mask;
5949 } while (npte != opte && !atomic_cmpset_long(pte, htobe64(opte), htobe64(npte)));
5953 * Tries to demote a 1GB page mapping.
5956 pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
5958 pml2_entry_t oldpdpe;
5959 pml3_entry_t *firstpde, newpde, *pde;
5963 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5964 oldpdpe = be64toh(*l2e);
5965 KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
5966 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
5967 pdpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
5969 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
5970 " in pmap %p", va, pmap);
5973 pdpg->pindex = va >> L2_PAGE_SIZE_SHIFT;
5974 pdpgpa = VM_PAGE_TO_PHYS(pdpg);
5975 firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
5976 KASSERT((oldpdpe & PG_A) != 0,
5977 ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
5978 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
5979 ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
5983 * Initialize the page directory page.
5985 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
5986 *pde = htobe64(newpde);
5987 newpde += L3_PAGE_SIZE;
5991 * Demote the mapping.
5993 pde_store(l2e, pdpgpa);
5996 * Flush PWC --- XXX revisit
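 * (The Page Walk Cache holds copies of the upper radix levels; flushing
 * everything is the conservative way to drop both the old 1GB leaf entry
 * and any cached partial walks through it.)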
5998 pmap_invalidate_all(pmap);
6000 pmap_l2e_demotions++;
6001 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
6002 " in pmap %p", va, pmap);
6007 mmu_radix_kextract(vm_offset_t va)
6012 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
6013 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
6014 pa = DMAP_TO_PHYS(va);
6016 /* Big-endian PTE on stack */
6017 l3e = *pmap_pml3e(kernel_pmap, va);
6018 if (be64toh(l3e) & RPTE_LEAF) {
6019 pa = (be64toh(l3e) & PG_PS_FRAME) | (va & L3_PAGE_MASK);
6023 * Beware of a concurrent promotion that changes the
6024 * PDE at this point! For example, vtopte() must not
6025 * be used to access the PTE because it would use the
6026 * new PDE. It is, however, safe to use the old PDE
6027 * because the page table page is preserved by the
6030 pa = be64toh(*pmap_l3e_to_pte(&l3e, va));
6031 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
6039 mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
6042 if (ma != VM_MEMATTR_DEFAULT) {
6043 return pmap_cache_bits(ma);
6047 * Assume the page is cache inhibited and access is guarded unless
6048 * it's in our available memory array.
6050 for (int i = 0; i < pregions_sz; i++) {
6051 if ((pa >= pregions[i].mr_start) &&
6052 (pa < (pregions[i].mr_start + pregions[i].mr_size)))
6053 return (RPTE_ATTR_MEM);
6055 return (RPTE_ATTR_GUARDEDIO);
6059 mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
6061 pt_entry_t *pte, pteval;
6062 uint64_t cache_bits;
6066 pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
6067 cache_bits = mmu_radix_calc_wimg(pa, ma);
6068 pte_store(pte, pteval | cache_bits);
6072 mmu_radix_kremove(vm_offset_t va)
6076 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
6083 mmu_radix_decode_kernel_ptr(vm_offset_t addr,
6084 int *is_user, vm_offset_t *decoded)
6087 CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
6089 *is_user = (addr < VM_MAXUSER_ADDRESS);
6094 mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
6097 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
6098 return (mem_valid(pa, size));
6102 mmu_radix_scan_init(void)
6105 CTR1(KTR_PMAP, "%s()", __func__);
6110 mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz,
6113 CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
6118 mmu_radix_quick_enter_page(vm_page_t m)
6122 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
6123 paddr = VM_PAGE_TO_PHYS(m);
6124 return (PHYS_TO_DMAP(paddr));
6128 mmu_radix_quick_remove_page(vm_offset_t addr __unused)
6130 /* no work to do here */
6131 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
6135 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
6137 cpu_flush_dcache((void *)sva, eva - sva);
6141 mmu_radix_change_attr(vm_offset_t va, vm_size_t size,
6146 CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode);
6147 PMAP_LOCK(kernel_pmap);
6148 error = pmap_change_attr_locked(va, size, mode, true);
6149 PMAP_UNLOCK(kernel_pmap);
6154 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
6156 vm_offset_t base, offset, tmpva;
6157 vm_paddr_t pa_start, pa_end, pa_end1;
6161 int cache_bits, error;
6164 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
6165 base = trunc_page(va);
6166 offset = va & PAGE_MASK;
6167 size = round_page(offset + size);
6170 * Only supported on kernel virtual addresses, including the direct
6171 * map but excluding the recursive map.
6173 if (base < DMAP_MIN_ADDRESS)
6176 cache_bits = pmap_cache_bits(mode);
6180 * Pages that aren't mapped aren't supported. Also break down 2MB pages
6181 * into 4KB pages if required.
6183 for (tmpva = base; tmpva < base + size; ) {
6184 l2e = pmap_pml2e(kernel_pmap, tmpva);
6185 if (l2e == NULL || *l2e == 0)
6187 if (be64toh(*l2e) & RPTE_LEAF) {
6189 * If the current 1GB page already has the required
6190 * memory type, then we need not demote this page. Just
6191 * increment tmpva to the next 1GB page frame.
6193 if ((be64toh(*l2e) & RPTE_ATTR_MASK) == cache_bits) {
6194 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6199 * If the current offset aligns with a 1GB page frame
6200 * and there is at least 1GB left within the range, then
6201 * we need not break down this page into 2MB pages.
6203 if ((tmpva & L2_PAGE_MASK) == 0 &&
6204 tmpva + L2_PAGE_MASK < base + size) {
6205 tmpva += L2_PAGE_SIZE;
6208 if (!pmap_demote_l2e(kernel_pmap, l2e, tmpva))
6211 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6212 KASSERT(l3e != NULL, ("no l3e entry for %#lx in %p\n",
6216 if (be64toh(*l3e) & RPTE_LEAF) {
6218 * If the current 2MB page already has the required
6219 * memory type, then we need not demote this page. Just
6220 * increment tmpva to the next 2MB page frame.
6222 if ((be64toh(*l3e) & RPTE_ATTR_MASK) == cache_bits) {
6223 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6228 * If the current offset aligns with a 2MB page frame
6229 * and there is at least 2MB left within the range, then
6230 * we need not break down this page into 4KB pages.
6232 if ((tmpva & L3_PAGE_MASK) == 0 &&
6233 tmpva + L3_PAGE_MASK < base + size) {
6234 tmpva += L3_PAGE_SIZE;
6237 if (!pmap_demote_l3e(kernel_pmap, l3e, tmpva))
6240 pte = pmap_l3e_to_pte(l3e, tmpva);
6248 * Ok, all the pages exist, so run through them updating their
6249 * cache mode if required.
6251 pa_start = pa_end = 0;
6252 for (tmpva = base; tmpva < base + size; ) {
6253 l2e = pmap_pml2e(kernel_pmap, tmpva);
6254 if (be64toh(*l2e) & RPTE_LEAF) {
6255 if ((be64toh(*l2e) & RPTE_ATTR_MASK) != cache_bits) {
6256 pmap_pte_attr(l2e, cache_bits,
6260 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6261 (*l2e & PG_PS_FRAME) < dmaplimit) {
6262 if (pa_start == pa_end) {
6263 /* Start physical address run. */
6264 pa_start = be64toh(*l2e) & PG_PS_FRAME;
6265 pa_end = pa_start + L2_PAGE_SIZE;
6266 } else if (pa_end == (be64toh(*l2e) & PG_PS_FRAME))
6267 pa_end += L2_PAGE_SIZE;
6269 /* Run ended, update direct map. */
6270 error = pmap_change_attr_locked(
6271 PHYS_TO_DMAP(pa_start),
6272 pa_end - pa_start, mode, flush);
6275 /* Start physical address run. */
6276 pa_start = be64toh(*l2e) & PG_PS_FRAME;
6277 pa_end = pa_start + L2_PAGE_SIZE;
6280 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6283 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6284 if (be64toh(*l3e) & RPTE_LEAF) {
6285 if ((be64toh(*l3e) & RPTE_ATTR_MASK) != cache_bits) {
6286 pmap_pte_attr(l3e, cache_bits,
6290 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6291 (be64toh(*l3e) & PG_PS_FRAME) < dmaplimit) {
6292 if (pa_start == pa_end) {
6293 /* Start physical address run. */
6294 pa_start = be64toh(*l3e) & PG_PS_FRAME;
6295 pa_end = pa_start + L3_PAGE_SIZE;
6296 } else if (pa_end == (be64toh(*l3e) & PG_PS_FRAME))
6297 pa_end += L3_PAGE_SIZE;
6299 /* Run ended, update direct map. */
6300 error = pmap_change_attr_locked(
6301 PHYS_TO_DMAP(pa_start),
6302 pa_end - pa_start, mode, flush);
6305 /* Start physical address run. */
6306 pa_start = be64toh(*l3e) & PG_PS_FRAME;
6307 pa_end = pa_start + L3_PAGE_SIZE;
6310 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6312 pte = pmap_l3e_to_pte(l3e, tmpva);
6313 if ((be64toh(*pte) & RPTE_ATTR_MASK) != cache_bits) {
6314 pmap_pte_attr(pte, cache_bits,
6318 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6319 (be64toh(*pte) & PG_FRAME) < dmaplimit) {
6320 if (pa_start == pa_end) {
6321 /* Start physical address run. */
6322 pa_start = be64toh(*pte) & PG_FRAME;
6323 pa_end = pa_start + PAGE_SIZE;
6324 } else if (pa_end == (be64toh(*pte) & PG_FRAME))
6325 pa_end += PAGE_SIZE;
6327 /* Run ended, update direct map. */
6328 error = pmap_change_attr_locked(
6329 PHYS_TO_DMAP(pa_start),
6330 pa_end - pa_start, mode, flush);
6333 /* Start physical address run. */
6334 pa_start = be64toh(*pte) & PG_FRAME;
6335 pa_end = pa_start + PAGE_SIZE;
6341 if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
6342 pa_end1 = MIN(pa_end, dmaplimit);
6343 if (pa_start != pa_end1)
6344 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
6345 pa_end1 - pa_start, mode, flush);
6349 * Flush CPU caches if required to make sure any data isn't cached that
6350 * shouldn't be, etc.
6353 pmap_invalidate_all(kernel_pmap);
6356 pmap_invalidate_cache_range(base, tmpva);
6362 * Allocate physical memory for the vm_page array and map it into KVA,
6363 * attempting to back the vm_pages with domain-local memory.
6366 mmu_radix_page_array_startup(long pages)
6377 vm_offset_t start, end;
6379 vm_page_array_size = pages;
6381 start = VM_MIN_KERNEL_ADDRESS;
6382 end = start + pages * sizeof(struct vm_page);
6384 pa = vm_phys_early_alloc(0, end - start);
6386 start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
6388 /* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */
6389 for (va = start; va < end; va += L3_PAGE_SIZE) {
6390 pfn = first_page + (va - start) / sizeof(struct vm_page);
6391 domain = vm_phys_domain(ptoa(pfn));
6392 l2e = pmap_pml2e(kernel_pmap, va);
6393 if ((be64toh(*l2e) & PG_V) == 0) {
6394 pa = vm_phys_early_alloc(domain, PAGE_SIZE);
6396 pagezero(PHYS_TO_DMAP(pa));
6397 pde_store(l2e, (pml2_entry_t)pa);
6399 pde = pmap_l2e_to_l3e(l2e, va);
6400 if ((be64toh(*pde) & PG_V) != 0)
6401 panic("Unexpected pde %p", pde);
6402 pa = vm_phys_early_alloc(domain, L3_PAGE_SIZE);
6403 for (i = 0; i < NPDEPG; i++)
6404 dump_add_page(pa + i * PAGE_SIZE);
6405 newl3 = (pml3_entry_t)(pa | RPTE_EAA_P | RPTE_EAA_R | RPTE_EAA_W);
6406 pte_store(pde, newl3);
6409 vm_page_array = (vm_page_t)start;
6413 #include <sys/kdb.h>
6414 #include <ddb/ddb.h>
6417 pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va)
6424 l1e = &l1[pmap_pml1e_index(va)];
6425 db_printf("VA %#016lx l1e %#016lx", va, be64toh(*l1e));
6426 if ((be64toh(*l1e) & PG_V) == 0) {
6430 l2e = pmap_l1e_to_l2e(l1e, va);
6431 db_printf(" l2e %#016lx", be64toh(*l2e));
6432 if ((be64toh(*l2e) & PG_V) == 0 || (be64toh(*l2e) & RPTE_LEAF) != 0) {
6436 l3e = pmap_l2e_to_l3e(l2e, va);
6437 db_printf(" l3e %#016lx", be64toh(*l3e));
6438 if ((be64toh(*l3e) & PG_V) == 0 || (be64toh(*l3e) & RPTE_LEAF) != 0) {
6442 pte = pmap_l3e_to_pte(l3e, va);
6443 db_printf(" pte %#016lx\n", be64toh(*pte));
6447 pmap_page_print_mappings(vm_page_t m)
6452 db_printf("page %p(%lx)\n", m, m->phys_addr);
6453 /* need to elide locks if running in ddb */
6454 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
6455 db_printf("pv: %p ", pv);
6456 db_printf("va: %#016lx ", pv->pv_va);
6458 db_printf("pmap %p ", pmap);
6460 db_printf("asid: %lu\n", pmap->pm_pid);
6461 pmap_pte_walk(pmap->pm_pml1, pv->pv_va);
6466 DB_SHOW_COMMAND(pte, pmap_print_pte)
6472 db_printf("show pte addr\n");
6475 va = (vm_offset_t)addr;
6477 if (va >= DMAP_MIN_ADDRESS)
6479 else if (kdb_thread != NULL)
6480 pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
6482 pmap = vmspace_pmap(curthread->td_proc->p_vmspace);
6484 pmap_pte_walk(pmap->pm_pml1, va);
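/*
 * Example usage from the ddb prompt (address purely illustrative):
 *   show pte 0xc000000001234000
 * This walks the selected pmap (kernel_pmap for DMAP addresses, otherwise
 * the kdb thread's or current thread's pmap) and prints the L1E, L2E, L3E
 * and PTE for the given virtual address.
 */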