2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2018 Matthew Macy
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "opt_platform.h"
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/systm.h>
37 #include <sys/bitstring.h>
38 #include <sys/queue.h>
39 #include <sys/cpuset.h>
40 #include <sys/endian.h>
41 #include <sys/kerneldump.h>
44 #include <sys/syslog.h>
45 #include <sys/msgbuf.h>
46 #include <sys/malloc.h>
48 #include <sys/mutex.h>
50 #include <sys/rwlock.h>
51 #include <sys/sched.h>
52 #include <sys/sysctl.h>
53 #include <sys/systm.h>
55 #include <sys/vmmeter.h>
60 #include <dev/ofw/openfirm.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_extern.h>
70 #include <vm/vm_pageout.h>
71 #include <vm/vm_phys.h>
72 #include <vm/vm_reserv.h>
73 #include <vm/vm_dumpset.h>
76 #include <machine/_inttypes.h>
77 #include <machine/cpu.h>
78 #include <machine/platform.h>
79 #include <machine/frame.h>
80 #include <machine/md_var.h>
81 #include <machine/psl.h>
82 #include <machine/bat.h>
83 #include <machine/hid.h>
84 #include <machine/pte.h>
85 #include <machine/sr.h>
86 #include <machine/trap.h>
87 #include <machine/mmuvar.h>
89 /* For pseries bit. */
90 #include <powerpc/pseries/phyp-hvcall.h>
93 #include <vm/uma_dbg.h>
96 #define PPC_BITLSHIFT(bit) (sizeof(long)*NBBY - 1 - (bit))
97 #define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
98 #define PPC_BITLSHIFT_VAL(val, bit) ((val) << PPC_BITLSHIFT(bit))
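/*
 * Worked examples (for illustration only; nothing below relies on them):
 * these helpers count bits from the most-significant end of a 64-bit long,
 * following the IBM bit-numbering convention, so on LP64:
 *
 *	PPC_BITLSHIFT(0)          == 63
 *	PPC_BIT(0)                == 0x8000000000000000UL
 *	PPC_BIT(63)               == 0x1UL
 *	PPC_BITLSHIFT_VAL(3UL, 1) == (3UL << 62)
 */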
103 static void pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va);
106 #define PG_W RPTE_WIRED
107 #define PG_V RPTE_VALID
108 #define PG_MANAGED RPTE_MANAGED
109 #define PG_PROMOTED RPTE_PROMOTED
112 #define PG_X RPTE_EAA_X
113 #define PG_RW RPTE_EAA_W
114 #define PG_PTE_CACHE RPTE_ATTR_MASK
117 #define NLS_MASK ((1UL<<5)-1)
118 #define RPTE_ENTRIES (1UL<<RPTE_SHIFT)
119 #define RPTE_MASK (RPTE_ENTRIES-1)
122 #define NLB_MASK (((1UL<<52)-1) << 8)
125 extern caddr_t crashdumpmap;
127 #define RIC_FLUSH_TLB 0
128 #define RIC_FLUSH_PWC 1
129 #define RIC_FLUSH_ALL 2
131 #define POWER9_TLB_SETS_RADIX 128 /* # sets in POWER9 TLB Radix mode */
133 #define PPC_INST_TLBIE 0x7c000264
134 #define PPC_INST_TLBIEL 0x7c000224
135 #define PPC_INST_SLBIA 0x7c0003e4
137 #define ___PPC_RA(a) (((a) & 0x1f) << 16)
138 #define ___PPC_RB(b) (((b) & 0x1f) << 11)
139 #define ___PPC_RS(s) (((s) & 0x1f) << 21)
140 #define ___PPC_RT(t) ___PPC_RS(t)
141 #define ___PPC_R(r) (((r) & 0x1) << 16)
142 #define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
143 #define ___PPC_RIC(ric) (((ric) & 0x3) << 18)
#define PPC_SLBIA(IH) __XSTRING(.long PPC_INST_SLBIA | \
				((IH & 0x7) << 21))
#define PPC_TLBIE_5(rb,rs,ric,prs,r) \
	__XSTRING(.long PPC_INST_TLBIE | \
		___PPC_RB(rb) | ___PPC_RS(rs) | \
		___PPC_RIC(ric) | ___PPC_PRS(prs) | \
		___PPC_R(r))
#define PPC_TLBIEL(rb,rs,ric,prs,r) \
	__XSTRING(.long PPC_INST_TLBIEL | \
		___PPC_RB(rb) | ___PPC_RS(rs) | \
		___PPC_RIC(ric) | ___PPC_PRS(prs) | \
		___PPC_R(r))
159 #define PPC_INVALIDATE_ERAT PPC_SLBIA(7)
164 __asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
167 #define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */
168 #define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */
169 #define TLBIEL_INVAL_SET_PID 0x400 /* invalidate a set for the current PID */
170 #define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */
171 #define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */
173 #define TLBIE_ACTUAL_PAGE_MASK 0xe0
174 #define TLBIE_ACTUAL_PAGE_4K 0x00
175 #define TLBIE_ACTUAL_PAGE_64K 0xa0
176 #define TLBIE_ACTUAL_PAGE_2M 0x20
177 #define TLBIE_ACTUAL_PAGE_1G 0x40
179 #define TLBIE_PRS_PARTITION_SCOPE 0x0
180 #define TLBIE_PRS_PROCESS_SCOPE 0x1
182 #define TLBIE_RIC_INVALIDATE_TLB 0x0 /* Invalidate just TLB */
183 #define TLBIE_RIC_INVALIDATE_PWC 0x1 /* Invalidate just PWC */
#define TLBIE_RIC_INVALIDATE_ALL 0x2	/* Invalidate TLB, PWC,
					 * cached {proc, part}tab entries
					 */
#define TLBIE_RIC_INVALIDATE_SEQ 0x3	/* HPT - only:
					 * Invalidate a range of translations
					 */
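/*
 * Illustrative sketch of how the operands of a process-scoped tlbie are
 * composed from the constants above; radix_tlbie() below performs the
 * equivalent composition.  The page-aligned EA, the invalidation selector
 * and the actual-page-size code share RB, while the PID/LPID pair goes in
 * RS:
 *
 *	rb = va | TLBIEL_INVAL_PAGE | TLBIE_ACTUAL_PAGE_4K;
 *	rs = ((uint64_t)pid << 32) | lpid;
 *	__asm __volatile(PPC_TLBIE_5(%0, %1, %2, %3, 1) : :
 *	    "r" (rb), "r" (rs), "i" (TLBIE_RIC_INVALIDATE_TLB),
 *	    "i" (TLBIE_PRS_PROCESS_SCOPE) : "memory");
 */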
191 static __always_inline void
192 radix_tlbie(uint8_t ric, uint8_t prs, uint16_t is, uint32_t pid, uint32_t lpid,
193 vm_offset_t va, uint16_t ap)
197 MPASS((va & PAGE_MASK) == 0);
199 rs = ((uint64_t)pid << 32) | lpid;
201 __asm __volatile(PPC_TLBIE_5(%0, %1, %2, %3, 1) : :
202 "r" (rb), "r" (rs), "i" (ric), "i" (prs) : "memory");
206 radix_tlbie_fixup(uint32_t pid, vm_offset_t va, int ap)
209 __asm __volatile("ptesync" ::: "memory");
210 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
211 TLBIEL_INVAL_PAGE, 0, 0, va, ap);
212 __asm __volatile("ptesync" ::: "memory");
213 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
214 TLBIEL_INVAL_PAGE, pid, 0, va, ap);
218 radix_tlbie_invlpg_user_4k(uint32_t pid, vm_offset_t va)
221 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
222 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_4K);
223 radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_4K);
227 radix_tlbie_invlpg_user_2m(uint32_t pid, vm_offset_t va)
230 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
231 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_2M);
232 radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_2M);
236 radix_tlbie_invlpwc_user(uint32_t pid)
239 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
240 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
244 radix_tlbie_flush_user(uint32_t pid)
247 radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
248 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
252 radix_tlbie_invlpg_kernel_4k(vm_offset_t va)
255 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
256 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_4K);
257 radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_4K);
261 radix_tlbie_invlpg_kernel_2m(vm_offset_t va)
264 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
265 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_2M);
266 radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_2M);
269 /* 1GB pages aren't currently supported. */
270 static __inline __unused void
271 radix_tlbie_invlpg_kernel_1g(vm_offset_t va)
274 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
275 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_1G);
276 radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_1G);
280 radix_tlbie_invlpwc_kernel(void)
283 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
284 TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
288 radix_tlbie_flush_kernel(void)
291 radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
292 TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
295 static __inline vm_pindex_t
296 pmap_l3e_pindex(vm_offset_t va)
298 return ((va & PG_FRAME) >> L3_PAGE_SIZE_SHIFT);
301 static __inline vm_pindex_t
302 pmap_pml3e_index(vm_offset_t va)
305 return ((va >> L3_PAGE_SIZE_SHIFT) & RPTE_MASK);
308 static __inline vm_pindex_t
309 pmap_pml2e_index(vm_offset_t va)
311 return ((va >> L2_PAGE_SIZE_SHIFT) & RPTE_MASK);
314 static __inline vm_pindex_t
315 pmap_pml1e_index(vm_offset_t va)
317 return ((va & PG_FRAME) >> L1_PAGE_SIZE_SHIFT);
320 /* Return various clipped indexes for a given VA */
321 static __inline vm_pindex_t
322 pmap_pte_index(vm_offset_t va)
325 return ((va >> PAGE_SHIFT) & RPTE_MASK);
328 /* Return a pointer to the PT slot that corresponds to a VA */
329 static __inline pt_entry_t *
330 pmap_l3e_to_pte(pt_entry_t *l3e, vm_offset_t va)
335 ptepa = (be64toh(*l3e) & NLB_MASK);
336 pte = (pt_entry_t *)PHYS_TO_DMAP(ptepa);
337 return (&pte[pmap_pte_index(va)]);
340 /* Return a pointer to the PD slot that corresponds to a VA */
341 static __inline pt_entry_t *
342 pmap_l2e_to_l3e(pt_entry_t *l2e, vm_offset_t va)
347 l3pa = (be64toh(*l2e) & NLB_MASK);
348 l3e = (pml3_entry_t *)PHYS_TO_DMAP(l3pa);
349 return (&l3e[pmap_pml3e_index(va)]);
352 /* Return a pointer to the PD slot that corresponds to a VA */
353 static __inline pt_entry_t *
354 pmap_l1e_to_l2e(pt_entry_t *l1e, vm_offset_t va)
359 l2pa = (be64toh(*l1e) & NLB_MASK);
361 l2e = (pml2_entry_t *)PHYS_TO_DMAP(l2pa);
362 return (&l2e[pmap_pml2e_index(va)]);
365 static __inline pml1_entry_t *
366 pmap_pml1e(pmap_t pmap, vm_offset_t va)
369 return (&pmap->pm_pml1[pmap_pml1e_index(va)]);
373 pmap_pml2e(pmap_t pmap, vm_offset_t va)
377 l1e = pmap_pml1e(pmap, va);
378 if (l1e == NULL || (be64toh(*l1e) & RPTE_VALID) == 0)
380 return (pmap_l1e_to_l2e(l1e, va));
383 static __inline pt_entry_t *
384 pmap_pml3e(pmap_t pmap, vm_offset_t va)
388 l2e = pmap_pml2e(pmap, va);
389 if (l2e == NULL || (be64toh(*l2e) & RPTE_VALID) == 0)
391 return (pmap_l2e_to_l3e(l2e, va));
394 static __inline pt_entry_t *
395 pmap_pte(pmap_t pmap, vm_offset_t va)
399 l3e = pmap_pml3e(pmap, va);
400 if (l3e == NULL || (be64toh(*l3e) & RPTE_VALID) == 0)
402 return (pmap_l3e_to_pte(l3e, va));
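/*
 * Illustrative sketch (assuming a fully populated, valid tree): the helpers
 * above compose into the four-level walk that pmap_pte() performs with a
 * validity check at each step:
 *
 *	pml1_entry_t *l1e = pmap_pml1e(pmap, va);
 *	pml2_entry_t *l2e = pmap_l1e_to_l2e(l1e, va);	// follows NLB_MASK
 *	pml3_entry_t *l3e = pmap_l2e_to_l3e(l2e, va);
 *	pt_entry_t *pte = pmap_l3e_to_pte(l3e, va);	// 4K leaf slot
 */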
406 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
407 "Number of kernel page table pages allocated on bootup");
409 vm_paddr_t dmaplimit;
411 SYSCTL_DECL(_vm_pmap);
414 #define VERBOSE_PMAP 0
415 #define VERBOSE_PROTECT 0
416 static int pmap_logging;
417 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_logging, CTLFLAG_RWTUN,
418 &pmap_logging, 0, "verbose debug logging");
421 static u_int64_t KPTphys; /* phys addr of kernel level 1 */
423 //static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */
425 static vm_offset_t qframe = 0;
426 static struct mtx qframe_mtx;
428 void mmu_radix_activate(struct thread *);
429 void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int);
430 void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
432 void mmu_radix_clear_modify(vm_page_t);
433 void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
434 int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *);
435 int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
436 void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
438 void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
439 vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va);
440 vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
441 void mmu_radix_kenter(vm_offset_t, vm_paddr_t);
442 vm_paddr_t mmu_radix_kextract(vm_offset_t);
443 void mmu_radix_kremove(vm_offset_t);
444 boolean_t mmu_radix_is_modified(vm_page_t);
445 boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
446 boolean_t mmu_radix_is_referenced(vm_page_t);
447 void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t,
448 vm_pindex_t, vm_size_t);
449 boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t);
450 void mmu_radix_page_init(vm_page_t);
451 boolean_t mmu_radix_page_is_mapped(vm_page_t m);
452 void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t);
453 int mmu_radix_page_wired_mappings(vm_page_t);
454 int mmu_radix_pinit(pmap_t);
455 void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
456 bool mmu_radix_ps_enabled(pmap_t);
457 void mmu_radix_qenter(vm_offset_t, vm_page_t *, int);
458 void mmu_radix_qremove(vm_offset_t, int);
459 vm_offset_t mmu_radix_quick_enter_page(vm_page_t);
460 void mmu_radix_quick_remove_page(vm_offset_t);
461 boolean_t mmu_radix_ts_referenced(vm_page_t);
462 void mmu_radix_release(pmap_t);
463 void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t);
464 void mmu_radix_remove_all(vm_page_t);
465 void mmu_radix_remove_pages(pmap_t);
466 void mmu_radix_remove_write(vm_page_t);
467 void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t);
468 void mmu_radix_zero_page(vm_page_t);
469 void mmu_radix_zero_page_area(vm_page_t, int, int);
470 int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
471 void mmu_radix_page_array_startup(long pages);
473 #include "mmu_oea64.h"
/*
 * Kernel MMU interface
 */
479 static void mmu_radix_bootstrap(vm_offset_t, vm_offset_t);
481 static void mmu_radix_copy_page(vm_page_t, vm_page_t);
482 static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
483 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
484 static void mmu_radix_growkernel(vm_offset_t);
485 static void mmu_radix_init(void);
486 static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
487 static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
488 static void mmu_radix_pinit0(pmap_t);
490 static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t);
491 static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
492 static void mmu_radix_unmapdev(vm_offset_t, vm_size_t);
493 static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
494 static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t);
495 static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
496 static void mmu_radix_scan_init(void);
497 static void mmu_radix_cpu_bootstrap(int ap);
498 static void mmu_radix_tlbie_all(void);
500 static struct pmap_funcs mmu_radix_methods = {
501 .bootstrap = mmu_radix_bootstrap,
502 .copy_page = mmu_radix_copy_page,
503 .copy_pages = mmu_radix_copy_pages,
504 .cpu_bootstrap = mmu_radix_cpu_bootstrap,
505 .growkernel = mmu_radix_growkernel,
506 .init = mmu_radix_init,
507 .map = mmu_radix_map,
508 .mincore = mmu_radix_mincore,
509 .pinit = mmu_radix_pinit,
510 .pinit0 = mmu_radix_pinit0,
512 .mapdev = mmu_radix_mapdev,
513 .mapdev_attr = mmu_radix_mapdev_attr,
514 .unmapdev = mmu_radix_unmapdev,
515 .kenter_attr = mmu_radix_kenter_attr,
516 .dev_direct_mapped = mmu_radix_dev_direct_mapped,
517 .dumpsys_pa_init = mmu_radix_scan_init,
518 .dumpsys_map_chunk = mmu_radix_dumpsys_map,
519 .page_is_mapped = mmu_radix_page_is_mapped,
520 .ps_enabled = mmu_radix_ps_enabled,
521 .align_superpage = mmu_radix_align_superpage,
522 .object_init_pt = mmu_radix_object_init_pt,
523 .protect = mmu_radix_protect,
524 /* pmap dispatcher interface */
525 .clear_modify = mmu_radix_clear_modify,
526 .copy = mmu_radix_copy,
527 .enter = mmu_radix_enter,
528 .enter_object = mmu_radix_enter_object,
529 .enter_quick = mmu_radix_enter_quick,
530 .extract = mmu_radix_extract,
531 .extract_and_hold = mmu_radix_extract_and_hold,
532 .is_modified = mmu_radix_is_modified,
533 .is_prefaultable = mmu_radix_is_prefaultable,
534 .is_referenced = mmu_radix_is_referenced,
535 .ts_referenced = mmu_radix_ts_referenced,
536 .page_exists_quick = mmu_radix_page_exists_quick,
537 .page_init = mmu_radix_page_init,
538 .page_wired_mappings = mmu_radix_page_wired_mappings,
539 .qenter = mmu_radix_qenter,
540 .qremove = mmu_radix_qremove,
541 .release = mmu_radix_release,
542 .remove = mmu_radix_remove,
543 .remove_all = mmu_radix_remove_all,
544 .remove_write = mmu_radix_remove_write,
545 .unwire = mmu_radix_unwire,
546 .zero_page = mmu_radix_zero_page,
547 .zero_page_area = mmu_radix_zero_page_area,
548 .activate = mmu_radix_activate,
549 .quick_enter_page = mmu_radix_quick_enter_page,
550 .quick_remove_page = mmu_radix_quick_remove_page,
551 .page_set_memattr = mmu_radix_page_set_memattr,
552 .page_array_startup = mmu_radix_page_array_startup,
554 /* Internal interfaces */
555 .kenter = mmu_radix_kenter,
556 .kextract = mmu_radix_kextract,
557 .kremove = mmu_radix_kremove,
558 .change_attr = mmu_radix_change_attr,
559 .decode_kernel_ptr = mmu_radix_decode_kernel_ptr,
561 .tlbie_all = mmu_radix_tlbie_all,
564 MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
566 static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
567 struct rwlock **lockp);
568 static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va);
569 static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *);
570 static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
571 struct spglist *free, struct rwlock **lockp);
572 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
573 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
574 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
575 static bool pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *pde,
576 struct spglist *free);
577 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
578 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp);
580 static bool pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e,
581 u_int flags, struct rwlock **lockp);
582 #if VM_NRESERVLEVEL > 0
583 static void pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
584 struct rwlock **lockp);
586 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
587 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
588 static vm_page_t mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
589 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate);
591 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
592 vm_prot_t prot, struct rwlock **lockp);
593 static int pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde,
594 u_int flags, vm_page_t m, struct rwlock **lockp);
596 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
597 static void free_pv_chunk(struct pv_chunk *pc);
598 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp);
599 static vm_page_t pmap_allocl3e(pmap_t pmap, vm_offset_t va,
600 struct rwlock **lockp);
601 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
602 struct rwlock **lockp);
603 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
604 struct spglist *free);
605 static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free);
607 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start);
608 static void pmap_invalidate_all(pmap_t pmap);
609 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush);
/*
 * Internal flags for pmap_enter()'s helper functions.
 */
614 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
615 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
617 #define UNIMPLEMENTED() panic("%s not implemented", __func__)
618 #define UNTESTED() panic("%s not yet tested", __func__)
620 /* Number of supported PID bits */
621 static unsigned int isa3_pid_bits;
623 /* PID to start allocating from */
624 static unsigned int isa3_base_pid;
626 #define PROCTAB_SIZE_SHIFT (isa3_pid_bits + 4)
627 #define PROCTAB_ENTRIES (1ul << isa3_pid_bits)
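/*
 * Worked example (the 20-bit figure is an assumption for illustration):
 * with isa3_pid_bits == 20 there are 1 << 20 process-table entries, and
 * since each entry occupies 16 bytes the table spans 1 << (20 + 4) == 16MB,
 * which is what PROCTAB_SIZE_SHIFT expresses.
 */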
/*
 * Map of physical memory regions.
 */
632 static struct mem_region *regions, *pregions;
633 static struct numa_mem_region *numa_pregions;
634 static u_int phys_avail_count;
635 static int regions_sz, pregions_sz, numa_pregions_sz;
636 static struct pate *isa3_parttab;
637 static struct prte *isa3_proctab;
638 static vmem_t *asid_arena;
640 extern void bs_remap_earlyboot(void);
642 #define RADIX_PGD_SIZE_SHIFT 16
643 #define RADIX_PGD_SIZE (1UL << RADIX_PGD_SIZE_SHIFT)
645 #define RADIX_PGD_INDEX_SHIFT (RADIX_PGD_SIZE_SHIFT-3)
646 #define NL2EPG (PAGE_SIZE/sizeof(pml2_entry_t))
647 #define NL3EPG (PAGE_SIZE/sizeof(pml3_entry_t))
649 #define NUPML1E (RADIX_PGD_SIZE/sizeof(uint64_t)) /* number of userland PML1 pages */
650 #define NUPDPE (NUPML1E * NL2EPG)/* number of userland PDP pages */
651 #define NUPDE (NUPDPE * NL3EPG) /* number of userland PD entries */
653 /* POWER9 only permits a 64k partition table size. */
654 #define PARTTAB_SIZE_SHIFT 16
655 #define PARTTAB_SIZE (1UL << PARTTAB_SIZE_SHIFT)
657 #define PARTTAB_HR (1UL << 63) /* host uses radix */
658 #define PARTTAB_GR (1UL << 63) /* guest uses radix must match host */
/* TLB flush actions. Used as argument to tlbiel_flush() */
enum {
	TLB_INVAL_SCOPE_LPID = 2,	/* invalidate TLBs for current LPID */
	TLB_INVAL_SCOPE_GLOBAL = 3,	/* invalidate all TLBs */
};
666 #define NPV_LIST_LOCKS MAXCPU
667 static int pmap_initialized;
668 static vm_paddr_t proctab0pa;
669 static vm_paddr_t parttab_phys;
670 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
/*
 * Data for the pv entry allocation mechanism.
 * Updates to pv_invl_gen are protected by the pv_list_locks[]
 * elements, but reads are not.
 */
677 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
678 static struct mtx __exclusive_cache_line pv_chunks_mutex;
679 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
680 static struct md_page *pv_table;
681 static struct md_page pv_dummy;
#ifdef PV_STATS
#define PV_STAT(x) do { x ; } while (0)
#else
#define PV_STAT(x) do { } while (0)
#endif
689 #define pa_radix_index(pa) ((pa) >> L3_PAGE_SIZE_SHIFT)
690 #define pa_to_pvh(pa) (&pv_table[pa_radix_index(pa)])
692 #define PHYS_TO_PV_LIST_LOCK(pa) \
693 (&pv_list_locks[pa_radix_index(pa) % NPV_LIST_LOCKS])
#define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do {	\
	struct rwlock **_lockp = (lockp);		\
	struct rwlock *_new_lock;			\
							\
	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
	if (_new_lock != *_lockp) {			\
		if (*_lockp != NULL)			\
			rw_wunlock(*_lockp);		\
		*_lockp = _new_lock;			\
		rw_wlock(_new_lock);			\
	}						\
} while (0)
708 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
709 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
#define RELEASE_PV_LIST_LOCK(lockp) do {		\
	struct rwlock **_lockp = (lockp);		\
							\
	if (*_lockp != NULL) {				\
		rw_wunlock(*_lockp);			\
		*_lockp = NULL;				\
	}						\
} while (0)
720 #define VM_PAGE_TO_PV_LIST_LOCK(m) \
721 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
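/*
 * Illustrative usage sketch for the lock helpers above (it assumes the
 * caller already holds the pmap lock): the CHANGE_ macro drops any
 * previously held bucket lock and write-locks the bucket covering the page,
 * and RELEASE_ drops whatever is currently held.
 *
 *	pv_entry_t pv;
 *	struct rwlock *lock = NULL;
 *
 *	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link)
 *		... examine pv ...
 *	RELEASE_PV_LIST_LOCK(&lock);
 */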
/*
 * We support 52 bits, hence:
 * bits 52 - 31 = 21, 0b10101
 * RTS encoding details
 * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
 * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
 */
#define RTS_SIZE ((0x2UL << 61) | (0x5UL << 5))
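/*
 * Worked decode of RTS_SIZE: a 52-bit address space needs an RTS value of
 * 52 - 31 = 21 = 0b10101.  The low three bits (0b101 == 0x5) go into the
 * field at shift 5 and the high two bits (0b10 == 0x2) into the field at
 * shift 61, matching the two terms of the definition above.
 */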
732 static int powernv_enabled = 1;
734 static __always_inline void
735 tlbiel_radix_set_isa300(uint32_t set, uint32_t is,
736 uint32_t pid, uint32_t ric, uint32_t prs)
741 rb = PPC_BITLSHIFT_VAL(set, 51) | PPC_BITLSHIFT_VAL(is, 53);
742 rs = PPC_BITLSHIFT_VAL((uint64_t)pid, 31);
744 __asm __volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
745 : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
750 tlbiel_flush_isa3(uint32_t num_sets, uint32_t is)
754 __asm __volatile("ptesync": : :"memory");
	/*
	 * Flush the first set of the TLB, and the entire Page Walk Cache
	 * and partition table entries. Then flush the remaining sets of the
	 * TLB.
	 */
761 if (is == TLB_INVAL_SCOPE_GLOBAL) {
762 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
763 for (set = 1; set < num_sets; set++)
764 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
767 /* Do the same for process scoped entries. */
768 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
769 for (set = 1; set < num_sets; set++)
770 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
772 __asm __volatile("ptesync": : :"memory");
776 mmu_radix_tlbiel_flush(int scope)
778 MPASS(scope == TLB_INVAL_SCOPE_LPID ||
779 scope == TLB_INVAL_SCOPE_GLOBAL);
781 tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, scope);
782 __asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
786 mmu_radix_tlbie_all()
789 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
791 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
795 mmu_radix_init_amor(void)
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
804 mtspr(SPR_AMOR, (3ul << 62));
808 mmu_radix_init_iamr(void)
	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
815 mtspr(SPR_IAMR, (1ul << 62));
819 mmu_radix_pid_set(pmap_t pmap)
822 mtspr(SPR_PID, pmap->pm_pid);
826 /* Quick sort callout for comparing physical addresses. */
828 pa_cmp(const void *a, const void *b)
830 const vm_paddr_t *pa = a, *pb = b;
840 #define pte_load_store(ptep, pte) atomic_swap_long(ptep, pte)
841 #define pte_load_clear(ptep) atomic_swap_long(ptep, 0)
#define pte_store(ptep, pte) do { \
	MPASS((pte) & (RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_X)); \
	*(u_long *)(ptep) = htobe64((u_long)((pte) | PG_V | RPTE_LEAF)); \
} while (0)
/*
 * NB: should only be used for adding directories - not for direct mappings
 */
#define pde_store(ptep, pa) do { \
	*(u_long *)(ptep) = htobe64((u_long)(pa|RPTE_VALID|RPTE_SHIFT)); \
} while (0)

#define pte_clear(ptep) do { \
	*(u_long *)(ptep) = (u_long)(0); \
} while (0)
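/*
 * Illustrative example (the address and attribute choice are assumptions):
 * a kernel read/write mapping of physical page 0x2000 with default cache
 * attributes would be installed as
 *
 *	pte_store(pte, 0x2000UL | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P |
 *	    PG_A | PG_M);
 *
 * pte_store() ORs in PG_V | RPTE_LEAF and byte-swaps to big-endian before
 * performing the single 64-bit store, and the MPASS() rejects entries that
 * grant no access at all.
 */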
857 #define PMAP_PDE_SUPERPAGE (1 << 8) /* supports 2MB superpages */
/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
#define PG_PTE_PROMOTE (PG_X | PG_MANAGED | PG_W | PG_PTE_CACHE | \
	    PG_M | PG_A | RPTE_EAA_MASK | PG_V)
867 pmap_resident_count_inc(pmap_t pmap, int count)
870 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
871 pmap->pm_stats.resident_count += count;
875 pmap_resident_count_dec(pmap_t pmap, int count)
878 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
879 KASSERT(pmap->pm_stats.resident_count >= count,
880 ("pmap %p resident count underflow %ld %d", pmap,
881 pmap->pm_stats.resident_count, count));
882 pmap->pm_stats.resident_count -= count;
886 pagezero(vm_offset_t va)
890 bzero((void *)va, PAGE_SIZE);
898 ret = moea64_bootstrap_alloc(n * PAGE_SIZE, PAGE_SIZE);
899 for (int i = 0; i < n; i++)
900 pagezero(PHYS_TO_DMAP(ret + i * PAGE_SIZE));
905 kvtopte(vm_offset_t va)
909 l3e = pmap_pml3e(kernel_pmap, va);
910 if (l3e == NULL || (be64toh(*l3e) & RPTE_VALID) == 0)
912 return (pmap_l3e_to_pte(l3e, va));
916 mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa)
922 *pte = htobe64(pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | \
923 RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A);
927 mmu_radix_ps_enabled(pmap_t pmap)
929 return (superpages_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
933 pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e)
939 l3e = pmap_pml3e(pmap, va);
940 if (l3e == NULL || (be64toh(*l3e) & PG_V) == 0)
943 if (be64toh(*l3e) & RPTE_LEAF) {
949 pte = pmap_l3e_to_pte(l3e, va);
950 if (pte == NULL || (be64toh(*pte) & PG_V) == 0)
956 pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags)
959 pt_entry_t startpte, origpte, newpte;
965 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL)
966 return (KERN_INVALID_ADDRESS);
967 origpte = newpte = be64toh(*pte);
970 if (((flags & VM_PROT_WRITE) && (startpte & PG_M)) ||
971 ((flags & VM_PROT_READ) && (startpte & PG_A))) {
972 pmap_invalidate_all(pmap);
974 if (VERBOSE_PMAP || pmap_logging)
975 printf("%s(%p, %#lx, %#x) (%#lx) -- invalidate all\n",
976 __func__, pmap, va, flags, origpte);
978 return (KERN_FAILURE);
982 if (VERBOSE_PMAP || pmap_logging)
983 printf("%s(%p, %#lx, %#x) (%#lx)\n", __func__, pmap, va,
987 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL ||
988 be64toh(*pte) != origpte) {
990 return (KERN_FAILURE);
992 m = PHYS_TO_VM_PAGE(newpte & PG_FRAME);
996 if ((newpte & (RPTE_EAA_R|RPTE_EAA_X)) == 0)
999 vm_page_aflag_set(m, PGA_REFERENCED);
1002 if ((newpte & RPTE_EAA_W) == 0)
1009 case VM_PROT_EXECUTE:
1010 if ((newpte & RPTE_EAA_X) == 0)
1013 vm_page_aflag_set(m, PGA_REFERENCED);
1017 if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte)))
1021 if (startpte == newpte)
1022 return (KERN_FAILURE);
1026 return (KERN_PROTECTION_FAILURE);
/*
 * Returns TRUE if the given page is mapped individually or as part of
 * a 2mpage. Otherwise, returns FALSE.
 */
1034 mmu_radix_page_is_mapped(vm_page_t m)
1036 struct rwlock *lock;
1039 if ((m->oflags & VPO_UNMANAGED) != 0)
1041 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
1043 rv = !TAILQ_EMPTY(&m->md.pv_list) ||
1044 ((m->flags & PG_FICTITIOUS) == 0 &&
1045 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
1055 pmap_cache_bits(vm_memattr_t ma)
1057 if (ma != VM_MEMATTR_DEFAULT) {
1059 case VM_MEMATTR_UNCACHEABLE:
1060 return (RPTE_ATTR_GUARDEDIO);
1061 case VM_MEMATTR_CACHEABLE:
1062 return (RPTE_ATTR_MEM);
1063 case VM_MEMATTR_WRITE_BACK:
1064 case VM_MEMATTR_PREFETCHABLE:
1065 case VM_MEMATTR_WRITE_COMBINING:
1066 return (RPTE_ATTR_UNGUARDEDIO);
1073 pmap_invalidate_page(pmap_t pmap, vm_offset_t start)
1076 if (pmap == kernel_pmap)
1077 radix_tlbie_invlpg_kernel_4k(start);
1079 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1084 pmap_invalidate_page_2m(pmap_t pmap, vm_offset_t start)
1087 if (pmap == kernel_pmap)
1088 radix_tlbie_invlpg_kernel_2m(start);
1090 radix_tlbie_invlpg_user_2m(pmap->pm_pid, start);
1095 pmap_invalidate_pwc(pmap_t pmap)
1098 if (pmap == kernel_pmap)
1099 radix_tlbie_invlpwc_kernel();
1101 radix_tlbie_invlpwc_user(pmap->pm_pid);
1106 pmap_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
1108 if (((start - end) >> PAGE_SHIFT) > 8) {
1109 pmap_invalidate_all(pmap);
1113 if (pmap == kernel_pmap) {
1114 while (start < end) {
1115 radix_tlbie_invlpg_kernel_4k(start);
1119 while (start < end) {
1120 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1128 pmap_invalidate_all(pmap_t pmap)
1131 if (pmap == kernel_pmap)
1132 radix_tlbie_flush_kernel();
1134 radix_tlbie_flush_user(pmap->pm_pid);
1139 pmap_invalidate_l3e_page(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e)
	/*
	 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
	 * by a promotion that did not invalidate the 512 4KB page mappings
	 * that might exist in the TLB. Consequently, at this point, the TLB
	 * may hold both 4KB and 2MB page mappings for the address range [va,
	 * va + L3_PAGE_SIZE). Therefore, the entire range must be invalidated here.
	 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
	 * 4KB page mappings for the address range [va, va + L3_PAGE_SIZE), and so a
	 * single INVLPG suffices to invalidate the 2MB page mapping from the
	 * TLB.
	 */
1154 if ((l3e & PG_PROMOTED) != 0)
1155 pmap_invalidate_range(pmap, va, va + L3_PAGE_SIZE - 1);
1157 pmap_invalidate_page_2m(pmap, va);
1159 pmap_invalidate_pwc(pmap);
1162 static __inline struct pv_chunk *
1163 pv_to_chunk(pv_entry_t pv)
1166 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1169 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1171 #define PC_FREE0 0xfffffffffffffffful
1172 #define PC_FREE1 0x3ffffffffffffffful
1174 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1 };
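/*
 * Worked example: PC_FREE0 has all 64 bits set and PC_FREE1 has 62, so a
 * completely free chunk advertises 64 + 62 == 126 spare pv entries.  Entry
 * number idx lives at pc_map word idx / 64, bit idx % 64; pv entry 70, for
 * instance, is tracked by bit 6 of pc_map[1].
 */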
/*
 * Ensure that the number of spare PV entries in the specified pmap meets or
 * exceeds the given count, "needed".
 *
 * The given PV list lock may be released.
 */
1183 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1185 struct pch new_tail;
1186 struct pv_chunk *pc;
1191 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1192 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
	/*
	 * Newly allocated PV chunks must be stored in a private list until
	 * the required number of PV chunks have been allocated. Otherwise,
	 * reclaim_pv_chunk() could recycle one of these chunks. In
	 * contrast, these chunks must be added to the pmap upon allocation.
	 */
1200 TAILQ_INIT(&new_tail);
1203 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1204 // if ((cpu_feature2 & CPUID2_POPCNT) == 0)
1205 bit_count((bitstr_t *)pc->pc_map, 0,
1206 sizeof(pc->pc_map) * NBBY, &free);
1208 free = popcnt_pc_map_pq(pc->pc_map);
1213 if (avail >= needed)
1216 for (reclaimed = false; avail < needed; avail += _NPCPV) {
1217 m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
1219 m = reclaim_pv_chunk(pmap, lockp);
1224 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1225 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1226 dump_add_page(m->phys_addr);
1227 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1229 pc->pc_map[0] = PC_FREE0;
1230 pc->pc_map[1] = PC_FREE1;
1231 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1232 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
1233 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
		/*
		 * The reclaim might have freed a chunk from the current pmap.
		 * If that chunk contained available entries, we need to
		 * re-count the number of available entries.
		 */
1243 if (!TAILQ_EMPTY(&new_tail)) {
1244 mtx_lock(&pv_chunks_mutex);
1245 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
1246 mtx_unlock(&pv_chunks_mutex);
/*
 * First find and then remove the pv entry for the specified pmap and virtual
 * address from the specified pv list. Returns the pv entry if found and NULL
 * otherwise. This operation can be performed on pv lists for either 4KB or
 * 2MB page mappings.
 */
1256 static __inline pv_entry_t
1257 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1261 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
1263 if (PV_PMAP(pv) == NULL) {
1264 printf("corrupted pv_chunk/pv %p\n", pv);
1265 printf("pv_chunk: %64D\n", pv_to_chunk(pv), ":");
1267 MPASS(PV_PMAP(pv) != NULL);
1268 MPASS(pv->pv_va != 0);
1270 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1271 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
/*
 * After demotion from a 2MB page mapping to 512 4KB page mappings,
 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
 * entries for each of the 4KB page mappings.
 */
1285 pmap_pv_demote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1286 struct rwlock **lockp)
1288 struct md_page *pvh;
1289 struct pv_chunk *pc;
1291 vm_offset_t va_last;
1295 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1296 KASSERT((pa & L3_PAGE_MASK) == 0,
1297 ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
1298 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
	/*
	 * Transfer the 2mpage's pv entry for this mapping to the first
	 * page's pv list. Once this transfer begins, the pv list lock
	 * must not be released until the last pv entry is reinstantiated.
	 */
1305 pvh = pa_to_pvh(pa);
1306 va = trunc_2mpage(va);
1307 pv = pmap_pvh_remove(pvh, pmap, va);
1308 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
1309 m = PHYS_TO_VM_PAGE(pa);
1310 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1313 /* Instantiate the remaining NPTEPG - 1 pv entries. */
1314 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
1315 va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1317 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1318 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0
1319 , ("pmap_pv_demote_pde: missing spare"));
1320 for (field = 0; field < _NPCM; field++) {
1321 while (pc->pc_map[field]) {
1322 bit = cnttzd(pc->pc_map[field]);
1323 pc->pc_map[field] &= ~(1ul << bit);
1324 pv = &pc->pc_pventry[field * 64 + bit];
1328 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1329 ("pmap_pv_demote_pde: page %p is not managed", m));
1330 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1337 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1338 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1341 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1342 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1343 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1345 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
1346 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
1350 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap)
1355 pmap_invalidate_all(pmap);
1356 if (pmap != locked_pmap)
/*
 * We are in a serious low memory condition. Resort to
 * drastic measures to free some pages so we can allocate
 * another pv entry chunk.
 *
 * Returns NULL if PV entries were reclaimed from the specified pmap.
 *
 * We do not, however, unmap 2mpages because subsequent accesses will
 * allocate per-page pv entries until repromotion occurs, thereby
 * exacerbating the shortage of free pv entries.
 */
1371 static int active_reclaims = 0;
1373 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1375 struct pv_chunk *pc, *pc_marker, *pc_marker_end;
1376 struct pv_chunk_header pc_marker_b, pc_marker_end_b;
1377 struct md_page *pvh;
1379 pmap_t next_pmap, pmap;
1380 pt_entry_t *pte, tpte;
1384 struct spglist free;
1386 int bit, field, freed;
1388 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1389 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
1393 bzero(&pc_marker_b, sizeof(pc_marker_b));
1394 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
1395 pc_marker = (struct pv_chunk *)&pc_marker_b;
1396 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
1398 mtx_lock(&pv_chunks_mutex);
1400 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
1401 TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
1402 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
1403 SLIST_EMPTY(&free)) {
1404 next_pmap = pc->pc_pmap;
1405 if (next_pmap == NULL) {
			/*
			 * The next chunk is a marker. However, it is
			 * not our marker, so active_reclaims must be
			 * > 1. Consequently, the next_chunk code
			 * will not rotate the pv_chunks list.
			 */
1414 mtx_unlock(&pv_chunks_mutex);
		/*
		 * A pv_chunk can only be removed from the pc_lru list
		 * when both pv_chunks_mutex is owned and the
		 * corresponding pmap is locked.
		 */
1421 if (pmap != next_pmap) {
1422 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1424 /* Avoid deadlock and lock recursion. */
1425 if (pmap > locked_pmap) {
1426 RELEASE_PV_LIST_LOCK(lockp);
1428 mtx_lock(&pv_chunks_mutex);
1430 } else if (pmap != locked_pmap) {
1431 if (PMAP_TRYLOCK(pmap)) {
1432 mtx_lock(&pv_chunks_mutex);
1435 pmap = NULL; /* pmap is not locked */
1436 mtx_lock(&pv_chunks_mutex);
1437 pc = TAILQ_NEXT(pc_marker, pc_lru);
1439 pc->pc_pmap != next_pmap)
		/*
		 * Destroy every non-wired, 4 KB page mapping in the chunk.
		 */
1450 for (field = 0; field < _NPCM; field++) {
1451 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1452 inuse != 0; inuse &= ~(1UL << bit)) {
1453 bit = cnttzd(inuse);
1454 pv = &pc->pc_pventry[field * 64 + bit];
1456 l3e = pmap_pml3e(pmap, va);
1457 if ((be64toh(*l3e) & RPTE_LEAF) != 0)
1459 pte = pmap_l3e_to_pte(l3e, va);
1460 if ((be64toh(*pte) & PG_W) != 0)
1462 tpte = be64toh(pte_load_clear(pte));
1463 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
1464 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
1466 if ((tpte & PG_A) != 0)
1467 vm_page_aflag_set(m, PGA_REFERENCED);
1468 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1469 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
1472 if (TAILQ_EMPTY(&m->md.pv_list) &&
1473 (m->flags & PG_FICTITIOUS) == 0) {
1474 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
1475 if (TAILQ_EMPTY(&pvh->pv_list)) {
1476 vm_page_aflag_clear(m,
1480 pc->pc_map[field] |= 1UL << bit;
1481 pmap_unuse_pt(pmap, va, be64toh(*l3e), &free);
1486 mtx_lock(&pv_chunks_mutex);
1489 /* Every freed mapping is for a 4 KB page. */
1490 pmap_resident_count_dec(pmap, freed);
1491 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
1492 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
1493 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
1494 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1495 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1) {
1496 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1497 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1498 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1499 /* Entire chunk is free; return it. */
1500 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1501 dump_drop_page(m_pc->phys_addr);
1502 mtx_lock(&pv_chunks_mutex);
1503 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1506 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1507 mtx_lock(&pv_chunks_mutex);
1508 /* One freed pv entry in locked_pmap is sufficient. */
1509 if (pmap == locked_pmap)
1512 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1513 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
1514 if (active_reclaims == 1 && pmap != NULL) {
			/*
			 * Rotate the pv chunks list so that we do not
			 * scan the same pv chunks that could not be
			 * freed (because they contained a wired
			 * and/or superpage mapping) on every
			 * invocation of reclaim_pv_chunk().
			 */
1522 while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
1523 MPASS(pc->pc_pmap != NULL);
1524 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1525 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1529 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1530 TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
1532 mtx_unlock(&pv_chunks_mutex);
1533 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1534 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
1535 m_pc = SLIST_FIRST(&free);
1536 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
1537 /* Recycle a freed page table page. */
1538 m_pc->ref_count = 1;
1540 vm_page_free_pages_toq(&free, true);
/*
 * free the pv_entry back to the free list
 */
1548 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1550 struct pv_chunk *pc;
1551 int idx, field, bit;
1554 if (pmap != kernel_pmap)
1555 printf("%s(%p, %p)\n", __func__, pmap, pv);
1557 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1558 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1559 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1560 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1561 pc = pv_to_chunk(pv);
1562 idx = pv - &pc->pc_pventry[0];
1565 pc->pc_map[field] |= 1ul << bit;
1566 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1) {
1567 /* 98% of the time, pc is already at the head of the list. */
1568 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1569 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1570 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1574 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1579 free_pv_chunk(struct pv_chunk *pc)
1583 mtx_lock(&pv_chunks_mutex);
1584 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1585 mtx_unlock(&pv_chunks_mutex);
1586 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1587 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1588 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1589 /* entire chunk is free, return it */
1590 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1591 dump_drop_page(m->phys_addr);
1592 vm_page_unwire_noq(m);
/*
 * Returns a new PV entry, allocating a new PV chunk from the system when
 * needed. If this PV chunk allocation fails and a PV list lock pointer was
 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
 * returned.
 *
 * The given PV list lock may be released.
 */
1605 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1609 struct pv_chunk *pc;
1612 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1613 PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
1615 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1617 for (field = 0; field < _NPCM; field++) {
1618 if (pc->pc_map[field]) {
1619 bit = cnttzd(pc->pc_map[field]);
1623 if (field < _NPCM) {
1624 pv = &pc->pc_pventry[field * 64 + bit];
1625 pc->pc_map[field] &= ~(1ul << bit);
1626 /* If this was the last item, move it to tail */
1627 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1628 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1629 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1632 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1633 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
1634 MPASS(PV_PMAP(pv) != NULL);
1638 /* No free items, allocate another chunk */
1639 m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
1641 if (lockp == NULL) {
1642 PV_STAT(pc_chunk_tryfail++);
1645 m = reclaim_pv_chunk(pmap, lockp);
1649 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1650 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1651 dump_add_page(m->phys_addr);
1652 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1654 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
1655 pc->pc_map[1] = PC_FREE1;
1656 mtx_lock(&pv_chunks_mutex);
1657 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1658 mtx_unlock(&pv_chunks_mutex);
1659 pv = &pc->pc_pventry[0];
1660 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1661 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1662 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1663 MPASS(PV_PMAP(pv) != NULL);
1667 #if VM_NRESERVLEVEL > 0
/*
 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
 * replace the many pv entries for the 4KB page mappings by a single pv entry
 * for the 2MB page mapping.
 */
1674 pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1675 struct rwlock **lockp)
1677 struct md_page *pvh;
1679 vm_offset_t va_last;
1682 KASSERT((pa & L3_PAGE_MASK) == 0,
1683 ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
1684 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
	/*
	 * Transfer the first page's pv entry for this mapping to the 2mpage's
	 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
	 * a transfer avoids the possibility that get_pv_entry() calls
	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
	 * mappings that is being promoted.
	 */
1693 m = PHYS_TO_VM_PAGE(pa);
1694 va = trunc_2mpage(va);
1695 pv = pmap_pvh_remove(&m->md, pmap, va);
1696 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
1697 pvh = pa_to_pvh(pa);
1698 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
1700 /* Free the remaining NPTEPG - 1 pv entries. */
1701 va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1705 pmap_pvh_free(&m->md, pmap, va);
1706 } while (va < va_last);
1708 #endif /* VM_NRESERVLEVEL > 0 */
/*
 * First find and then destroy the pv entry for the specified pmap and virtual
 * address. This operation can be performed on pv lists for either 4KB or 2MB
 * page mappings.
 */
1716 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1720 pv = pmap_pvh_remove(pvh, pmap, va);
1721 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
1722 free_pv_entry(pmap, pv);
/*
 * Conditionally create the PV entry for a 4KB page mapping if the required
 * memory can be allocated without resorting to reclamation.
 */
1730 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1731 struct rwlock **lockp)
1735 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1736 /* Pass NULL instead of the lock pointer to disable reclamation. */
1737 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1739 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1740 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1747 vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX];
1750 validate_addr(vm_paddr_t addr, vm_size_t size)
1752 vm_paddr_t end = addr + size;
1755 for (int i = 0; i < 2 * phys_avail_count; i += 2) {
1756 if (addr >= phys_avail_debug[i] &&
1757 end <= phys_avail_debug[i + 1]) {
1762 KASSERT(found, ("%#lx-%#lx outside of initial phys_avail array",
1766 static void validate_addr(vm_paddr_t addr, vm_size_t size) {}
1768 #define DMAP_PAGE_BITS (RPTE_VALID | RPTE_LEAF | RPTE_EAA_MASK | PG_M | PG_A)
1775 page = allocpages(1);
1776 pagezero(PHYS_TO_DMAP(page));
1781 mmu_radix_dmap_range(vm_paddr_t start, vm_paddr_t end)
1783 pt_entry_t *pte, pteval;
1787 printf("%s %lx -> %lx\n", __func__, start, end);
1788 while (start < end) {
1789 pteval = start | DMAP_PAGE_BITS;
1790 pte = pmap_pml1e(kernel_pmap, PHYS_TO_DMAP(start));
1791 if ((be64toh(*pte) & RPTE_VALID) == 0) {
1792 page = alloc_pt_page();
1793 pde_store(pte, page);
1795 pte = pmap_l1e_to_l2e(pte, PHYS_TO_DMAP(start));
1796 if ((start & L2_PAGE_MASK) == 0 &&
1797 end - start >= L2_PAGE_SIZE) {
1798 start += L2_PAGE_SIZE;
1800 } else if ((be64toh(*pte) & RPTE_VALID) == 0) {
1801 page = alloc_pt_page();
1802 pde_store(pte, page);
1805 pte = pmap_l2e_to_l3e(pte, PHYS_TO_DMAP(start));
1806 if ((start & L3_PAGE_MASK) == 0 &&
1807 end - start >= L3_PAGE_SIZE) {
1808 start += L3_PAGE_SIZE;
1810 } else if ((be64toh(*pte) & RPTE_VALID) == 0) {
1811 page = alloc_pt_page();
1812 pde_store(pte, page);
1814 pte = pmap_l3e_to_pte(pte, PHYS_TO_DMAP(start));
1817 pte_store(pte, pteval);
1822 mmu_radix_dmap_populate(vm_size_t hwphyssz)
1824 vm_paddr_t start, end;
1826 for (int i = 0; i < pregions_sz; i++) {
1827 start = pregions[i].mr_start;
1828 end = start + pregions[i].mr_size;
1829 if (hwphyssz && start >= hwphyssz)
1831 if (hwphyssz && hwphyssz < end)
1833 mmu_radix_dmap_range(start, end);
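/*
 * Worked example for mmu_radix_dmap_range() above (the region is an
 * assumption for illustration): a 1GB-aligned region [1GB, 3GB) is covered
 * by two L2 (1GB) leaves, a region starting at 1GB + 2MB falls through to
 * 2MB L3 leaves until the next 1GB boundary, and anything not 2MB aligned
 * is finished off with 4K PTEs.
 */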
1838 mmu_radix_setup_pagetables(vm_size_t hwphyssz)
1840 vm_paddr_t ptpages, pages;
1844 bzero(kernel_pmap, sizeof(struct pmap));
1845 PMAP_LOCK_INIT(kernel_pmap);
1847 ptpages = allocpages(3);
1848 l1phys = moea64_bootstrap_alloc(RADIX_PGD_SIZE, RADIX_PGD_SIZE);
1849 validate_addr(l1phys, RADIX_PGD_SIZE);
1851 printf("l1phys=%lx\n", l1phys);
1852 MPASS((l1phys & (RADIX_PGD_SIZE-1)) == 0);
1853 for (int i = 0; i < RADIX_PGD_SIZE/PAGE_SIZE; i++)
1854 pagezero(PHYS_TO_DMAP(l1phys + i * PAGE_SIZE));
1855 kernel_pmap->pm_pml1 = (pml1_entry_t *)PHYS_TO_DMAP(l1phys);
1857 mmu_radix_dmap_populate(hwphyssz);
	/*
	 * Create page tables for first 128MB of KVA
	 */
1863 pte = pmap_pml1e(kernel_pmap, VM_MIN_KERNEL_ADDRESS);
1864 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1866 pte = pmap_l1e_to_l2e(pte, VM_MIN_KERNEL_ADDRESS);
1867 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1869 pte = pmap_l2e_to_l3e(pte, VM_MIN_KERNEL_ADDRESS);
	/*
	 * the kernel page table pages need to be preserved in
	 * phys_avail and not overlap with previous allocations
	 */
1874 pages = allocpages(nkpt);
1876 printf("phys_avail after dmap populate and nkpt allocation\n");
1877 for (int j = 0; j < 2 * phys_avail_count; j+=2)
1878 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
1879 j, phys_avail[j], j + 1, phys_avail[j + 1]);
1882 for (int i = 0; i < nkpt; i++, pte++, pages += PAGE_SIZE)
1883 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1884 kernel_vm_end = VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE;
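	/*
	 * Worked example (assuming the default nkpt of 64): 64 page-table
	 * pages at L3_PAGE_SIZE (2MB) apiece give the 128MB of initial KVA
	 * mentioned above, so kernel_vm_end starts 128MB past
	 * VM_MIN_KERNEL_ADDRESS.
	 */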
1886 printf("kernel_pmap pml1 %p\n", kernel_pmap->pm_pml1);
	/*
	 * Add a physical memory segment (vm_phys_seg) corresponding to the
	 * preallocated kernel page table pages so that vm_page structures
	 * representing these pages will be created. The vm_page structures
	 * are required for promotion of the corresponding kernel virtual
	 * addresses to superpage mappings.
	 */
1894 vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1898 mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end)
1900 vm_paddr_t kpstart, kpend;
1901 vm_size_t physsz, hwphyssz;
1903 int rm_pavail, proctab_size;
1906 kpstart = start & ~DMAP_BASE_ADDRESS;
1907 kpend = end & ~DMAP_BASE_ADDRESS;
1909 /* Get physical memory regions from firmware */
1910 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz);
1911 CTR0(KTR_PMAP, "mmu_radix_early_bootstrap: physical memory");
1913 if (2 * VM_PHYSSEG_MAX < regions_sz)
1914 panic("mmu_radix_early_bootstrap: phys_avail too small");
1917 for (int i = 0; i < regions_sz; i++)
1918 printf("regions[%d].mr_start=%lx regions[%d].mr_size=%lx\n",
1919 i, regions[i].mr_start, i, regions[i].mr_size);
	/*
	 * XXX workaround a simulator bug
	 */
1923 for (int i = 0; i < regions_sz; i++)
1924 if (regions[i].mr_start & PAGE_MASK) {
1925 regions[i].mr_start += PAGE_MASK;
1926 regions[i].mr_start &= ~PAGE_MASK;
1927 regions[i].mr_size &= ~PAGE_MASK;
1930 for (int i = 0; i < pregions_sz; i++)
1931 printf("pregions[%d].mr_start=%lx pregions[%d].mr_size=%lx\n",
1932 i, pregions[i].mr_start, i, pregions[i].mr_size);
1934 phys_avail_count = 0;
1937 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1938 for (i = 0, j = 0; i < regions_sz; i++) {
1940 printf("regions[%d].mr_start=%016lx regions[%d].mr_size=%016lx\n",
1941 i, regions[i].mr_start, i, regions[i].mr_size);
1943 if (regions[i].mr_size < PAGE_SIZE)
1946 if (hwphyssz != 0 &&
1947 (physsz + regions[i].mr_size) >= hwphyssz) {
1948 if (physsz < hwphyssz) {
1949 phys_avail[j] = regions[i].mr_start;
1950 phys_avail[j + 1] = regions[i].mr_start +
1951 (hwphyssz - physsz);
1954 dump_avail[j] = phys_avail[j];
1955 dump_avail[j + 1] = phys_avail[j + 1];
1959 phys_avail[j] = regions[i].mr_start;
1960 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
1961 dump_avail[j] = phys_avail[j];
1962 dump_avail[j + 1] = phys_avail[j + 1];
1965 physsz += regions[i].mr_size;
1969 /* Check for overlap with the kernel and exception vectors */
1971 for (j = 0; j < 2 * phys_avail_count; j+=2) {
1972 if (phys_avail[j] < EXC_LAST)
1973 phys_avail[j] += EXC_LAST;
1975 if (phys_avail[j] >= kpstart &&
1976 phys_avail[j + 1] <= kpend) {
1977 phys_avail[j] = phys_avail[j + 1] = ~0;
1982 if (kpstart >= phys_avail[j] &&
1983 kpstart < phys_avail[j + 1]) {
1984 if (kpend < phys_avail[j + 1]) {
1985 phys_avail[2 * phys_avail_count] =
1986 (kpend & ~PAGE_MASK) + PAGE_SIZE;
1987 phys_avail[2 * phys_avail_count + 1] =
1992 phys_avail[j + 1] = kpstart & ~PAGE_MASK;
1995 if (kpend >= phys_avail[j] &&
1996 kpend < phys_avail[j + 1]) {
1997 if (kpstart > phys_avail[j]) {
1998 phys_avail[2 * phys_avail_count] = phys_avail[j];
1999 phys_avail[2 * phys_avail_count + 1] =
2000 kpstart & ~PAGE_MASK;
2004 phys_avail[j] = (kpend & ~PAGE_MASK) +
2008 qsort(phys_avail, 2 * phys_avail_count, sizeof(phys_avail[0]), pa_cmp);
2009 for (i = 0; i < 2 * phys_avail_count; i++)
2010 phys_avail_debug[i] = phys_avail[i];
2012 /* Remove physical available regions marked for removal (~0) */
2014 phys_avail_count -= rm_pavail;
2015 for (i = 2 * phys_avail_count;
2016 i < 2*(phys_avail_count + rm_pavail); i+=2)
2017 phys_avail[i] = phys_avail[i + 1] = 0;
2020 printf("phys_avail ranges after filtering:\n");
2021 for (j = 0; j < 2 * phys_avail_count; j+=2)
2022 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
2023 j, phys_avail[j], j + 1, phys_avail[j + 1]);
2025 physmem = btoc(physsz);
	/* XXX assume we're running non-virtualized and
	 * we don't support BHYVE
	 */
2030 if (isa3_pid_bits == 0)
2032 if (powernv_enabled) {
2034 moea64_bootstrap_alloc(PARTTAB_SIZE, PARTTAB_SIZE);
2035 validate_addr(parttab_phys, PARTTAB_SIZE);
2036 for (int i = 0; i < PARTTAB_SIZE/PAGE_SIZE; i++)
2037 pagezero(PHYS_TO_DMAP(parttab_phys + i * PAGE_SIZE));
2040 proctab_size = 1UL << PROCTAB_SIZE_SHIFT;
2041 proctab0pa = moea64_bootstrap_alloc(proctab_size, proctab_size);
2042 validate_addr(proctab0pa, proctab_size);
2043 for (int i = 0; i < proctab_size/PAGE_SIZE; i++)
2044 pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE));
2046 mmu_radix_setup_pagetables(hwphyssz);
2050 mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end)
	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */
2062 printf("%s enter\n", __func__);
	/*
	 * Calculate the last available physical address, and reserve the
	 * vm_page_array (upper bound).
	 */
2069 for (i = 0; phys_avail[i + 1] != 0; i += 2)
2070 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));
	/*
	 * Remap any early IO mappings (console framebuffer, etc.)
	 */
2075 bs_remap_earlyboot();
	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
2081 pa = allocpages(kstack_pages);
2082 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2083 virtual_avail = va + kstack_pages * PAGE_SIZE;
2084 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
2085 thread0.td_kstack = va;
2086 for (i = 0; i < kstack_pages; i++) {
2087 mmu_radix_kenter(va, pa);
2091 thread0.td_kstack_pages = kstack_pages;
	/*
	 * Allocate virtual address space for the message buffer.
	 */
2096 pa = msgbuf_phys = allocpages((msgbufsize + PAGE_MASK) >> PAGE_SHIFT);
2097 msgbufp = (struct msgbuf *)PHYS_TO_DMAP(pa);
	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
2102 pa = allocpages(DPCPU_SIZE >> PAGE_SHIFT);
2103 dpcpu = (void *)PHYS_TO_DMAP(pa);
2104 dpcpu_init(dpcpu, curcpu);
2106 crashdumpmap = (caddr_t)virtual_avail;
2107 virtual_avail += MAXDUMPPGS * PAGE_SIZE;
	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
2116 mmu_parttab_init(void)
2120 isa3_parttab = (struct pate *)PHYS_TO_DMAP(parttab_phys);
2123 printf("%s parttab: %p\n", __func__, isa3_parttab);
2124 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2126 printf("setting ptcr %lx\n", ptcr);
2127 mtspr(SPR_PTCR, ptcr);
2131 mmu_parttab_update(uint64_t lpid, uint64_t pagetab, uint64_t proctab)
2136 printf("%s isa3_parttab %p lpid %lx pagetab %lx proctab %lx\n", __func__, isa3_parttab,
2137 lpid, pagetab, proctab);
2138 prev = be64toh(isa3_parttab[lpid].pagetab);
2139 isa3_parttab[lpid].pagetab = htobe64(pagetab);
2140 isa3_parttab[lpid].proctab = htobe64(proctab);
2142 if (prev & PARTTAB_HR) {
2143 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
2144 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2145 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2146 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2148 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
2149 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2155 mmu_radix_parttab_init(void)
2160 pagetab = RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
2161 RADIX_PGD_INDEX_SHIFT | PARTTAB_HR;
2162 mmu_parttab_update(0, pagetab, 0);
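/*
 * Record the process table base and size in partition table entry 0,
 * preserving the entry's existing page-table field.
 */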
2166 mmu_radix_proctab_register(vm_paddr_t proctabpa, uint64_t table_size)
2168 uint64_t pagetab, proctab;
2170 pagetab = be64toh(isa3_parttab[0].pagetab);
2171 proctab = proctabpa | table_size | PARTTAB_GR;
2172 mmu_parttab_update(0, pagetab, proctab);
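/*
 * Set up the kernel's process table entry and publish the table either
 * directly to the hardware (PowerNV) or via the H_REGISTER_PROC_TBL
 * hypercall (pseries), flushing the TLB afterwards.
 */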
2176 mmu_radix_proctab_init(void)
2181 isa3_proctab = (void*)PHYS_TO_DMAP(proctab0pa);
2182 isa3_proctab->proctab0 =
2183 htobe64(RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
2184 RADIX_PGD_INDEX_SHIFT);
2186 if (powernv_enabled) {
2187 mmu_radix_proctab_register(proctab0pa, PROCTAB_SIZE_SHIFT - 12);
2188 __asm __volatile("ptesync" : : : "memory");
2189 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2190 "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
2191 __asm __volatile("eieio; tlbsync; ptesync" : : : "memory");
2196 rc = phyp_hcall(H_REGISTER_PROC_TBL,
2197 PROC_TABLE_NEW | PROC_TABLE_RADIX | PROC_TABLE_GTSE,
2198 proctab0pa, 0, PROCTAB_SIZE_SHIFT - 12);
2199 if (rc != H_SUCCESS)
2200 panic("mmu_radix_proctab_init: "
2201 "failed to register process table: rc=%jd",
2207 printf("process table %p and kernel radix PDE: %p\n",
2208 isa3_proctab, kernel_pmap->pm_pml1);
2209 mtmsr(mfmsr() | PSL_DR);
2210 mtmsr(mfmsr() & ~PSL_DR);
2211 kernel_pmap->pm_pid = isa3_base_pid;
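/*
 * Apply MADV_DONTNEED/MADV_FREE advice to a range: clear the accessed (and,
 * for MADV_DONTNEED, modified) bits on managed 4KB mappings, demoting 2MB
 * mappings first so that individual pages may later be repromoted on access.
 */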
2216 mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2219 struct rwlock *lock;
2222 pml3_entry_t oldl3e, *l3e;
2224 vm_offset_t va, va_next;
2228 if (advice != MADV_DONTNEED && advice != MADV_FREE)
2232 for (; sva < eva; sva = va_next) {
2233 l1e = pmap_pml1e(pmap, sva);
2234 if ((be64toh(*l1e) & PG_V) == 0) {
2235 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2240 l2e = pmap_l1e_to_l2e(l1e, sva);
2241 if ((be64toh(*l2e) & PG_V) == 0) {
2242 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2247 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2250 l3e = pmap_l2e_to_l3e(l2e, sva);
2251 oldl3e = be64toh(*l3e);
2252 if ((oldl3e & PG_V) == 0)
2254 else if ((oldl3e & RPTE_LEAF) != 0) {
2255 if ((oldl3e & PG_MANAGED) == 0)
2258 if (!pmap_demote_l3e_locked(pmap, l3e, sva, &lock)) {
2263 * The large page mapping was destroyed.
2269 * Unless the page mappings are wired, remove the
2270 * mapping to a single page so that a subsequent
2271 * access may repromote. Choosing the last page
2272 * within the address range [sva, min(va_next, eva))
2273 * generally results in more repromotions. Since the
2274 * underlying page table page is fully populated, this
2275 * removal never frees a page table page.
2277 if ((oldl3e & PG_W) == 0) {
2283 ("mmu_radix_advise: no address gap"));
2284 pte = pmap_l3e_to_pte(l3e, va);
2285 KASSERT((be64toh(*pte) & PG_V) != 0,
2286 ("pmap_advise: invalid PTE"));
2287 pmap_remove_pte(pmap, pte, va, be64toh(*l3e), NULL,
2297 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next;
2298 pte++, sva += PAGE_SIZE) {
2299 MPASS(pte == pmap_pte(pmap, sva));
2301 if ((be64toh(*pte) & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
2303 else if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2304 if (advice == MADV_DONTNEED) {
2306 * Future calls to pmap_is_modified()
2307 * can be avoided by making the page
 * dirty now.
2310 m = PHYS_TO_VM_PAGE(be64toh(*pte) & PG_FRAME);
2313 atomic_clear_long(pte, htobe64(PG_M | PG_A));
2314 } else if ((be64toh(*pte) & PG_A) != 0)
2315 atomic_clear_long(pte, htobe64(PG_A));
2321 if (va != va_next) {
2330 pmap_invalidate_all(pmap);
2335 * Routines used in machine-dependent code
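/*
 * Top-level bootstrap entry point: build the kernel radix page tables,
 * enable radix translation (LPCR/partition table on PowerNV), initialize
 * the process table, install the kernel PID, and finish late VA setup.
 */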
2338 mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end)
2343 printf("%s\n", __func__);
2345 powernv_enabled = (mfmsr() & PSL_HV) ? 1 : 0;
2346 mmu_radix_early_bootstrap(start, end);
2348 printf("early bootstrap complete\n");
2349 if (powernv_enabled) {
2350 lpcr = mfspr(SPR_LPCR);
2351 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2352 mmu_radix_parttab_init();
2353 mmu_radix_init_amor();
2355 printf("powernv init complete\n");
2357 mmu_radix_init_iamr();
2358 mmu_radix_proctab_init();
2359 mmu_radix_pid_set(kernel_pmap);
2360 if (powernv_enabled)
2361 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2363 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
2365 mmu_radix_late_bootstrap(start, end);
2366 numa_mem_regions(&numa_pregions, &numa_pregions_sz);
2368 printf("%s done\n", __func__);
2369 pmap_bootstrapped = 1;
2370 dmaplimit = roundup2(powerpc_ptob(Maxmem), L2_PAGE_SIZE);
2371 PCPU_SET(flags, PCPU_GET(flags) | PC_FLAG_NOSRS);
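/*
 * Per-CPU (AP) bootstrap: enable radix mode and install the partition table
 * base on PowerNV, program AMOR/IAMR, set the kernel PID, and flush the TLB.
 */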
2375 mmu_radix_cpu_bootstrap(int ap)
2380 if (powernv_enabled) {
2381 lpcr = mfspr(SPR_LPCR);
2382 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2384 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2385 mtspr(SPR_PTCR, ptcr);
2386 mmu_radix_init_amor();
2388 mmu_radix_init_iamr();
2389 mmu_radix_pid_set(kernel_pmap);
2390 if (powernv_enabled)
2391 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2393 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
2396 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3e, CTLFLAG_RD, 0,
2397 "2MB page mapping counters");
2399 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_demotions);
2400 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, demotions, CTLFLAG_RD,
2401 &pmap_l3e_demotions, "2MB page demotions");
2403 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_mappings);
2404 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, mappings, CTLFLAG_RD,
2405 &pmap_l3e_mappings, "2MB page mappings");
2407 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_p_failures);
2408 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, p_failures, CTLFLAG_RD,
2409 &pmap_l3e_p_failures, "2MB page promotion failures");
2411 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_promotions);
2412 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, promotions, CTLFLAG_RD,
2413 &pmap_l3e_promotions, "2MB page promotions");
2415 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0,
2416 "1GB page mapping counters");
2418 static COUNTER_U64_DEFINE_EARLY(pmap_l2e_demotions);
2419 SYSCTL_COUNTER_U64(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD,
2420 &pmap_l2e_demotions, "1GB page demotions");
2423 mmu_radix_clear_modify(vm_page_t m)
2425 struct md_page *pvh;
2427 pv_entry_t next_pv, pv;
2428 pml3_entry_t oldl3e, *l3e;
2429 pt_entry_t oldpte, *pte;
2430 struct rwlock *lock;
2432 int md_gen, pvh_gen;
2434 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2435 ("pmap_clear_modify: page %p is not managed", m));
2436 vm_page_assert_busied(m);
2437 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
2440 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
2441 * If the object containing the page is locked and the page is not
2442 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2444 if ((m->a.flags & PGA_WRITEABLE) == 0)
2446 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2447 pa_to_pvh(VM_PAGE_TO_PHYS(m));
2448 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2451 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
2453 if (!PMAP_TRYLOCK(pmap)) {
2454 pvh_gen = pvh->pv_gen;
2458 if (pvh_gen != pvh->pv_gen) {
2464 l3e = pmap_pml3e(pmap, va);
2465 oldl3e = be64toh(*l3e);
2466 if ((oldl3e & PG_RW) != 0 &&
2467 pmap_demote_l3e_locked(pmap, l3e, va, &lock) &&
2468 (oldl3e & PG_W) == 0) {
2470 * Write protect the mapping to a
2471 * single page so that a subsequent
2472 * write access may repromote.
2474 va += VM_PAGE_TO_PHYS(m) - (oldl3e &
2476 pte = pmap_l3e_to_pte(l3e, va);
2477 oldpte = be64toh(*pte);
2478 while (!atomic_cmpset_long(pte,
2480 htobe64((oldpte | RPTE_EAA_R) & ~(PG_M | PG_RW))))
2481 oldpte = be64toh(*pte);
2483 pmap_invalidate_page(pmap, va);
2487 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2489 if (!PMAP_TRYLOCK(pmap)) {
2490 md_gen = m->md.pv_gen;
2491 pvh_gen = pvh->pv_gen;
2495 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2500 l3e = pmap_pml3e(pmap, pv->pv_va);
2501 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_clear_modify: found"
2502 " a 2mpage in page %p's pv list", m));
2503 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
2504 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2505 atomic_clear_long(pte, htobe64(PG_M));
2506 pmap_invalidate_page(pmap, pv->pv_va);
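/*
 * Copy valid, managed mappings for the given range from src_pmap to
 * dst_pmap (typically at fork time).  This is purely an optimization:
 * mappings that cannot be copied cheaply are simply skipped.
 */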
2514 mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2515 vm_size_t len, vm_offset_t src_addr)
2517 struct rwlock *lock;
2518 struct spglist free;
2520 vm_offset_t end_addr = src_addr + len;
2521 vm_offset_t va_next;
2522 vm_page_t dst_pdpg, dstmpte, srcmpte;
2523 bool invalidate_all;
2526 "%s(dst_pmap=%p, src_pmap=%p, dst_addr=%lx, len=%lu, src_addr=%lx)\n",
2527 __func__, dst_pmap, src_pmap, dst_addr, len, src_addr);
2529 if (dst_addr != src_addr)
2532 invalidate_all = false;
2533 if (dst_pmap < src_pmap) {
2534 PMAP_LOCK(dst_pmap);
2535 PMAP_LOCK(src_pmap);
2537 PMAP_LOCK(src_pmap);
2538 PMAP_LOCK(dst_pmap);
2541 for (addr = src_addr; addr < end_addr; addr = va_next) {
2544 pml3_entry_t srcptepaddr, *l3e;
2545 pt_entry_t *src_pte, *dst_pte;
2547 l1e = pmap_pml1e(src_pmap, addr);
2548 if ((be64toh(*l1e) & PG_V) == 0) {
2549 va_next = (addr + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2555 l2e = pmap_l1e_to_l2e(l1e, addr);
2556 if ((be64toh(*l2e) & PG_V) == 0) {
2557 va_next = (addr + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2563 va_next = (addr + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2567 l3e = pmap_l2e_to_l3e(l2e, addr);
2568 srcptepaddr = be64toh(*l3e);
2569 if (srcptepaddr == 0)
2572 if (srcptepaddr & RPTE_LEAF) {
2573 if ((addr & L3_PAGE_MASK) != 0 ||
2574 addr + L3_PAGE_SIZE > end_addr)
2576 dst_pdpg = pmap_allocl3e(dst_pmap, addr, NULL);
2577 if (dst_pdpg == NULL)
2579 l3e = (pml3_entry_t *)
2580 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
2581 l3e = &l3e[pmap_pml3e_index(addr)];
2582 if (be64toh(*l3e) == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
2583 pmap_pv_insert_l3e(dst_pmap, addr, srcptepaddr,
2584 PMAP_ENTER_NORECLAIM, &lock))) {
2585 *l3e = htobe64(srcptepaddr & ~PG_W);
2586 pmap_resident_count_inc(dst_pmap,
2587 L3_PAGE_SIZE / PAGE_SIZE);
2588 counter_u64_add(pmap_l3e_mappings, 1);
2590 dst_pdpg->ref_count--;
2594 srcptepaddr &= PG_FRAME;
2595 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
2596 KASSERT(srcmpte->ref_count > 0,
2597 ("pmap_copy: source page table page is unused"));
2599 if (va_next > end_addr)
2602 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
2603 src_pte = &src_pte[pmap_pte_index(addr)];
2605 while (addr < va_next) {
2607 ptetemp = be64toh(*src_pte);
2609 * We only virtual-copy managed pages.
2611 if ((ptetemp & PG_MANAGED) != 0) {
2612 if (dstmpte != NULL &&
2613 dstmpte->pindex == pmap_l3e_pindex(addr))
2614 dstmpte->ref_count++;
2615 else if ((dstmpte = pmap_allocpte(dst_pmap,
2616 addr, NULL)) == NULL)
2618 dst_pte = (pt_entry_t *)
2619 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
2620 dst_pte = &dst_pte[pmap_pte_index(addr)];
2621 if (be64toh(*dst_pte) == 0 &&
2622 pmap_try_insert_pv_entry(dst_pmap, addr,
2623 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
2626 * Clear the wired, modified, and
2627 * accessed (referenced) bits
 * during the copy.
2630 *dst_pte = htobe64(ptetemp & ~(PG_W | PG_M |
2632 pmap_resident_count_inc(dst_pmap, 1);
2635 if (pmap_unwire_ptp(dst_pmap, addr,
2638 * Although "addr" is not
2639 * mapped, paging-structure
2640 * caches could nonetheless
2641 * have entries that refer to
2642 * the freed page table pages.
2643 * Invalidate those entries.
2645 invalidate_all = true;
2646 vm_page_free_pages_toq(&free,
2651 if (dstmpte->ref_count >= srcmpte->ref_count)
2655 if (__predict_false((addr & L3_PAGE_MASK) == 0))
2656 src_pte = pmap_pte(src_pmap, addr);
2663 pmap_invalidate_all(dst_pmap);
2666 PMAP_UNLOCK(src_pmap);
2667 PMAP_UNLOCK(dst_pmap);
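/*
 * Copy the contents of one physical page to another using the direct map.
 */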
2671 mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst)
2673 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2674 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2676 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
2680 bcopy((void *)src, (void *)dst, PAGE_SIZE);
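/*
 * Copy "xfersize" bytes between two page arrays at arbitrary (possibly
 * unaligned) byte offsets, one page-bounded chunk at a time, via the
 * direct map.
 */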
2684 mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2685 vm_offset_t b_offset, int xfersize)
2688 vm_offset_t a_pg_offset, b_pg_offset;
2691 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
2692 a_offset, mb, b_offset, xfersize);
2694 while (xfersize > 0) {
2695 a_pg_offset = a_offset & PAGE_MASK;
2696 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2697 a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
2698 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
2700 b_pg_offset = b_offset & PAGE_MASK;
2701 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2702 b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
2703 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
2705 bcopy(a_cp, b_cp, cnt);
2712 #if VM_NRESERVLEVEL > 0
2714 * Tries to promote the 512, contiguous 4KB page mappings that are within a
2715 * single page table page (PTP) to a single 2MB page mapping. For promotion
2716 * to occur, two conditions must be met: (1) the 4KB page mappings must map
2717 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
2718 * identical characteristics.
2721 pmap_promote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va,
2722 struct rwlock **lockp)
2724 pml3_entry_t newpde;
2725 pt_entry_t *firstpte, oldpte, pa, *pte;
2728 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2731 * Examine the first PTE in the specified PTP. Abort if this PTE is
2732 * either invalid, unused, or does not map the first 4KB physical page
2733 * within a 2MB page.
2735 firstpte = (pt_entry_t *)PHYS_TO_DMAP(be64toh(*pde) & PG_FRAME);
2737 newpde = be64toh(*firstpte);
2738 if ((newpde & ((PG_FRAME & L3_PAGE_MASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
2739 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2740 " in pmap %p", va, pmap);
2743 if ((newpde & (PG_M | PG_RW)) == PG_RW) {
2745 * When PG_M is already clear, PG_RW can be cleared without
2746 * a TLB invalidation.
2748 if (!atomic_cmpset_long(firstpte, htobe64(newpde), htobe64((newpde | RPTE_EAA_R) & ~RPTE_EAA_W)))
2750 newpde &= ~RPTE_EAA_W;
2754 * Examine each of the other PTEs in the specified PTP. Abort if this
2755 * PTE maps an unexpected 4KB physical page or does not have identical
2756 * characteristics to the first PTE.
2758 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + L3_PAGE_SIZE - PAGE_SIZE;
2759 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
2761 oldpte = be64toh(*pte);
2762 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
2763 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2764 " in pmap %p", va, pmap);
2767 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
2769 * When PG_M is already clear, PG_RW can be cleared
2770 * without a TLB invalidation.
2772 if (!atomic_cmpset_long(pte, htobe64(oldpte), htobe64((oldpte | RPTE_EAA_R) & ~RPTE_EAA_W)))
2774 oldpte &= ~RPTE_EAA_W;
2775 CTR2(KTR_PMAP, "pmap_promote_l3e: protect for va %#lx"
2776 " in pmap %p", (oldpte & PG_FRAME & L3_PAGE_MASK) |
2777 (va & ~L3_PAGE_MASK), pmap);
2779 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
2780 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2781 " in pmap %p", va, pmap);
2788 * Save the page table page in its current state until the PDE
2789 * mapping the superpage is demoted by pmap_demote_pde() or
2790 * destroyed by pmap_remove_pde().
2792 mpte = PHYS_TO_VM_PAGE(be64toh(*pde) & PG_FRAME);
2793 KASSERT(mpte >= vm_page_array &&
2794 mpte < &vm_page_array[vm_page_array_size],
2795 ("pmap_promote_l3e: page table page is out of range"));
2796 KASSERT(mpte->pindex == pmap_l3e_pindex(va),
2797 ("pmap_promote_l3e: page table page's pindex is wrong"));
2798 if (pmap_insert_pt_page(pmap, mpte)) {
2800 "pmap_promote_l3e: failure for va %#lx in pmap %p", va,
2806 * Promote the pv entries.
2808 if ((newpde & PG_MANAGED) != 0)
2809 pmap_pv_promote_l3e(pmap, va, newpde & PG_PS_FRAME, lockp);
2811 pte_store(pde, PG_PROMOTED | newpde);
2813 counter_u64_add(pmap_l3e_promotions, 1);
2814 CTR2(KTR_PMAP, "pmap_promote_l3e: success for va %#lx"
2815 " in pmap %p", va, pmap);
2818 counter_u64_add(pmap_l3e_p_failures, 1);
2819 return (KERN_FAILURE);
2821 #endif /* VM_NRESERVLEVEL > 0 */
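/*
 * Insert the given physical page (m) at the specified virtual address (va)
 * in the target pmap with the requested protection.  If "psind" is 1 the
 * mapping is entered as a 2MB superpage; otherwise a 4KB mapping is
 * created, demoting or replacing any existing mapping as needed, and a
 * promotion to 2MB is attempted afterwards when the reservation allows.
 */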
2824 mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
2825 vm_prot_t prot, u_int flags, int8_t psind)
2827 struct rwlock *lock;
2830 pt_entry_t newpte, origpte;
2835 boolean_t nosleep, invalidate_all, invalidate_page;
2837 va = trunc_page(va);
2839 invalidate_page = invalidate_all = false;
2840 CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va,
2841 m, prot, flags, psind);
2842 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
2843 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
2844 ("pmap_enter: managed mapping within the clean submap"));
2845 if ((m->oflags & VPO_UNMANAGED) == 0)
2846 VM_PAGE_OBJECT_BUSY_ASSERT(m);
2848 KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
2849 ("pmap_enter: flags %u has reserved bits set", flags));
2850 pa = VM_PAGE_TO_PHYS(m);
2851 newpte = (pt_entry_t)(pa | PG_A | PG_V | RPTE_LEAF);
2852 if ((flags & VM_PROT_WRITE) != 0)
2854 if ((flags & VM_PROT_READ) != 0)
2856 if (prot & VM_PROT_READ)
2857 newpte |= RPTE_EAA_R;
2858 if ((prot & VM_PROT_WRITE) != 0)
2859 newpte |= RPTE_EAA_W;
2860 KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
2861 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
2863 if (prot & VM_PROT_EXECUTE)
2865 if ((flags & PMAP_ENTER_WIRED) != 0)
2867 if (va >= DMAP_MIN_ADDRESS)
2868 newpte |= RPTE_EAA_P;
2869 newpte |= pmap_cache_bits(m->md.mdpg_cache_attrs);
2871 * Set modified bit gratuitously for writeable mappings if
2872 * the page is unmanaged. We do not want to take a fault
2873 * to do the dirty bit accounting for these mappings.
2875 if ((m->oflags & VPO_UNMANAGED) != 0) {
2876 if ((newpte & PG_RW) != 0)
2879 newpte |= PG_MANAGED;
2884 /* Assert the required virtual and physical alignment. */
2885 KASSERT((va & L3_PAGE_MASK) == 0, ("pmap_enter: va unaligned"));
2886 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
2887 rv = pmap_enter_l3e(pmap, va, newpte | RPTE_LEAF, flags, m, &lock);
2893 * In the case that a page table page is not
2894 * resident, we are creating it here.
2897 l3e = pmap_pml3e(pmap, va);
2898 if (l3e != NULL && (be64toh(*l3e) & PG_V) != 0 && ((be64toh(*l3e) & RPTE_LEAF) == 0 ||
2899 pmap_demote_l3e_locked(pmap, l3e, va, &lock))) {
2900 pte = pmap_l3e_to_pte(l3e, va);
2901 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
2902 mpte = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME);
2905 } else if (va < VM_MAXUSER_ADDRESS) {
2907 * Here if the pte page isn't mapped, or if it has been
 * deallocated.
2910 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2911 mpte = _pmap_allocpte(pmap, pmap_l3e_pindex(va),
2912 nosleep ? NULL : &lock);
2913 if (mpte == NULL && nosleep) {
2914 rv = KERN_RESOURCE_SHORTAGE;
2917 if (__predict_false(retrycount++ == 6))
2918 panic("too many retries");
2919 invalidate_all = true;
2922 panic("pmap_enter: invalid page directory va=%#lx", va);
2924 origpte = be64toh(*pte);
2928 * Is the specified virtual address already mapped?
2930 if ((origpte & PG_V) != 0) {
2932 if (VERBOSE_PMAP || pmap_logging) {
2933 printf("cow fault pmap_enter(%p, %#lx, %p, %#x, %x, %d) --"
2934 " asid=%lu curpid=%d name=%s origpte0x%lx\n",
2935 pmap, va, m, prot, flags, psind, pmap->pm_pid,
2936 curproc->p_pid, curproc->p_comm, origpte);
2938 pmap_pte_walk(pmap->pm_pml1, va);
2943 * Wiring change, just update stats. We don't worry about
2944 * wiring PT pages as they remain resident as long as there
2945 * are valid mappings in them. Hence, if a user page is wired,
2946 * the PT page will be also.
2948 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
2949 pmap->pm_stats.wired_count++;
2950 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
2951 pmap->pm_stats.wired_count--;
2954 * Remove the extra PT page reference.
2958 KASSERT(mpte->ref_count > 0,
2959 ("pmap_enter: missing reference to page table page,"
2964 * Has the physical page changed?
2966 opa = origpte & PG_FRAME;
2969 * No, might be a protection or wiring change.
2971 if ((origpte & PG_MANAGED) != 0 &&
2972 (newpte & PG_RW) != 0)
2973 vm_page_aflag_set(m, PGA_WRITEABLE);
2974 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) {
2975 if ((newpte & (PG_A|PG_M)) != (origpte & (PG_A|PG_M))) {
2976 if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte)))
2978 if ((newpte & PG_M) != (origpte & PG_M))
2980 if ((newpte & PG_A) != (origpte & PG_A))
2981 vm_page_aflag_set(m, PGA_REFERENCED);
2984 invalidate_all = true;
2985 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
2992 * The physical page has changed. Temporarily invalidate
2993 * the mapping. This ensures that all threads sharing the
2994 * pmap keep a consistent view of the mapping, which is
2995 * necessary for the correct handling of COW faults. It
2996 * also permits reuse of the old mapping's PV entry,
2997 * avoiding an allocation.
2999 * For consistency, handle unmanaged mappings the same way.
3001 origpte = be64toh(pte_load_clear(pte));
3002 KASSERT((origpte & PG_FRAME) == opa,
3003 ("pmap_enter: unexpected pa update for %#lx", va));
3004 if ((origpte & PG_MANAGED) != 0) {
3005 om = PHYS_TO_VM_PAGE(opa);
3008 * The pmap lock is sufficient to synchronize with
3009 * concurrent calls to pmap_page_test_mappings() and
3010 * pmap_ts_referenced().
3012 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3014 if ((origpte & PG_A) != 0)
3015 vm_page_aflag_set(om, PGA_REFERENCED);
3016 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3017 pv = pmap_pvh_remove(&om->md, pmap, va);
3018 if ((newpte & PG_MANAGED) == 0)
3019 free_pv_entry(pmap, pv);
3021 else if (origpte & PG_MANAGED) {
3024 pmap_page_print_mappings(om);
3030 if ((om->a.flags & PGA_WRITEABLE) != 0 &&
3031 TAILQ_EMPTY(&om->md.pv_list) &&
3032 ((om->flags & PG_FICTITIOUS) != 0 ||
3033 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3034 vm_page_aflag_clear(om, PGA_WRITEABLE);
3036 if ((origpte & PG_A) != 0)
3037 invalidate_page = true;
3040 if (pmap != kernel_pmap) {
3042 if (VERBOSE_PMAP || pmap_logging)
3043 printf("pmap_enter(%p, %#lx, %p, %#x, %x, %d) -- asid=%lu curpid=%d name=%s\n",
3044 pmap, va, m, prot, flags, psind,
3045 pmap->pm_pid, curproc->p_pid,
3051 * Increment the counters.
3053 if ((newpte & PG_W) != 0)
3054 pmap->pm_stats.wired_count++;
3055 pmap_resident_count_inc(pmap, 1);
3059 * Enter on the PV list if part of our managed memory.
3061 if ((newpte & PG_MANAGED) != 0) {
3063 pv = get_pv_entry(pmap, &lock);
3068 printf("reassigning pv: %p to pmap: %p\n",
3071 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3072 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3074 if ((newpte & PG_RW) != 0)
3075 vm_page_aflag_set(m, PGA_WRITEABLE);
3081 if ((origpte & PG_V) != 0) {
3083 origpte = be64toh(pte_load_store(pte, htobe64(newpte)));
3084 KASSERT((origpte & PG_FRAME) == pa,
3085 ("pmap_enter: unexpected pa update for %#lx", va));
3086 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
3088 if ((origpte & PG_MANAGED) != 0)
3090 invalidate_page = true;
3093 * Although the PTE may still have PG_RW set, TLB
3094 * invalidation may nonetheless be required because
3095 * the PTE no longer has PG_M set.
3097 } else if ((origpte & PG_X) != 0 || (newpte & PG_X) == 0) {
3099 * Removing capabilities requires invalidation on POWER
3101 invalidate_page = true;
3104 if ((origpte & PG_A) != 0)
3105 invalidate_page = true;
3107 pte_store(pte, newpte);
3112 #if VM_NRESERVLEVEL > 0
3114 * If both the page table page and the reservation are fully
3115 * populated, then attempt promotion.
3117 if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
3118 mmu_radix_ps_enabled(pmap) &&
3119 (m->flags & PG_FICTITIOUS) == 0 &&
3120 vm_reserv_level_iffullpop(m) == 0 &&
3121 pmap_promote_l3e(pmap, l3e, va, &lock) == 0)
3122 invalidate_all = true;
3125 pmap_invalidate_all(pmap);
3126 else if (invalidate_page)
3127 pmap_invalidate_page(pmap, va);
3139 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
3140 * if successful. Returns false if (1) a page table page cannot be allocated
3141 * without sleeping, (2) a mapping already exists at the specified virtual
3142 * address, or (3) a PV entry cannot be allocated without reclaiming another
 * PV entry.
3146 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3147 struct rwlock **lockp)
3149 pml3_entry_t newpde;
3151 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3152 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs) |
3154 if ((m->oflags & VPO_UNMANAGED) == 0)
3155 newpde |= PG_MANAGED;
3156 if (prot & VM_PROT_EXECUTE)
3158 if (prot & VM_PROT_READ)
3159 newpde |= RPTE_EAA_R;
3160 if (va >= DMAP_MIN_ADDRESS)
3161 newpde |= RPTE_EAA_P;
3162 return (pmap_enter_l3e(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
3163 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3168 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
3169 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3170 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3171 * a mapping already exists at the specified virtual address. Returns
3172 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3173 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
3174 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3176 * The parameter "m" is only used when creating a managed, writeable mapping.
3179 pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
3180 vm_page_t m, struct rwlock **lockp)
3182 struct spglist free;
3183 pml3_entry_t oldl3e, *l3e;
3186 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW,
3187 ("pmap_enter_pde: newpde is missing PG_M"));
3188 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3190 if ((pdpg = pmap_allocl3e(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
3191 NULL : lockp)) == NULL) {
3192 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3193 " in pmap %p", va, pmap);
3194 return (KERN_RESOURCE_SHORTAGE);
3196 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
3197 l3e = &l3e[pmap_pml3e_index(va)];
3198 oldl3e = be64toh(*l3e);
3199 if ((oldl3e & PG_V) != 0) {
3200 KASSERT(pdpg->ref_count > 1,
3201 ("pmap_enter_pde: pdpg's wire count is too low"));
3202 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
3204 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3205 " in pmap %p", va, pmap);
3206 return (KERN_FAILURE);
3208 /* Break the existing mapping(s). */
3210 if ((oldl3e & RPTE_LEAF) != 0) {
3212 * The reference to the PD page that was acquired by
3213 * pmap_allocl3e() ensures that it won't be freed.
3214 * However, if the PDE resulted from a promotion, then
3215 * a reserved PT page could be freed.
3217 (void)pmap_remove_l3e(pmap, l3e, va, &free, lockp);
3218 pmap_invalidate_l3e_page(pmap, va, oldl3e);
3220 if (pmap_remove_ptes(pmap, va, va + L3_PAGE_SIZE, l3e,
3222 pmap_invalidate_all(pmap);
3224 vm_page_free_pages_toq(&free, true);
3225 if (va >= VM_MAXUSER_ADDRESS) {
3226 mt = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME);
3227 if (pmap_insert_pt_page(pmap, mt)) {
3229 * XXX Currently, this can't happen because
3230 * we do not perform pmap_enter(psind == 1)
3231 * on the kernel pmap.
3233 panic("pmap_enter_pde: trie insert failed");
3236 KASSERT(be64toh(*l3e) == 0, ("pmap_enter_pde: non-zero pde %p",
3239 if ((newpde & PG_MANAGED) != 0) {
3241 * Abort this mapping if its PV entry could not be created.
3243 if (!pmap_pv_insert_l3e(pmap, va, newpde, flags, lockp)) {
3245 if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
3247 * Although "va" is not mapped, paging-
3248 * structure caches could nonetheless have
3249 * entries that refer to the freed page table
3250 * pages. Invalidate those entries.
3252 pmap_invalidate_page(pmap, va);
3253 vm_page_free_pages_toq(&free, true);
3255 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3256 " in pmap %p", va, pmap);
3257 return (KERN_RESOURCE_SHORTAGE);
3259 if ((newpde & PG_RW) != 0) {
3260 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
3261 vm_page_aflag_set(mt, PGA_WRITEABLE);
3266 * Increment counters.
3268 if ((newpde & PG_W) != 0)
3269 pmap->pm_stats.wired_count += L3_PAGE_SIZE / PAGE_SIZE;
3270 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
3273 * Map the superpage. (This is not a promoted mapping; there will not
3274 * be any lingering 4KB page mappings in the TLB.)
3276 pte_store(l3e, newpde);
3279 counter_u64_add(pmap_l3e_mappings, 1);
3280 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
3281 " in pmap %p", va, pmap);
3282 return (KERN_SUCCESS);
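/*
 * Map a run of resident pages from the same object into the given address
 * range with read and/or execute permission only, preferring 2MB mappings
 * when alignment and reservation population allow and falling back to 4KB
 * mappings otherwise.
 */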
3286 mmu_radix_enter_object(pmap_t pmap, vm_offset_t start,
3287 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
3290 struct rwlock *lock;
3293 vm_pindex_t diff, psize;
3295 VM_OBJECT_ASSERT_LOCKED(m_start->object);
3297 CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
3298 end, m_start, prot);
3301 psize = atop(end - start);
3306 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3307 va = start + ptoa(diff);
3308 if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end &&
3309 m->psind == 1 && mmu_radix_ps_enabled(pmap) &&
3310 pmap_enter_2mpage(pmap, va, m, prot, &lock))
3311 m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1];
3313 mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot,
3314 mpte, &lock, &invalidate);
3315 m = TAILQ_NEXT(m, listq);
3321 pmap_invalidate_all(pmap);
3326 mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3327 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate)
3329 struct spglist free;
3333 KASSERT(!VA_IS_CLEANMAP(va) ||
3334 (m->oflags & VPO_UNMANAGED) != 0,
3335 ("mmu_radix_enter_quick_locked: managed mapping within the clean submap"));
3336 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3339 * In the case that a page table page is not
3340 * resident, we are creating it here.
3342 if (va < VM_MAXUSER_ADDRESS) {
3343 vm_pindex_t ptepindex;
3344 pml3_entry_t *ptepa;
3347 * Calculate pagetable page index
3349 ptepindex = pmap_l3e_pindex(va);
3350 if (mpte && (mpte->pindex == ptepindex)) {
3354 * Get the page directory entry
3356 ptepa = pmap_pml3e(pmap, va);
3359 * If the page table page is mapped, we just increment
3360 * the hold count, and activate it. Otherwise, we
3361 * attempt to allocate a page table page. If this
3362 * attempt fails, we don't retry. Instead, we give up.
3364 if (ptepa && (be64toh(*ptepa) & PG_V) != 0) {
3365 if (be64toh(*ptepa) & RPTE_LEAF)
3367 mpte = PHYS_TO_VM_PAGE(be64toh(*ptepa) & PG_FRAME);
3371 * Pass NULL instead of the PV list lock
3372 * pointer, because we don't intend to sleep.
3374 mpte = _pmap_allocpte(pmap, ptepindex, NULL);
3379 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3380 pte = &pte[pmap_pte_index(va)];
3383 pte = pmap_pte(pmap, va);
3385 if (be64toh(*pte)) {
3394 * Enter on the PV list if part of our managed memory.
3396 if ((m->oflags & VPO_UNMANAGED) == 0 &&
3397 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3400 if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
3402 * Although "va" is not mapped, paging-
3403 * structure caches could nonetheless have
3404 * entries that refer to the freed page table
3405 * pages. Invalidate those entries.
3408 vm_page_free_pages_toq(&free, true);
3416 * Increment counters
3418 pmap_resident_count_inc(pmap, 1);
3420 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs);
3421 if (prot & VM_PROT_EXECUTE)
3425 if ((m->oflags & VPO_UNMANAGED) == 0)
3433 mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
3436 struct rwlock *lock;
3442 mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock,
3448 pmap_invalidate_all(pmap);
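/*
 * Extract the physical address associated with the given pmap and virtual
 * address, or 0 if the address is not mapped.
 */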
3453 mmu_radix_extract(pmap_t pmap, vm_offset_t va)
3459 l3e = pmap_pml3e(pmap, va);
3460 if (__predict_false(l3e == NULL))
3462 if (be64toh(*l3e) & RPTE_LEAF) {
3463 pa = (be64toh(*l3e) & PG_PS_FRAME) | (va & L3_PAGE_MASK);
3464 pa |= (va & L3_PAGE_MASK);
3467 * Beware of a concurrent promotion that changes the
3468 * PDE at this point! For example, vtopte() must not
3469 * be used to access the PTE because it would use the
3470 * new PDE. It is, however, safe to use the old PDE
3471 * because the page table page is preserved by the
 * promotion.
3474 pte = pmap_l3e_to_pte(l3e, va);
3475 if (__predict_false(pte == NULL))
3478 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
3479 pa |= (va & PAGE_MASK);
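/*
 * Atomically extract and wire the page mapped at the given virtual address
 * if that mapping permits the requested protection; otherwise return NULL.
 */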
3485 mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3487 pml3_entry_t l3e, *l3ep;
3492 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
3494 l3ep = pmap_pml3e(pmap, va);
3495 if (l3ep != NULL && (l3e = be64toh(*l3ep))) {
3496 if (l3e & RPTE_LEAF) {
3497 if ((l3e & PG_RW) || (prot & VM_PROT_WRITE) == 0)
3498 m = PHYS_TO_VM_PAGE((l3e & PG_PS_FRAME) |
3499 (va & L3_PAGE_MASK));
3501 /* Native endian PTE, do not pass to pmap functions */
3502 pte = be64toh(*pmap_l3e_to_pte(l3ep, va));
3504 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0))
3505 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3507 if (m != NULL && !vm_page_wire_mapped(m))
3515 mmu_radix_growkernel(vm_offset_t addr)
3522 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
3523 if (VM_MIN_KERNEL_ADDRESS < addr &&
3524 addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE))
3527 addr = roundup2(addr, L3_PAGE_SIZE);
3528 if (addr - 1 >= vm_map_max(kernel_map))
3529 addr = vm_map_max(kernel_map);
3530 while (kernel_vm_end < addr) {
3531 l2e = pmap_pml2e(kernel_pmap, kernel_vm_end);
3532 if ((be64toh(*l2e) & PG_V) == 0) {
3533 /* We need a new PDP entry */
3534 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
3535 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3537 panic("pmap_growkernel: no memory to grow kernel");
3538 nkpg->pindex = kernel_vm_end >> L2_PAGE_SIZE_SHIFT;
3539 paddr = VM_PAGE_TO_PHYS(nkpg);
3540 pde_store(l2e, paddr);
3541 continue; /* try again */
3543 l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end);
3544 if ((be64toh(*l3e) & PG_V) != 0) {
3545 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3546 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3547 kernel_vm_end = vm_map_max(kernel_map);
3553 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
3556 panic("pmap_growkernel: no memory to grow kernel");
3557 nkpg->pindex = pmap_l3e_pindex(kernel_vm_end);
3558 paddr = VM_PAGE_TO_PHYS(nkpg);
3559 pde_store(l3e, paddr);
3561 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3562 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3563 kernel_vm_end = vm_map_max(kernel_map);
3570 static MALLOC_DEFINE(M_RADIX_PGD, "radix_pgd", "radix page table root directory");
3571 static uma_zone_t zone_radix_pgd;
3574 radix_pgd_import(void *arg __unused, void **store, int count, int domain __unused,
3579 req = VM_ALLOC_WIRED | malloc2vm_flags(flags);
3580 for (int i = 0; i < count; i++) {
3581 vm_page_t m = vm_page_alloc_noobj_contig(req,
3582 RADIX_PGD_SIZE / PAGE_SIZE,
3583 0, (vm_paddr_t)-1, RADIX_PGD_SIZE, L1_PAGE_SIZE,
3584 VM_MEMATTR_DEFAULT);
3585 store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3591 radix_pgd_release(void *arg __unused, void **store, int count)
3594 struct spglist free;
3598 page_count = RADIX_PGD_SIZE/PAGE_SIZE;
3600 for (int i = 0; i < count; i++) {
3602 * XXX selectively remove dmap and KVA entries so we don't
3605 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
3606 for (int j = page_count-1; j >= 0; j--) {
3607 vm_page_unwire_noq(&m[j]);
3608 SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss);
3610 vm_page_free_pages_toq(&free, false);
3619 int error, i, pv_npg;
3621 /* XXX is this really needed for POWER? */
3622 /* L1TF, reserve page @0 unconditionally */
3623 vm_page_blacklist_add(0, bootverbose);
3625 zone_radix_pgd = uma_zcache_create("radix_pgd_cache",
3626 RADIX_PGD_SIZE, NULL, NULL,
3628 trash_init, trash_fini,
3632 radix_pgd_import, radix_pgd_release,
3633 NULL, UMA_ZONE_NOBUCKET);
3636 * Initialize the vm page array entries for the kernel pmap's
3639 PMAP_LOCK(kernel_pmap);
3640 for (i = 0; i < nkpt; i++) {
3641 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
3642 KASSERT(mpte >= vm_page_array &&
3643 mpte < &vm_page_array[vm_page_array_size],
3644 ("pmap_init: page table page is out of range size: %lu",
3645 vm_page_array_size));
3646 mpte->pindex = pmap_l3e_pindex(VM_MIN_KERNEL_ADDRESS) + i;
3647 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
3648 MPASS(PHYS_TO_VM_PAGE(mpte->phys_addr) == mpte);
3649 //pmap_insert_pt_page(kernel_pmap, mpte);
3650 mpte->ref_count = 1;
3652 PMAP_UNLOCK(kernel_pmap);
3655 CTR1(KTR_PMAP, "%s()", __func__);
3656 TAILQ_INIT(&pv_dummy.pv_list);
3659 * Are large page mappings enabled?
3661 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
3662 if (superpages_enabled) {
3663 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
3664 ("pmap_init: can't assign to pagesizes[1]"));
3665 pagesizes[1] = L3_PAGE_SIZE;
3669 * Initialize the pv chunk list mutex.
3671 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
3674 * Initialize the pool of pv list locks.
3676 for (i = 0; i < NPV_LIST_LOCKS; i++)
3677 rw_init(&pv_list_locks[i], "pmap pv list");
3680 * Calculate the size of the pv head table for superpages.
3682 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L3_PAGE_SIZE);
3685 * Allocate memory for the pv head table for superpages.
3687 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
3689 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
3690 for (i = 0; i < pv_npg; i++)
3691 TAILQ_INIT(&pv_table[i].pv_list);
3692 TAILQ_INIT(&pv_dummy.pv_list);
3694 pmap_initialized = 1;
3695 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
3696 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3697 (vmem_addr_t *)&qframe);
3700 panic("qframe allocation failed");
3701 asid_arena = vmem_create("ASID", isa3_base_pid + 1, (1<<isa3_pid_bits),
3706 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3708 struct rwlock *lock;
3710 struct md_page *pvh;
3711 pt_entry_t *pte, mask;
3713 int md_gen, pvh_gen;
3717 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3720 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3722 if (!PMAP_TRYLOCK(pmap)) {
3723 md_gen = m->md.pv_gen;
3727 if (md_gen != m->md.pv_gen) {
3732 pte = pmap_pte(pmap, pv->pv_va);
3735 mask |= PG_RW | PG_M;
3737 mask |= PG_V | PG_A;
3738 rv = (be64toh(*pte) & mask) == mask;
3743 if ((m->flags & PG_FICTITIOUS) == 0) {
3744 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3745 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
3747 if (!PMAP_TRYLOCK(pmap)) {
3748 md_gen = m->md.pv_gen;
3749 pvh_gen = pvh->pv_gen;
3753 if (md_gen != m->md.pv_gen ||
3754 pvh_gen != pvh->pv_gen) {
3759 pte = pmap_pml3e(pmap, pv->pv_va);
3762 mask |= PG_RW | PG_M;
3764 mask |= PG_V | PG_A;
3765 rv = (be64toh(*pte) & mask) == mask;
3779 * Return whether or not the specified physical page was modified
3780 * in any physical maps.
3783 mmu_radix_is_modified(vm_page_t m)
3786 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3787 ("pmap_is_modified: page %p is not managed", m));
3789 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3791 * If the page is not busied then this check is racy.
3793 if (!pmap_page_is_write_mapped(m))
3795 return (pmap_page_test_mappings(m, FALSE, TRUE));
3799 mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3805 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
3808 l3e = pmap_pml3e(pmap, addr);
3809 if (l3e != NULL && (be64toh(*l3e) & (RPTE_LEAF | PG_V)) == PG_V) {
3810 pte = pmap_l3e_to_pte(l3e, addr);
3811 rv = (be64toh(*pte) & PG_V) == 0;
3818 mmu_radix_is_referenced(vm_page_t m)
3820 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3821 ("pmap_is_referenced: page %p is not managed", m));
3822 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3823 return (pmap_page_test_mappings(m, TRUE, FALSE));
3827 * pmap_ts_referenced:
3829 * Return a count of reference bits for a page, clearing those bits.
3830 * It is not necessary for every reference bit to be cleared, but it
3831 * is necessary that 0 only be returned when there are truly no
3832 * reference bits set.
3834 * As an optimization, update the page's dirty field if a modified bit is
3835 * found while counting reference bits. This opportunistic update can be
3836 * performed at low cost and can eliminate the need for some future calls
3837 * to pmap_is_modified(). However, since this function stops after
3838 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3839 * dirty pages. Those dirty pages will only be detected by a future call
3840 * to pmap_is_modified().
3842 * A DI block is not needed within this function, because
3843 * invalidations are performed before the PV list lock is
 * acquired.
3847 mmu_radix_ts_referenced(vm_page_t m)
3849 struct md_page *pvh;
3852 struct rwlock *lock;
3853 pml3_entry_t oldl3e, *l3e;
3856 int cleared, md_gen, not_cleared, pvh_gen;
3857 struct spglist free;
3859 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3860 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3861 ("pmap_ts_referenced: page %p is not managed", m));
3864 pa = VM_PAGE_TO_PHYS(m);
3865 lock = PHYS_TO_PV_LIST_LOCK(pa);
3866 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
3870 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
3871 goto small_mappings;
3877 if (!PMAP_TRYLOCK(pmap)) {
3878 pvh_gen = pvh->pv_gen;
3882 if (pvh_gen != pvh->pv_gen) {
3887 l3e = pmap_pml3e(pmap, pv->pv_va);
3888 oldl3e = be64toh(*l3e);
3889 if ((oldl3e & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
3891 * Although "oldl3e" is mapping a 2MB page, because
3892 * this function is called at a 4KB page granularity,
3893 * we only update the 4KB page under test.
3897 if ((oldl3e & PG_A) != 0) {
3899 * Since this reference bit is shared by 512 4KB
3900 * pages, it should not be cleared every time it is
3901 * tested. Apply a simple "hash" function on the
3902 * physical page number, the virtual superpage number,
3903 * and the pmap address to select one 4KB page out of
3904 * the 512 on which testing the reference bit will
3905 * result in clearing that reference bit. This
3906 * function is designed to avoid the selection of the
3907 * same 4KB page for every 2MB page mapping.
3909 * On demotion, a mapping that hasn't been referenced
3910 * is simply destroyed. To avoid the possibility of a
3911 * subsequent page fault on a demoted wired mapping,
3912 * always leave its reference bit set. Moreover,
3913 * since the superpage is wired, the current state of
3914 * its reference bit won't affect page replacement.
3916 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L3_PAGE_SIZE_SHIFT) ^
3917 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
3918 (oldl3e & PG_W) == 0) {
3919 atomic_clear_long(l3e, htobe64(PG_A));
3920 pmap_invalidate_page(pmap, pv->pv_va);
3922 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3923 ("inconsistent pv lock %p %p for page %p",
3924 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3929 /* Rotate the PV list if it has more than one entry. */
3930 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3931 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
3932 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
3935 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
3937 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
3939 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
3946 if (!PMAP_TRYLOCK(pmap)) {
3947 pvh_gen = pvh->pv_gen;
3948 md_gen = m->md.pv_gen;
3952 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3957 l3e = pmap_pml3e(pmap, pv->pv_va);
3958 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0,
3959 ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
3961 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
3962 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW))
3964 if ((be64toh(*pte) & PG_A) != 0) {
3965 atomic_clear_long(pte, htobe64(PG_A));
3966 pmap_invalidate_page(pmap, pv->pv_va);
3970 /* Rotate the PV list if it has more than one entry. */
3971 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3972 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
3973 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3976 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
3977 not_cleared < PMAP_TS_REFERENCED_MAX);
3980 vm_page_free_pages_toq(&free, true);
3981 return (cleared + not_cleared);
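/*
 * Map a range of physical addresses into kernel virtual address space.
 * With radix and a full direct map this simply returns the DMAP address
 * corresponding to "start".
 */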
3985 mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
3986 vm_paddr_t end, int prot __unused)
3989 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
3991 return (PHYS_TO_DMAP(start));
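/*
 * Pre-populate the page tables for a device-backed object with 2MB
 * mappings when the object's pages are suitably aligned, physically
 * contiguous, and share the same memory attributes.
 */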
3995 mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
3996 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3999 vm_paddr_t pa, ptepa;
4003 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
4004 object, pindex, size);
4005 VM_OBJECT_ASSERT_WLOCKED(object);
4006 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4007 ("pmap_object_init_pt: non-device object"));
4008 /* NB: size can be logically ored with addr here */
4009 if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) {
4010 if (!mmu_radix_ps_enabled(pmap))
4012 if (!vm_object_populate(object, pindex, pindex + atop(size)))
4014 p = vm_page_lookup(object, pindex);
4015 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4016 ("pmap_object_init_pt: invalid page %p", p));
4017 ma = p->md.mdpg_cache_attrs;
4020 * Abort the mapping if the first page is not physically
4021 * aligned to a 2MB page boundary.
4023 ptepa = VM_PAGE_TO_PHYS(p);
4024 if (ptepa & L3_PAGE_MASK)
4028 * Skip the first page. Abort the mapping if the rest of
4029 * the pages are not physically contiguous or have differing
4030 * memory attributes.
4032 p = TAILQ_NEXT(p, listq);
4033 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
4035 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4036 ("pmap_object_init_pt: invalid page %p", p));
4037 if (pa != VM_PAGE_TO_PHYS(p) ||
4038 ma != p->md.mdpg_cache_attrs)
4040 p = TAILQ_NEXT(p, listq);
4044 for (pa = ptepa | pmap_cache_bits(ma);
4045 pa < ptepa + size; pa += L3_PAGE_SIZE) {
4046 pdpg = pmap_allocl3e(pmap, addr, NULL);
4049 * The creation of mappings below is only an
4050 * optimization. If a page directory page
4051 * cannot be allocated without blocking,
4052 * continue on to the next mapping rather than
 * blocking.
4055 addr += L3_PAGE_SIZE;
4058 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4059 l3e = &l3e[pmap_pml3e_index(addr)];
4060 if ((be64toh(*l3e) & PG_V) == 0) {
4061 pa |= PG_M | PG_A | PG_RW;
4063 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
4064 counter_u64_add(pmap_l3e_mappings, 1);
4066 /* Continue on if the PDE is already valid. */
4068 KASSERT(pdpg->ref_count > 0,
4069 ("pmap_object_init_pt: missing reference "
4070 "to page directory page, va: 0x%lx", addr));
4072 addr += L3_PAGE_SIZE;
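/*
 * Return TRUE if the given pmap has a managed mapping for the page,
 * inspecting at most a small, fixed number of PV entries so the check
 * stays cheap.
 */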
4080 mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
4082 struct md_page *pvh;
4083 struct rwlock *lock;
4088 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4089 ("pmap_page_exists_quick: page %p is not managed", m));
4090 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
4092 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4094 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4095 if (PV_PMAP(pv) == pmap) {
4103 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4104 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4105 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4106 if (PV_PMAP(pv) == pmap) {
4120 mmu_radix_page_init(vm_page_t m)
4123 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4124 TAILQ_INIT(&m->md.pv_list);
4125 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
4129 mmu_radix_page_wired_mappings(vm_page_t m)
4131 struct rwlock *lock;
4132 struct md_page *pvh;
4136 int count, md_gen, pvh_gen;
4138 if ((m->oflags & VPO_UNMANAGED) != 0)
4140 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4141 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4145 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4147 if (!PMAP_TRYLOCK(pmap)) {
4148 md_gen = m->md.pv_gen;
4152 if (md_gen != m->md.pv_gen) {
4157 pte = pmap_pte(pmap, pv->pv_va);
4158 if ((be64toh(*pte) & PG_W) != 0)
4162 if ((m->flags & PG_FICTITIOUS) == 0) {
4163 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4164 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4166 if (!PMAP_TRYLOCK(pmap)) {
4167 md_gen = m->md.pv_gen;
4168 pvh_gen = pvh->pv_gen;
4172 if (md_gen != m->md.pv_gen ||
4173 pvh_gen != pvh->pv_gen) {
4178 pte = pmap_pml3e(pmap, pv->pv_va);
4179 if ((be64toh(*pte) & PG_W) != 0)
4189 mmu_radix_update_proctab(int pid, pml1_entry_t l1pa)
4191 isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT);
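/*
 * Initialize a pmap for a new address space: allocate and zero the radix
 * page directory, reserve a PID from the ASID arena, and publish the
 * directory root in the process table.
 */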
4195 mmu_radix_pinit(pmap_t pmap)
4200 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4203 * allocate the page directory page
4205 pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK);
4207 for (int j = 0; j < RADIX_PGD_SIZE_SHIFT; j++)
4208 pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE);
4209 vm_radix_init(&pmap->pm_radix);
4210 TAILQ_INIT(&pmap->pm_pvchunk);
4211 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4212 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4213 vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid);
4216 l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1);
4217 mmu_radix_update_proctab(pid, l1pa);
4218 __asm __volatile("ptesync;isync" : : : "memory");
4224 * This routine is called if the desired page table page does not exist.
4226 * If page table page allocation fails, this routine may sleep before
4227 * returning NULL. It sleeps only if a lock pointer was given.
4229 * Note: If a page allocation fails at page table level two or three,
4230 * one or two pages may be held during the wait, only to be released
4231 * afterwards. This conservative approach is easily argued to avoid
 * race conditions.
4235 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
4237 vm_page_t m, pdppg, pdpg;
4239 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4242 * Allocate a page table page.
4244 if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
4245 if (lockp != NULL) {
4246 RELEASE_PV_LIST_LOCK(lockp);
4252 * Indicate the need to retry. While waiting, the page table
4253 * page may have been allocated.
4257 m->pindex = ptepindex;
4260 * Map the pagetable page into the process address space, if
4261 * it isn't already there.
4264 if (ptepindex >= (NUPDE + NUPDPE)) {
4266 vm_pindex_t pml1index;
4268 /* Wire up a new PDPE page */
4269 pml1index = ptepindex - (NUPDE + NUPDPE);
4270 l1e = &pmap->pm_pml1[pml1index];
4271 KASSERT((be64toh(*l1e) & PG_V) == 0,
4272 ("%s: L1 entry %#lx is valid", __func__, *l1e));
4273 pde_store(l1e, VM_PAGE_TO_PHYS(m));
4274 } else if (ptepindex >= NUPDE) {
4275 vm_pindex_t pml1index;
4276 vm_pindex_t pdpindex;
4280 /* Wire up a new l2e page */
4281 pdpindex = ptepindex - NUPDE;
4282 pml1index = pdpindex >> RPTE_SHIFT;
4284 l1e = &pmap->pm_pml1[pml1index];
4285 if ((be64toh(*l1e) & PG_V) == 0) {
4286 /* Have to allocate a new pdp, recurse */
4287 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml1index,
4289 vm_page_unwire_noq(m);
4290 vm_page_free_zero(m);
4294 /* Add reference to l2e page */
4295 pdppg = PHYS_TO_VM_PAGE(be64toh(*l1e) & PG_FRAME);
4298 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4300 /* Now find the pdp page */
4301 l2e = &l2e[pdpindex & RPTE_MASK];
4302 KASSERT((be64toh(*l2e) & PG_V) == 0,
4303 ("%s: L2 entry %#lx is valid", __func__, *l2e));
4304 pde_store(l2e, VM_PAGE_TO_PHYS(m));
4306 vm_pindex_t pml1index;
4307 vm_pindex_t pdpindex;
4312 /* Wire up a new PTE page */
4313 pdpindex = ptepindex >> RPTE_SHIFT;
4314 pml1index = pdpindex >> RPTE_SHIFT;
4316 /* First, find the pdp and check that it's valid. */
4317 l1e = &pmap->pm_pml1[pml1index];
4318 if ((be64toh(*l1e) & PG_V) == 0) {
4319 /* Have to allocate a new pd, recurse */
4320 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4322 vm_page_unwire_noq(m);
4323 vm_page_free_zero(m);
4326 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4327 l2e = &l2e[pdpindex & RPTE_MASK];
4329 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4330 l2e = &l2e[pdpindex & RPTE_MASK];
4331 if ((be64toh(*l2e) & PG_V) == 0) {
4332 /* Have to allocate a new pd, recurse */
4333 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4335 vm_page_unwire_noq(m);
4336 vm_page_free_zero(m);
4340 /* Add reference to the pd page */
4341 pdpg = PHYS_TO_VM_PAGE(be64toh(*l2e) & PG_FRAME);
4345 l3e = (pml3_entry_t *)PHYS_TO_DMAP(be64toh(*l2e) & PG_FRAME);
4347 /* Now we know where the page directory page is */
4348 l3e = &l3e[ptepindex & RPTE_MASK];
4349 KASSERT((be64toh(*l3e) & PG_V) == 0,
4350 ("%s: L3 entry %#lx is valid", __func__, *l3e));
4351 pde_store(l3e, VM_PAGE_TO_PHYS(m));
4354 pmap_resident_count_inc(pmap, 1);
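/*
 * Return the page holding the L3 (2MB-level) directory entries for "va",
 * allocating it and any missing upper-level pages if necessary, with its
 * reference count bumped.
 */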
4358 pmap_allocl3e(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4360 vm_pindex_t pdpindex, ptepindex;
4365 pdpe = pmap_pml2e(pmap, va);
4366 if (pdpe != NULL && (be64toh(*pdpe) & PG_V) != 0) {
4367 /* Add a reference to the pd page. */
4368 pdpg = PHYS_TO_VM_PAGE(be64toh(*pdpe) & PG_FRAME);
4371 /* Allocate a pd page. */
4372 ptepindex = pmap_l3e_pindex(va);
4373 pdpindex = ptepindex >> RPTE_SHIFT;
4374 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
4375 if (pdpg == NULL && lockp != NULL)
4382 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4384 vm_pindex_t ptepindex;
4389 * Calculate pagetable page index
4391 ptepindex = pmap_l3e_pindex(va);
4394 * Get the page directory entry
4396 pd = pmap_pml3e(pmap, va);
4399 * This supports switching from a 2MB page to a
 * normal 4K page.
4402 if (pd != NULL && (be64toh(*pd) & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V)) {
4403 if (!pmap_demote_l3e_locked(pmap, pd, va, lockp)) {
4405 * Invalidation of the 2MB page mapping may have caused
4406 * the deallocation of the underlying PD page.
4413 * If the page table page is mapped, we just increment the
4414 * hold count, and activate it.
4416 if (pd != NULL && (be64toh(*pd) & PG_V) != 0) {
4417 m = PHYS_TO_VM_PAGE(be64toh(*pd) & PG_FRAME);
4421 * Here if the pte page isn't mapped, or if it has been
 * deallocated.
4424 m = _pmap_allocpte(pmap, ptepindex, lockp);
4425 if (m == NULL && lockp != NULL)
4432 mmu_radix_pinit0(pmap_t pmap)
4435 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4436 PMAP_LOCK_INIT(pmap);
4437 pmap->pm_pml1 = kernel_pmap->pm_pml1;
4438 pmap->pm_pid = kernel_pmap->pm_pid;
4440 vm_radix_init(&pmap->pm_radix);
4441 TAILQ_INIT(&pmap->pm_pvchunk);
4442 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4443 kernel_pmap->pm_flags =
4444 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4447 * pmap_protect_l3e: do the things to protect a 2mpage in a process
4450 pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot)
4452 pt_entry_t newpde, oldpde;
4453 vm_offset_t eva, va;
4455 boolean_t anychanged;
4457 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4458 KASSERT((sva & L3_PAGE_MASK) == 0,
4459 ("pmap_protect_l3e: sva is not 2mpage aligned"));
4462 oldpde = newpde = be64toh(*l3e);
4463 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
4464 (PG_MANAGED | PG_M | PG_RW)) {
4465 eva = sva + L3_PAGE_SIZE;
4466 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4467 va < eva; va += PAGE_SIZE, m++)
4470 if ((prot & VM_PROT_WRITE) == 0) {
4471 newpde &= ~(PG_RW | PG_M);
4472 newpde |= RPTE_EAA_R;
4474 if (prot & VM_PROT_EXECUTE)
4476 if (newpde != oldpde) {
4478 * As an optimization to future operations on this PDE, clear
4479 * PG_PROMOTED. The impending invalidation will remove any
4480 * lingering 4KB page mappings from the TLB.
4482 if (!atomic_cmpset_long(l3e, htobe64(oldpde), htobe64(newpde & ~PG_PROMOTED)))
4486 return (anychanged);
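/*
 * Set the protection on the given range of the pmap, downgrading write and
 * execute permission in place and demoting 2MB mappings only when the
 * range does not cover the whole superpage.
 */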
4490 mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
4493 vm_offset_t va_next;
4496 pml3_entry_t ptpaddr, *l3e;
4498 boolean_t anychanged;
4500 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva,
4503 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4504 if (prot == VM_PROT_NONE) {
4505 mmu_radix_remove(pmap, sva, eva);
4509 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
4510 (VM_PROT_WRITE|VM_PROT_EXECUTE))
4514 if (VERBOSE_PROTECT || pmap_logging)
4515 printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n",
4516 pmap, sva, eva, prot, pmap->pm_pid);
4521 for (; sva < eva; sva = va_next) {
4522 l1e = pmap_pml1e(pmap, sva);
4523 if ((be64toh(*l1e) & PG_V) == 0) {
4524 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
4530 l2e = pmap_l1e_to_l2e(l1e, sva);
4531 if ((be64toh(*l2e) & PG_V) == 0) {
4532 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
4538 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
4542 l3e = pmap_l2e_to_l3e(l2e, sva);
4543 ptpaddr = be64toh(*l3e);
4546 * Weed out invalid mappings.
4552 * Check for large page.
4554 if ((ptpaddr & RPTE_LEAF) != 0) {
4556 * Are we protecting the entire large page? If not,
4557 * demote the mapping and fall through.
4559 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
4560 if (pmap_protect_l3e(pmap, l3e, sva, prot))
4563 } else if (!pmap_demote_l3e(pmap, l3e, sva)) {
4565 * The large page mapping was destroyed.
4574 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
4576 pt_entry_t obits, pbits;
4580 MPASS(pte == pmap_pte(pmap, sva));
4581 obits = pbits = be64toh(*pte);
4582 if ((pbits & PG_V) == 0)
4585 if ((prot & VM_PROT_WRITE) == 0) {
4586 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
4587 (PG_MANAGED | PG_M | PG_RW)) {
4588 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
4591 pbits &= ~(PG_RW | PG_M);
4592 pbits |= RPTE_EAA_R;
4594 if (prot & VM_PROT_EXECUTE)
4597 if (pbits != obits) {
4598 if (!atomic_cmpset_long(pte, htobe64(obits), htobe64(pbits)))
4600 if (obits & (PG_A|PG_M)) {
4603 if (VERBOSE_PROTECT || pmap_logging)
4604 printf("%#lx %#lx -> %#lx\n",
4612 pmap_invalidate_all(pmap);
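/*
 * Enter wired kernel mappings for the "count" pages in "ma" starting at
 * virtual address "sva".  Any previously valid entries in the range are
 * overwritten and the range is invalidated afterwards.
 */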
4617 mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count)
4620 CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count);
4621 pt_entry_t oldpte, pa, *pte;
4623 uint64_t cache_bits, attr_bits;
4627 attr_bits = RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
4630 while (va < sva + PAGE_SIZE * count) {
4631 if (__predict_false((va & L3_PAGE_MASK) == 0))
4633 MPASS(pte == pmap_pte(kernel_pmap, va));
4636 * XXX there has to be a more efficient way than traversing
4637 * the page table every time - but go for correctness for now.
4642 cache_bits = pmap_cache_bits(m->md.mdpg_cache_attrs);
4643 pa = VM_PAGE_TO_PHYS(m) | cache_bits | attr_bits;
4644 if (be64toh(*pte) != pa) {
4645 oldpte |= be64toh(*pte);
4651 if (__predict_false((oldpte & RPTE_VALID) != 0))
4652 pmap_invalidate_range(kernel_pmap, sva, sva + count *
4659 mmu_radix_qremove(vm_offset_t sva, int count)
4664 CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count);
4665 KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva));
4669 while (va < sva + PAGE_SIZE * count) {
4670 if (__predict_false((va & L3_PAGE_MASK) == 0))
4676 pmap_invalidate_range(kernel_pmap, sva, va);
4679 /***************************************************
4680 * Page table page management routines.....
4681 ***************************************************/
4683 * Schedule the specified unused page table page to be freed. Specifically,
4684 * add the page to the specified list of pages that will be released to the
4685 * physical memory manager after the TLB has been updated.
4687 static __inline void
4688 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
4689 boolean_t set_PG_ZERO)
4693 m->flags |= PG_ZERO;
4695 m->flags &= ~PG_ZERO;
4696 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
4700 * Inserts the specified page table page into the specified pmap's collection
4701 * of idle page table pages. Each of a pmap's page table pages is responsible
4702 * for mapping a distinct range of virtual addresses. The pmap's collection is
4703 * ordered by this virtual address range.
4706 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
4709 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4710 return (vm_radix_insert(&pmap->pm_radix, mpte));
4714 * Removes the page table page mapping the specified virtual address from the
4715 * specified pmap's collection of idle page table pages, and returns it.
4716 * Otherwise, returns NULL if there is no page table page corresponding to the
4717 * specified virtual address.
4719 static __inline vm_page_t
4720 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4723 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4724 return (vm_radix_remove(&pmap->pm_radix, pmap_l3e_pindex(va)));
4728 * Decrements a page table page's wire count, which is used to record the
4729 * number of valid page table entries within the page. If the wire count
4730 * drops to zero, then the page table page is unmapped. Returns TRUE if the
4731 * page table page was unmapped and FALSE otherwise.
4733 static inline boolean_t
4734 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4738 if (m->ref_count == 0) {
4739 _pmap_unwire_ptp(pmap, va, m, free);
4746 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4749 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4751 * unmap the page table page
4753 if (m->pindex >= NUPDE + NUPDPE) {
4756 pml1 = pmap_pml1e(pmap, va);
4758 } else if (m->pindex >= NUPDE) {
4761 l2e = pmap_pml2e(pmap, va);
4766 l3e = pmap_pml3e(pmap, va);
4769 pmap_resident_count_dec(pmap, 1);
4770 if (m->pindex < NUPDE) {
4771 /* We just released a PT, unhold the matching PD */
4774 pdpg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml2e(pmap, va)) & PG_FRAME);
4775 pmap_unwire_ptp(pmap, va, pdpg, free);
4777 else if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
4778 /* We just released a PD, unhold the matching PDP */
4781 pdppg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml1e(pmap, va)) & PG_FRAME);
4782 pmap_unwire_ptp(pmap, va, pdppg, free);
4786 * Put page on a list so that it is released after
4787 * *ALL* TLB shootdown is done
4789 pmap_add_delayed_free_list(m, free, TRUE);
4793 * After removing a page table entry, this routine is used to
4794 * conditionally free the page, and manage the hold/wire counts.
4797 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde,
4798 struct spglist *free)
4802 if (va >= VM_MAXUSER_ADDRESS)
4804 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4805 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4806 return (pmap_unwire_ptp(pmap, va, mpte, free));
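/*
 * Release a pmap once its address space has been fully torn down: the
 * pmap must have no resident pages or page table pages left.  Its
 * process-table entry is cleared, the root page table is freed, and the
 * PID is returned to the ASID arena.
 */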
4810 mmu_radix_release(pmap_t pmap)
4813 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4814 KASSERT(pmap->pm_stats.resident_count == 0,
4815 ("pmap_release: pmap resident count %ld != 0",
4816 pmap->pm_stats.resident_count));
4817 KASSERT(vm_radix_is_empty(&pmap->pm_radix),
4818 ("pmap_release: pmap has reserved page table page(s)"));
4820 pmap_invalidate_all(pmap);
4821 isa3_proctab[pmap->pm_pid].proctab0 = 0;
4822 uma_zfree(zone_radix_pgd, pmap->pm_pml1);
4823 vmem_free(asid_arena, pmap->pm_pid, 1);
4827 * Create the PV entry for a 2MB page mapping. Always returns true unless the
4828 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
4829 * false if the PV entry cannot be allocated without resorting to reclamation.
4832 pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t pde, u_int flags,
4833 struct rwlock **lockp)
4835 struct md_page *pvh;
4839 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4840 /* Pass NULL instead of the lock pointer to disable reclamation. */
4841 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
4842 NULL : lockp)) == NULL)
4845 pa = pde & PG_PS_FRAME;
4846 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4847 pvh = pa_to_pvh(pa);
4848 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
4854 * Fills a page table page with mappings to consecutive physical pages.
4857 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
4861 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
4862 *pte = htobe64(newpte);
4863 newpte += PAGE_SIZE;
4868 pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va)
4870 struct rwlock *lock;
4874 rv = pmap_demote_l3e_locked(pmap, pde, va, &lock);
4881 pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
4882 struct rwlock **lockp)
4884 pml3_entry_t oldpde;
4885 pt_entry_t *firstpte;
4888 struct spglist free;
4891 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4892 oldpde = be64toh(*l3e);
4893 KASSERT((oldpde & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
4894 ("pmap_demote_l3e: oldpde is missing RPTE_LEAF and/or PG_V %lx",
4896 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
4898 KASSERT((oldpde & PG_W) == 0,
4899 ("pmap_demote_l3e: page table page for a wired mapping"
4903 * Invalidate the 2MB page mapping and return "failure" if the
4904 * mapping was never accessed or the allocation of the new
4905 * page table page fails. If the 2MB page mapping belongs to
4906 * the direct map region of the kernel's address space, then
4907 * the page allocation request specifies the highest possible
4908 * priority (VM_ALLOC_INTERRUPT). Otherwise, the priority is
4909 * normal. Page table pages are preallocated for every other
4910 * part of the kernel address space, so the direct map region
4911 * is the only part of the kernel address space that must be handled here.
4914 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc_noobj(
4915 (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS ?
4916 VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED)) == NULL) {
4918 sva = trunc_2mpage(va);
4919 pmap_remove_l3e(pmap, l3e, sva, &free, lockp);
4920 pmap_invalidate_l3e_page(pmap, sva, oldpde);
4921 vm_page_free_pages_toq(&free, true);
4922 CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx"
4923 " in pmap %p", va, pmap);
4926 mpte->pindex = pmap_l3e_pindex(va);
4927 if (va < VM_MAXUSER_ADDRESS)
4928 pmap_resident_count_inc(pmap, 1);
4930 mptepa = VM_PAGE_TO_PHYS(mpte);
4931 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
4932 KASSERT((oldpde & PG_A) != 0,
4933 ("pmap_demote_l3e: oldpde is missing PG_A"));
4934 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
4935 ("pmap_demote_l3e: oldpde is missing PG_M"));
4938 * If the page table page is new, initialize it.
4940 if (mpte->ref_count == 1) {
4941 mpte->ref_count = NPTEPG;
4942 pmap_fill_ptp(firstpte, oldpde);
4945 KASSERT((be64toh(*firstpte) & PG_FRAME) == (oldpde & PG_FRAME),
4946 ("pmap_demote_l3e: firstpte and newpte map different physical"
4950 * If the mapping has changed attributes, update the page table entries.
4953 if ((be64toh(*firstpte) & PG_PTE_PROMOTE) != (oldpde & PG_PTE_PROMOTE))
4954 pmap_fill_ptp(firstpte, oldpde);
4957 * The spare PV entries must be reserved prior to demoting the
4958 * mapping, that is, prior to changing the PDE. Otherwise, the state
4959 * of the PDE and the PV lists will be inconsistent, which can result
4960 * in reclaim_pv_chunk() attempting to remove a PV entry from the
4961 * wrong PV list and pmap_pv_demote_l3e() failing to find the expected
4962 * PV entry for the 2MB page mapping that is being demoted.
4964 if ((oldpde & PG_MANAGED) != 0)
4965 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
4968 * Demote the mapping. This pmap is locked. The old PDE has
4969 * PG_A set. If the old PDE has PG_RW set, it also has PG_M
4970 * set. Thus, there is no danger of a race with another
4971 * processor changing the setting of PG_A and/or PG_M between
4972 * the read above and the store below.
4974 pde_store(l3e, mptepa);
4975 pmap_invalidate_l3e_page(pmap, trunc_2mpage(va), oldpde);
4977 * Demote the PV entry.
4979 if ((oldpde & PG_MANAGED) != 0)
4980 pmap_pv_demote_l3e(pmap, va, oldpde & PG_PS_FRAME, lockp);
4982 counter_u64_add(pmap_l3e_demotions, 1);
4983 CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx"
4984 " in pmap %p", va, pmap);
4989 * pmap_remove_kernel_l3e: Remove a kernel superpage mapping.
4992 pmap_remove_kernel_l3e(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va)
4997 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
4998 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4999 mpte = pmap_remove_pt_page(pmap, va);
5001 panic("pmap_remove_kernel_l3e: Missing pt page.");
5003 mptepa = VM_PAGE_TO_PHYS(mpte);
5006 * Initialize the page table page.
5008 pagezero(PHYS_TO_DMAP(mptepa));
5011 * Demote the mapping.
5013 pde_store(l3e, mptepa);
5018 * pmap_remove_l3e: unmap a 2MB superpage mapping from a process
5021 pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
5022 struct spglist *free, struct rwlock **lockp)
5024 struct md_page *pvh;
5025 pml3_entry_t oldpde;
5026 vm_offset_t eva, va;
5029 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5030 KASSERT((sva & L3_PAGE_MASK) == 0,
5031 ("pmap_remove_l3e: sva is not 2mpage aligned"));
5032 oldpde = be64toh(pte_load_clear(pdq));
5034 pmap->pm_stats.wired_count -= (L3_PAGE_SIZE / PAGE_SIZE);
5035 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5036 if (oldpde & PG_MANAGED) {
5037 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
5038 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
5039 pmap_pvh_free(pvh, pmap, sva);
5040 eva = sva + L3_PAGE_SIZE;
5041 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
5042 va < eva; va += PAGE_SIZE, m++) {
5043 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
5046 vm_page_aflag_set(m, PGA_REFERENCED);
5047 if (TAILQ_EMPTY(&m->md.pv_list) &&
5048 TAILQ_EMPTY(&pvh->pv_list))
5049 vm_page_aflag_clear(m, PGA_WRITEABLE);
5052 if (pmap == kernel_pmap) {
5053 pmap_remove_kernel_l3e(pmap, pdq, sva);
5055 mpte = pmap_remove_pt_page(pmap, sva);
5057 pmap_resident_count_dec(pmap, 1);
5058 KASSERT(mpte->ref_count == NPTEPG,
5059 ("pmap_remove_l3e: pte page wire count error"));
5060 mpte->ref_count = 0;
5061 pmap_add_delayed_free_list(mpte, free, FALSE);
5064 return (pmap_unuse_pt(pmap, sva, be64toh(*pmap_pml2e(pmap, sva)), free));
5068 * pmap_remove_pte: unmap a single 4KB page mapping from a process
5071 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
5072 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
5074 struct md_page *pvh;
5078 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5079 oldpte = be64toh(pte_load_clear(ptq));
5080 if (oldpte & RPTE_WIRED)
5081 pmap->pm_stats.wired_count -= 1;
5082 pmap_resident_count_dec(pmap, 1);
5083 if (oldpte & RPTE_MANAGED) {
5084 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
5085 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5088 vm_page_aflag_set(m, PGA_REFERENCED);
5089 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5090 pmap_pvh_free(&m->md, pmap, va);
5091 if (TAILQ_EMPTY(&m->md.pv_list) &&
5092 (m->flags & PG_FICTITIOUS) == 0) {
5093 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5094 if (TAILQ_EMPTY(&pvh->pv_list))
5095 vm_page_aflag_clear(m, PGA_WRITEABLE);
5098 return (pmap_unuse_pt(pmap, va, ptepde, free));
5102 * Remove a single page from a process address space
5105 pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *l3e,
5106 struct spglist *free)
5108 struct rwlock *lock;
5110 bool invalidate_all;
5112 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5113 if ((be64toh(*l3e) & RPTE_VALID) == 0) {
5116 pte = pmap_l3e_to_pte(l3e, va);
5117 if ((be64toh(*pte) & RPTE_VALID) == 0) {
5122 invalidate_all = pmap_remove_pte(pmap, pte, va, be64toh(*l3e), free, &lock);
5125 if (!invalidate_all)
5126 pmap_invalidate_page(pmap, va);
5127 return (invalidate_all);
5131 * Removes the specified range of addresses from the page table page.
5134 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
5135 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp)
5141 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5144 for (pte = pmap_l3e_to_pte(l3e, sva); sva != eva; pte++,
5146 MPASS(pte == pmap_pte(pmap, sva));
5156 if (pmap_remove_pte(pmap, pte, sva, be64toh(*l3e), free, lockp)) {
5163 pmap_invalidate_all(pmap);
5165 pmap_invalidate_range(pmap, va, sva);
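/*
 * Remove the range of mappings [sva, eva) from the given pmap.  The
 * single-page case is short-circuited; otherwise the page tables are
 * walked, whole 2MB mappings are removed directly and partially covered
 * ones are demoted first, with freed page table pages released after
 * the TLB has been invalidated.
 */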
5170 mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5172 struct rwlock *lock;
5173 vm_offset_t va_next;
5176 pml3_entry_t ptpaddr, *l3e;
5177 struct spglist free;
5180 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5183 * Perform an unsynchronized read. This is, however, safe.
5185 if (pmap->pm_stats.resident_count == 0)
5191 /* XXX something fishy here */
5192 sva = (sva + PAGE_MASK) & ~PAGE_MASK;
5193 eva = (eva + PAGE_MASK) & ~PAGE_MASK;
5198 * special handling of removing one page. a very
5199 * common operation and easy to short circuit some code.
5202 if (sva + PAGE_SIZE == eva) {
5203 l3e = pmap_pml3e(pmap, sva);
5204 if (l3e && (be64toh(*l3e) & RPTE_LEAF) == 0) {
5205 anyvalid = pmap_remove_page(pmap, sva, l3e, &free);
5211 for (; sva < eva; sva = va_next) {
5212 if (pmap->pm_stats.resident_count == 0)
5214 l1e = pmap_pml1e(pmap, sva);
5215 if (l1e == NULL || (be64toh(*l1e) & PG_V) == 0) {
5216 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5222 l2e = pmap_l1e_to_l2e(l1e, sva);
5223 if (l2e == NULL || (be64toh(*l2e) & PG_V) == 0) {
5224 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5231 * Calculate index for next page table.
5233 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5237 l3e = pmap_l2e_to_l3e(l2e, sva);
5238 ptpaddr = be64toh(*l3e);
5241 * Weed out invalid mappings.
5247 * Check for large page.
5249 if ((ptpaddr & RPTE_LEAF) != 0) {
5251 * Are we removing the entire large page? If not,
5252 * demote the mapping and fall through.
5254 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5255 pmap_remove_l3e(pmap, l3e, sva, &free, &lock);
5258 } else if (!pmap_demote_l3e_locked(pmap, l3e, sva,
5260 /* The large page mapping was destroyed. */
5263 ptpaddr = be64toh(*l3e);
5267 * Limit our scan to either the end of the va represented
5268 * by the current page table page, or to the end of the
5269 * range being removed.
5274 if (pmap_remove_ptes(pmap, sva, va_next, l3e, &free, &lock))
5281 pmap_invalidate_all(pmap);
5283 vm_page_free_pages_toq(&free, true);
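/*
 * Remove all mappings of the given managed page from every pmap.  Any
 * 2MB mappings found through the PV-head list are demoted first, then
 * each remaining 4KB mapping is destroyed, propagating dirty and
 * referenced state back to the page.
 */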
5287 mmu_radix_remove_all(vm_page_t m)
5289 struct md_page *pvh;
5292 struct rwlock *lock;
5293 pt_entry_t *pte, tpte;
5296 struct spglist free;
5297 int pvh_gen, md_gen;
5299 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5300 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5301 ("pmap_remove_all: page %p is not managed", m));
5303 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5304 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5305 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5308 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
5310 if (!PMAP_TRYLOCK(pmap)) {
5311 pvh_gen = pvh->pv_gen;
5315 if (pvh_gen != pvh->pv_gen) {
5322 l3e = pmap_pml3e(pmap, va);
5323 (void)pmap_demote_l3e_locked(pmap, l3e, va, &lock);
5326 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
5328 if (!PMAP_TRYLOCK(pmap)) {
5329 pvh_gen = pvh->pv_gen;
5330 md_gen = m->md.pv_gen;
5334 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5340 pmap_resident_count_dec(pmap, 1);
5341 l3e = pmap_pml3e(pmap, pv->pv_va);
5342 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_remove_all: found"
5343 " a 2mpage in page %p's pv list", m));
5344 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5345 tpte = be64toh(pte_load_clear(pte));
5347 pmap->pm_stats.wired_count--;
5349 vm_page_aflag_set(m, PGA_REFERENCED);
5352 * Update the vm_page_t clean and reference bits.
5354 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5356 pmap_unuse_pt(pmap, pv->pv_va, be64toh(*l3e), &free);
5357 pmap_invalidate_page(pmap, pv->pv_va);
5358 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5360 free_pv_entry(pmap, pv);
5363 vm_page_aflag_clear(m, PGA_WRITEABLE);
5365 vm_page_free_pages_toq(&free, true);
5369 * Destroy all managed, non-wired mappings in the given user-space
5370 * pmap. This pmap cannot be active on any processor besides the
5373 * This function cannot be applied to the kernel pmap. Moreover, it
5374 * is not intended for general use. It is only to be used during
5375 * process termination. Consequently, it can be implemented in ways
5376 * that make it faster than pmap_remove(). First, it can more quickly
5377 * destroy mappings by iterating over the pmap's collection of PV
5378 * entries, rather than searching the page table. Second, it doesn't
5379 * have to test and clear the page table entries atomically, because
5380 * no processor is currently accessing the user address space. In
5381 * particular, a page table entry's dirty bit won't change state once
5382 * this function starts.
5384 * Although this function destroys all of the pmap's managed,
5385 * non-wired mappings, it can delay and batch the invalidation of TLB
5386 * entries without calling pmap_delayed_invl_started() and
5387 * pmap_delayed_invl_finished(). Because the pmap is not active on
5388 * any other processor, none of these TLB entries will ever be used
5389 * before their eventual invalidation. Consequently, there is no need
5390 * for either pmap_remove_all() or pmap_remove_write() to wait for
5391 * that eventual TLB invalidation.
5395 mmu_radix_remove_pages(pmap_t pmap)
5398 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
5399 pml3_entry_t ptel3e;
5400 pt_entry_t *pte, tpte;
5401 struct spglist free;
5402 vm_page_t m, mpte, mt;
5404 struct md_page *pvh;
5405 struct pv_chunk *pc, *npc;
5406 struct rwlock *lock;
5408 uint64_t inuse, bitmask;
5409 int allfree, field, freed, idx;
5410 boolean_t superpage;
5414 * Assert that the given pmap is only active on the current
5415 * CPU. Unfortunately, we cannot block another CPU from
5416 * activating the pmap while this function is executing.
5418 KASSERT(pmap->pm_pid == mfspr(SPR_PID),
5419 ("non-current asid %lu - expected %lu", pmap->pm_pid,
5426 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
5429 for (field = 0; field < _NPCM; field++) {
5430 inuse = ~pc->pc_map[field] & pc_freemask[field];
5431 while (inuse != 0) {
5432 bit = cnttzd(inuse);
5433 bitmask = 1UL << bit;
5434 idx = field * 64 + bit;
5435 pv = &pc->pc_pventry[idx];
5438 pte = pmap_pml2e(pmap, pv->pv_va);
5439 ptel3e = be64toh(*pte);
5440 pte = pmap_l2e_to_l3e(pte, pv->pv_va);
5441 tpte = be64toh(*pte);
5442 if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) {
5445 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
5447 pte = &pte[pmap_pte_index(pv->pv_va)];
5448 tpte = be64toh(*pte);
5451 * Keep track whether 'tpte' is a
5452 * superpage explicitly instead of
5453 * relying on RPTE_LEAF being set.
5455 * This is because RPTE_LEAF is numerically
5456 * identical to PG_PTE_PAT and thus a
5457 * regular page could be mistaken for a superpage.
5463 if ((tpte & PG_V) == 0) {
5464 panic("bad pte va %lx pte %lx",
5469 * We cannot remove wired pages from a process' mapping at this time
5477 pa = tpte & PG_PS_FRAME;
5479 pa = tpte & PG_FRAME;
5481 m = PHYS_TO_VM_PAGE(pa);
5482 KASSERT(m->phys_addr == pa,
5483 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
5484 m, (uintmax_t)m->phys_addr,
5487 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
5488 m < &vm_page_array[vm_page_array_size],
5489 ("pmap_remove_pages: bad tpte %#jx",
5495 * Update the vm_page_t clean/reference bits.
5497 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5499 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5505 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5508 pc->pc_map[field] |= bitmask;
5510 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5511 pvh = pa_to_pvh(tpte & PG_PS_FRAME);
5512 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
5514 if (TAILQ_EMPTY(&pvh->pv_list)) {
5515 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5516 if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
5517 TAILQ_EMPTY(&mt->md.pv_list))
5518 vm_page_aflag_clear(mt, PGA_WRITEABLE);
5520 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
5522 pmap_resident_count_dec(pmap, 1);
5523 KASSERT(mpte->ref_count == NPTEPG,
5524 ("pmap_remove_pages: pte page wire count error"));
5525 mpte->ref_count = 0;
5526 pmap_add_delayed_free_list(mpte, &free, FALSE);
5529 pmap_resident_count_dec(pmap, 1);
5531 printf("freeing pv (%p, %p)\n",
5534 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5536 if ((m->a.flags & PGA_WRITEABLE) != 0 &&
5537 TAILQ_EMPTY(&m->md.pv_list) &&
5538 (m->flags & PG_FICTITIOUS) == 0) {
5539 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5540 if (TAILQ_EMPTY(&pvh->pv_list))
5541 vm_page_aflag_clear(m, PGA_WRITEABLE);
5544 pmap_unuse_pt(pmap, pv->pv_va, ptel3e, &free);
5548 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5549 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5550 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5552 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5558 pmap_invalidate_all(pmap);
5560 vm_page_free_pages_toq(&free, true);
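/*
 * Revoke write access to the given managed page in every pmap that maps
 * it.  Writable 2MB mappings are demoted, then PG_RW and PG_M are
 * cleared from each 4KB mapping and the TLB entry is invalidated, after
 * which PGA_WRITEABLE is cleared on the page.
 */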
5564 mmu_radix_remove_write(vm_page_t m)
5566 struct md_page *pvh;
5568 struct rwlock *lock;
5569 pv_entry_t next_pv, pv;
5571 pt_entry_t oldpte, *pte;
5572 int pvh_gen, md_gen;
5574 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5575 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5576 ("pmap_remove_write: page %p is not managed", m));
5577 vm_page_assert_busied(m);
5579 if (!pmap_page_is_write_mapped(m))
5581 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5582 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5583 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5586 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
5588 if (!PMAP_TRYLOCK(pmap)) {
5589 pvh_gen = pvh->pv_gen;
5593 if (pvh_gen != pvh->pv_gen) {
5599 l3e = pmap_pml3e(pmap, pv->pv_va);
5600 if ((be64toh(*l3e) & PG_RW) != 0)
5601 (void)pmap_demote_l3e_locked(pmap, l3e, pv->pv_va, &lock);
5602 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
5603 ("inconsistent pv lock %p %p for page %p",
5604 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
5607 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
5609 if (!PMAP_TRYLOCK(pmap)) {
5610 pvh_gen = pvh->pv_gen;
5611 md_gen = m->md.pv_gen;
5615 if (pvh_gen != pvh->pv_gen ||
5616 md_gen != m->md.pv_gen) {
5622 l3e = pmap_pml3e(pmap, pv->pv_va);
5623 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0,
5624 ("pmap_remove_write: found a 2mpage in page %p's pv list",
5626 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5628 oldpte = be64toh(*pte);
5629 if (oldpte & PG_RW) {
5630 if (!atomic_cmpset_long(pte, htobe64(oldpte),
5631 htobe64((oldpte | RPTE_EAA_R) & ~(PG_RW | PG_M))))
5633 if ((oldpte & PG_M) != 0)
5635 pmap_invalidate_page(pmap, pv->pv_va);
5640 vm_page_aflag_clear(m, PGA_WRITEABLE);
5644 * Clear the wired attribute from the mappings for the specified range of
5645 * addresses in the given pmap. Every valid mapping within that range
5646 * must have the wired attribute set. In contrast, invalid mappings
5647 * cannot have the wired attribute set, so they are ignored.
5649 * The wired attribute of the page table entry is not a hardware
5650 * feature, so there is no need to invalidate any TLB entries.
5651 * Since pmap_demote_l3e() for the wired entry must never fail,
5652 * pmap_delayed_invl_started()/finished() calls around the
5653 * function are not needed.
5656 mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5658 vm_offset_t va_next;
5664 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5666 for (; sva < eva; sva = va_next) {
5667 l1e = pmap_pml1e(pmap, sva);
5668 if ((be64toh(*l1e) & PG_V) == 0) {
5669 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5674 l2e = pmap_l1e_to_l2e(l1e, sva);
5675 if ((be64toh(*l2e) & PG_V) == 0) {
5676 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5681 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5684 l3e = pmap_l2e_to_l3e(l2e, sva);
5685 if ((be64toh(*l3e) & PG_V) == 0)
5687 if ((be64toh(*l3e) & RPTE_LEAF) != 0) {
5688 if ((be64toh(*l3e) & PG_W) == 0)
5689 panic("pmap_unwire: pde %#jx is missing PG_W",
5690 (uintmax_t)(be64toh(*l3e)));
5693 * Are we unwiring the entire large page? If not,
5694 * demote the mapping and fall through.
5696 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5697 atomic_clear_long(l3e, htobe64(PG_W));
5698 pmap->pm_stats.wired_count -= L3_PAGE_SIZE /
5701 } else if (!pmap_demote_l3e(pmap, l3e, sva))
5702 panic("pmap_unwire: demotion failed");
5706 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
5708 MPASS(pte == pmap_pte(pmap, sva));
5709 if ((be64toh(*pte) & PG_V) == 0)
5711 if ((be64toh(*pte) & PG_W) == 0)
5712 panic("pmap_unwire: pte %#jx is missing PG_W",
5713 (uintmax_t)(be64toh(*pte)));
5716 * PG_W must be cleared atomically. Although the pmap
5717 * lock synchronizes access to PG_W, another processor
5718 * could be setting PG_M and/or PG_A concurrently.
5720 atomic_clear_long(pte, htobe64(PG_W));
5721 pmap->pm_stats.wired_count--;
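/*
 * Zero a full page, or a sub-range of one, through its direct-map
 * address.
 */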
5728 mmu_radix_zero_page(vm_page_t m)
5732 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5733 addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5738 mmu_radix_zero_page_area(vm_page_t m, int off, int size)
5742 CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
5743 MPASS(off + size <= PAGE_SIZE);
5744 addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5745 memset(addr + off, 0, size);
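/*
 * Return mincore(2) status for the page mapped at "addr" in the given
 * pmap: superpage mappings are reported via MINCORE_PSIND(1), and the
 * modified/referenced bits are derived from PG_M and PG_A.
 */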
5749 mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5756 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
5759 l3ep = pmap_pml3e(pmap, addr);
5760 if (l3ep != NULL && (be64toh(*l3ep) & PG_V)) {
5761 if (be64toh(*l3ep) & RPTE_LEAF) {
5762 pte = be64toh(*l3ep);
5763 /* Compute the physical address of the 4KB page. */
5764 pa = ((be64toh(*l3ep) & PG_PS_FRAME) | (addr & L3_PAGE_MASK)) &
5766 val = MINCORE_PSIND(1);
5768 /* Native endian PTE, do not pass to functions */
5769 pte = be64toh(*pmap_l3e_to_pte(l3ep, addr));
5770 pa = pte & PG_FRAME;
5778 if ((pte & PG_V) != 0) {
5779 val |= MINCORE_INCORE;
5780 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5781 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5782 if ((pte & PG_A) != 0)
5783 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5785 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5786 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
5787 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
5795 mmu_radix_activate(struct thread *td)
5800 CTR2(KTR_PMAP, "%s(%p)", __func__, td);
5802 pmap = vmspace_pmap(td->td_proc->p_vmspace);
5803 curpid = mfspr(SPR_PID);
5804 if (pmap->pm_pid > isa3_base_pid &&
5805 curpid != pmap->pm_pid) {
5806 mmu_radix_pid_set(pmap);
5812 * Increase the starting virtual address of the given mapping if a
5813 * different alignment might result in more superpage mappings.
5816 mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset,
5817 vm_offset_t *addr, vm_size_t size)
5820 CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
5822 vm_offset_t superpage_offset;
5824 if (size < L3_PAGE_SIZE)
5826 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5827 offset += ptoa(object->pg_color);
5828 superpage_offset = offset & L3_PAGE_MASK;
5829 if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) < L3_PAGE_SIZE ||
5830 (*addr & L3_PAGE_MASK) == superpage_offset)
5832 if ((*addr & L3_PAGE_MASK) < superpage_offset)
5833 *addr = (*addr & ~L3_PAGE_MASK) + superpage_offset;
5835 *addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset;
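/*
 * Map a physical range (typically device memory above RAM) into kernel
 * virtual address space with the requested memory attribute.  The range
 * is rounded out to page boundaries before KVA is allocated for it.
 */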
5839 mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
5841 vm_offset_t va, tmpva, ppa, offset;
5843 ppa = trunc_page(pa);
5844 offset = pa & PAGE_MASK;
5845 size = roundup2(offset + size, PAGE_SIZE);
5846 if (pa < powerpc_ptob(Maxmem))
5847 panic("bad pa: %#lx less than Maxmem %#lx\n",
5848 pa, powerpc_ptob(Maxmem));
5849 va = kva_alloc(size);
5851 printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr);
5852 KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr));
5855 panic("%s: Couldn't alloc kernel virtual memory", __func__);
5857 for (tmpva = va; size > 0;) {
5858 mmu_radix_kenter_attr(tmpva, ppa, attr);
5865 return ((void *)(va + offset));
5869 mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
5872 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
5874 return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
5878 mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5881 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
5882 m->md.mdpg_cache_attrs = ma;
5885 * If "m" is a normal page, update its direct mapping. This update
5886 * can be relied upon to perform any cache operations that are
5887 * required for data coherence.
5889 if ((m->flags & PG_FICTITIOUS) == 0 &&
5890 mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
5891 PAGE_SIZE, m->md.mdpg_cache_attrs))
5892 panic("memory attribute change on the direct map failed");
5896 mmu_radix_unmapdev(vm_offset_t va, vm_size_t size)
5900 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
5901 /* If we gave a direct map region in pmap_mapdev, do nothing */
5902 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
5905 offset = va & PAGE_MASK;
5906 size = round_page(offset + size);
5907 va = trunc_page(va);
5909 if (pmap_initialized) {
5910 mmu_radix_qremove(va, atop(size));
5915 static __inline void
5916 pmap_pte_attr(pt_entry_t *pte, uint64_t cache_bits, uint64_t mask)
5918 uint64_t opte, npte;
5921 * Spin until the compare-and-set succeeds, replacing the PTE's
5922 * attribute bits covered by the mask with the requested cache bits.
5925 opte = be64toh(*pte);
5926 npte = opte & ~mask;
5928 } while (npte != opte && !atomic_cmpset_long(pte, htobe64(opte), htobe64(npte)));
5932 * Tries to demote a 1GB page mapping.
5935 pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
5937 pml2_entry_t oldpdpe;
5938 pml3_entry_t *firstpde, newpde, *pde;
5942 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5943 oldpdpe = be64toh(*l2e);
5944 KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
5945 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
5946 pdpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
5948 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
5949 " in pmap %p", va, pmap);
5952 pdpg->pindex = va >> L2_PAGE_SIZE_SHIFT;
5953 pdpgpa = VM_PAGE_TO_PHYS(pdpg);
5954 firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
5955 KASSERT((oldpdpe & PG_A) != 0,
5956 ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
5957 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
5958 ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
5962 * Initialize the page directory page.
5964 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
5965 *pde = htobe64(newpde);
5966 newpde += L3_PAGE_SIZE;
5970 * Demote the mapping.
5972 pde_store(l2e, pdpgpa);
5975 * Flush PWC --- XXX revisit
5977 pmap_invalidate_all(pmap);
5979 counter_u64_add(pmap_l2e_demotions, 1);
5980 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
5981 " in pmap %p", va, pmap);
5986 mmu_radix_kextract(vm_offset_t va)
5991 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
5992 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
5993 pa = DMAP_TO_PHYS(va);
5995 /* Big-endian PTE on stack */
5996 l3e = *pmap_pml3e(kernel_pmap, va);
5997 if (be64toh(l3e) & RPTE_LEAF) {
5998 pa = (be64toh(l3e) & PG_PS_FRAME) | (va & L3_PAGE_MASK);
6002 * Beware of a concurrent promotion that changes the
6003 * PDE at this point! For example, vtopte() must not
6004 * be used to access the PTE because it would use the
6005 * new PDE. It is, however, safe to use the old PDE
6006 * because the page table page is preserved by the
6009 pa = be64toh(*pmap_l3e_to_pte(&l3e, va));
6010 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
6018 mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
6021 if (ma != VM_MEMATTR_DEFAULT) {
6022 return pmap_cache_bits(ma);
6026 * Assume the page is cache inhibited and access is guarded unless
6027 * it's in our available memory array.
6029 for (int i = 0; i < pregions_sz; i++) {
6030 if ((pa >= pregions[i].mr_start) &&
6031 (pa < (pregions[i].mr_start + pregions[i].mr_size)))
6032 return (RPTE_ATTR_MEM);
6034 return (RPTE_ATTR_GUARDEDIO);
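/*
 * Enter a wired kernel mapping of "pa" at "va" with the cache attribute
 * bits derived from "ma" (or from the memory map when the default
 * attribute is requested).
 */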
6038 mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
6040 pt_entry_t *pte, pteval;
6041 uint64_t cache_bits;
6045 pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
6046 cache_bits = mmu_radix_calc_wimg(pa, ma);
6047 pte_store(pte, pteval | cache_bits);
6051 mmu_radix_kremove(vm_offset_t va)
6055 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
6062 mmu_radix_decode_kernel_ptr(vm_offset_t addr,
6063 int *is_user, vm_offset_t *decoded)
6066 CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
6068 *is_user = (addr < VM_MAXUSER_ADDRESS);
6073 mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
6076 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
6077 return (mem_valid(pa, size));
6081 mmu_radix_scan_init()
6084 CTR1(KTR_PMAP, "%s()", __func__);
6089 mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz,
6092 CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
6097 mmu_radix_quick_enter_page(vm_page_t m)
6101 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
6102 paddr = VM_PAGE_TO_PHYS(m);
6103 return (PHYS_TO_DMAP(paddr));
6107 mmu_radix_quick_remove_page(vm_offset_t addr __unused)
6109 /* no work to do here */
6110 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
6114 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
6116 cpu_flush_dcache((void *)sva, eva - sva);
6120 mmu_radix_change_attr(vm_offset_t va, vm_size_t size,
6125 CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode);
6126 PMAP_LOCK(kernel_pmap);
6127 error = pmap_change_attr_locked(va, size, mode, true);
6128 PMAP_UNLOCK(kernel_pmap);
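/*
 * Worker for mmu_radix_change_attr(): with the kernel pmap locked, walk
 * the range, demoting 1GB and 2MB leaf mappings where the change does
 * not cover them entirely, rewrite the cache attribute bits, repeat the
 * change on the direct-map aliases of the affected physical runs, and
 * finally invalidate the TLB and CPU caches as needed.
 */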
6133 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
6135 vm_offset_t base, offset, tmpva;
6136 vm_paddr_t pa_start, pa_end, pa_end1;
6140 int cache_bits, error;
6143 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
6144 base = trunc_page(va);
6145 offset = va & PAGE_MASK;
6146 size = round_page(offset + size);
6149 * Only supported on kernel virtual addresses, including the direct
6150 * map but excluding the recursive map.
6152 if (base < DMAP_MIN_ADDRESS)
6155 cache_bits = pmap_cache_bits(mode);
6159 * Pages that aren't mapped aren't supported. Also break down 2MB pages
6160 * into 4KB pages if required.
6162 for (tmpva = base; tmpva < base + size; ) {
6163 l2e = pmap_pml2e(kernel_pmap, tmpva);
6164 if (l2e == NULL || *l2e == 0)
6166 if (be64toh(*l2e) & RPTE_LEAF) {
6168 * If the current 1GB page already has the required
6169 * memory type, then we need not demote this page. Just
6170 * increment tmpva to the next 1GB page frame.
6172 if ((be64toh(*l2e) & RPTE_ATTR_MASK) == cache_bits) {
6173 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6178 * If the current offset aligns with a 1GB page frame
6179 * and there is at least 1GB left within the range, then
6180 * we need not break down this page into 2MB pages.
6182 if ((tmpva & L2_PAGE_MASK) == 0 &&
6183 tmpva + L2_PAGE_MASK < base + size) {
6184 tmpva += L2_PAGE_SIZE;
6187 if (!pmap_demote_l2e(kernel_pmap, l2e, tmpva))
6190 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6191 KASSERT(l3e != NULL, ("no l3e entry for %#lx in %p\n",
6195 if (be64toh(*l3e) & RPTE_LEAF) {
6197 * If the current 2MB page already has the required
6198 * memory type, then we need not demote this page. Just
6199 * increment tmpva to the next 2MB page frame.
6201 if ((be64toh(*l3e) & RPTE_ATTR_MASK) == cache_bits) {
6202 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6207 * If the current offset aligns with a 2MB page frame
6208 * and there is at least 2MB left within the range, then
6209 * we need not break down this page into 4KB pages.
6211 if ((tmpva & L3_PAGE_MASK) == 0 &&
6212 tmpva + L3_PAGE_MASK < base + size) {
6213 tmpva += L3_PAGE_SIZE;
6216 if (!pmap_demote_l3e(kernel_pmap, l3e, tmpva))
6219 pte = pmap_l3e_to_pte(l3e, tmpva);
6227 * Ok, all the pages exist, so run through them updating their
6228 * cache mode if required.
6230 pa_start = pa_end = 0;
6231 for (tmpva = base; tmpva < base + size; ) {
6232 l2e = pmap_pml2e(kernel_pmap, tmpva);
6233 if (be64toh(*l2e) & RPTE_LEAF) {
6234 if ((be64toh(*l2e) & RPTE_ATTR_MASK) != cache_bits) {
6235 pmap_pte_attr(l2e, cache_bits,
6239 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6240 (be64toh(*l2e) & PG_PS_FRAME) < dmaplimit) {
6241 if (pa_start == pa_end) {
6242 /* Start physical address run. */
6243 pa_start = be64toh(*l2e) & PG_PS_FRAME;
6244 pa_end = pa_start + L2_PAGE_SIZE;
6245 } else if (pa_end == (be64toh(*l2e) & PG_PS_FRAME))
6246 pa_end += L2_PAGE_SIZE;
6248 /* Run ended, update direct map. */
6249 error = pmap_change_attr_locked(
6250 PHYS_TO_DMAP(pa_start),
6251 pa_end - pa_start, mode, flush);
6254 /* Start physical address run. */
6255 pa_start = be64toh(*l2e) & PG_PS_FRAME;
6256 pa_end = pa_start + L2_PAGE_SIZE;
6259 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6262 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6263 if (be64toh(*l3e) & RPTE_LEAF) {
6264 if ((be64toh(*l3e) & RPTE_ATTR_MASK) != cache_bits) {
6265 pmap_pte_attr(l3e, cache_bits,
6269 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6270 (be64toh(*l3e) & PG_PS_FRAME) < dmaplimit) {
6271 if (pa_start == pa_end) {
6272 /* Start physical address run. */
6273 pa_start = be64toh(*l3e) & PG_PS_FRAME;
6274 pa_end = pa_start + L3_PAGE_SIZE;
6275 } else if (pa_end == (be64toh(*l3e) & PG_PS_FRAME))
6276 pa_end += L3_PAGE_SIZE;
6278 /* Run ended, update direct map. */
6279 error = pmap_change_attr_locked(
6280 PHYS_TO_DMAP(pa_start),
6281 pa_end - pa_start, mode, flush);
6284 /* Start physical address run. */
6285 pa_start = be64toh(*l3e) & PG_PS_FRAME;
6286 pa_end = pa_start + L3_PAGE_SIZE;
6289 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6291 pte = pmap_l3e_to_pte(l3e, tmpva);
6292 if ((be64toh(*pte) & RPTE_ATTR_MASK) != cache_bits) {
6293 pmap_pte_attr(pte, cache_bits,
6297 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6298 (be64toh(*pte) & PG_FRAME) < dmaplimit) {
6299 if (pa_start == pa_end) {
6300 /* Start physical address run. */
6301 pa_start = be64toh(*pte) & PG_FRAME;
6302 pa_end = pa_start + PAGE_SIZE;
6303 } else if (pa_end == (be64toh(*pte) & PG_FRAME))
6304 pa_end += PAGE_SIZE;
6306 /* Run ended, update direct map. */
6307 error = pmap_change_attr_locked(
6308 PHYS_TO_DMAP(pa_start),
6309 pa_end - pa_start, mode, flush);
6312 /* Start physical address run. */
6313 pa_start = be64toh(*pte) & PG_FRAME;
6314 pa_end = pa_start + PAGE_SIZE;
6320 if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
6321 pa_end1 = MIN(pa_end, dmaplimit);
6322 if (pa_start != pa_end1)
6323 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
6324 pa_end1 - pa_start, mode, flush);
6328 * Flush CPU caches if required to make sure any data isn't cached that
6329 * shouldn't be, etc.
6332 pmap_invalidate_all(kernel_pmap);
6335 pmap_invalidate_cache_range(base, tmpva);
6341 * Allocate physical memory for the vm_page array and map it into KVA,
6342 * attempting to back the vm_pages with domain-local memory.
6345 mmu_radix_page_array_startup(long pages)
6356 vm_offset_t start, end;
6358 vm_page_array_size = pages;
6360 start = VM_MIN_KERNEL_ADDRESS;
6361 end = start + pages * sizeof(struct vm_page);
6363 pa = vm_phys_early_alloc(0, end - start);
6365 start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
6367 /* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */
6368 for (va = start; va < end; va += L3_PAGE_SIZE) {
6369 pfn = first_page + (va - start) / sizeof(struct vm_page);
6370 domain = vm_phys_domain(ptoa(pfn));
6371 l2e = pmap_pml2e(kernel_pmap, va);
6372 if ((be64toh(*l2e) & PG_V) == 0) {
6373 pa = vm_phys_early_alloc(domain, PAGE_SIZE);
6375 pagezero(PHYS_TO_DMAP(pa));
6376 pde_store(l2e, (pml2_entry_t)pa);
6378 pde = pmap_l2e_to_l3e(l2e, va);
6379 if ((be64toh(*pde) & PG_V) != 0)
6380 panic("Unexpected pde %p", pde);
6381 pa = vm_phys_early_alloc(domain, L3_PAGE_SIZE);
6382 for (i = 0; i < NPDEPG; i++)
6383 dump_add_page(pa + i * PAGE_SIZE);
6384 newl3 = (pml3_entry_t)(pa | RPTE_EAA_P | RPTE_EAA_R | RPTE_EAA_W);
6385 pte_store(pde, newl3);
6388 vm_page_array = (vm_page_t)start;
6392 #include <sys/kdb.h>
6393 #include <ddb/ddb.h>
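/*
 * DDB helper: print the l1e/l2e/l3e/pte chain used to translate "va",
 * starting from the given page table root.
 */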
6396 pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va)
6403 l1e = &l1[pmap_pml1e_index(va)];
6404 db_printf("VA %#016lx l1e %#016lx", va, be64toh(*l1e));
6405 if ((be64toh(*l1e) & PG_V) == 0) {
6409 l2e = pmap_l1e_to_l2e(l1e, va);
6410 db_printf(" l2e %#016lx", be64toh(*l2e));
6411 if ((be64toh(*l2e) & PG_V) == 0 || (be64toh(*l2e) & RPTE_LEAF) != 0) {
6415 l3e = pmap_l2e_to_l3e(l2e, va);
6416 db_printf(" l3e %#016lx", be64toh(*l3e));
6417 if ((be64toh(*l3e) & PG_V) == 0 || (be64toh(*l3e) & RPTE_LEAF) != 0) {
6421 pte = pmap_l3e_to_pte(l3e, va);
6422 db_printf(" pte %#016lx\n", be64toh(*pte));
6426 pmap_page_print_mappings(vm_page_t m)
6431 db_printf("page %p(%lx)\n", m, m->phys_addr);
6432 /* need to elide locks if running in ddb */
6433 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
6434 db_printf("pv: %p ", pv);
6435 db_printf("va: %#016lx ", pv->pv_va);
6437 db_printf("pmap %p ", pmap);
6439 db_printf("asid: %lu\n", pmap->pm_pid);
6440 pmap_pte_walk(pmap->pm_pml1, pv->pv_va);
6445 DB_SHOW_COMMAND(pte, pmap_print_pte)
6451 db_printf("show pte addr\n");
6454 va = (vm_offset_t)addr;
6456 if (va >= DMAP_MIN_ADDRESS)
6458 else if (kdb_thread != NULL)
6459 pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
6461 pmap = vmspace_pmap(curthread->td_proc->p_vmspace);
6463 pmap_pte_walk(pmap->pm_pml1, va);