/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Matthew Macy
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <sys/bitstring.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/endian.h>
#include <sys/kerneldump.h>
#include <sys/syslog.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>
#include <vm/vm_dumpset.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

/* For pseries bit. */
#include <powerpc/pseries/phyp-hvcall.h>

#include <vm/uma_dbg.h>
#define	PPC_BITLSHIFT(bit)	(sizeof(long)*NBBY - 1 - (bit))
#define	PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
#define	PPC_BITLSHIFT_VAL(val, bit)	((val) << PPC_BITLSHIFT(bit))
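/*
 * These helpers follow the IBM convention of numbering bits from the MSB:
 * PPC_BITLSHIFT(0) == 63, so PPC_BIT(0) is the most significant bit of a
 * 64-bit long.  PPC_BITLSHIFT_VAL() places a multi-bit value so that its
 * least significant bit lands on the named IBM bit position.
 */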
static void pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va);

#define	PG_W		RPTE_WIRED
#define	PG_V		RPTE_VALID
#define	PG_MANAGED	RPTE_MANAGED
#define	PG_PROMOTED	RPTE_PROMOTED
#define	PG_X		RPTE_EAA_X
#define	PG_RW		RPTE_EAA_W
#define	PG_PTE_CACHE	RPTE_ATTR_MASK

#define	NLS_MASK	((1UL<<5)-1)
#define	RPTE_ENTRIES	(1UL<<RPTE_SHIFT)
#define	RPTE_MASK	(RPTE_ENTRIES-1)

#define	NLB_MASK	(((1UL<<52)-1) << 8)
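/*
 * In a non-leaf PTE the NLB field (masked by NLB_MASK) holds the physical
 * address of the next level of the radix tree, while the low NLS bits
 * (NLS_MASK) encode the size of that next level.
 */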
extern caddr_t crashdumpmap;

#define	RIC_FLUSH_TLB	0
#define	RIC_FLUSH_PWC	1
#define	RIC_FLUSH_ALL	2

#define	POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#define	PPC_INST_TLBIE		0x7c000264
#define	PPC_INST_TLBIEL		0x7c000224
#define	PPC_INST_SLBIA		0x7c0003e4

#define	___PPC_RA(a)	(((a) & 0x1f) << 16)
#define	___PPC_RB(b)	(((b) & 0x1f) << 11)
#define	___PPC_RS(s)	(((s) & 0x1f) << 21)
#define	___PPC_RT(t)	___PPC_RS(t)
#define	___PPC_R(r)	(((r) & 0x1) << 16)
#define	___PPC_PRS(prs)	(((prs) & 0x1) << 17)
#define	___PPC_RIC(ric)	(((ric) & 0x3) << 18)
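/*
 * The ___PPC_*() helpers place register numbers and the R/PRS/RIC control
 * fields into their bit positions within a hand-encoded tlbie/tlbiel
 * instruction word; see PPC_TLBIE_5() and PPC_TLBIEL() below.
 */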
#define	PPC_SLBIA(IH)	__XSTRING(.long PPC_INST_SLBIA | \
				       ((IH & 0x7) << 21))

#define	PPC_TLBIE_5(rb,rs,ric,prs,r)				\
	__XSTRING(.long PPC_INST_TLBIE |			\
			  ___PPC_RB(rb) | ___PPC_RS(rs) |	\
			  ___PPC_RIC(ric) | ___PPC_PRS(prs) |	\
			  ___PPC_R(r))

#define	PPC_TLBIEL(rb,rs,ric,prs,r)				\
	__XSTRING(.long PPC_INST_TLBIEL |			\
			  ___PPC_RB(rb) | ___PPC_RS(rs) |	\
			  ___PPC_RIC(ric) | ___PPC_PRS(prs) |	\
			  ___PPC_R(r))

#define	PPC_INVALIDATE_ERAT		PPC_SLBIA(7)

static __inline void
ttusync(void)
{
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
}
#define	TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define	TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define	TLBIEL_INVAL_SET_PID	0x400	/* invalidate a set for the current PID */
#define	TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define	TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */

#define	TLBIE_ACTUAL_PAGE_MASK	0xe0
#define	TLBIE_ACTUAL_PAGE_4K	0x00
#define	TLBIE_ACTUAL_PAGE_64K	0xa0
#define	TLBIE_ACTUAL_PAGE_2M	0x20
#define	TLBIE_ACTUAL_PAGE_1G	0x40
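/*
 * The AP ("actual page size") field or'd into RB selects which page size a
 * single-page invalidation applies to: 4K, 64K, 2M or 1G.
 */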
#define	TLBIE_PRS_PARTITION_SCOPE	0x0
#define	TLBIE_PRS_PROCESS_SCOPE		0x1

#define	TLBIE_RIC_INVALIDATE_TLB	0x0	/* Invalidate just TLB */
#define	TLBIE_RIC_INVALIDATE_PWC	0x1	/* Invalidate just PWC */
#define	TLBIE_RIC_INVALIDATE_ALL	0x2	/* Invalidate TLB, PWC,
						 * cached {proc, part}tab entries
						 */
#define	TLBIE_RIC_INVALIDATE_SEQ	0x3	/* HPT - only:
						 * Invalidate a range of translations
						 */
static __always_inline void
radix_tlbie(uint8_t ric, uint8_t prs, uint16_t is, uint32_t pid, uint32_t lpid,
	vm_offset_t va, uint16_t ap)
{
	uint64_t rb, rs;

	MPASS((va & PAGE_MASK) == 0);

	/* RB carries the effective address plus the IS and AP fields;
	 * RS carries the PID in the upper word and the LPID in the lower. */
	rb = va | is | ap;
	rs = ((uint64_t)pid << 32) | lpid;
	__asm __volatile(PPC_TLBIE_5(%0, %1, %2, %3, 1) : :
		"r" (rb), "r" (rs), "i" (ric), "i" (prs) : "memory");
}
static __inline void
radix_tlbie_fixup(uint32_t pid, vm_offset_t va, int ap)
{

	__asm __volatile("ptesync" ::: "memory");
	radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
	    TLBIEL_INVAL_PAGE, 0, 0, va, ap);
	__asm __volatile("ptesync" ::: "memory");
	radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
	    TLBIEL_INVAL_PAGE, pid, 0, va, ap);
}
218 radix_tlbie_invlpg_user_4k(uint32_t pid, vm_offset_t va)
221 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
222 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_4K);
223 radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_4K);
227 radix_tlbie_invlpg_user_2m(uint32_t pid, vm_offset_t va)
230 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
231 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_2M);
232 radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_2M);
236 radix_tlbie_invlpwc_user(uint32_t pid)
239 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
240 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
244 radix_tlbie_flush_user(uint32_t pid)
247 radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
248 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
252 radix_tlbie_invlpg_kernel_4k(vm_offset_t va)
255 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
256 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_4K);
257 radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_4K);
261 radix_tlbie_invlpg_kernel_2m(vm_offset_t va)
264 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
265 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_2M);
266 radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_2M);
269 /* 1GB pages aren't currently supported. */
270 static __inline __unused void
271 radix_tlbie_invlpg_kernel_1g(vm_offset_t va)
274 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
275 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_1G);
276 radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_1G);
280 radix_tlbie_invlpwc_kernel(void)
283 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
284 TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
288 radix_tlbie_flush_kernel(void)
291 radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
292 TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
295 static __inline vm_pindex_t
296 pmap_l3e_pindex(vm_offset_t va)
298 return ((va & PG_FRAME) >> L3_PAGE_SIZE_SHIFT);
301 static __inline vm_pindex_t
302 pmap_pml3e_index(vm_offset_t va)
305 return ((va >> L3_PAGE_SIZE_SHIFT) & RPTE_MASK);
308 static __inline vm_pindex_t
309 pmap_pml2e_index(vm_offset_t va)
311 return ((va >> L2_PAGE_SIZE_SHIFT) & RPTE_MASK);
314 static __inline vm_pindex_t
315 pmap_pml1e_index(vm_offset_t va)
317 return ((va & PG_FRAME) >> L1_PAGE_SIZE_SHIFT);
320 /* Return various clipped indexes for a given VA */
321 static __inline vm_pindex_t
322 pmap_pte_index(vm_offset_t va)
325 return ((va >> PAGE_SHIFT) & RPTE_MASK);
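/*
 * Index helpers above: the 64KB L1 directory is indexed by the uppermost VA
 * bits, while each lower level (L2, L3, PTE) decodes RPTE_SHIFT further
 * bits, so an L3 leaf maps an L3_PAGE_SIZE (2MB) superpage and an L2 leaf
 * an L2_PAGE_SIZE (1GB) page.
 */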
328 /* Return a pointer to the PT slot that corresponds to a VA */
329 static __inline pt_entry_t *
330 pmap_l3e_to_pte(pt_entry_t *l3e, vm_offset_t va)
335 ptepa = (be64toh(*l3e) & NLB_MASK);
336 pte = (pt_entry_t *)PHYS_TO_DMAP(ptepa);
337 return (&pte[pmap_pte_index(va)]);
340 /* Return a pointer to the PD slot that corresponds to a VA */
341 static __inline pt_entry_t *
342 pmap_l2e_to_l3e(pt_entry_t *l2e, vm_offset_t va)
347 l3pa = (be64toh(*l2e) & NLB_MASK);
348 l3e = (pml3_entry_t *)PHYS_TO_DMAP(l3pa);
349 return (&l3e[pmap_pml3e_index(va)]);
352 /* Return a pointer to the PD slot that corresponds to a VA */
353 static __inline pt_entry_t *
354 pmap_l1e_to_l2e(pt_entry_t *l1e, vm_offset_t va)
359 l2pa = (be64toh(*l1e) & NLB_MASK);
361 l2e = (pml2_entry_t *)PHYS_TO_DMAP(l2pa);
362 return (&l2e[pmap_pml2e_index(va)]);
static __inline pml1_entry_t *
pmap_pml1e(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_pml1[pmap_pml1e_index(va)]);
}

static __inline pt_entry_t *
pmap_pml2e(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *l1e;

	l1e = pmap_pml1e(pmap, va);
	if (l1e == NULL || (be64toh(*l1e) & RPTE_VALID) == 0)
		return (NULL);
	return (pmap_l1e_to_l2e(l1e, va));
}

static __inline pt_entry_t *
pmap_pml3e(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *l2e;

	l2e = pmap_pml2e(pmap, va);
	if (l2e == NULL || (be64toh(*l2e) & RPTE_VALID) == 0)
		return (NULL);
	return (pmap_l2e_to_l3e(l2e, va));
}

static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *l3e;

	l3e = pmap_pml3e(pmap, va);
	if (l3e == NULL || (be64toh(*l3e) & RPTE_VALID) == 0)
		return (NULL);
	return (pmap_l3e_to_pte(l3e, va));
}
406 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
407 "Number of kernel page table pages allocated on bootup");
409 vm_paddr_t dmaplimit;
411 SYSCTL_DECL(_vm_pmap);
414 #define VERBOSE_PMAP 0
415 #define VERBOSE_PROTECT 0
416 static int pmap_logging;
417 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_logging, CTLFLAG_RWTUN,
418 &pmap_logging, 0, "verbose debug logging");
421 static u_int64_t KPTphys; /* phys addr of kernel level 1 */
423 //static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */
425 static vm_offset_t qframe = 0;
426 static struct mtx qframe_mtx;
428 void mmu_radix_activate(struct thread *);
429 void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int);
430 void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
432 void mmu_radix_clear_modify(vm_page_t);
433 void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
434 int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *);
435 int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
436 void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
438 void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
439 vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va);
440 vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
441 void mmu_radix_kenter(vm_offset_t, vm_paddr_t);
442 vm_paddr_t mmu_radix_kextract(vm_offset_t);
443 void mmu_radix_kremove(vm_offset_t);
444 boolean_t mmu_radix_is_modified(vm_page_t);
445 boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
446 boolean_t mmu_radix_is_referenced(vm_page_t);
447 void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t,
448 vm_pindex_t, vm_size_t);
449 boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t);
450 void mmu_radix_page_init(vm_page_t);
451 boolean_t mmu_radix_page_is_mapped(vm_page_t m);
452 void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t);
453 int mmu_radix_page_wired_mappings(vm_page_t);
454 int mmu_radix_pinit(pmap_t);
455 void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
456 bool mmu_radix_ps_enabled(pmap_t);
457 void mmu_radix_qenter(vm_offset_t, vm_page_t *, int);
458 void mmu_radix_qremove(vm_offset_t, int);
459 vm_offset_t mmu_radix_quick_enter_page(vm_page_t);
460 void mmu_radix_quick_remove_page(vm_offset_t);
461 int mmu_radix_ts_referenced(vm_page_t);
462 void mmu_radix_release(pmap_t);
463 void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t);
464 void mmu_radix_remove_all(vm_page_t);
465 void mmu_radix_remove_pages(pmap_t);
466 void mmu_radix_remove_write(vm_page_t);
467 void mmu_radix_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz);
468 void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t);
469 void mmu_radix_zero_page(vm_page_t);
470 void mmu_radix_zero_page_area(vm_page_t, int, int);
471 int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
472 void mmu_radix_page_array_startup(long pages);
474 #include "mmu_oea64.h"
477 * Kernel MMU interface
480 static void mmu_radix_bootstrap(vm_offset_t, vm_offset_t);
482 static void mmu_radix_copy_page(vm_page_t, vm_page_t);
483 static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
484 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
485 static void mmu_radix_growkernel(vm_offset_t);
486 static void mmu_radix_init(void);
487 static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
488 static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
489 static void mmu_radix_pinit0(pmap_t);
491 static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t);
492 static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
493 static void mmu_radix_unmapdev(void *, vm_size_t);
494 static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
495 static int mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t);
496 static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
497 static void mmu_radix_scan_init(void);
498 static void mmu_radix_cpu_bootstrap(int ap);
499 static void mmu_radix_tlbie_all(void);
501 static struct pmap_funcs mmu_radix_methods = {
502 .bootstrap = mmu_radix_bootstrap,
503 .copy_page = mmu_radix_copy_page,
504 .copy_pages = mmu_radix_copy_pages,
505 .cpu_bootstrap = mmu_radix_cpu_bootstrap,
506 .growkernel = mmu_radix_growkernel,
507 .init = mmu_radix_init,
508 .map = mmu_radix_map,
509 .mincore = mmu_radix_mincore,
510 .pinit = mmu_radix_pinit,
511 .pinit0 = mmu_radix_pinit0,
513 .mapdev = mmu_radix_mapdev,
514 .mapdev_attr = mmu_radix_mapdev_attr,
515 .unmapdev = mmu_radix_unmapdev,
516 .kenter_attr = mmu_radix_kenter_attr,
517 .dev_direct_mapped = mmu_radix_dev_direct_mapped,
518 .dumpsys_pa_init = mmu_radix_scan_init,
519 .dumpsys_map_chunk = mmu_radix_dumpsys_map,
520 .page_is_mapped = mmu_radix_page_is_mapped,
521 .ps_enabled = mmu_radix_ps_enabled,
522 .align_superpage = mmu_radix_align_superpage,
523 .object_init_pt = mmu_radix_object_init_pt,
524 .protect = mmu_radix_protect,
525 /* pmap dispatcher interface */
526 .clear_modify = mmu_radix_clear_modify,
527 .copy = mmu_radix_copy,
528 .enter = mmu_radix_enter,
529 .enter_object = mmu_radix_enter_object,
530 .enter_quick = mmu_radix_enter_quick,
531 .extract = mmu_radix_extract,
532 .extract_and_hold = mmu_radix_extract_and_hold,
533 .is_modified = mmu_radix_is_modified,
534 .is_prefaultable = mmu_radix_is_prefaultable,
535 .is_referenced = mmu_radix_is_referenced,
536 .ts_referenced = mmu_radix_ts_referenced,
537 .page_exists_quick = mmu_radix_page_exists_quick,
538 .page_init = mmu_radix_page_init,
539 .page_wired_mappings = mmu_radix_page_wired_mappings,
540 .qenter = mmu_radix_qenter,
541 .qremove = mmu_radix_qremove,
542 .release = mmu_radix_release,
543 .remove = mmu_radix_remove,
544 .remove_all = mmu_radix_remove_all,
545 .remove_write = mmu_radix_remove_write,
546 .sync_icache = mmu_radix_sync_icache,
547 .unwire = mmu_radix_unwire,
548 .zero_page = mmu_radix_zero_page,
549 .zero_page_area = mmu_radix_zero_page_area,
550 .activate = mmu_radix_activate,
551 .quick_enter_page = mmu_radix_quick_enter_page,
552 .quick_remove_page = mmu_radix_quick_remove_page,
553 .page_set_memattr = mmu_radix_page_set_memattr,
554 .page_array_startup = mmu_radix_page_array_startup,
556 /* Internal interfaces */
557 .kenter = mmu_radix_kenter,
558 .kextract = mmu_radix_kextract,
559 .kremove = mmu_radix_kremove,
560 .change_attr = mmu_radix_change_attr,
561 .decode_kernel_ptr = mmu_radix_decode_kernel_ptr,
563 .tlbie_all = mmu_radix_tlbie_all,
566 MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
568 static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
569 struct rwlock **lockp);
570 static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va);
571 static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *);
572 static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
573 struct spglist *free, struct rwlock **lockp);
574 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
575 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
576 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
577 static bool pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *pde,
578 struct spglist *free);
579 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
580 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp);
582 static bool pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e,
583 u_int flags, struct rwlock **lockp);
584 #if VM_NRESERVLEVEL > 0
585 static void pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
586 struct rwlock **lockp);
588 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
589 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
590 static vm_page_t mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
591 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate);
593 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
594 vm_prot_t prot, struct rwlock **lockp);
595 static int pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde,
596 u_int flags, vm_page_t m, struct rwlock **lockp);
598 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
599 static void free_pv_chunk(struct pv_chunk *pc);
600 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp);
601 static vm_page_t pmap_allocl3e(pmap_t pmap, vm_offset_t va,
602 struct rwlock **lockp);
603 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
604 struct rwlock **lockp);
605 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
606 struct spglist *free);
607 static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free);
609 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start);
610 static void pmap_invalidate_all(pmap_t pmap);
611 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush);
614 * Internal flags for pmap_enter()'s helper functions.
616 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
617 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
619 #define UNIMPLEMENTED() panic("%s not implemented", __func__)
620 #define UNTESTED() panic("%s not yet tested", __func__)
622 /* Number of supported PID bits */
623 static unsigned int isa3_pid_bits;
625 /* PID to start allocating from */
626 static unsigned int isa3_base_pid;
628 #define PROCTAB_SIZE_SHIFT (isa3_pid_bits + 4)
629 #define PROCTAB_ENTRIES (1ul << isa3_pid_bits)
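/*
 * Each process-table entry is 16 bytes, so the table occupies
 * PROCTAB_ENTRIES * 16 bytes; hence PROCTAB_SIZE_SHIFT = isa3_pid_bits + 4.
 */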
632 * Map of physical memory regions.
634 static struct mem_region *regions, *pregions;
635 static struct numa_mem_region *numa_pregions;
636 static u_int phys_avail_count;
637 static int regions_sz, pregions_sz, numa_pregions_sz;
638 static struct pate *isa3_parttab;
639 static struct prte *isa3_proctab;
640 static vmem_t *asid_arena;
642 extern void bs_remap_earlyboot(void);
644 #define RADIX_PGD_SIZE_SHIFT 16
645 #define RADIX_PGD_SIZE (1UL << RADIX_PGD_SIZE_SHIFT)
647 #define RADIX_PGD_INDEX_SHIFT (RADIX_PGD_SIZE_SHIFT-3)
648 #define NL2EPG (PAGE_SIZE/sizeof(pml2_entry_t))
649 #define NL3EPG (PAGE_SIZE/sizeof(pml3_entry_t))
651 #define NUPML1E (RADIX_PGD_SIZE/sizeof(uint64_t)) /* number of userland PML1 pages */
652 #define NUPDPE (NUPML1E * NL2EPG)/* number of userland PDP pages */
653 #define NUPDE (NUPDPE * NL3EPG) /* number of userland PD entries */
655 /* POWER9 only permits a 64k partition table size. */
656 #define PARTTAB_SIZE_SHIFT 16
657 #define PARTTAB_SIZE (1UL << PARTTAB_SIZE_SHIFT)
659 #define PARTTAB_HR (1UL << 63) /* host uses radix */
660 #define PARTTAB_GR (1UL << 63) /* guest uses radix must match host */
/* TLB flush actions. Used as argument to tlbiel_flush() */
enum {
	TLB_INVAL_SCOPE_LPID = 2,	/* invalidate TLBs for current LPID */
	TLB_INVAL_SCOPE_GLOBAL = 3,	/* invalidate all TLBs */
};
668 #define NPV_LIST_LOCKS MAXCPU
669 static int pmap_initialized;
670 static vm_paddr_t proctab0pa;
671 static vm_paddr_t parttab_phys;
672 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
675 * Data for the pv entry allocation mechanism.
676 * Updates to pv_invl_gen are protected by the pv_list_locks[]
677 * elements, but reads are not.
679 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
680 static struct mtx __exclusive_cache_line pv_chunks_mutex;
681 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
682 static struct md_page *pv_table;
683 static struct md_page pv_dummy;
#ifdef PV_STATS
#define	PV_STAT(x)	do { x ; } while (0)
#else
#define	PV_STAT(x)	do { } while (0)
#endif
691 #define pa_radix_index(pa) ((pa) >> L3_PAGE_SIZE_SHIFT)
692 #define pa_to_pvh(pa) (&pv_table[pa_radix_index(pa)])
694 #define PHYS_TO_PV_LIST_LOCK(pa) \
695 (&pv_list_locks[pa_radix_index(pa) % NPV_LIST_LOCKS])
697 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
698 struct rwlock **_lockp = (lockp); \
699 struct rwlock *_new_lock; \
701 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
702 if (_new_lock != *_lockp) { \
703 if (*_lockp != NULL) \
704 rw_wunlock(*_lockp); \
705 *_lockp = _new_lock; \
710 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
711 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
713 #define RELEASE_PV_LIST_LOCK(lockp) do { \
714 struct rwlock **_lockp = (lockp); \
716 if (*_lockp != NULL) { \
717 rw_wunlock(*_lockp); \
722 #define VM_PAGE_TO_PV_LIST_LOCK(m) \
723 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
726 * We support 52 bits, hence:
727 * bits 52 - 31 = 21, 0b10101
728 * RTS encoding details
729 * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
730 * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
732 #define RTS_SIZE ((0x2UL << 61) | (0x5UL << 5))
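/*
 * Example: for the 52-bit address space the RTS field is 52 - 31 = 21
 * (0b10101); RTS_SIZE above places its low three bits (0b101 == 0x5) at
 * bits 5-7 and its upper two bits (0b10 == 0x2) at bits 61-62 of the entry.
 */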
734 static int powernv_enabled = 1;
static __always_inline void
tlbiel_radix_set_isa300(uint32_t set, uint32_t is,
	uint32_t pid, uint32_t ric, uint32_t prs)
{
	uint64_t rb;
	uint64_t rs;

	rb = PPC_BITLSHIFT_VAL(set, 51) | PPC_BITLSHIFT_VAL(is, 53);
	rs = PPC_BITLSHIFT_VAL((uint64_t)pid, 31);

	__asm __volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
		     : "memory");
}

static void
tlbiel_flush_isa3(uint32_t num_sets, uint32_t is)
{
	uint32_t set;

	__asm __volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and the entire Page Walk Cache
	 * and partition table entries. Then flush the remaining sets of the
	 * TLB.
	 */
	if (is == TLB_INVAL_SCOPE_GLOBAL) {
		tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
		for (set = 1; set < num_sets; set++)
			tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
	}

	/* Do the same for process scoped entries. */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

	__asm __volatile("ptesync": : :"memory");
}
static void
mmu_radix_tlbiel_flush(int scope)
{
	MPASS(scope == TLB_INVAL_SCOPE_LPID ||
		  scope == TLB_INVAL_SCOPE_GLOBAL);

	tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, scope);
	__asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static void
mmu_radix_tlbie_all(void)
{
	if (powernv_enabled)
		mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
	else
		mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
}
797 mmu_radix_init_amor(void)
800 * In HV mode, we init AMOR (Authority Mask Override Register) so that
801 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
802 * Register), enable key 0 and set it to 1.
804 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
806 mtspr(SPR_AMOR, (3ul << 62));
810 mmu_radix_init_iamr(void)
813 * Radix always uses key0 of the IAMR to determine if an access is
814 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
817 mtspr(SPR_IAMR, (1ul << 62));
821 mmu_radix_pid_set(pmap_t pmap)
824 mtspr(SPR_PID, pmap->pm_pid);
828 /* Quick sort callout for comparing physical addresses. */
830 pa_cmp(const void *a, const void *b)
832 const vm_paddr_t *pa = a, *pb = b;
#define	pte_load_store(ptep, pte)	atomic_swap_long(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_long(ptep, 0)
#define	pte_store(ptep, pte) do {					\
	MPASS((pte) & (RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_X));		\
	*(u_long *)(ptep) = htobe64((u_long)((pte) | PG_V | RPTE_LEAF)); \
} while (0)
/*
 * NB: should only be used for adding directories - not for direct mappings
 */
#define	pde_store(ptep, pa) do {					\
	*(u_long *)(ptep) = htobe64((u_long)(pa|RPTE_VALID|RPTE_SHIFT)); \
} while (0)

#define	pte_clear(ptep) do {						\
	*(u_long *)(ptep) = (u_long)(0);				\
} while (0)
859 #define PMAP_PDE_SUPERPAGE (1 << 8) /* supports 2MB superpages */
862 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
863 * (PTE) page mappings have identical settings for the following fields:
865 #define PG_PTE_PROMOTE (PG_X | PG_MANAGED | PG_W | PG_PTE_CACHE | \
866 PG_M | PG_A | RPTE_EAA_MASK | PG_V)
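/*
 * In other words, a 2MB promotion is only legal when all 512 constituent
 * PTEs agree on protection, cache attributes, wiring and referenced/dirty
 * state, so a single L3 leaf can faithfully represent them.
 */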
869 pmap_resident_count_inc(pmap_t pmap, int count)
872 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
873 pmap->pm_stats.resident_count += count;
877 pmap_resident_count_dec(pmap_t pmap, int count)
880 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
881 KASSERT(pmap->pm_stats.resident_count >= count,
882 ("pmap %p resident count underflow %ld %d", pmap,
883 pmap->pm_stats.resident_count, count));
884 pmap->pm_stats.resident_count -= count;
888 pagezero(vm_offset_t va)
892 bzero((void *)va, PAGE_SIZE);
static vm_paddr_t
allocpages(int n)
{
	vm_paddr_t ret;

	ret = moea64_bootstrap_alloc(n * PAGE_SIZE, PAGE_SIZE);
	for (int i = 0; i < n; i++)
		pagezero(PHYS_TO_DMAP(ret + i * PAGE_SIZE));
	return (ret);
}
907 kvtopte(vm_offset_t va)
911 l3e = pmap_pml3e(kernel_pmap, va);
912 if (l3e == NULL || (be64toh(*l3e) & RPTE_VALID) == 0)
914 return (pmap_l3e_to_pte(l3e, va));
918 mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa)
924 *pte = htobe64(pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | \
925 RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A);
929 mmu_radix_ps_enabled(pmap_t pmap)
931 return (superpages_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
935 pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e)
941 l3e = pmap_pml3e(pmap, va);
942 if (l3e == NULL || (be64toh(*l3e) & PG_V) == 0)
945 if (be64toh(*l3e) & RPTE_LEAF) {
951 pte = pmap_l3e_to_pte(l3e, va);
952 if (pte == NULL || (be64toh(*pte) & PG_V) == 0)
958 pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags)
961 pt_entry_t startpte, origpte, newpte;
967 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL)
968 return (KERN_INVALID_ADDRESS);
969 origpte = newpte = be64toh(*pte);
972 if (((flags & VM_PROT_WRITE) && (startpte & PG_M)) ||
973 ((flags & VM_PROT_READ) && (startpte & PG_A))) {
974 pmap_invalidate_all(pmap);
976 if (VERBOSE_PMAP || pmap_logging)
977 printf("%s(%p, %#lx, %#x) (%#lx) -- invalidate all\n",
978 __func__, pmap, va, flags, origpte);
980 return (KERN_FAILURE);
984 if (VERBOSE_PMAP || pmap_logging)
985 printf("%s(%p, %#lx, %#x) (%#lx)\n", __func__, pmap, va,
989 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL ||
990 be64toh(*pte) != origpte) {
992 return (KERN_FAILURE);
994 m = PHYS_TO_VM_PAGE(newpte & PG_FRAME);
998 if ((newpte & (RPTE_EAA_R|RPTE_EAA_X)) == 0)
1001 vm_page_aflag_set(m, PGA_REFERENCED);
1004 if ((newpte & RPTE_EAA_W) == 0)
1011 case VM_PROT_EXECUTE:
1012 if ((newpte & RPTE_EAA_X) == 0)
1015 vm_page_aflag_set(m, PGA_REFERENCED);
1019 if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte)))
1023 if (startpte == newpte)
1024 return (KERN_FAILURE);
1028 return (KERN_PROTECTION_FAILURE);
1032 * Returns TRUE if the given page is mapped individually or as part of
1033 * a 2mpage. Otherwise, returns FALSE.
1036 mmu_radix_page_is_mapped(vm_page_t m)
1038 struct rwlock *lock;
1041 if ((m->oflags & VPO_UNMANAGED) != 0)
1043 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
1045 rv = !TAILQ_EMPTY(&m->md.pv_list) ||
1046 ((m->flags & PG_FICTITIOUS) == 0 &&
1047 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
1053 * Determine the appropriate bits to set in a PTE or PDE for a specified
1057 pmap_cache_bits(vm_memattr_t ma)
1059 if (ma != VM_MEMATTR_DEFAULT) {
1061 case VM_MEMATTR_UNCACHEABLE:
1062 return (RPTE_ATTR_GUARDEDIO);
1063 case VM_MEMATTR_CACHEABLE:
1064 return (RPTE_ATTR_MEM);
1065 case VM_MEMATTR_WRITE_BACK:
1066 case VM_MEMATTR_PREFETCHABLE:
1067 case VM_MEMATTR_WRITE_COMBINING:
1068 return (RPTE_ATTR_UNGUARDEDIO);
1075 pmap_invalidate_page(pmap_t pmap, vm_offset_t start)
1078 if (pmap == kernel_pmap)
1079 radix_tlbie_invlpg_kernel_4k(start);
1081 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1086 pmap_invalidate_page_2m(pmap_t pmap, vm_offset_t start)
1089 if (pmap == kernel_pmap)
1090 radix_tlbie_invlpg_kernel_2m(start);
1092 radix_tlbie_invlpg_user_2m(pmap->pm_pid, start);
1097 pmap_invalidate_pwc(pmap_t pmap)
1100 if (pmap == kernel_pmap)
1101 radix_tlbie_invlpwc_kernel();
1103 radix_tlbie_invlpwc_user(pmap->pm_pid);
1108 pmap_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
	if (((end - start) >> PAGE_SHIFT) > 8) {
1111 pmap_invalidate_all(pmap);
1115 if (pmap == kernel_pmap) {
1116 while (start < end) {
1117 radix_tlbie_invlpg_kernel_4k(start);
1121 while (start < end) {
1122 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1130 pmap_invalidate_all(pmap_t pmap)
1133 if (pmap == kernel_pmap)
1134 radix_tlbie_flush_kernel();
1136 radix_tlbie_flush_user(pmap->pm_pid);
1141 pmap_invalidate_l3e_page(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e)
1145 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
1146 * by a promotion that did not invalidate the 512 4KB page mappings
1147 * that might exist in the TLB. Consequently, at this point, the TLB
1148 * may hold both 4KB and 2MB page mappings for the address range [va,
1149 * va + L3_PAGE_SIZE). Therefore, the entire range must be invalidated here.
1150 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
1151 * 4KB page mappings for the address range [va, va + L3_PAGE_SIZE), and so a
1152 * single INVLPG suffices to invalidate the 2MB page mapping from the
1156 if ((l3e & PG_PROMOTED) != 0)
1157 pmap_invalidate_range(pmap, va, va + L3_PAGE_SIZE - 1);
1159 pmap_invalidate_page_2m(pmap, va);
1161 pmap_invalidate_pwc(pmap);
1164 static __inline struct pv_chunk *
1165 pv_to_chunk(pv_entry_t pv)
1168 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1171 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1173 #define PC_FREE0 0xfffffffffffffffful
1174 #define PC_FREE1 ((1ul << (_NPCPV % 64)) - 1)
1176 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1 };
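/*
 * Each pv_chunk is one page holding _NPCPV pv_entry structures; pc_map is a
 * two-word bitmap of the free entries, so PC_FREE0/PC_FREE1 describe a
 * chunk with every entry available.
 */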
1179 * Ensure that the number of spare PV entries in the specified pmap meets or
1180 * exceeds the given count, "needed".
1182 * The given PV list lock may be released.
1185 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1187 struct pch new_tail;
1188 struct pv_chunk *pc;
1193 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1194 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
1197 * Newly allocated PV chunks must be stored in a private list until
1198 * the required number of PV chunks have been allocated. Otherwise,
1199 * reclaim_pv_chunk() could recycle one of these chunks. In
1200 * contrast, these chunks must be added to the pmap upon allocation.
1202 TAILQ_INIT(&new_tail);
1205 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1206 // if ((cpu_feature2 & CPUID2_POPCNT) == 0)
1207 bit_count((bitstr_t *)pc->pc_map, 0,
1208 sizeof(pc->pc_map) * NBBY, &free);
1210 free = popcnt_pc_map_pq(pc->pc_map);
1215 if (avail >= needed)
1218 for (reclaimed = false; avail < needed; avail += _NPCPV) {
1219 m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
1221 m = reclaim_pv_chunk(pmap, lockp);
1226 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1227 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1228 dump_add_page(m->phys_addr);
1229 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1231 pc->pc_map[0] = PC_FREE0;
1232 pc->pc_map[1] = PC_FREE1;
1233 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1234 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
1235 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
1238 * The reclaim might have freed a chunk from the current pmap.
1239 * If that chunk contained available entries, we need to
1240 * re-count the number of available entries.
1245 if (!TAILQ_EMPTY(&new_tail)) {
1246 mtx_lock(&pv_chunks_mutex);
1247 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
1248 mtx_unlock(&pv_chunks_mutex);
1253 * First find and then remove the pv entry for the specified pmap and virtual
1254 * address from the specified pv list. Returns the pv entry if found and NULL
1255 * otherwise. This operation can be performed on pv lists for either 4KB or
1256 * 2MB page mappings.
1258 static __inline pv_entry_t
1259 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1263 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
1265 if (PV_PMAP(pv) == NULL) {
1266 printf("corrupted pv_chunk/pv %p\n", pv);
1267 printf("pv_chunk: %64D\n", pv_to_chunk(pv), ":");
1269 MPASS(PV_PMAP(pv) != NULL);
1270 MPASS(pv->pv_va != 0);
1272 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1273 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
1282 * After demotion from a 2MB page mapping to 512 4KB page mappings,
1283 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
1284 * entries for each of the 4KB page mappings.
1287 pmap_pv_demote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1288 struct rwlock **lockp)
1290 struct md_page *pvh;
1291 struct pv_chunk *pc;
1293 vm_offset_t va_last;
1297 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1298 KASSERT((pa & L3_PAGE_MASK) == 0,
1299 ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
1300 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1303 * Transfer the 2mpage's pv entry for this mapping to the first
1304 * page's pv list. Once this transfer begins, the pv list lock
1305 * must not be released until the last pv entry is reinstantiated.
1307 pvh = pa_to_pvh(pa);
1308 va = trunc_2mpage(va);
1309 pv = pmap_pvh_remove(pvh, pmap, va);
1310 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
1311 m = PHYS_TO_VM_PAGE(pa);
1312 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1315 /* Instantiate the remaining NPTEPG - 1 pv entries. */
1316 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
1317 va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1319 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1320 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0
1321 , ("pmap_pv_demote_pde: missing spare"));
1322 for (field = 0; field < _NPCM; field++) {
1323 while (pc->pc_map[field]) {
1324 bit = cnttzd(pc->pc_map[field]);
1325 pc->pc_map[field] &= ~(1ul << bit);
1326 pv = &pc->pc_pventry[field * 64 + bit];
1330 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1331 ("pmap_pv_demote_pde: page %p is not managed", m));
1332 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1339 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1340 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1343 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1344 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1345 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1347 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
1348 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
1352 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap)
1357 pmap_invalidate_all(pmap);
1358 if (pmap != locked_pmap)
1363 * We are in a serious low memory condition. Resort to
1364 * drastic measures to free some pages so we can allocate
1365 * another pv entry chunk.
1367 * Returns NULL if PV entries were reclaimed from the specified pmap.
1369 * We do not, however, unmap 2mpages because subsequent accesses will
1370 * allocate per-page pv entries until repromotion occurs, thereby
1371 * exacerbating the shortage of free pv entries.
1373 static int active_reclaims = 0;
1375 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1377 struct pv_chunk *pc, *pc_marker, *pc_marker_end;
1378 struct pv_chunk_header pc_marker_b, pc_marker_end_b;
1379 struct md_page *pvh;
1381 pmap_t next_pmap, pmap;
1382 pt_entry_t *pte, tpte;
1386 struct spglist free;
1388 int bit, field, freed;
1390 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1391 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
1395 bzero(&pc_marker_b, sizeof(pc_marker_b));
1396 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
1397 pc_marker = (struct pv_chunk *)&pc_marker_b;
1398 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
1400 mtx_lock(&pv_chunks_mutex);
1402 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
1403 TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
1404 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
1405 SLIST_EMPTY(&free)) {
1406 next_pmap = pc->pc_pmap;
1407 if (next_pmap == NULL) {
1409 * The next chunk is a marker. However, it is
1410 * not our marker, so active_reclaims must be
1411 * > 1. Consequently, the next_chunk code
1412 * will not rotate the pv_chunks list.
1416 mtx_unlock(&pv_chunks_mutex);
1419 * A pv_chunk can only be removed from the pc_lru list
1420 * when both pc_chunks_mutex is owned and the
1421 * corresponding pmap is locked.
1423 if (pmap != next_pmap) {
1424 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1426 /* Avoid deadlock and lock recursion. */
1427 if (pmap > locked_pmap) {
1428 RELEASE_PV_LIST_LOCK(lockp);
1430 mtx_lock(&pv_chunks_mutex);
1432 } else if (pmap != locked_pmap) {
1433 if (PMAP_TRYLOCK(pmap)) {
1434 mtx_lock(&pv_chunks_mutex);
1437 pmap = NULL; /* pmap is not locked */
1438 mtx_lock(&pv_chunks_mutex);
1439 pc = TAILQ_NEXT(pc_marker, pc_lru);
1441 pc->pc_pmap != next_pmap)
1449 * Destroy every non-wired, 4 KB page mapping in the chunk.
1452 for (field = 0; field < _NPCM; field++) {
1453 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1454 inuse != 0; inuse &= ~(1UL << bit)) {
1455 bit = cnttzd(inuse);
1456 pv = &pc->pc_pventry[field * 64 + bit];
1458 l3e = pmap_pml3e(pmap, va);
1459 if ((be64toh(*l3e) & RPTE_LEAF) != 0)
1461 pte = pmap_l3e_to_pte(l3e, va);
1462 if ((be64toh(*pte) & PG_W) != 0)
1464 tpte = be64toh(pte_load_clear(pte));
1465 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
1466 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
1468 if ((tpte & PG_A) != 0)
1469 vm_page_aflag_set(m, PGA_REFERENCED);
1470 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1471 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
1474 if (TAILQ_EMPTY(&m->md.pv_list) &&
1475 (m->flags & PG_FICTITIOUS) == 0) {
1476 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
1477 if (TAILQ_EMPTY(&pvh->pv_list)) {
1478 vm_page_aflag_clear(m,
1482 pc->pc_map[field] |= 1UL << bit;
1483 pmap_unuse_pt(pmap, va, be64toh(*l3e), &free);
1488 mtx_lock(&pv_chunks_mutex);
1491 /* Every freed mapping is for a 4 KB page. */
1492 pmap_resident_count_dec(pmap, freed);
1493 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
1494 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
1495 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
1496 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1497 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1) {
1498 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1499 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1500 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1501 /* Entire chunk is free; return it. */
1502 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1503 dump_drop_page(m_pc->phys_addr);
1504 mtx_lock(&pv_chunks_mutex);
1505 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1508 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1509 mtx_lock(&pv_chunks_mutex);
1510 /* One freed pv entry in locked_pmap is sufficient. */
1511 if (pmap == locked_pmap)
1514 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1515 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
1516 if (active_reclaims == 1 && pmap != NULL) {
1518 * Rotate the pv chunks list so that we do not
1519 * scan the same pv chunks that could not be
1520 * freed (because they contained a wired
1521 * and/or superpage mapping) on every
1522 * invocation of reclaim_pv_chunk().
1524 while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
1525 MPASS(pc->pc_pmap != NULL);
1526 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1527 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1531 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1532 TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
1534 mtx_unlock(&pv_chunks_mutex);
1535 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1536 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
1537 m_pc = SLIST_FIRST(&free);
1538 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
1539 /* Recycle a freed page table page. */
1540 m_pc->ref_count = 1;
1542 vm_page_free_pages_toq(&free, true);
1547 * free the pv_entry back to the free list
1550 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1552 struct pv_chunk *pc;
1553 int idx, field, bit;
1556 if (pmap != kernel_pmap)
1557 printf("%s(%p, %p)\n", __func__, pmap, pv);
1559 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1560 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1561 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1562 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1563 pc = pv_to_chunk(pv);
1564 idx = pv - &pc->pc_pventry[0];
1567 pc->pc_map[field] |= 1ul << bit;
1568 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1) {
1569 /* 98% of the time, pc is already at the head of the list. */
1570 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1571 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1572 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1576 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1581 free_pv_chunk(struct pv_chunk *pc)
1585 mtx_lock(&pv_chunks_mutex);
1586 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1587 mtx_unlock(&pv_chunks_mutex);
1588 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1589 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1590 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1591 /* entire chunk is free, return it */
1592 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1593 dump_drop_page(m->phys_addr);
1594 vm_page_unwire_noq(m);
1599 * Returns a new PV entry, allocating a new PV chunk from the system when
1600 * needed. If this PV chunk allocation fails and a PV list lock pointer was
1601 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
1604 * The given PV list lock may be released.
1607 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1611 struct pv_chunk *pc;
1614 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1615 PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
1617 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1619 for (field = 0; field < _NPCM; field++) {
1620 if (pc->pc_map[field]) {
1621 bit = cnttzd(pc->pc_map[field]);
1625 if (field < _NPCM) {
1626 pv = &pc->pc_pventry[field * 64 + bit];
1627 pc->pc_map[field] &= ~(1ul << bit);
1628 /* If this was the last item, move it to tail */
1629 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1630 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1631 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1634 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1635 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
1636 MPASS(PV_PMAP(pv) != NULL);
1640 /* No free items, allocate another chunk */
1641 m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
1643 if (lockp == NULL) {
1644 PV_STAT(pc_chunk_tryfail++);
1647 m = reclaim_pv_chunk(pmap, lockp);
1651 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1652 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1653 dump_add_page(m->phys_addr);
1654 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1656 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
1657 pc->pc_map[1] = PC_FREE1;
1658 mtx_lock(&pv_chunks_mutex);
1659 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1660 mtx_unlock(&pv_chunks_mutex);
1661 pv = &pc->pc_pventry[0];
1662 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1663 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1664 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1665 MPASS(PV_PMAP(pv) != NULL);
1669 #if VM_NRESERVLEVEL > 0
1671 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
1672 * replace the many pv entries for the 4KB page mappings by a single pv entry
1673 * for the 2MB page mapping.
1676 pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1677 struct rwlock **lockp)
1679 struct md_page *pvh;
1681 vm_offset_t va_last;
1684 KASSERT((pa & L3_PAGE_MASK) == 0,
1685 ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
1686 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1689 * Transfer the first page's pv entry for this mapping to the 2mpage's
1690 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
1691 * a transfer avoids the possibility that get_pv_entry() calls
1692 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
1693 * mappings that is being promoted.
1695 m = PHYS_TO_VM_PAGE(pa);
1696 va = trunc_2mpage(va);
1697 pv = pmap_pvh_remove(&m->md, pmap, va);
1698 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
1699 pvh = pa_to_pvh(pa);
1700 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
1702 /* Free the remaining NPTEPG - 1 pv entries. */
1703 va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1707 pmap_pvh_free(&m->md, pmap, va);
1708 } while (va < va_last);
1710 #endif /* VM_NRESERVLEVEL > 0 */
1713 * First find and then destroy the pv entry for the specified pmap and virtual
1714 * address. This operation can be performed on pv lists for either 4KB or 2MB
1718 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1722 pv = pmap_pvh_remove(pvh, pmap, va);
1723 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
1724 free_pv_entry(pmap, pv);
1728 * Conditionally create the PV entry for a 4KB page mapping if the required
1729 * memory can be allocated without resorting to reclamation.
1732 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1733 struct rwlock **lockp)
1737 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1738 /* Pass NULL instead of the lock pointer to disable reclamation. */
1739 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1741 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1742 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1749 vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX];
1752 validate_addr(vm_paddr_t addr, vm_size_t size)
1754 vm_paddr_t end = addr + size;
1757 for (int i = 0; i < 2 * phys_avail_count; i += 2) {
1758 if (addr >= phys_avail_debug[i] &&
1759 end <= phys_avail_debug[i + 1]) {
1764 KASSERT(found, ("%#lx-%#lx outside of initial phys_avail array",
1768 static void validate_addr(vm_paddr_t addr, vm_size_t size) {}
1770 #define DMAP_PAGE_BITS (RPTE_VALID | RPTE_LEAF | RPTE_EAA_MASK | PG_M | PG_A)
static vm_paddr_t
alloc_pt_page(void)
{
	vm_paddr_t page;

	page = allocpages(1);
	pagezero(PHYS_TO_DMAP(page));
	return (page);
}
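/*
 * mmu_radix_dmap_range() below walks the kernel radix tree for the direct
 * map, installing 1GB (L2) or 2MB (L3) leaf entries whenever the physical
 * range is suitably aligned and large enough, and falling back to 4KB PTEs
 * otherwise; intermediate table pages are allocated on demand with
 * alloc_pt_page().
 */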
1783 mmu_radix_dmap_range(vm_paddr_t start, vm_paddr_t end)
1785 pt_entry_t *pte, pteval;
1789 printf("%s %lx -> %lx\n", __func__, start, end);
1790 while (start < end) {
1791 pteval = start | DMAP_PAGE_BITS;
1792 pte = pmap_pml1e(kernel_pmap, PHYS_TO_DMAP(start));
1793 if ((be64toh(*pte) & RPTE_VALID) == 0) {
1794 page = alloc_pt_page();
1795 pde_store(pte, page);
1797 pte = pmap_l1e_to_l2e(pte, PHYS_TO_DMAP(start));
1798 if ((start & L2_PAGE_MASK) == 0 &&
1799 end - start >= L2_PAGE_SIZE) {
1800 start += L2_PAGE_SIZE;
1802 } else if ((be64toh(*pte) & RPTE_VALID) == 0) {
1803 page = alloc_pt_page();
1804 pde_store(pte, page);
1807 pte = pmap_l2e_to_l3e(pte, PHYS_TO_DMAP(start));
1808 if ((start & L3_PAGE_MASK) == 0 &&
1809 end - start >= L3_PAGE_SIZE) {
1810 start += L3_PAGE_SIZE;
1812 } else if ((be64toh(*pte) & RPTE_VALID) == 0) {
1813 page = alloc_pt_page();
1814 pde_store(pte, page);
1816 pte = pmap_l3e_to_pte(pte, PHYS_TO_DMAP(start));
1819 pte_store(pte, pteval);
1824 mmu_radix_dmap_populate(vm_size_t hwphyssz)
1826 vm_paddr_t start, end;
1828 for (int i = 0; i < pregions_sz; i++) {
1829 start = pregions[i].mr_start;
1830 end = start + pregions[i].mr_size;
1831 if (hwphyssz && start >= hwphyssz)
1833 if (hwphyssz && hwphyssz < end)
1835 mmu_radix_dmap_range(start, end);
1840 mmu_radix_setup_pagetables(vm_size_t hwphyssz)
1842 vm_paddr_t ptpages, pages;
1846 bzero(kernel_pmap, sizeof(struct pmap));
1847 PMAP_LOCK_INIT(kernel_pmap);
1849 ptpages = allocpages(3);
1850 l1phys = moea64_bootstrap_alloc(RADIX_PGD_SIZE, RADIX_PGD_SIZE);
1851 validate_addr(l1phys, RADIX_PGD_SIZE);
1853 printf("l1phys=%lx\n", l1phys);
1854 MPASS((l1phys & (RADIX_PGD_SIZE-1)) == 0);
1855 for (int i = 0; i < RADIX_PGD_SIZE/PAGE_SIZE; i++)
1856 pagezero(PHYS_TO_DMAP(l1phys + i * PAGE_SIZE));
1857 kernel_pmap->pm_pml1 = (pml1_entry_t *)PHYS_TO_DMAP(l1phys);
1859 mmu_radix_dmap_populate(hwphyssz);
1862 * Create page tables for first 128MB of KVA
1865 pte = pmap_pml1e(kernel_pmap, VM_MIN_KERNEL_ADDRESS);
1866 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1868 pte = pmap_l1e_to_l2e(pte, VM_MIN_KERNEL_ADDRESS);
1869 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1871 pte = pmap_l2e_to_l3e(pte, VM_MIN_KERNEL_ADDRESS);
1873 * the kernel page table pages need to be preserved in
1874 * phys_avail and not overlap with previous allocations
1876 pages = allocpages(nkpt);
1878 printf("phys_avail after dmap populate and nkpt allocation\n");
1879 for (int j = 0; j < 2 * phys_avail_count; j+=2)
1880 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
1881 j, phys_avail[j], j + 1, phys_avail[j + 1]);
1884 for (int i = 0; i < nkpt; i++, pte++, pages += PAGE_SIZE)
1885 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1886 kernel_vm_end = VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE;
1888 printf("kernel_pmap pml1 %p\n", kernel_pmap->pm_pml1);
1890 * Add a physical memory segment (vm_phys_seg) corresponding to the
1891 * preallocated kernel page table pages so that vm_page structures
1892 * representing these pages will be created. The vm_page structures
1893 * are required for promotion of the corresponding kernel virtual
1894 * addresses to superpage mappings.
1896 vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1900 mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end)
1902 vm_paddr_t kpstart, kpend;
1903 vm_size_t physsz, hwphyssz;
1905 int rm_pavail, proctab_size;
1908 kpstart = start & ~DMAP_BASE_ADDRESS;
1909 kpend = end & ~DMAP_BASE_ADDRESS;
1911 /* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
1913 CTR0(KTR_PMAP, "mmu_radix_early_bootstrap: physical memory");
1915 if (2 * VM_PHYSSEG_MAX < regions_sz)
1916 panic("mmu_radix_early_bootstrap: phys_avail too small");
1919 for (int i = 0; i < regions_sz; i++)
1920 printf("regions[%d].mr_start=%lx regions[%d].mr_size=%lx\n",
1921 i, regions[i].mr_start, i, regions[i].mr_size);
1923 * XXX workaround a simulator bug
1925 for (int i = 0; i < regions_sz; i++)
1926 if (regions[i].mr_start & PAGE_MASK) {
1927 regions[i].mr_start += PAGE_MASK;
1928 regions[i].mr_start &= ~PAGE_MASK;
1929 regions[i].mr_size &= ~PAGE_MASK;
1932 for (int i = 0; i < pregions_sz; i++)
1933 printf("pregions[%d].mr_start=%lx pregions[%d].mr_size=%lx\n",
1934 i, pregions[i].mr_start, i, pregions[i].mr_size);
1936 phys_avail_count = 0;
1939 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1940 for (i = 0, j = 0; i < regions_sz; i++) {
1942 printf("regions[%d].mr_start=%016lx regions[%d].mr_size=%016lx\n",
1943 i, regions[i].mr_start, i, regions[i].mr_size);
1945 if (regions[i].mr_size < PAGE_SIZE)
1948 if (hwphyssz != 0 &&
1949 (physsz + regions[i].mr_size) >= hwphyssz) {
1950 if (physsz < hwphyssz) {
1951 phys_avail[j] = regions[i].mr_start;
1952 phys_avail[j + 1] = regions[i].mr_start +
1953 (hwphyssz - physsz);
1956 dump_avail[j] = phys_avail[j];
1957 dump_avail[j + 1] = phys_avail[j + 1];
1961 phys_avail[j] = regions[i].mr_start;
1962 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
1963 dump_avail[j] = phys_avail[j];
1964 dump_avail[j + 1] = phys_avail[j + 1];
1967 physsz += regions[i].mr_size;
1971 /* Check for overlap with the kernel and exception vectors */
1973 for (j = 0; j < 2 * phys_avail_count; j+=2) {
1974 if (phys_avail[j] < EXC_LAST)
1975 phys_avail[j] += EXC_LAST;
1977 if (phys_avail[j] >= kpstart &&
1978 phys_avail[j + 1] <= kpend) {
1979 phys_avail[j] = phys_avail[j + 1] = ~0;
1984 if (kpstart >= phys_avail[j] &&
1985 kpstart < phys_avail[j + 1]) {
1986 if (kpend < phys_avail[j + 1]) {
1987 phys_avail[2 * phys_avail_count] =
1988 (kpend & ~PAGE_MASK) + PAGE_SIZE;
1989 phys_avail[2 * phys_avail_count + 1] =
1994 phys_avail[j + 1] = kpstart & ~PAGE_MASK;
1997 if (kpend >= phys_avail[j] &&
1998 kpend < phys_avail[j + 1]) {
1999 if (kpstart > phys_avail[j]) {
2000 phys_avail[2 * phys_avail_count] = phys_avail[j];
2001 phys_avail[2 * phys_avail_count + 1] =
2002 kpstart & ~PAGE_MASK;
2006 phys_avail[j] = (kpend & ~PAGE_MASK) +
2010 qsort(phys_avail, 2 * phys_avail_count, sizeof(phys_avail[0]), pa_cmp);
2011 for (i = 0; i < 2 * phys_avail_count; i++)
2012 phys_avail_debug[i] = phys_avail[i];
2014 /* Remove physical available regions marked for removal (~0) */
2016 phys_avail_count -= rm_pavail;
2017 for (i = 2 * phys_avail_count;
2018 i < 2*(phys_avail_count + rm_pavail); i+=2)
2019 phys_avail[i] = phys_avail[i + 1] = 0;
2022 printf("phys_avail ranges after filtering:\n");
2023 for (j = 0; j < 2 * phys_avail_count; j+=2)
2024 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
2025 j, phys_avail[j], j + 1, phys_avail[j + 1]);
2027 physmem = btoc(physsz);
2029 /* XXX assume we're running non-virtualized and
2030 * we don't support BHYVE
2032 if (isa3_pid_bits == 0)
2034 if (powernv_enabled) {
2036 moea64_bootstrap_alloc(PARTTAB_SIZE, PARTTAB_SIZE);
2037 validate_addr(parttab_phys, PARTTAB_SIZE);
2038 for (int i = 0; i < PARTTAB_SIZE/PAGE_SIZE; i++)
2039 pagezero(PHYS_TO_DMAP(parttab_phys + i * PAGE_SIZE));
2042 proctab_size = 1UL << PROCTAB_SIZE_SHIFT;
2043 proctab0pa = moea64_bootstrap_alloc(proctab_size, proctab_size);
2044 validate_addr(proctab0pa, proctab_size);
2045 for (int i = 0; i < proctab_size/PAGE_SIZE; i++)
2046 pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE));
2048 mmu_radix_setup_pagetables(hwphyssz);
2052 mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end)
2060 * Set up the Open Firmware pmap and add its mappings if not in real
2064 printf("%s enter\n", __func__);
2067 * Calculate the last available physical address, and reserve the
2068 * vm_page_array (upper bound).
2071 for (i = 0; phys_avail[i + 1] != 0; i += 2)
2072 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));
2075 * Remap any early IO mappings (console framebuffer, etc.)
2077 bs_remap_earlyboot();
2080 * Allocate a kernel stack with a guard page for thread0 and map it
2081 * into the kernel page map.
2083 pa = allocpages(kstack_pages);
2084 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2085 virtual_avail = va + kstack_pages * PAGE_SIZE;
2086 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
2087 thread0.td_kstack = va;
2088 for (i = 0; i < kstack_pages; i++) {
2089 mmu_radix_kenter(va, pa);
2093 thread0.td_kstack_pages = kstack_pages;
2096 * Allocate virtual address space for the message buffer.
2098 pa = msgbuf_phys = allocpages((msgbufsize + PAGE_MASK) >> PAGE_SHIFT);
2099 msgbufp = (struct msgbuf *)PHYS_TO_DMAP(pa);
2102 * Allocate virtual address space for the dynamic percpu area.
2104 pa = allocpages(DPCPU_SIZE >> PAGE_SHIFT);
2105 dpcpu = (void *)PHYS_TO_DMAP(pa);
2106 dpcpu_init(dpcpu, curcpu);
2108 crashdumpmap = (caddr_t)virtual_avail;
2109 virtual_avail += MAXDUMPPGS * PAGE_SIZE;
2112 * Reserve some special page table entries/VA space for temporary
2118 mmu_parttab_init(void)
2122 isa3_parttab = (struct pate *)PHYS_TO_DMAP(parttab_phys);
2125 printf("%s parttab: %p\n", __func__, isa3_parttab);
2126 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2128 printf("setting ptcr %lx\n", ptcr);
2129 mtspr(SPR_PTCR, ptcr);
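	/*
	 * The low bits of the PTCR encode the partition-table size as
	 * log2(size) - 12, hence the (PARTTAB_SIZE_SHIFT - 12) above for the
	 * 64KB table.
	 */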
2133 mmu_parttab_update(uint64_t lpid, uint64_t pagetab, uint64_t proctab)
2138 printf("%s isa3_parttab %p lpid %lx pagetab %lx proctab %lx\n", __func__, isa3_parttab,
2139 lpid, pagetab, proctab);
2140 prev = be64toh(isa3_parttab[lpid].pagetab);
2141 isa3_parttab[lpid].pagetab = htobe64(pagetab);
2142 isa3_parttab[lpid].proctab = htobe64(proctab);
2144 if (prev & PARTTAB_HR) {
2145 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
2146 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2147 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2148 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2150 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
2151 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2157 mmu_radix_parttab_init(void)
2162 pagetab = RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) | \
2163 RADIX_PGD_INDEX_SHIFT | PARTTAB_HR;
2164 mmu_parttab_update(0, pagetab, 0);
2168 mmu_radix_proctab_register(vm_paddr_t proctabpa, uint64_t table_size)
2170 uint64_t pagetab, proctab;
2172 pagetab = be64toh(isa3_parttab[0].pagetab);
2173 proctab = proctabpa | table_size | PARTTAB_GR;
2174 mmu_parttab_update(0, pagetab, proctab);
2178 mmu_radix_proctab_init(void)
2183 isa3_proctab = (void*)PHYS_TO_DMAP(proctab0pa);
2184 isa3_proctab->proctab0 =
2185 htobe64(RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
2186 RADIX_PGD_INDEX_SHIFT);
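/*
 * Process-table entry 0 describes the kernel's radix tree: RTS_SIZE and
 * RADIX_PGD_INDEX_SHIFT encode the address-space and root-directory
 * geometry alongside the physical address of the kernel PML1.
 */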
2188 if (powernv_enabled) {
2189 mmu_radix_proctab_register(proctab0pa, PROCTAB_SIZE_SHIFT - 12);
2190 __asm __volatile("ptesync" : : : "memory");
2191 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2192 "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
2193 __asm __volatile("eieio; tlbsync; ptesync" : : : "memory");
2198 rc = phyp_hcall(H_REGISTER_PROC_TBL,
2199 PROC_TABLE_NEW | PROC_TABLE_RADIX | PROC_TABLE_GTSE,
2200 proctab0pa, 0, PROCTAB_SIZE_SHIFT - 12);
2201 if (rc != H_SUCCESS)
2202 panic("mmu_radix_proctab_init: "
2203 "failed to register process table: rc=%jd",
2209 printf("process table %p and kernel radix PDE: %p\n",
2210 isa3_proctab, kernel_pmap->pm_pml1);
2211 mtmsr(mfmsr() | PSL_DR);
2212 mtmsr(mfmsr() & ~PSL_DR);
2213 kernel_pmap->pm_pid = isa3_base_pid;
2218 mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2221 struct rwlock *lock;
2224 pml3_entry_t oldl3e, *l3e;
2226 vm_offset_t va, va_next;
2230 if (advice != MADV_DONTNEED && advice != MADV_FREE)
2234 for (; sva < eva; sva = va_next) {
2235 l1e = pmap_pml1e(pmap, sva);
2236 if ((be64toh(*l1e) & PG_V) == 0) {
2237 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2242 l2e = pmap_l1e_to_l2e(l1e, sva);
2243 if ((be64toh(*l2e) & PG_V) == 0) {
2244 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2249 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2252 l3e = pmap_l2e_to_l3e(l2e, sva);
2253 oldl3e = be64toh(*l3e);
2254 if ((oldl3e & PG_V) == 0)
2256 else if ((oldl3e & RPTE_LEAF) != 0) {
2257 if ((oldl3e & PG_MANAGED) == 0)
2260 if (!pmap_demote_l3e_locked(pmap, l3e, sva, &lock)) {
2265 * The large page mapping was destroyed.
2271 * Unless the page mappings are wired, remove the
2272 * mapping to a single page so that a subsequent
2273 * access may repromote. Choosing the last page
2274 * within the address range [sva, min(va_next, eva))
2275 * generally results in more repromotions. Since the
2276 * underlying page table page is fully populated, this
2277 * removal never frees a page table page.
2279 if ((oldl3e & PG_W) == 0) {
2285 ("mmu_radix_advise: no address gap"));
2286 pte = pmap_l3e_to_pte(l3e, va);
2287 KASSERT((be64toh(*pte) & PG_V) != 0,
2288 ("pmap_advise: invalid PTE"));
2289 pmap_remove_pte(pmap, pte, va, be64toh(*l3e), NULL,
2299 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next;
2300 pte++, sva += PAGE_SIZE) {
2301 MPASS(pte == pmap_pte(pmap, sva));
2303 if ((be64toh(*pte) & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
2305 else if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2306 if (advice == MADV_DONTNEED) {
2308 * Future calls to pmap_is_modified()
2309 * can be avoided by making the page
2312 m = PHYS_TO_VM_PAGE(be64toh(*pte) & PG_FRAME);
2315 atomic_clear_long(pte, htobe64(PG_M | PG_A));
2316 } else if ((be64toh(*pte) & PG_A) != 0)
2317 atomic_clear_long(pte, htobe64(PG_A));
2323 if (va != va_next) {
2332 pmap_invalidate_all(pmap);
2337 * Routines used in machine-dependent code
2340 mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end)
2345 printf("%s\n", __func__);
2347 powernv_enabled = (mfmsr() & PSL_HV) ? 1 : 0;
2348 mmu_radix_early_bootstrap(start, end);
2350 printf("early bootstrap complete\n");
2351 if (powernv_enabled) {
2352 lpcr = mfspr(SPR_LPCR);
2353 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2354 mmu_radix_parttab_init();
2355 mmu_radix_init_amor();
2357 printf("powernv init complete\n");
2359 mmu_radix_init_iamr();
2360 mmu_radix_proctab_init();
2361 mmu_radix_pid_set(kernel_pmap);
2362 if (powernv_enabled)
2363 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2365 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
2367 mmu_radix_late_bootstrap(start, end);
2368 numa_mem_regions(&numa_pregions, &numa_pregions_sz);
2370 printf("%s done\n", __func__);
2371 pmap_bootstrapped = 1;
2372 dmaplimit = roundup2(powerpc_ptob(Maxmem), L2_PAGE_SIZE);
2373 PCPU_SET(flags, PCPU_GET(flags) | PC_FLAG_NOSRS);
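/*
 * Per-CPU MMU bring-up for APs: repeat the steps the BSP performed in
 * mmu_radix_bootstrap(): enable radix mode in the LPCR, point the PTCR
 * at the partition table, initialize AMOR/IAMR, select the kernel PID
 * and flush stale translations.
 */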
2377 mmu_radix_cpu_bootstrap(int ap)
2382 if (powernv_enabled) {
2383 lpcr = mfspr(SPR_LPCR);
2384 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2386 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2387 mtspr(SPR_PTCR, ptcr);
2388 mmu_radix_init_amor();
2390 mmu_radix_init_iamr();
2391 mmu_radix_pid_set(kernel_pmap);
2392 if (powernv_enabled)
2393 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2395 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
2398 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3e, CTLFLAG_RD, 0,
2399 "2MB page mapping counters");
2401 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_demotions);
2402 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, demotions, CTLFLAG_RD,
2403 &pmap_l3e_demotions, "2MB page demotions");
2405 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_mappings);
2406 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, mappings, CTLFLAG_RD,
2407 &pmap_l3e_mappings, "2MB page mappings");
2409 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_p_failures);
2410 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, p_failures, CTLFLAG_RD,
2411 &pmap_l3e_p_failures, "2MB page promotion failures");
2413 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_promotions);
2414 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, promotions, CTLFLAG_RD,
2415 &pmap_l3e_promotions, "2MB page promotions");
2417 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0,
2418 "1GB page mapping counters");
2420 static COUNTER_U64_DEFINE_EARLY(pmap_l2e_demotions);
2421 SYSCTL_COUNTER_U64(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD,
2422 &pmap_l2e_demotions, "1GB page demotions");
2425 mmu_radix_clear_modify(vm_page_t m)
2427 struct md_page *pvh;
2429 pv_entry_t next_pv, pv;
2430 pml3_entry_t oldl3e, *l3e;
2431 pt_entry_t oldpte, *pte;
2432 struct rwlock *lock;
2434 int md_gen, pvh_gen;
2436 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2437 ("pmap_clear_modify: page %p is not managed", m));
2438 vm_page_assert_busied(m);
2439 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
2442 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
2443 * If the object containing the page is locked and the page is not
2444 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2446 if ((m->a.flags & PGA_WRITEABLE) == 0)
2448 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2449 pa_to_pvh(VM_PAGE_TO_PHYS(m));
2450 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
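/*
 * Two passes follow: first any 2MB mappings on the superpage pv list are
 * demoted and the corresponding 4KB page write-protected, then PG_M is
 * cleared in each remaining 4KB mapping of the page.
 */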
2453 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
2455 if (!PMAP_TRYLOCK(pmap)) {
2456 pvh_gen = pvh->pv_gen;
2460 if (pvh_gen != pvh->pv_gen) {
2466 l3e = pmap_pml3e(pmap, va);
2467 oldl3e = be64toh(*l3e);
2468 if ((oldl3e & PG_RW) != 0 &&
2469 pmap_demote_l3e_locked(pmap, l3e, va, &lock) &&
2470 (oldl3e & PG_W) == 0) {
2472 * Write protect the mapping to a
2473 * single page so that a subsequent
2474 * write access may repromote.
2476 va += VM_PAGE_TO_PHYS(m) - (oldl3e &
2478 pte = pmap_l3e_to_pte(l3e, va);
2479 oldpte = be64toh(*pte);
2480 while (!atomic_cmpset_long(pte,
2482 htobe64((oldpte | RPTE_EAA_R) & ~(PG_M | PG_RW))))
2483 oldpte = be64toh(*pte);
2485 pmap_invalidate_page(pmap, va);
2489 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2491 if (!PMAP_TRYLOCK(pmap)) {
2492 md_gen = m->md.pv_gen;
2493 pvh_gen = pvh->pv_gen;
2497 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2502 l3e = pmap_pml3e(pmap, pv->pv_va);
2503 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_clear_modify: found"
2504 " a 2mpage in page %p's pv list", m));
2505 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
2506 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2507 atomic_clear_long(pte, htobe64(PG_M));
2508 pmap_invalidate_page(pmap, pv->pv_va);
2516 mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2517 vm_size_t len, vm_offset_t src_addr)
2519 struct rwlock *lock;
2520 struct spglist free;
2522 vm_offset_t end_addr = src_addr + len;
2523 vm_offset_t va_next;
2524 vm_page_t dst_pdpg, dstmpte, srcmpte;
2525 bool invalidate_all;
2528 "%s(dst_pmap=%p, src_pmap=%p, dst_addr=%lx, len=%lu, src_addr=%lx)\n",
2529 __func__, dst_pmap, src_pmap, dst_addr, len, src_addr);
2531 if (dst_addr != src_addr)
2534 invalidate_all = false;
2535 if (dst_pmap < src_pmap) {
2536 PMAP_LOCK(dst_pmap);
2537 PMAP_LOCK(src_pmap);
2539 PMAP_LOCK(src_pmap);
2540 PMAP_LOCK(dst_pmap);
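/*
 * The two pmaps are locked in ascending address order so that concurrent
 * pmap_copy() calls on the same pair of pmaps cannot deadlock.
 */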
2543 for (addr = src_addr; addr < end_addr; addr = va_next) {
2546 pml3_entry_t srcptepaddr, *l3e;
2547 pt_entry_t *src_pte, *dst_pte;
2549 l1e = pmap_pml1e(src_pmap, addr);
2550 if ((be64toh(*l1e) & PG_V) == 0) {
2551 va_next = (addr + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2557 l2e = pmap_l1e_to_l2e(l1e, addr);
2558 if ((be64toh(*l2e) & PG_V) == 0) {
2559 va_next = (addr + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2565 va_next = (addr + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2569 l3e = pmap_l2e_to_l3e(l2e, addr);
2570 srcptepaddr = be64toh(*l3e);
2571 if (srcptepaddr == 0)
2574 if (srcptepaddr & RPTE_LEAF) {
2575 if ((addr & L3_PAGE_MASK) != 0 ||
2576 addr + L3_PAGE_SIZE > end_addr)
2578 dst_pdpg = pmap_allocl3e(dst_pmap, addr, NULL);
2579 if (dst_pdpg == NULL)
2581 l3e = (pml3_entry_t *)
2582 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
2583 l3e = &l3e[pmap_pml3e_index(addr)];
2584 if (be64toh(*l3e) == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
2585 pmap_pv_insert_l3e(dst_pmap, addr, srcptepaddr,
2586 PMAP_ENTER_NORECLAIM, &lock))) {
2587 *l3e = htobe64(srcptepaddr & ~PG_W);
2588 pmap_resident_count_inc(dst_pmap,
2589 L3_PAGE_SIZE / PAGE_SIZE);
2590 counter_u64_add(pmap_l3e_mappings, 1);
2592 dst_pdpg->ref_count--;
2596 srcptepaddr &= PG_FRAME;
2597 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
2598 KASSERT(srcmpte->ref_count > 0,
2599 ("pmap_copy: source page table page is unused"));
2601 if (va_next > end_addr)
2604 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
2605 src_pte = &src_pte[pmap_pte_index(addr)];
2607 while (addr < va_next) {
2609 ptetemp = be64toh(*src_pte);
2611 * we only virtually copy managed pages
2613 if ((ptetemp & PG_MANAGED) != 0) {
2614 if (dstmpte != NULL &&
2615 dstmpte->pindex == pmap_l3e_pindex(addr))
2616 dstmpte->ref_count++;
2617 else if ((dstmpte = pmap_allocpte(dst_pmap,
2618 addr, NULL)) == NULL)
2620 dst_pte = (pt_entry_t *)
2621 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
2622 dst_pte = &dst_pte[pmap_pte_index(addr)];
2623 if (be64toh(*dst_pte) == 0 &&
2624 pmap_try_insert_pv_entry(dst_pmap, addr,
2625 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
2628 * Clear the wired, modified, and
2629 * accessed (referenced) bits
2632 *dst_pte = htobe64(ptetemp & ~(PG_W | PG_M |
2634 pmap_resident_count_inc(dst_pmap, 1);
2637 if (pmap_unwire_ptp(dst_pmap, addr,
2640 * Although "addr" is not
2641 * mapped, paging-structure
2642 * caches could nonetheless
2643 * have entries that refer to
2644 * the freed page table pages.
2645 * Invalidate those entries.
2647 invalidate_all = true;
2648 vm_page_free_pages_toq(&free,
2653 if (dstmpte->ref_count >= srcmpte->ref_count)
2657 if (__predict_false((addr & L3_PAGE_MASK) == 0))
2658 src_pte = pmap_pte(src_pmap, addr);
2665 pmap_invalidate_all(dst_pmap);
2668 PMAP_UNLOCK(src_pmap);
2669 PMAP_UNLOCK(dst_pmap);
2673 mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst)
2675 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2676 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2678 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
2682 bcopy((void *)src, (void *)dst, PAGE_SIZE);
2686 mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2687 vm_offset_t b_offset, int xfersize)
2690 vm_offset_t a_pg_offset, b_pg_offset;
2693 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
2694 a_offset, mb, b_offset, xfersize);
2696 while (xfersize > 0) {
2697 a_pg_offset = a_offset & PAGE_MASK;
2698 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2699 a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
2700 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
2702 b_pg_offset = b_offset & PAGE_MASK;
2703 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2704 b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
2705 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
2707 bcopy(a_cp, b_cp, cnt);
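/*
 * cnt is clamped to the space remaining in both the current source and
 * destination pages, so a single bcopy() never crosses a page boundary.
 */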
2714 #if VM_NRESERVLEVEL > 0
2716 * Tries to promote the 512, contiguous 4KB page mappings that are within a
2717 * single page table page (PTP) to a single 2MB page mapping. For promotion
2718 * to occur, two conditions must be met: (1) the 4KB page mappings must map
2719 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
2720 * identical characteristics.
2723 pmap_promote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va,
2724 struct rwlock **lockp)
2726 pml3_entry_t newpde;
2727 pt_entry_t *firstpte, oldpte, pa, *pte;
2730 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2733 * Examine the first PTE in the specified PTP. Abort if this PTE is
2734 * either invalid, unused, or does not map the first 4KB physical page
2735 * within a 2MB page.
2737 firstpte = (pt_entry_t *)PHYS_TO_DMAP(be64toh(*pde) & PG_FRAME);
2739 newpde = be64toh(*firstpte);
2740 if ((newpde & ((PG_FRAME & L3_PAGE_MASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
2741 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2742 " in pmap %p", va, pmap);
2745 if ((newpde & (PG_M | PG_RW)) == PG_RW) {
2747 * When PG_M is already clear, PG_RW can be cleared without
2748 * a TLB invalidation.
2750 if (!atomic_cmpset_long(firstpte, htobe64(newpde), htobe64((newpde | RPTE_EAA_R) & ~RPTE_EAA_W)))
2752 newpde &= ~RPTE_EAA_W;
2756 * Examine each of the other PTEs in the specified PTP. Abort if this
2757 * PTE maps an unexpected 4KB physical page or does not have identical
2758 * characteristics to the first PTE.
2760 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + L3_PAGE_SIZE - PAGE_SIZE;
2761 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
2763 oldpte = be64toh(*pte);
2764 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
2765 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2766 " in pmap %p", va, pmap);
2769 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
2771 * When PG_M is already clear, PG_RW can be cleared
2772 * without a TLB invalidation.
2774 if (!atomic_cmpset_long(pte, htobe64(oldpte), htobe64((oldpte | RPTE_EAA_R) & ~RPTE_EAA_W)))
2776 oldpte &= ~RPTE_EAA_W;
2777 CTR2(KTR_PMAP, "pmap_promote_l3e: protect for va %#lx"
2778 " in pmap %p", (oldpte & PG_FRAME & L3_PAGE_MASK) |
2779 (va & ~L3_PAGE_MASK), pmap);
2781 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
2782 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2783 " in pmap %p", va, pmap);
2790 * Save the page table page in its current state until the PDE
2791 * mapping the superpage is demoted by pmap_demote_pde() or
2792 * destroyed by pmap_remove_pde().
2794 mpte = PHYS_TO_VM_PAGE(be64toh(*pde) & PG_FRAME);
2795 KASSERT(mpte >= vm_page_array &&
2796 mpte < &vm_page_array[vm_page_array_size],
2797 ("pmap_promote_l3e: page table page is out of range"));
2798 KASSERT(mpte->pindex == pmap_l3e_pindex(va),
2799 ("pmap_promote_l3e: page table page's pindex is wrong"));
2800 if (pmap_insert_pt_page(pmap, mpte)) {
2802 "pmap_promote_l3e: failure for va %#lx in pmap %p", va,
2808 * Promote the pv entries.
2810 if ((newpde & PG_MANAGED) != 0)
2811 pmap_pv_promote_l3e(pmap, va, newpde & PG_PS_FRAME, lockp);
2813 pte_store(pde, PG_PROMOTED | newpde);
2815 counter_u64_add(pmap_l3e_promotions, 1);
2816 CTR2(KTR_PMAP, "pmap_promote_l3e: success for va %#lx"
2817 " in pmap %p", va, pmap);
2820 counter_u64_add(pmap_l3e_p_failures, 1);
2821 return (KERN_FAILURE);
2823 #endif /* VM_NRESERVLEVEL > 0 */
2826 mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
2827 vm_prot_t prot, u_int flags, int8_t psind)
2829 struct rwlock *lock;
2832 pt_entry_t newpte, origpte;
2837 boolean_t nosleep, invalidate_all, invalidate_page;
2839 va = trunc_page(va);
2841 invalidate_page = invalidate_all = false;
2842 CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va,
2843 m, prot, flags, psind);
2844 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
2845 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
2846 ("pmap_enter: managed mapping within the clean submap"));
2847 if ((m->oflags & VPO_UNMANAGED) == 0)
2848 VM_PAGE_OBJECT_BUSY_ASSERT(m);
2850 KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
2851 ("pmap_enter: flags %u has reserved bits set", flags));
2852 pa = VM_PAGE_TO_PHYS(m);
2853 newpte = (pt_entry_t)(pa | PG_A | PG_V | RPTE_LEAF);
2854 if ((flags & VM_PROT_WRITE) != 0)
2856 if ((flags & VM_PROT_READ) != 0)
2858 if (prot & VM_PROT_READ)
2859 newpte |= RPTE_EAA_R;
2860 if ((prot & VM_PROT_WRITE) != 0)
2861 newpte |= RPTE_EAA_W;
2862 KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
2863 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
2865 if (prot & VM_PROT_EXECUTE)
2867 if ((flags & PMAP_ENTER_WIRED) != 0)
2869 if (va >= DMAP_MIN_ADDRESS)
2870 newpte |= RPTE_EAA_P;
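/* RPTE_EAA_P marks kernel addresses as privileged-access only. */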
2871 newpte |= pmap_cache_bits(m->md.mdpg_cache_attrs);
2873 * Set modified bit gratuitously for writeable mappings if
2874 * the page is unmanaged. We do not want to take a fault
2875 * to do the dirty bit accounting for these mappings.
2877 if ((m->oflags & VPO_UNMANAGED) != 0) {
2878 if ((newpte & PG_RW) != 0)
2881 newpte |= PG_MANAGED;
2886 /* Assert the required virtual and physical alignment. */
2887 KASSERT((va & L3_PAGE_MASK) == 0, ("pmap_enter: va unaligned"));
2888 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
2889 rv = pmap_enter_l3e(pmap, va, newpte | RPTE_LEAF, flags, m, &lock);
2895 * In the case that a page table page is not
2896 * resident, we are creating it here.
2899 l3e = pmap_pml3e(pmap, va);
2900 if (l3e != NULL && (be64toh(*l3e) & PG_V) != 0 && ((be64toh(*l3e) & RPTE_LEAF) == 0 ||
2901 pmap_demote_l3e_locked(pmap, l3e, va, &lock))) {
2902 pte = pmap_l3e_to_pte(l3e, va);
2903 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
2904 mpte = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME);
2907 } else if (va < VM_MAXUSER_ADDRESS) {
2909 * Here if the pte page isn't mapped, or if it has been
2912 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2913 mpte = _pmap_allocpte(pmap, pmap_l3e_pindex(va),
2914 nosleep ? NULL : &lock);
2915 if (mpte == NULL && nosleep) {
2916 rv = KERN_RESOURCE_SHORTAGE;
2919 if (__predict_false(retrycount++ == 6))
2920 panic("too many retries");
2921 invalidate_all = true;
2924 panic("pmap_enter: invalid page directory va=%#lx", va);
2926 origpte = be64toh(*pte);
2930 * Is the specified virtual address already mapped?
2932 if ((origpte & PG_V) != 0) {
2934 if (VERBOSE_PMAP || pmap_logging) {
2935 printf("cow fault pmap_enter(%p, %#lx, %p, %#x, %x, %d) --"
2936 " asid=%lu curpid=%d name=%s origpte0x%lx\n",
2937 pmap, va, m, prot, flags, psind, pmap->pm_pid,
2938 curproc->p_pid, curproc->p_comm, origpte);
2940 pmap_pte_walk(pmap->pm_pml1, va);
2945 * Wiring change, just update stats. We don't worry about
2946 * wiring PT pages as they remain resident as long as there
2947 * are valid mappings in them. Hence, if a user page is wired,
2948 * the PT page will be also.
2950 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
2951 pmap->pm_stats.wired_count++;
2952 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
2953 pmap->pm_stats.wired_count--;
2956 * Remove the extra PT page reference.
2960 KASSERT(mpte->ref_count > 0,
2961 ("pmap_enter: missing reference to page table page,"
2966 * Has the physical page changed?
2968 opa = origpte & PG_FRAME;
2971 * No, might be a protection or wiring change.
2973 if ((origpte & PG_MANAGED) != 0 &&
2974 (newpte & PG_RW) != 0)
2975 vm_page_aflag_set(m, PGA_WRITEABLE);
2976 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) {
2977 if ((newpte & (PG_A|PG_M)) != (origpte & (PG_A|PG_M))) {
2978 if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte)))
2980 if ((newpte & PG_M) != (origpte & PG_M))
2982 if ((newpte & PG_A) != (origpte & PG_A))
2983 vm_page_aflag_set(m, PGA_REFERENCED);
2986 invalidate_all = true;
2987 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
2994 * The physical page has changed. Temporarily invalidate
2995 * the mapping. This ensures that all threads sharing the
2996 * pmap keep a consistent view of the mapping, which is
2997 * necessary for the correct handling of COW faults. It
2998 * also permits reuse of the old mapping's PV entry,
2999 * avoiding an allocation.
3001 * For consistency, handle unmanaged mappings the same way.
3003 origpte = be64toh(pte_load_clear(pte));
3004 KASSERT((origpte & PG_FRAME) == opa,
3005 ("pmap_enter: unexpected pa update for %#lx", va));
3006 if ((origpte & PG_MANAGED) != 0) {
3007 om = PHYS_TO_VM_PAGE(opa);
3010 * The pmap lock is sufficient to synchronize with
3011 * concurrent calls to pmap_page_test_mappings() and
3012 * pmap_ts_referenced().
3014 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3016 if ((origpte & PG_A) != 0)
3017 vm_page_aflag_set(om, PGA_REFERENCED);
3018 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3019 pv = pmap_pvh_remove(&om->md, pmap, va);
3020 if ((newpte & PG_MANAGED) == 0)
3021 free_pv_entry(pmap, pv);
3023 else if (origpte & PG_MANAGED) {
3026 pmap_page_print_mappings(om);
3032 if ((om->a.flags & PGA_WRITEABLE) != 0 &&
3033 TAILQ_EMPTY(&om->md.pv_list) &&
3034 ((om->flags & PG_FICTITIOUS) != 0 ||
3035 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3036 vm_page_aflag_clear(om, PGA_WRITEABLE);
3038 if ((origpte & PG_A) != 0)
3039 invalidate_page = true;
3042 if (pmap != kernel_pmap) {
3044 if (VERBOSE_PMAP || pmap_logging)
3045 printf("pmap_enter(%p, %#lx, %p, %#x, %x, %d) -- asid=%lu curpid=%d name=%s\n",
3046 pmap, va, m, prot, flags, psind,
3047 pmap->pm_pid, curproc->p_pid,
3053 * Increment the counters.
3055 if ((newpte & PG_W) != 0)
3056 pmap->pm_stats.wired_count++;
3057 pmap_resident_count_inc(pmap, 1);
3061 * Enter on the PV list if part of our managed memory.
3063 if ((newpte & PG_MANAGED) != 0) {
3065 pv = get_pv_entry(pmap, &lock);
3070 printf("reassigning pv: %p to pmap: %p\n",
3073 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3074 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3076 if ((newpte & PG_RW) != 0)
3077 vm_page_aflag_set(m, PGA_WRITEABLE);
3083 if ((origpte & PG_V) != 0) {
3085 origpte = be64toh(pte_load_store(pte, htobe64(newpte)));
3086 KASSERT((origpte & PG_FRAME) == pa,
3087 ("pmap_enter: unexpected pa update for %#lx", va));
3088 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
3090 if ((origpte & PG_MANAGED) != 0)
3092 invalidate_page = true;
3095 * Although the PTE may still have PG_RW set, TLB
3096 * invalidation may nonetheless be required because
3097 * the PTE no longer has PG_M set.
3099 } else if ((origpte & PG_X) != 0 || (newpte & PG_X) == 0) {
3101 * Removing capabilities requires invalidation on POWER
3103 invalidate_page = true;
3106 if ((origpte & PG_A) != 0)
3107 invalidate_page = true;
3109 pte_store(pte, newpte);
3114 #if VM_NRESERVLEVEL > 0
3116 * If both the page table page and the reservation are fully
3117 * populated, then attempt promotion.
3119 if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
3120 mmu_radix_ps_enabled(pmap) &&
3121 (m->flags & PG_FICTITIOUS) == 0 &&
3122 vm_reserv_level_iffullpop(m) == 0 &&
3123 pmap_promote_l3e(pmap, l3e, va, &lock) == 0)
3124 invalidate_all = true;
3127 pmap_invalidate_all(pmap);
3128 else if (invalidate_page)
3129 pmap_invalidate_page(pmap, va);
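/*
 * invalidate_all is set on paths that may leave stale superpage or
 * page-table-structure translations (allocation retries, attribute
 * rewrites, promotion); invalidate_page covers the common case where
 * only the previous 4KB PTE could still be cached.
 */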
3141 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
3142 * if successful. Returns false if (1) a page table page cannot be allocated
3143 * without sleeping, (2) a mapping already exists at the specified virtual
3144 * address, or (3) a PV entry cannot be allocated without reclaiming another
3148 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3149 struct rwlock **lockp)
3151 pml3_entry_t newpde;
3153 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3154 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs) |
3156 if ((m->oflags & VPO_UNMANAGED) == 0)
3157 newpde |= PG_MANAGED;
3158 if (prot & VM_PROT_EXECUTE)
3160 if (prot & VM_PROT_READ)
3161 newpde |= RPTE_EAA_R;
3162 if (va >= DMAP_MIN_ADDRESS)
3163 newpde |= RPTE_EAA_P;
3164 return (pmap_enter_l3e(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
3165 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3170 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
3171 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3172 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3173 * a mapping already exists at the specified virtual address. Returns
3174 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3175 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
3176 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3178 * The parameter "m" is only used when creating a managed, writeable mapping.
3181 pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
3182 vm_page_t m, struct rwlock **lockp)
3184 struct spglist free;
3185 pml3_entry_t oldl3e, *l3e;
3188 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW,
3189 ("pmap_enter_pde: newpde is missing PG_M"));
3190 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3192 if ((pdpg = pmap_allocl3e(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
3193 NULL : lockp)) == NULL) {
3194 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3195 " in pmap %p", va, pmap);
3196 return (KERN_RESOURCE_SHORTAGE);
3198 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
3199 l3e = &l3e[pmap_pml3e_index(va)];
3200 oldl3e = be64toh(*l3e);
3201 if ((oldl3e & PG_V) != 0) {
3202 KASSERT(pdpg->ref_count > 1,
3203 ("pmap_enter_pde: pdpg's wire count is too low"));
3204 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
3206 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3207 " in pmap %p", va, pmap);
3208 return (KERN_FAILURE);
3210 /* Break the existing mapping(s). */
3212 if ((oldl3e & RPTE_LEAF) != 0) {
3214 * The reference to the PD page that was acquired by
3215 * pmap_allocl3e() ensures that it won't be freed.
3216 * However, if the PDE resulted from a promotion, then
3217 * a reserved PT page could be freed.
3219 (void)pmap_remove_l3e(pmap, l3e, va, &free, lockp);
3220 pmap_invalidate_l3e_page(pmap, va, oldl3e);
3222 if (pmap_remove_ptes(pmap, va, va + L3_PAGE_SIZE, l3e,
3224 pmap_invalidate_all(pmap);
3226 vm_page_free_pages_toq(&free, true);
3227 if (va >= VM_MAXUSER_ADDRESS) {
3228 mt = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME);
3229 if (pmap_insert_pt_page(pmap, mt)) {
3231 * XXX Currently, this can't happen because
3232 * we do not perform pmap_enter(psind == 1)
3233 * on the kernel pmap.
3235 panic("pmap_enter_pde: trie insert failed");
3238 KASSERT(be64toh(*l3e) == 0, ("pmap_enter_pde: non-zero pde %p",
3241 if ((newpde & PG_MANAGED) != 0) {
3243 * Abort this mapping if its PV entry could not be created.
3245 if (!pmap_pv_insert_l3e(pmap, va, newpde, flags, lockp)) {
3247 if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
3249 * Although "va" is not mapped, paging-
3250 * structure caches could nonetheless have
3251 * entries that refer to the freed page table
3252 * pages. Invalidate those entries.
3254 pmap_invalidate_page(pmap, va);
3255 vm_page_free_pages_toq(&free, true);
3257 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3258 " in pmap %p", va, pmap);
3259 return (KERN_RESOURCE_SHORTAGE);
3261 if ((newpde & PG_RW) != 0) {
3262 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
3263 vm_page_aflag_set(mt, PGA_WRITEABLE);
3268 * Increment counters.
3270 if ((newpde & PG_W) != 0)
3271 pmap->pm_stats.wired_count += L3_PAGE_SIZE / PAGE_SIZE;
3272 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
3275 * Map the superpage. (This is not a promoted mapping; there will not
3276 * be any lingering 4KB page mappings in the TLB.)
3278 pte_store(l3e, newpde);
3281 counter_u64_add(pmap_l3e_mappings, 1);
3282 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
3283 " in pmap %p", va, pmap);
3284 return (KERN_SUCCESS);
3288 mmu_radix_enter_object(pmap_t pmap, vm_offset_t start,
3289 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
3292 struct rwlock *lock;
3295 vm_pindex_t diff, psize;
3297 VM_OBJECT_ASSERT_LOCKED(m_start->object);
3299 CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
3300 end, m_start, prot);
3303 psize = atop(end - start);
3308 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3309 va = start + ptoa(diff);
3310 if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end &&
3311 m->psind == 1 && mmu_radix_ps_enabled(pmap) &&
3312 pmap_enter_2mpage(pmap, va, m, prot, &lock))
3313 m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1];
3315 mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot,
3316 mpte, &lock, &invalidate);
3317 m = TAILQ_NEXT(m, listq);
3323 pmap_invalidate_all(pmap);
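/*
 * 2MB-aligned, fully populated runs are entered via pmap_enter_2mpage();
 * everything else falls back to 4KB mappings through
 * mmu_radix_enter_quick_locked() below, which may request the deferred
 * invalidation performed above.
 */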
3328 mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3329 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate)
3331 struct spglist free;
3335 KASSERT(!VA_IS_CLEANMAP(va) ||
3336 (m->oflags & VPO_UNMANAGED) != 0,
3337 ("mmu_radix_enter_quick_locked: managed mapping within the clean submap"));
3338 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3341 * In the case that a page table page is not
3342 * resident, we are creating it here.
3344 if (va < VM_MAXUSER_ADDRESS) {
3345 vm_pindex_t ptepindex;
3346 pml3_entry_t *ptepa;
3349 * Calculate pagetable page index
3351 ptepindex = pmap_l3e_pindex(va);
3352 if (mpte && (mpte->pindex == ptepindex)) {
3356 * Get the page directory entry
3358 ptepa = pmap_pml3e(pmap, va);
3361 * If the page table page is mapped, we just increment
3362 * the hold count, and activate it. Otherwise, we
3363 * attempt to allocate a page table page. If this
3364 * attempt fails, we don't retry. Instead, we give up.
3366 if (ptepa && (be64toh(*ptepa) & PG_V) != 0) {
3367 if (be64toh(*ptepa) & RPTE_LEAF)
3369 mpte = PHYS_TO_VM_PAGE(be64toh(*ptepa) & PG_FRAME);
3373 * Pass NULL instead of the PV list lock
3374 * pointer, because we don't intend to sleep.
3376 mpte = _pmap_allocpte(pmap, ptepindex, NULL);
3381 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3382 pte = &pte[pmap_pte_index(va)];
3385 pte = pmap_pte(pmap, va);
3387 if (be64toh(*pte)) {
3396 * Enter on the PV list if part of our managed memory.
3398 if ((m->oflags & VPO_UNMANAGED) == 0 &&
3399 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3402 if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
3404 * Although "va" is not mapped, paging-
3405 * structure caches could nonetheless have
3406 * entries that refer to the freed page table
3407 * pages. Invalidate those entries.
3410 vm_page_free_pages_toq(&free, true);
3418 * Increment counters
3420 pmap_resident_count_inc(pmap, 1);
3422 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs);
3423 if (prot & VM_PROT_EXECUTE)
3427 if ((m->oflags & VPO_UNMANAGED) == 0)
3435 mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
3438 struct rwlock *lock;
3444 mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock,
3450 pmap_invalidate_all(pmap);
3455 mmu_radix_extract(pmap_t pmap, vm_offset_t va)
3461 l3e = pmap_pml3e(pmap, va);
3462 if (__predict_false(l3e == NULL))
3464 if (be64toh(*l3e) & RPTE_LEAF) {
3465 pa = (be64toh(*l3e) & PG_PS_FRAME) | (va & L3_PAGE_MASK);
3469 * Beware of a concurrent promotion that changes the
3470 * PDE at this point! For example, vtopte() must not
3471 * be used to access the PTE because it would use the
3472 * new PDE. It is, however, safe to use the old PDE
3473 * because the page table page is preserved by the
3476 pte = pmap_l3e_to_pte(l3e, va);
3477 if (__predict_false(pte == NULL))
3480 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
3487 mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3489 pml3_entry_t l3e, *l3ep;
3494 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
3496 l3ep = pmap_pml3e(pmap, va);
3497 if (l3ep != NULL && (l3e = be64toh(*l3ep))) {
3498 if (l3e & RPTE_LEAF) {
3499 if ((l3e & PG_RW) || (prot & VM_PROT_WRITE) == 0)
3500 m = PHYS_TO_VM_PAGE((l3e & PG_PS_FRAME) |
3501 (va & L3_PAGE_MASK));
3503 /* Native endian PTE, do not pass to pmap functions */
3504 pte = be64toh(*pmap_l3e_to_pte(l3ep, va));
3506 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0))
3507 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3509 if (m != NULL && !vm_page_wire_mapped(m))
3517 mmu_radix_growkernel(vm_offset_t addr)
3524 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
3525 if (VM_MIN_KERNEL_ADDRESS < addr &&
3526 addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE))
3529 addr = roundup2(addr, L3_PAGE_SIZE);
3530 if (addr - 1 >= vm_map_max(kernel_map))
3531 addr = vm_map_max(kernel_map);
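/*
 * The kernel map grows one 2MB (L3) page table page at a time,
 * allocating any missing intermediate L2 entries along the way.
 */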
3532 while (kernel_vm_end < addr) {
3533 l2e = pmap_pml2e(kernel_pmap, kernel_vm_end);
3534 if ((be64toh(*l2e) & PG_V) == 0) {
3535 /* We need a new PDP entry */
3536 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
3537 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3539 panic("pmap_growkernel: no memory to grow kernel");
3540 nkpg->pindex = kernel_vm_end >> L2_PAGE_SIZE_SHIFT;
3541 paddr = VM_PAGE_TO_PHYS(nkpg);
3542 pde_store(l2e, paddr);
3543 continue; /* try again */
3545 l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end);
3546 if ((be64toh(*l3e) & PG_V) != 0) {
3547 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3548 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3549 kernel_vm_end = vm_map_max(kernel_map);
3555 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
3558 panic("pmap_growkernel: no memory to grow kernel");
3559 nkpg->pindex = pmap_l3e_pindex(kernel_vm_end);
3560 paddr = VM_PAGE_TO_PHYS(nkpg);
3561 pde_store(l3e, paddr);
3563 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3564 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3565 kernel_vm_end = vm_map_max(kernel_map);
3572 static MALLOC_DEFINE(M_RADIX_PGD, "radix_pgd", "radix page table root directory");
3573 static uma_zone_t zone_radix_pgd;
3576 radix_pgd_import(void *arg __unused, void **store, int count, int domain __unused,
3581 req = VM_ALLOC_WIRED | malloc2vm_flags(flags);
3582 for (int i = 0; i < count; i++) {
3583 vm_page_t m = vm_page_alloc_noobj_contig(req,
3584 RADIX_PGD_SIZE / PAGE_SIZE,
3585 0, (vm_paddr_t)-1, RADIX_PGD_SIZE, L1_PAGE_SIZE,
3586 VM_MEMATTR_DEFAULT);
3587 store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
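/*
 * Each radix page directory is a physically contiguous allocation of
 * RADIX_PGD_SIZE bytes, aligned to RADIX_PGD_SIZE and accessed through
 * the direct map.
 */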
3593 radix_pgd_release(void *arg __unused, void **store, int count)
3596 struct spglist free;
3600 page_count = RADIX_PGD_SIZE/PAGE_SIZE;
3602 for (int i = 0; i < count; i++) {
3604 * XXX selectively remove dmap and KVA entries so we don't
3607 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
3608 for (int j = page_count-1; j >= 0; j--) {
3609 vm_page_unwire_noq(&m[j]);
3610 SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss);
3612 vm_page_free_pages_toq(&free, false);
3617 mmu_radix_init(void)
3621 int error, i, pv_npg;
3623 /* XXX is this really needed for POWER? */
3624 /* L1TF, reserve page @0 unconditionally */
3625 vm_page_blacklist_add(0, bootverbose);
3627 zone_radix_pgd = uma_zcache_create("radix_pgd_cache",
3628 RADIX_PGD_SIZE, NULL, NULL,
3630 trash_init, trash_fini,
3634 radix_pgd_import, radix_pgd_release,
3635 NULL, UMA_ZONE_NOBUCKET);
3638 * Initialize the vm page array entries for the kernel pmap's
3641 PMAP_LOCK(kernel_pmap);
3642 for (i = 0; i < nkpt; i++) {
3643 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
3644 KASSERT(mpte >= vm_page_array &&
3645 mpte < &vm_page_array[vm_page_array_size],
3646 ("pmap_init: page table page is out of range size: %lu",
3647 vm_page_array_size));
3648 mpte->pindex = pmap_l3e_pindex(VM_MIN_KERNEL_ADDRESS) + i;
3649 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
3650 MPASS(PHYS_TO_VM_PAGE(mpte->phys_addr) == mpte);
3651 //pmap_insert_pt_page(kernel_pmap, mpte);
3652 mpte->ref_count = 1;
3654 PMAP_UNLOCK(kernel_pmap);
3657 CTR1(KTR_PMAP, "%s()", __func__);
3658 TAILQ_INIT(&pv_dummy.pv_list);
3661 * Are large page mappings enabled?
3663 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
3664 if (superpages_enabled) {
3665 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
3666 ("pmap_init: can't assign to pagesizes[1]"));
3667 pagesizes[1] = L3_PAGE_SIZE;
3671 * Initialize the pv chunk list mutex.
3673 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
3676 * Initialize the pool of pv list locks.
3678 for (i = 0; i < NPV_LIST_LOCKS; i++)
3679 rw_init(&pv_list_locks[i], "pmap pv list");
3682 * Calculate the size of the pv head table for superpages.
3684 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L3_PAGE_SIZE);
3687 * Allocate memory for the pv head table for superpages.
3689 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
3691 pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
3692 for (i = 0; i < pv_npg; i++)
3693 TAILQ_INIT(&pv_table[i].pv_list);
3694 TAILQ_INIT(&pv_dummy.pv_list);
3696 pmap_initialized = 1;
3697 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
3698 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3699 (vmem_addr_t *)&qframe);
3702 panic("qframe allocation failed");
3703 asid_arena = vmem_create("ASID", isa3_base_pid + 1, (1<<isa3_pid_bits),
3708 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3710 struct rwlock *lock;
3712 struct md_page *pvh;
3713 pt_entry_t *pte, mask;
3715 int md_gen, pvh_gen;
3719 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3722 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3724 if (!PMAP_TRYLOCK(pmap)) {
3725 md_gen = m->md.pv_gen;
3729 if (md_gen != m->md.pv_gen) {
3734 pte = pmap_pte(pmap, pv->pv_va);
3737 mask |= PG_RW | PG_M;
3739 mask |= PG_V | PG_A;
3740 rv = (be64toh(*pte) & mask) == mask;
3745 if ((m->flags & PG_FICTITIOUS) == 0) {
3746 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3747 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
3749 if (!PMAP_TRYLOCK(pmap)) {
3750 md_gen = m->md.pv_gen;
3751 pvh_gen = pvh->pv_gen;
3755 if (md_gen != m->md.pv_gen ||
3756 pvh_gen != pvh->pv_gen) {
3761 pte = pmap_pml3e(pmap, pv->pv_va);
3764 mask |= PG_RW | PG_M;
3766 mask |= PG_V | PG_A;
3767 rv = (be64toh(*pte) & mask) == mask;
3781 * Return whether or not the specified physical page was modified
3782 * in any physical maps.
3785 mmu_radix_is_modified(vm_page_t m)
3788 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3789 ("pmap_is_modified: page %p is not managed", m));
3791 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3793 * If the page is not busied then this check is racy.
3795 if (!pmap_page_is_write_mapped(m))
3797 return (pmap_page_test_mappings(m, FALSE, TRUE));
3801 mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3807 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
3810 l3e = pmap_pml3e(pmap, addr);
3811 if (l3e != NULL && (be64toh(*l3e) & (RPTE_LEAF | PG_V)) == PG_V) {
3812 pte = pmap_l3e_to_pte(l3e, addr);
3813 rv = (be64toh(*pte) & PG_V) == 0;
3820 mmu_radix_is_referenced(vm_page_t m)
3822 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3823 ("pmap_is_referenced: page %p is not managed", m));
3824 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3825 return (pmap_page_test_mappings(m, TRUE, FALSE));
3829 * pmap_ts_referenced:
3831 * Return a count of reference bits for a page, clearing those bits.
3832 * It is not necessary for every reference bit to be cleared, but it
3833 * is necessary that 0 only be returned when there are truly no
3834 * reference bits set.
3836 * As an optimization, update the page's dirty field if a modified bit is
3837 * found while counting reference bits. This opportunistic update can be
3838 * performed at low cost and can eliminate the need for some future calls
3839 * to pmap_is_modified(). However, since this function stops after
3840 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3841 * dirty pages. Those dirty pages will only be detected by a future call
3842 * to pmap_is_modified().
3844 * A DI block is not needed within this function, because
3845 * invalidations are performed before the PV list lock is
3849 mmu_radix_ts_referenced(vm_page_t m)
3851 struct md_page *pvh;
3854 struct rwlock *lock;
3855 pml3_entry_t oldl3e, *l3e;
3858 int cleared, md_gen, not_cleared, pvh_gen;
3859 struct spglist free;
3861 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3862 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3863 ("pmap_ts_referenced: page %p is not managed", m));
3866 pa = VM_PAGE_TO_PHYS(m);
3867 lock = PHYS_TO_PV_LIST_LOCK(pa);
3868 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
3872 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
3873 goto small_mappings;
3879 if (!PMAP_TRYLOCK(pmap)) {
3880 pvh_gen = pvh->pv_gen;
3884 if (pvh_gen != pvh->pv_gen) {
3889 l3e = pmap_pml3e(pmap, pv->pv_va);
3890 oldl3e = be64toh(*l3e);
3891 if ((oldl3e & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
3893 * Although "oldpde" is mapping a 2MB page, because
3894 * this function is called at a 4KB page granularity,
3895 * we only update the 4KB page under test.
3899 if ((oldl3e & PG_A) != 0) {
3901 * Since this reference bit is shared by 512 4KB
3902 * pages, it should not be cleared every time it is
3903 * tested. Apply a simple "hash" function on the
3904 * physical page number, the virtual superpage number,
3905 * and the pmap address to select one 4KB page out of
3906 * the 512 on which testing the reference bit will
3907 * result in clearing that reference bit. This
3908 * function is designed to avoid the selection of the
3909 * same 4KB page for every 2MB page mapping.
3911 * On demotion, a mapping that hasn't been referenced
3912 * is simply destroyed. To avoid the possibility of a
3913 * subsequent page fault on a demoted wired mapping,
3914 * always leave its reference bit set. Moreover,
3915 * since the superpage is wired, the current state of
3916 * its reference bit won't affect page replacement.
3918 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L3_PAGE_SIZE_SHIFT) ^
3919 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
3920 (oldl3e & PG_W) == 0) {
3921 atomic_clear_long(l3e, htobe64(PG_A));
3922 pmap_invalidate_page(pmap, pv->pv_va);
3924 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3925 ("inconsistent pv lock %p %p for page %p",
3926 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3931 /* Rotate the PV list if it has more than one entry. */
3932 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3933 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
3934 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
3937 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
3939 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
3941 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
3948 if (!PMAP_TRYLOCK(pmap)) {
3949 pvh_gen = pvh->pv_gen;
3950 md_gen = m->md.pv_gen;
3954 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3959 l3e = pmap_pml3e(pmap, pv->pv_va);
3960 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0,
3961 ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
3963 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
3964 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW))
3966 if ((be64toh(*pte) & PG_A) != 0) {
3967 atomic_clear_long(pte, htobe64(PG_A));
3968 pmap_invalidate_page(pmap, pv->pv_va);
3972 /* Rotate the PV list if it has more than one entry. */
3973 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3974 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
3975 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3978 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
3979 not_cleared < PMAP_TS_REFERENCED_MAX);
3982 vm_page_free_pages_toq(&free, true);
3983 return (cleared + not_cleared);
3987 mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
3988 vm_paddr_t end, int prot __unused)
3991 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
3993 return (PHYS_TO_DMAP(start));
3997 mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
3998 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
4001 vm_paddr_t pa, ptepa;
4005 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
4006 object, pindex, size);
4007 VM_OBJECT_ASSERT_WLOCKED(object);
4008 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4009 ("pmap_object_init_pt: non-device object"));
4010 /* NB: size can be logically ored with addr here */
4011 if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) {
4012 if (!mmu_radix_ps_enabled(pmap))
4014 if (!vm_object_populate(object, pindex, pindex + atop(size)))
4016 p = vm_page_lookup(object, pindex);
4017 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4018 ("pmap_object_init_pt: invalid page %p", p));
4019 ma = p->md.mdpg_cache_attrs;
4022 * Abort the mapping if the first page is not physically
4023 * aligned to a 2MB page boundary.
4025 ptepa = VM_PAGE_TO_PHYS(p);
4026 if (ptepa & L3_PAGE_MASK)
4030 * Skip the first page. Abort the mapping if the rest of
4031 * the pages are not physically contiguous or have differing
4032 * memory attributes.
4034 p = TAILQ_NEXT(p, listq);
4035 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
4037 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4038 ("pmap_object_init_pt: invalid page %p", p));
4039 if (pa != VM_PAGE_TO_PHYS(p) ||
4040 ma != p->md.mdpg_cache_attrs)
4042 p = TAILQ_NEXT(p, listq);
4046 for (pa = ptepa | pmap_cache_bits(ma);
4047 pa < ptepa + size; pa += L3_PAGE_SIZE) {
4048 pdpg = pmap_allocl3e(pmap, addr, NULL);
4051 * The creation of mappings below is only an
4052 * optimization. If a page directory page
4053 * cannot be allocated without blocking,
4054 * continue on to the next mapping rather than
4057 addr += L3_PAGE_SIZE;
4060 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4061 l3e = &l3e[pmap_pml3e_index(addr)];
4062 if ((be64toh(*l3e) & PG_V) == 0) {
4063 pa |= PG_M | PG_A | PG_RW;
4065 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
4066 counter_u64_add(pmap_l3e_mappings, 1);
4068 /* Continue on if the PDE is already valid. */
4070 KASSERT(pdpg->ref_count > 0,
4071 ("pmap_object_init_pt: missing reference "
4072 "to page directory page, va: 0x%lx", addr));
4074 addr += L3_PAGE_SIZE;
4082 mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
4084 struct md_page *pvh;
4085 struct rwlock *lock;
4090 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4091 ("pmap_page_exists_quick: page %p is not managed", m));
4092 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
4094 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4096 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4097 if (PV_PMAP(pv) == pmap) {
4105 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4106 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4107 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4108 if (PV_PMAP(pv) == pmap) {
4122 mmu_radix_page_init(vm_page_t m)
4125 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4126 TAILQ_INIT(&m->md.pv_list);
4127 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
4131 mmu_radix_page_wired_mappings(vm_page_t m)
4133 struct rwlock *lock;
4134 struct md_page *pvh;
4138 int count, md_gen, pvh_gen;
4140 if ((m->oflags & VPO_UNMANAGED) != 0)
4142 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4143 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4147 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4149 if (!PMAP_TRYLOCK(pmap)) {
4150 md_gen = m->md.pv_gen;
4154 if (md_gen != m->md.pv_gen) {
4159 pte = pmap_pte(pmap, pv->pv_va);
4160 if ((be64toh(*pte) & PG_W) != 0)
4164 if ((m->flags & PG_FICTITIOUS) == 0) {
4165 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4166 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4168 if (!PMAP_TRYLOCK(pmap)) {
4169 md_gen = m->md.pv_gen;
4170 pvh_gen = pvh->pv_gen;
4174 if (md_gen != m->md.pv_gen ||
4175 pvh_gen != pvh->pv_gen) {
4180 pte = pmap_pml3e(pmap, pv->pv_va);
4181 if ((be64toh(*pte) & PG_W) != 0)
4191 mmu_radix_update_proctab(int pid, pml1_entry_t l1pa)
4193 isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT);
4197 mmu_radix_pinit(pmap_t pmap)
4202 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4205 * allocate the page directory page
4207 pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK);
4209 for (int j = 0; j < RADIX_PGD_SIZE_SHIFT; j++)
4210 pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE);
4211 vm_radix_init(&pmap->pm_radix);
4212 TAILQ_INIT(&pmap->pm_pvchunk);
4213 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4214 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4215 vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid);
4218 l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1);
4219 mmu_radix_update_proctab(pid, l1pa);
4220 __asm __volatile("ptesync;isync" : : : "memory");
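/*
 * ptesync followed by isync orders the process-table update ahead of any
 * subsequent use of the new address space.
 */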
4226 * This routine is called if the desired page table page does not exist.
4228 * If page table page allocation fails, this routine may sleep before
4229 * returning NULL. It sleeps only if a lock pointer was given.
4231 * Note: If a page allocation fails at page table level two or three,
4232 * one or two pages may be held during the wait, only to be released
4233 * afterwards. This conservative approach is easily argued to avoid
4237 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
4239 vm_page_t m, pdppg, pdpg;
4241 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4244 * Allocate a page table page.
4246 if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
4247 if (lockp != NULL) {
4248 RELEASE_PV_LIST_LOCK(lockp);
4254 * Indicate the need to retry. While waiting, the page table
4255 * page may have been allocated.
4259 m->pindex = ptepindex;
4262 * Map the pagetable page into the process address space, if
4263 * it isn't already there.
4266 if (ptepindex >= (NUPDE + NUPDPE)) {
4268 vm_pindex_t pml1index;
4270 /* Wire up a new PDPE page */
4271 pml1index = ptepindex - (NUPDE + NUPDPE);
4272 l1e = &pmap->pm_pml1[pml1index];
4273 KASSERT((be64toh(*l1e) & PG_V) == 0,
4274 ("%s: L1 entry %#lx is valid", __func__, *l1e));
4275 pde_store(l1e, VM_PAGE_TO_PHYS(m));
4276 } else if (ptepindex >= NUPDE) {
4277 vm_pindex_t pml1index;
4278 vm_pindex_t pdpindex;
4282 /* Wire up a new l2e page */
4283 pdpindex = ptepindex - NUPDE;
4284 pml1index = pdpindex >> RPTE_SHIFT;
4286 l1e = &pmap->pm_pml1[pml1index];
4287 if ((be64toh(*l1e) & PG_V) == 0) {
4288 /* Have to allocate a new pdp, recurse */
4289 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml1index,
4291 vm_page_unwire_noq(m);
4292 vm_page_free_zero(m);
4296 /* Add reference to l2e page */
4297 pdppg = PHYS_TO_VM_PAGE(be64toh(*l1e) & PG_FRAME);
4300 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4302 /* Now find the pdp page */
4303 l2e = &l2e[pdpindex & RPTE_MASK];
4304 KASSERT((be64toh(*l2e) & PG_V) == 0,
4305 ("%s: L2 entry %#lx is valid", __func__, *l2e));
4306 pde_store(l2e, VM_PAGE_TO_PHYS(m));
4308 vm_pindex_t pml1index;
4309 vm_pindex_t pdpindex;
4314 /* Wire up a new PTE page */
4315 pdpindex = ptepindex >> RPTE_SHIFT;
4316 pml1index = pdpindex >> RPTE_SHIFT;
4318 /* First, find the pdp and check that it's valid. */
4319 l1e = &pmap->pm_pml1[pml1index];
4320 if ((be64toh(*l1e) & PG_V) == 0) {
4321 /* Have to allocate a new pd, recurse */
4322 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4324 vm_page_unwire_noq(m);
4325 vm_page_free_zero(m);
4328 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4329 l2e = &l2e[pdpindex & RPTE_MASK];
4331 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4332 l2e = &l2e[pdpindex & RPTE_MASK];
4333 if ((be64toh(*l2e) & PG_V) == 0) {
4334 /* Have to allocate a new pd, recurse */
4335 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4337 vm_page_unwire_noq(m);
4338 vm_page_free_zero(m);
4342 /* Add reference to the pd page */
4343 pdpg = PHYS_TO_VM_PAGE(be64toh(*l2e) & PG_FRAME);
4347 l3e = (pml3_entry_t *)PHYS_TO_DMAP(be64toh(*l2e) & PG_FRAME);
4349 /* Now we know where the page directory page is */
4350 l3e = &l3e[ptepindex & RPTE_MASK];
4351 KASSERT((be64toh(*l3e) & PG_V) == 0,
4352 ("%s: L3 entry %#lx is valid", __func__, *l3e));
4353 pde_store(l3e, VM_PAGE_TO_PHYS(m));
4356 pmap_resident_count_inc(pmap, 1);
4360 pmap_allocl3e(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4362 vm_pindex_t pdpindex, ptepindex;
4367 pdpe = pmap_pml2e(pmap, va);
4368 if (pdpe != NULL && (be64toh(*pdpe) & PG_V) != 0) {
4369 /* Add a reference to the pd page. */
4370 pdpg = PHYS_TO_VM_PAGE(be64toh(*pdpe) & PG_FRAME);
4373 /* Allocate a pd page. */
4374 ptepindex = pmap_l3e_pindex(va);
4375 pdpindex = ptepindex >> RPTE_SHIFT;
4376 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
4377 if (pdpg == NULL && lockp != NULL)
4384 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4386 vm_pindex_t ptepindex;
4391 * Calculate pagetable page index
4393 ptepindex = pmap_l3e_pindex(va);
4396 * Get the page directory entry
4398 pd = pmap_pml3e(pmap, va);
4401 * This supports switching from a 2MB page to a
4404 if (pd != NULL && (be64toh(*pd) & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V)) {
4405 if (!pmap_demote_l3e_locked(pmap, pd, va, lockp)) {
4407 * Invalidation of the 2MB page mapping may have caused
4408 * the deallocation of the underlying PD page.
4415 * If the page table page is mapped, we just increment the
4416 * hold count, and activate it.
4418 if (pd != NULL && (be64toh(*pd) & PG_V) != 0) {
4419 m = PHYS_TO_VM_PAGE(be64toh(*pd) & PG_FRAME);
4423 * Here if the pte page isn't mapped, or if it has been
4426 m = _pmap_allocpte(pmap, ptepindex, lockp);
4427 if (m == NULL && lockp != NULL)
4434 mmu_radix_pinit0(pmap_t pmap)
4437 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4438 PMAP_LOCK_INIT(pmap);
4439 pmap->pm_pml1 = kernel_pmap->pm_pml1;
4440 pmap->pm_pid = kernel_pmap->pm_pid;
4442 vm_radix_init(&pmap->pm_radix);
4443 TAILQ_INIT(&pmap->pm_pvchunk);
4444 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4445 kernel_pmap->pm_flags =
4446 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4449 * pmap_protect_l3e: do the things to protect a 2mpage in a process
4452 pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot)
4454 pt_entry_t newpde, oldpde;
4455 vm_offset_t eva, va;
4457 boolean_t anychanged;
4459 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4460 KASSERT((sva & L3_PAGE_MASK) == 0,
4461 ("pmap_protect_l3e: sva is not 2mpage aligned"));
4464 oldpde = newpde = be64toh(*l3e);
4465 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
4466 (PG_MANAGED | PG_M | PG_RW)) {
4467 eva = sva + L3_PAGE_SIZE;
4468 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4469 va < eva; va += PAGE_SIZE, m++)
4472 if ((prot & VM_PROT_WRITE) == 0) {
4473 newpde &= ~(PG_RW | PG_M);
4474 newpde |= RPTE_EAA_R;
4476 if (prot & VM_PROT_EXECUTE)
4478 if (newpde != oldpde) {
4480 * As an optimization to future operations on this PDE, clear
4481 * PG_PROMOTED. The impending invalidation will remove any
4482 * lingering 4KB page mappings from the TLB.
4484 if (!atomic_cmpset_long(l3e, htobe64(oldpde), htobe64(newpde & ~PG_PROMOTED)))
4488 return (anychanged);
4492 mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
4495 vm_offset_t va_next;
4498 pml3_entry_t ptpaddr, *l3e;
4500 boolean_t anychanged;
4502 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva,
4505 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4506 if (prot == VM_PROT_NONE) {
4507 mmu_radix_remove(pmap, sva, eva);
4511 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
4512 (VM_PROT_WRITE|VM_PROT_EXECUTE))
4516 if (VERBOSE_PROTECT || pmap_logging)
4517 printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n",
4518 pmap, sva, eva, prot, pmap->pm_pid);
4523 for (; sva < eva; sva = va_next) {
4524 l1e = pmap_pml1e(pmap, sva);
4525 if ((be64toh(*l1e) & PG_V) == 0) {
4526 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
4532 l2e = pmap_l1e_to_l2e(l1e, sva);
4533 if ((be64toh(*l2e) & PG_V) == 0) {
4534 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
4540 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
4544 l3e = pmap_l2e_to_l3e(l2e, sva);
4545 ptpaddr = be64toh(*l3e);
4548 * Weed out invalid mappings.
4554 * Check for large page.
4556 if ((ptpaddr & RPTE_LEAF) != 0) {
4558 * Are we protecting the entire large page? If not,
4559 * demote the mapping and fall through.
4561 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
4562 if (pmap_protect_l3e(pmap, l3e, sva, prot))
4565 } else if (!pmap_demote_l3e(pmap, l3e, sva)) {
4567 * The large page mapping was destroyed.
4576 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
4578 pt_entry_t obits, pbits;
4582 MPASS(pte == pmap_pte(pmap, sva));
4583 obits = pbits = be64toh(*pte);
4584 if ((pbits & PG_V) == 0)
4587 if ((prot & VM_PROT_WRITE) == 0) {
4588 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
4589 (PG_MANAGED | PG_M | PG_RW)) {
4590 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
4593 pbits &= ~(PG_RW | PG_M);
4594 pbits |= RPTE_EAA_R;
4596 if (prot & VM_PROT_EXECUTE)
4599 if (pbits != obits) {
4600 if (!atomic_cmpset_long(pte, htobe64(obits), htobe64(pbits)))
4602 if (obits & (PG_A|PG_M)) {
4605 if (VERBOSE_PROTECT || pmap_logging)
4606 printf("%#lx %#lx -> %#lx\n",
4614 pmap_invalidate_all(pmap);
4619 mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count)
4622 CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count);
4623 pt_entry_t oldpte, pa, *pte;
4625 uint64_t cache_bits, attr_bits;
4629 attr_bits = RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
4632 while (va < sva + PAGE_SIZE * count) {
4633 if (__predict_false((va & L3_PAGE_MASK) == 0))
4635 MPASS(pte == pmap_pte(kernel_pmap, va));
4638 * XXX there has to be a more efficient way than traversing
4639 * the page table every time - but go for correctness for now.
4644 cache_bits = pmap_cache_bits(m->md.mdpg_cache_attrs);
4645 pa = VM_PAGE_TO_PHYS(m) | cache_bits | attr_bits;
4646 if (be64toh(*pte) != pa) {
4647 oldpte |= be64toh(*pte);
4653 if (__predict_false((oldpte & RPTE_VALID) != 0))
4654 pmap_invalidate_range(kernel_pmap, sva, sva + count *
4661 mmu_radix_qremove(vm_offset_t sva, int count)
4666 CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count);
4667 KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva));
4671 while (va < sva + PAGE_SIZE * count) {
4672 if (__predict_false((va & L3_PAGE_MASK) == 0))
4678 pmap_invalidate_range(kernel_pmap, sva, va);
4681 /***************************************************
4682 * Page table page management routines.....
4683 ***************************************************/
4685 * Schedule the specified unused page table page to be freed. Specifically,
4686 * add the page to the specified list of pages that will be released to the
4687 * physical memory manager after the TLB has been updated.
4689 static __inline void
4690 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
4691 boolean_t set_PG_ZERO)
4695 m->flags |= PG_ZERO;
4697 m->flags &= ~PG_ZERO;
4698 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
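/*
 * Illustrative sketch (hypothetical caller, not used by the driver):
 * page table pages are queued on a local spglist and only handed back to
 * the physical memory allocator once the stale TLB entries that could
 * still reference them have been shot down.
 */
static __unused __inline void
example_delayed_free(pmap_t pmap, vm_offset_t va, vm_page_t ptpage)
{
	struct spglist free;

	SLIST_INIT(&free);
	pmap_add_delayed_free_list(ptpage, &free, TRUE);
	pmap_invalidate_page(pmap, va);		/* remove stale translations */
	vm_page_free_pages_toq(&free, true);	/* now safe to free the page */
}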
4702 * Inserts the specified page table page into the specified pmap's collection
4703 * of idle page table pages. Each of a pmap's page table pages is responsible
4704 * for mapping a distinct range of virtual addresses. The pmap's collection is
4705 * ordered by this virtual address range.
4708 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
4711 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4712 return (vm_radix_insert(&pmap->pm_radix, mpte));
4716 * Removes the page table page mapping the specified virtual address from the
4717 * specified pmap's collection of idle page table pages, and returns it.
4718 * Otherwise, returns NULL if there is no page table page corresponding to the
4719 * specified virtual address.
4721 static __inline vm_page_t
4722 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4725 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4726 return (vm_radix_remove(&pmap->pm_radix, pmap_l3e_pindex(va)));
4730 * Decrements a page table page's wire count, which is used to record the
4731 * number of valid page table entries within the page. If the wire count
4732 * drops to zero, then the page table page is unmapped. Returns TRUE if the
4733 * page table page was unmapped and FALSE otherwise.
4735 static inline boolean_t
4736 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4740 if (m->ref_count == 0) {
4741 _pmap_unwire_ptp(pmap, va, m, free);
4748 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4751 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4753 * unmap the page table page
4755 if (m->pindex >= NUPDE + NUPDPE) {
4758 pml1 = pmap_pml1e(pmap, va);
4760 } else if (m->pindex >= NUPDE) {
4763 l2e = pmap_pml2e(pmap, va);
4768 l3e = pmap_pml3e(pmap, va);
4771 pmap_resident_count_dec(pmap, 1);
4772 if (m->pindex < NUPDE) {
4773 /* We just released a PT, unhold the matching PD */
4776 pdpg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml2e(pmap, va)) & PG_FRAME);
4777 pmap_unwire_ptp(pmap, va, pdpg, free);
4779 else if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
4780 /* We just released a PD, unhold the matching PDP */
4783 pdppg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml1e(pmap, va)) & PG_FRAME);
4784 pmap_unwire_ptp(pmap, va, pdppg, free);
4788 * Put page on a list so that it is released after
4789 * *ALL* TLB shootdown is done
4791 pmap_add_delayed_free_list(m, free, TRUE);
4795 * After removing a page table entry, this routine is used to
4796 * conditionally free the page, and manage the hold/wire counts.
4799 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde,
4800 struct spglist *free)
4804 if (va >= VM_MAXUSER_ADDRESS)
4806 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4807 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4808 return (pmap_unwire_ptp(pmap, va, mpte, free));
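/*
 * Illustrative sketch (hypothetical caller): the usual sequence after a
 * 4KB PTE has been cleared.  The PDE covering the address is passed to
 * pmap_unuse_pt() so the page table page's ref_count can be dropped; if it
 * reaches zero the page is unmapped and _pmap_unwire_ptp() releases the
 * hold on its parent directory page in turn.  Assumes va is a mapped
 * user address.
 */
static __unused __inline boolean_t
example_unuse_after_remove(pmap_t pmap, vm_offset_t va, struct spglist *free)
{
	pml3_entry_t ptepde;

	ptepde = be64toh(*pmap_pml3e(pmap, va));
	return (pmap_unuse_pt(pmap, va, ptepde, free));
}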
4812 mmu_radix_release(pmap_t pmap)
4815 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4816 KASSERT(pmap->pm_stats.resident_count == 0,
4817 ("pmap_release: pmap resident count %ld != 0",
4818 pmap->pm_stats.resident_count));
4819 KASSERT(vm_radix_is_empty(&pmap->pm_radix),
4820 ("pmap_release: pmap has reserved page table page(s)"));
4822 pmap_invalidate_all(pmap);
4823 isa3_proctab[pmap->pm_pid].proctab0 = 0;
4824 uma_zfree(zone_radix_pgd, pmap->pm_pml1);
4825 vmem_free(asid_arena, pmap->pm_pid, 1);
4829 * Create the PV entry for a 2MB page mapping. Always returns true unless the
4830 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
4831 * false if the PV entry cannot be allocated without resorting to reclamation.
4834 pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t pde, u_int flags,
4835 struct rwlock **lockp)
4837 struct md_page *pvh;
4841 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4842 /* Pass NULL instead of the lock pointer to disable reclamation. */
4843 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
4844 NULL : lockp)) == NULL)
4847 pa = pde & PG_PS_FRAME;
4848 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4849 pvh = pa_to_pvh(pa);
4850 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
4856 * Fills a page table page with mappings to consecutive physical pages.
4859 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
4863 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
4864 *pte = htobe64(newpte);
4865 newpte += PAGE_SIZE;
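/*
 * Illustrative sketch (hypothetical assertion): after pmap_fill_ptp(),
 * entry i of the new page table page is simply the old 2MB leaf value
 * advanced by i * PAGE_SIZE, so the 4KB mappings inherit the attribute
 * bits and cover exactly the same physical range as the superpage did.
 */
static __unused __inline boolean_t
example_fill_ptp_check(pt_entry_t *firstpte, pt_entry_t oldpde, int i)
{
	return (be64toh(firstpte[i]) == oldpde + (pt_entry_t)i * PAGE_SIZE);
}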
4870 pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va)
4872 struct rwlock *lock;
4876 rv = pmap_demote_l3e_locked(pmap, pde, va, &lock);
4883 pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
4884 struct rwlock **lockp)
4886 pml3_entry_t oldpde;
4887 pt_entry_t *firstpte;
4890 struct spglist free;
4893 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4894 oldpde = be64toh(*l3e);
4895 KASSERT((oldpde & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
4896 ("pmap_demote_l3e: oldpde is missing RPTE_LEAF and/or PG_V %lx",
4898 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
4900 KASSERT((oldpde & PG_W) == 0,
4901 ("pmap_demote_l3e: page table page for a wired mapping"
4905 * Invalidate the 2MB page mapping and return "failure" if the
4906 * mapping was never accessed or the allocation of the new
4907 * page table page fails. If the 2MB page mapping belongs to
4908 * the direct map region of the kernel's address space, then
4909 * the page allocation request specifies the highest possible
4910 * priority (VM_ALLOC_INTERRUPT). Otherwise, the priority is
4911 * normal. Page table pages are preallocated for every other
4912 * part of the kernel address space, so the direct map region
4913 * is the only part of the kernel address space that must be
4916 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc_noobj(
4917 (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS ?
4918 VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED)) == NULL) {
4920 sva = trunc_2mpage(va);
4921 pmap_remove_l3e(pmap, l3e, sva, &free, lockp);
4922 pmap_invalidate_l3e_page(pmap, sva, oldpde);
4923 vm_page_free_pages_toq(&free, true);
4924 CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx"
4925 " in pmap %p", va, pmap);
4928 mpte->pindex = pmap_l3e_pindex(va);
4929 if (va < VM_MAXUSER_ADDRESS)
4930 pmap_resident_count_inc(pmap, 1);
4932 mptepa = VM_PAGE_TO_PHYS(mpte);
4933 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
4934 KASSERT((oldpde & PG_A) != 0,
4935 ("pmap_demote_l3e: oldpde is missing PG_A"));
4936 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
4937 ("pmap_demote_l3e: oldpde is missing PG_M"));
4940 * If the page table page is new, initialize it.
4942 if (mpte->ref_count == 1) {
4943 mpte->ref_count = NPTEPG;
4944 pmap_fill_ptp(firstpte, oldpde);
4947 KASSERT((be64toh(*firstpte) & PG_FRAME) == (oldpde & PG_FRAME),
4948 ("pmap_demote_l3e: firstpte and newpte map different physical"
4952 * If the mapping has changed attributes, update the page table
4955 if ((be64toh(*firstpte) & PG_PTE_PROMOTE) != (oldpde & PG_PTE_PROMOTE))
4956 pmap_fill_ptp(firstpte, oldpde);
4959 * The spare PV entries must be reserved prior to demoting the
4960 * mapping, that is, prior to changing the PDE. Otherwise, the state
4961 * of the PDE and the PV lists will be inconsistent, which can result
4962 * in reclaim_pv_chunk() attempting to remove a PV entry from the
4963 * wrong PV list and pmap_pv_demote_l3e() failing to find the expected
4964 * PV entry for the 2MB page mapping that is being demoted.
4966 if ((oldpde & PG_MANAGED) != 0)
4967 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
4970 * Demote the mapping. This pmap is locked. The old PDE has
4971 * PG_A set. If the old PDE has PG_RW set, it also has PG_M
4972 * set. Thus, there is no danger of a race with another
4973 * processor changing the setting of PG_A and/or PG_M between
4974 * the read above and the store below.
4976 pde_store(l3e, mptepa);
4977 pmap_invalidate_l3e_page(pmap, trunc_2mpage(va), oldpde);
4979 * Demote the PV entry.
4981 if ((oldpde & PG_MANAGED) != 0)
4982 pmap_pv_demote_l3e(pmap, va, oldpde & PG_PS_FRAME, lockp);
4984 counter_u64_add(pmap_l3e_demotions, 1);
4985 CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx"
4986 " in pmap %p", va, pmap);
4991 * pmap_remove_kernel_l3e: Remove a kernel superpage mapping.
4994 pmap_remove_kernel_l3e(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va)
4999 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
5000 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5001 mpte = pmap_remove_pt_page(pmap, va);
5003 panic("pmap_remove_kernel_l3e: Missing pt page.");
5005 mptepa = VM_PAGE_TO_PHYS(mpte);
5008 * Initialize the page table page.
5010 pagezero(PHYS_TO_DMAP(mptepa));
5013 * Demote the mapping.
5015 pde_store(l3e, mptepa);
5020 * pmap_remove_l3e: do the things to unmap a superpage in a process
5023 pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
5024 struct spglist *free, struct rwlock **lockp)
5026 struct md_page *pvh;
5027 pml3_entry_t oldpde;
5028 vm_offset_t eva, va;
5031 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5032 KASSERT((sva & L3_PAGE_MASK) == 0,
5033 ("pmap_remove_l3e: sva is not 2mpage aligned"));
5034 oldpde = be64toh(pte_load_clear(pdq));
5036 pmap->pm_stats.wired_count -= (L3_PAGE_SIZE / PAGE_SIZE);
5037 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5038 if (oldpde & PG_MANAGED) {
5039 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
5040 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
5041 pmap_pvh_free(pvh, pmap, sva);
5042 eva = sva + L3_PAGE_SIZE;
5043 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
5044 va < eva; va += PAGE_SIZE, m++) {
5045 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
5048 vm_page_aflag_set(m, PGA_REFERENCED);
5049 if (TAILQ_EMPTY(&m->md.pv_list) &&
5050 TAILQ_EMPTY(&pvh->pv_list))
5051 vm_page_aflag_clear(m, PGA_WRITEABLE);
5054 if (pmap == kernel_pmap) {
5055 pmap_remove_kernel_l3e(pmap, pdq, sva);
5057 mpte = pmap_remove_pt_page(pmap, sva);
5059 pmap_resident_count_dec(pmap, 1);
5060 KASSERT(mpte->ref_count == NPTEPG,
5061 ("pmap_remove_l3e: pte page wire count error"));
5062 mpte->ref_count = 0;
5063 pmap_add_delayed_free_list(mpte, free, FALSE);
5066 return (pmap_unuse_pt(pmap, sva, be64toh(*pmap_pml2e(pmap, sva)), free));
5070 * pmap_remove_pte: do the things to unmap a page in a process
5073 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
5074 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
5076 struct md_page *pvh;
5080 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5081 oldpte = be64toh(pte_load_clear(ptq));
5082 if (oldpte & RPTE_WIRED)
5083 pmap->pm_stats.wired_count -= 1;
5084 pmap_resident_count_dec(pmap, 1);
5085 if (oldpte & RPTE_MANAGED) {
5086 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
5087 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5090 vm_page_aflag_set(m, PGA_REFERENCED);
5091 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5092 pmap_pvh_free(&m->md, pmap, va);
5093 if (TAILQ_EMPTY(&m->md.pv_list) &&
5094 (m->flags & PG_FICTITIOUS) == 0) {
5095 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5096 if (TAILQ_EMPTY(&pvh->pv_list))
5097 vm_page_aflag_clear(m, PGA_WRITEABLE);
5100 return (pmap_unuse_pt(pmap, va, ptepde, free));
5104 * Remove a single page from a process address space
5107 pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *l3e,
5108 struct spglist *free)
5110 struct rwlock *lock;
5112 bool invalidate_all;
5114 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5115 if ((be64toh(*l3e) & RPTE_VALID) == 0) {
5118 pte = pmap_l3e_to_pte(l3e, va);
5119 if ((be64toh(*pte) & RPTE_VALID) == 0) {
5124 invalidate_all = pmap_remove_pte(pmap, pte, va, be64toh(*l3e), free, &lock);
5127 if (!invalidate_all)
5128 pmap_invalidate_page(pmap, va);
5129 return (invalidate_all);
5133 * Removes the specified range of addresses from the page table page.
5136 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
5137 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp)
5143 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5146 for (pte = pmap_l3e_to_pte(l3e, sva); sva != eva; pte++,
5148 MPASS(pte == pmap_pte(pmap, sva));
5158 if (pmap_remove_pte(pmap, pte, sva, be64toh(*l3e), free, lockp)) {
5165 pmap_invalidate_all(pmap);
5167 pmap_invalidate_range(pmap, va, sva);
5172 mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5174 struct rwlock *lock;
5175 vm_offset_t va_next;
5178 pml3_entry_t ptpaddr, *l3e;
5179 struct spglist free;
5182 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5185 * Perform an unsynchronized read. This is, however, safe.
5187 if (pmap->pm_stats.resident_count == 0)
5193 /* XXX something fishy here */
5194 sva = (sva + PAGE_MASK) & ~PAGE_MASK;
5195 eva = (eva + PAGE_MASK) & ~PAGE_MASK;
5200 * special handling of removing one page. a very
5201 * common operation and easy to short circuit some
5204 if (sva + PAGE_SIZE == eva) {
5205 l3e = pmap_pml3e(pmap, sva);
5206 if (l3e && (be64toh(*l3e) & RPTE_LEAF) == 0) {
5207 anyvalid = pmap_remove_page(pmap, sva, l3e, &free);
5213 for (; sva < eva; sva = va_next) {
5214 if (pmap->pm_stats.resident_count == 0)
5216 l1e = pmap_pml1e(pmap, sva);
5217 if (l1e == NULL || (be64toh(*l1e) & PG_V) == 0) {
5218 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5224 l2e = pmap_l1e_to_l2e(l1e, sva);
5225 if (l2e == NULL || (be64toh(*l2e) & PG_V) == 0) {
5226 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5233 * Calculate index for next page table.
5235 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5239 l3e = pmap_l2e_to_l3e(l2e, sva);
5240 ptpaddr = be64toh(*l3e);
5243 * Weed out invalid mappings.
5249 * Check for large page.
5251 if ((ptpaddr & RPTE_LEAF) != 0) {
5253 * Are we removing the entire large page? If not,
5254 * demote the mapping and fall through.
5256 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5257 pmap_remove_l3e(pmap, l3e, sva, &free, &lock);
5260 } else if (!pmap_demote_l3e_locked(pmap, l3e, sva,
5262 /* The large page mapping was destroyed. */
5265 ptpaddr = be64toh(*l3e);
5269 * Limit our scan to either the end of the va represented
5270 * by the current page table page, or to the end of the
5271 * range being removed.
5276 if (pmap_remove_ptes(pmap, sva, va_next, l3e, &free, &lock))
5283 pmap_invalidate_all(pmap);
5285 vm_page_free_pages_toq(&free, true);
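/*
 * Illustrative sketch (not used by the driver): the top-down walk that
 * mmu_radix_remove(), mmu_radix_protect() and mmu_radix_unwire() all
 * perform.  A missing entry at any level lets the caller skip the whole
 * region covered by that entry by rounding sva up to the next L1/L2/L3
 * boundary, which is what the va_next computations above do.
 */
static __unused __inline pt_entry_t *
example_walk_to_pte(pmap_t pmap, vm_offset_t va)
{
	pml1_entry_t *l1e;
	pml2_entry_t *l2e;
	pml3_entry_t *l3e;

	l1e = pmap_pml1e(pmap, va);
	if (l1e == NULL || (be64toh(*l1e) & PG_V) == 0)
		return (NULL);
	l2e = pmap_l1e_to_l2e(l1e, va);
	if ((be64toh(*l2e) & PG_V) == 0)
		return (NULL);
	l3e = pmap_l2e_to_l3e(l2e, va);
	if ((be64toh(*l3e) & PG_V) == 0 || (be64toh(*l3e) & RPTE_LEAF) != 0)
		return (NULL);	/* unmapped, or mapped by a 2MB leaf */
	return (pmap_l3e_to_pte(l3e, va));
}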
5289 mmu_radix_remove_all(vm_page_t m)
5291 struct md_page *pvh;
5294 struct rwlock *lock;
5295 pt_entry_t *pte, tpte;
5298 struct spglist free;
5299 int pvh_gen, md_gen;
5301 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5302 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5303 ("pmap_remove_all: page %p is not managed", m));
5305 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5306 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5307 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5310 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
5312 if (!PMAP_TRYLOCK(pmap)) {
5313 pvh_gen = pvh->pv_gen;
5317 if (pvh_gen != pvh->pv_gen) {
5324 l3e = pmap_pml3e(pmap, va);
5325 (void)pmap_demote_l3e_locked(pmap, l3e, va, &lock);
5328 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
5330 if (!PMAP_TRYLOCK(pmap)) {
5331 pvh_gen = pvh->pv_gen;
5332 md_gen = m->md.pv_gen;
5336 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5342 pmap_resident_count_dec(pmap, 1);
5343 l3e = pmap_pml3e(pmap, pv->pv_va);
5344 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_remove_all: found"
5345 " a 2mpage in page %p's pv list", m));
5346 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5347 tpte = be64toh(pte_load_clear(pte));
5349 pmap->pm_stats.wired_count--;
5351 vm_page_aflag_set(m, PGA_REFERENCED);
5354 * Update the vm_page_t clean and reference bits.
5356 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5358 pmap_unuse_pt(pmap, pv->pv_va, be64toh(*l3e), &free);
5359 pmap_invalidate_page(pmap, pv->pv_va);
5360 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5362 free_pv_entry(pmap, pv);
5365 vm_page_aflag_clear(m, PGA_WRITEABLE);
5367 vm_page_free_pages_toq(&free, true);
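/*
 * Illustrative sketch (not used by the driver): the lock-ordering dance
 * used when iterating a page's pv list above.  The pv list lock is held
 * first; if the pmap lock cannot be taken without blocking, both locks
 * are dropped and reacquired in the safe order, and the generation count
 * reveals whether the pv list changed while unlocked, in which case the
 * caller must restart its scan.
 */
static __unused __inline boolean_t
example_pv_list_relock(struct rwlock **lockp, pmap_t pmap, struct md_page *pvh)
{
	int pvh_gen;

	if (PMAP_TRYLOCK(pmap))
		return (TRUE);		/* fast path, no retry needed */
	pvh_gen = pvh->pv_gen;
	rw_wunlock(*lockp);
	PMAP_LOCK(pmap);
	rw_wlock(*lockp);
	if (pvh_gen != pvh->pv_gen) {
		PMAP_UNLOCK(pmap);
		return (FALSE);		/* list changed, caller restarts */
	}
	return (TRUE);
}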
5371 * Destroy all managed, non-wired mappings in the given user-space
5372 * pmap. This pmap cannot be active on any processor besides the
5375 * This function cannot be applied to the kernel pmap. Moreover, it
5376 * is not intended for general use. It is only to be used during
5377 * process termination. Consequently, it can be implemented in ways
5378 * that make it faster than pmap_remove(). First, it can more quickly
5379 * destroy mappings by iterating over the pmap's collection of PV
5380 * entries, rather than searching the page table. Second, it doesn't
5381 * have to test and clear the page table entries atomically, because
5382 * no processor is currently accessing the user address space. In
5383 * particular, a page table entry's dirty bit won't change state once
5384 * this function starts.
5386 * Although this function destroys all of the pmap's managed,
5387 * non-wired mappings, it can delay and batch the invalidation of TLB
5388 * entries without calling pmap_delayed_invl_started() and
5389 * pmap_delayed_invl_finished(). Because the pmap is not active on
5390 * any other processor, none of these TLB entries will ever be used
5391 * before their eventual invalidation. Consequently, there is no need
5392 * for either pmap_remove_all() or pmap_remove_write() to wait for
5393 * that eventual TLB invalidation.
5397 mmu_radix_remove_pages(pmap_t pmap)
5400 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
5401 pml3_entry_t ptel3e;
5402 pt_entry_t *pte, tpte;
5403 struct spglist free;
5404 vm_page_t m, mpte, mt;
5406 struct md_page *pvh;
5407 struct pv_chunk *pc, *npc;
5408 struct rwlock *lock;
5410 uint64_t inuse, bitmask;
5411 int allfree, field, idx;
5415 boolean_t superpage;
5419 * Assert that the given pmap is only active on the current
5420 * CPU. Unfortunately, we cannot block another CPU from
5421 * activating the pmap while this function is executing.
5423 KASSERT(pmap->pm_pid == mfspr(SPR_PID),
5424 ("non-current asid %lu - expected %lu", pmap->pm_pid,
5431 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
5436 for (field = 0; field < _NPCM; field++) {
5437 inuse = ~pc->pc_map[field] & pc_freemask[field];
5438 while (inuse != 0) {
5439 bit = cnttzd(inuse);
5440 bitmask = 1UL << bit;
5441 idx = field * 64 + bit;
5442 pv = &pc->pc_pventry[idx];
5445 pte = pmap_pml2e(pmap, pv->pv_va);
5446 ptel3e = be64toh(*pte);
5447 pte = pmap_l2e_to_l3e(pte, pv->pv_va);
5448 tpte = be64toh(*pte);
5449 if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) {
5452 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
5454 pte = &pte[pmap_pte_index(pv->pv_va)];
5455 tpte = be64toh(*pte);
5458 * Keep track whether 'tpte' is a
5459 * superpage explicitly instead of
5460 * relying on RPTE_LEAF being set.
5462 * This is because RPTE_LEAF is numerically
5463 * identical to PG_PTE_PAT and thus a
5464 * regular page could be mistaken for
5470 if ((tpte & PG_V) == 0) {
5471 panic("bad pte va %lx pte %lx",
5476 * We cannot remove wired pages from a process' mapping at this time
5484 pa = tpte & PG_PS_FRAME;
5486 pa = tpte & PG_FRAME;
5488 m = PHYS_TO_VM_PAGE(pa);
5489 KASSERT(m->phys_addr == pa,
5490 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
5491 m, (uintmax_t)m->phys_addr,
5494 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
5495 m < &vm_page_array[vm_page_array_size],
5496 ("pmap_remove_pages: bad tpte %#jx",
5502 * Update the vm_page_t clean/reference bits.
5504 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5506 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5512 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5515 pc->pc_map[field] |= bitmask;
5517 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5518 pvh = pa_to_pvh(tpte & PG_PS_FRAME);
5519 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
5521 if (TAILQ_EMPTY(&pvh->pv_list)) {
5522 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5523 if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
5524 TAILQ_EMPTY(&mt->md.pv_list))
5525 vm_page_aflag_clear(mt, PGA_WRITEABLE);
5527 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
5529 pmap_resident_count_dec(pmap, 1);
5530 KASSERT(mpte->ref_count == NPTEPG,
5531 ("pmap_remove_pages: pte page wire count error"));
5532 mpte->ref_count = 0;
5533 pmap_add_delayed_free_list(mpte, &free, FALSE);
5536 pmap_resident_count_dec(pmap, 1);
5538 printf("freeing pv (%p, %p)\n",
5541 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5543 if ((m->a.flags & PGA_WRITEABLE) != 0 &&
5544 TAILQ_EMPTY(&m->md.pv_list) &&
5545 (m->flags & PG_FICTITIOUS) == 0) {
5546 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5547 if (TAILQ_EMPTY(&pvh->pv_list))
5548 vm_page_aflag_clear(m, PGA_WRITEABLE);
5551 pmap_unuse_pt(pmap, pv->pv_va, ptel3e, &free);
5557 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5558 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5559 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5561 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5567 pmap_invalidate_all(pmap);
5569 vm_page_free_pages_toq(&free, true);
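/*
 * Illustrative sketch (not used by the driver): how mmu_radix_remove_pages()
 * walks a PV chunk.  Each chunk carries a free-slot bitmap; inverting it
 * against pc_freemask yields the in-use slots, and cnttzd() pulls them out
 * one bit at a time without scanning empty slots.
 */
static __unused __inline void
example_pv_chunk_walk(struct pv_chunk *pc)
{
	uint64_t inuse, bitmask;
	int bit, field, idx;

	for (field = 0; field < _NPCM; field++) {
		inuse = ~pc->pc_map[field] & pc_freemask[field];
		while (inuse != 0) {
			bit = cnttzd(inuse);
			bitmask = 1UL << bit;
			idx = field * 64 + bit;
			/* pc->pc_pventry[idx] is a live pv_entry here. */
			inuse &= ~bitmask;
		}
	}
}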
5573 mmu_radix_remove_write(vm_page_t m)
5575 struct md_page *pvh;
5577 struct rwlock *lock;
5578 pv_entry_t next_pv, pv;
5580 pt_entry_t oldpte, *pte;
5581 int pvh_gen, md_gen;
5583 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5584 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5585 ("pmap_remove_write: page %p is not managed", m));
5586 vm_page_assert_busied(m);
5588 if (!pmap_page_is_write_mapped(m))
5590 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5591 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5592 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5595 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
5597 if (!PMAP_TRYLOCK(pmap)) {
5598 pvh_gen = pvh->pv_gen;
5602 if (pvh_gen != pvh->pv_gen) {
5608 l3e = pmap_pml3e(pmap, pv->pv_va);
5609 if ((be64toh(*l3e) & PG_RW) != 0)
5610 (void)pmap_demote_l3e_locked(pmap, l3e, pv->pv_va, &lock);
5611 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
5612 ("inconsistent pv lock %p %p for page %p",
5613 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
5616 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
5618 if (!PMAP_TRYLOCK(pmap)) {
5619 pvh_gen = pvh->pv_gen;
5620 md_gen = m->md.pv_gen;
5624 if (pvh_gen != pvh->pv_gen ||
5625 md_gen != m->md.pv_gen) {
5631 l3e = pmap_pml3e(pmap, pv->pv_va);
5632 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0,
5633 ("pmap_remove_write: found a 2mpage in page %p's pv list",
5635 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5637 oldpte = be64toh(*pte);
5638 if (oldpte & PG_RW) {
5639 if (!atomic_cmpset_long(pte, htobe64(oldpte),
5640 htobe64((oldpte | RPTE_EAA_R) & ~(PG_RW | PG_M))))
5642 if ((oldpte & PG_M) != 0)
5644 pmap_invalidate_page(pmap, pv->pv_va);
5649 vm_page_aflag_clear(m, PGA_WRITEABLE);
5653 * Clear the wired attribute from the mappings for the specified range of
5654 * addresses in the given pmap. Every valid mapping within that range
5655 * must have the wired attribute set. In contrast, invalid mappings
5656 * cannot have the wired attribute set, so they are ignored.
5658 * The wired attribute of the page table entry is not a hardware
5659 * feature, so there is no need to invalidate any TLB entries.
5660 * Since pmap_demote_l3e() for the wired entry must never fail,
5661 * pmap_delayed_invl_started()/finished() calls around the
5662 * function are not needed.
5665 mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5667 vm_offset_t va_next;
5673 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5675 for (; sva < eva; sva = va_next) {
5676 l1e = pmap_pml1e(pmap, sva);
5677 if ((be64toh(*l1e) & PG_V) == 0) {
5678 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5683 l2e = pmap_l1e_to_l2e(l1e, sva);
5684 if ((be64toh(*l2e) & PG_V) == 0) {
5685 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5690 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5693 l3e = pmap_l2e_to_l3e(l2e, sva);
5694 if ((be64toh(*l3e) & PG_V) == 0)
5696 if ((be64toh(*l3e) & RPTE_LEAF) != 0) {
5697 if ((be64toh(*l3e) & PG_W) == 0)
5698 panic("pmap_unwire: pde %#jx is missing PG_W",
5699 (uintmax_t)(be64toh(*l3e)));
5702 * Are we unwiring the entire large page? If not,
5703 * demote the mapping and fall through.
5705 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5706 atomic_clear_long(l3e, htobe64(PG_W));
5707 pmap->pm_stats.wired_count -= L3_PAGE_SIZE /
5710 } else if (!pmap_demote_l3e(pmap, l3e, sva))
5711 panic("pmap_unwire: demotion failed");
5715 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
5717 MPASS(pte == pmap_pte(pmap, sva));
5718 if ((be64toh(*pte) & PG_V) == 0)
5720 if ((be64toh(*pte) & PG_W) == 0)
5721 panic("pmap_unwire: pte %#jx is missing PG_W",
5722 (uintmax_t)(be64toh(*pte)));
5725 * PG_W must be cleared atomically. Although the pmap
5726 * lock synchronizes access to PG_W, another processor
5727 * could be setting PG_M and/or PG_A concurrently.
5729 atomic_clear_long(pte, htobe64(PG_W));
5730 pmap->pm_stats.wired_count--;
5737 mmu_radix_zero_page(vm_page_t m)
5741 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5742 addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5747 mmu_radix_zero_page_area(vm_page_t m, int off, int size)
5751 CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
5752 MPASS(off + size <= PAGE_SIZE);
5753 addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5754 memset(addr + off, 0, size);
5758 mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5765 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
5768 l3ep = pmap_pml3e(pmap, addr);
5769 if (l3ep != NULL && (be64toh(*l3ep) & PG_V)) {
5770 if (be64toh(*l3ep) & RPTE_LEAF) {
5771 pte = be64toh(*l3ep);
5772 /* Compute the physical address of the 4KB page. */
5773 pa = ((be64toh(*l3ep) & PG_PS_FRAME) | (addr & L3_PAGE_MASK)) &
5775 val = MINCORE_PSIND(1);
5777 /* Native endian PTE, do not pass to functions */
5778 pte = be64toh(*pmap_l3e_to_pte(l3ep, addr));
5779 pa = pte & PG_FRAME;
5787 if ((pte & PG_V) != 0) {
5788 val |= MINCORE_INCORE;
5789 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5790 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5791 if ((pte & PG_A) != 0)
5792 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5794 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5795 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
5796 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
5804 mmu_radix_activate(struct thread *td)
5809 CTR2(KTR_PMAP, "%s(%p)", __func__, td);
5811 pmap = vmspace_pmap(td->td_proc->p_vmspace);
5812 curpid = mfspr(SPR_PID);
5813 if (pmap->pm_pid > isa3_base_pid &&
5814 curpid != pmap->pm_pid) {
5815 mmu_radix_pid_set(pmap);
5821 * Increase the starting virtual address of the given mapping if a
5822 * different alignment might result in more superpage mappings.
5825 mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset,
5826 vm_offset_t *addr, vm_size_t size)
5829 CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
5831 vm_offset_t superpage_offset;
5833 if (size < L3_PAGE_SIZE)
5835 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5836 offset += ptoa(object->pg_color);
5837 superpage_offset = offset & L3_PAGE_MASK;
5838 if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) < L3_PAGE_SIZE ||
5839 (*addr & L3_PAGE_MASK) == superpage_offset)
5841 if ((*addr & L3_PAGE_MASK) < superpage_offset)
5842 *addr = (*addr & ~L3_PAGE_MASK) + superpage_offset;
5844 *addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset;
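/*
 * Illustrative sketch (hypothetical numbers): with a 2MB superpage, an
 * object offset of 0x123000 has superpage_offset == 0x123000.  An address
 * hint of 0x10000000 is below that offset within its 2MB frame, so it is
 * advanced to 0x10123000; the mapping's VA and object offset then agree
 * modulo L3_PAGE_SIZE and become eligible for promotion.
 */
static __unused __inline vm_offset_t
example_superpage_align(vm_offset_t addr, vm_ooffset_t offset)
{
	vm_offset_t superpage_offset;

	superpage_offset = offset & L3_PAGE_MASK;
	if ((addr & L3_PAGE_MASK) < superpage_offset)
		return ((addr & ~L3_PAGE_MASK) + superpage_offset);
	return (((addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset);
}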
5848 mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
5850 vm_offset_t va, tmpva, ppa, offset;
5852 ppa = trunc_page(pa);
5853 offset = pa & PAGE_MASK;
5854 size = roundup2(offset + size, PAGE_SIZE);
5855 if (pa < powerpc_ptob(Maxmem))
5856 panic("bad pa: %#lx less than Maxmem %#lx\n",
5857 pa, powerpc_ptob(Maxmem));
5858 va = kva_alloc(size);
5860 printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr);
5861 KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr));
5864 panic("%s: Couldn't alloc kernel virtual memory", __func__);
5866 for (tmpva = va; size > 0;) {
5867 mmu_radix_kenter_attr(tmpva, ppa, attr);
5874 return ((void *)(va + offset));
5878 mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
5881 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
5883 return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
5887 mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5890 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
5891 m->md.mdpg_cache_attrs = ma;
5894 * If "m" is a normal page, update its direct mapping. This update
5895 * can be relied upon to perform any cache operations that are
5896 * required for data coherence.
5898 if ((m->flags & PG_FICTITIOUS) == 0 &&
5899 mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
5900 PAGE_SIZE, m->md.mdpg_cache_attrs))
5901 panic("memory attribute change on the direct map failed");
5905 mmu_radix_unmapdev(void *p, vm_size_t size)
5907 vm_offset_t offset, va;
5909 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, p, size);
5911 /* If we gave a direct map region in pmap_mapdev, do nothing */
5912 va = (vm_offset_t)p;
5913 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
5916 offset = va & PAGE_MASK;
5917 size = round_page(offset + size);
5918 va = trunc_page(va);
5920 if (pmap_initialized) {
5921 mmu_radix_qremove(va, atop(size));
5927 mmu_radix_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
5932 if (__predict_false(pm == NULL))
5933 pm = &curthread->td_proc->p_vmspace->vm_pmap;
5936 pa = pmap_extract(pm, va);
5937 sync_sz = PAGE_SIZE - (va & PAGE_MASK);
5938 sync_sz = min(sync_sz, sz);
5940 pa += (va & PAGE_MASK);
5941 __syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
5948 static __inline void
5949 pmap_pte_attr(pt_entry_t *pte, uint64_t cache_bits, uint64_t mask)
5951 uint64_t opte, npte;
5954 * The cache mode bits live in the PTE's attribute field, so spin
5955 * on a compare-and-set until the updated PTE value is stored.
5958 opte = be64toh(*pte);
5959 npte = opte & ~mask;
5961 } while (npte != opte && !atomic_cmpset_long(pte, htobe64(opte), htobe64(npte)));
5965 * Tries to demote a 1GB page mapping.
5968 pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
5970 pml2_entry_t oldpdpe;
5971 pml3_entry_t *firstpde, newpde, *pde;
5975 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5976 oldpdpe = be64toh(*l2e);
5977 KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
5978 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
5979 pdpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
5981 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
5982 " in pmap %p", va, pmap);
5985 pdpg->pindex = va >> L2_PAGE_SIZE_SHIFT;
5986 pdpgpa = VM_PAGE_TO_PHYS(pdpg);
5987 firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
5988 KASSERT((oldpdpe & PG_A) != 0,
5989 ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
5990 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
5991 ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
5995 * Initialize the page directory page.
5997 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
5998 *pde = htobe64(newpde);
5999 newpde += L3_PAGE_SIZE;
6003 * Demote the mapping.
6005 pde_store(l2e, pdpgpa);
6008 * Flush PWC --- XXX revisit
6010 pmap_invalidate_all(pmap);
6012 counter_u64_add(pmap_l2e_demotions, 1);
6013 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
6014 " in pmap %p", va, pmap);
6019 mmu_radix_kextract(vm_offset_t va)
6024 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
6025 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
6026 pa = DMAP_TO_PHYS(va);
6028 /* Big-endian PTE on stack */
6029 l3e = *pmap_pml3e(kernel_pmap, va);
6030 if (be64toh(l3e) & RPTE_LEAF) {
6031 pa = (be64toh(l3e) & PG_PS_FRAME) | (va & L3_PAGE_MASK);
6035 * Beware of a concurrent promotion that changes the
6036 * PDE at this point! For example, vtopte() must not
6037 * be used to access the PTE because it would use the
6038 * new PDE. It is, however, safe to use the old PDE
6039 * because the page table page is preserved by the
6042 pa = be64toh(*pmap_l3e_to_pte(&l3e, va));
6043 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
6051 mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
6054 if (ma != VM_MEMATTR_DEFAULT) {
6055 return pmap_cache_bits(ma);
6059 * Assume the page is cache inhibited and access is guarded unless
6060 * it's in our available memory array.
6062 for (int i = 0; i < pregions_sz; i++) {
6063 if ((pa >= pregions[i].mr_start) &&
6064 (pa < (pregions[i].mr_start + pregions[i].mr_size)))
6065 return (RPTE_ATTR_MEM);
6067 return (RPTE_ATTR_GUARDEDIO);
6071 mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
6073 pt_entry_t *pte, pteval;
6074 uint64_t cache_bits;
6078 pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
6079 cache_bits = mmu_radix_calc_wimg(pa, ma);
6080 pte_store(pte, pteval | cache_bits);
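/*
 * Illustrative sketch (hypothetical device mapping): kenter_attr above
 * combines the EAA permission bits with the WIMG cache bits chosen by
 * mmu_radix_calc_wimg().  For a physical address that falls outside the
 * pregions[] memory array this selects RPTE_ATTR_GUARDEDIO, i.e. a
 * cache-inhibited, guarded mapping suitable for MMIO registers.
 */
static __unused void
example_map_mmio_page(vm_offset_t va, vm_paddr_t bar_pa)
{
	mmu_radix_kenter_attr(va, bar_pa, VM_MEMATTR_DEFAULT);
}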
6084 mmu_radix_kremove(vm_offset_t va)
6088 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
6095 mmu_radix_decode_kernel_ptr(vm_offset_t addr,
6096 int *is_user, vm_offset_t *decoded)
6099 CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
6101 *is_user = (addr < VM_MAXUSER_ADDRESS);
6106 mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
6109 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
6110 return (mem_valid(pa, size));
6114 mmu_radix_scan_init(void)
6117 CTR1(KTR_PMAP, "%s()", __func__);
6122 mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz,
6125 CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
6130 mmu_radix_quick_enter_page(vm_page_t m)
6134 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
6135 paddr = VM_PAGE_TO_PHYS(m);
6136 return (PHYS_TO_DMAP(paddr));
6140 mmu_radix_quick_remove_page(vm_offset_t addr __unused)
6142 /* no work to do here */
6143 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
6147 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
6149 cpu_flush_dcache((void *)sva, eva - sva);
6153 mmu_radix_change_attr(vm_offset_t va, vm_size_t size,
6158 CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode);
6159 PMAP_LOCK(kernel_pmap);
6160 error = pmap_change_attr_locked(va, size, mode, true);
6161 PMAP_UNLOCK(kernel_pmap);
6166 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
6168 vm_offset_t base, offset, tmpva;
6169 vm_paddr_t pa_start, pa_end, pa_end1;
6173 int cache_bits, error;
6176 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
6177 base = trunc_page(va);
6178 offset = va & PAGE_MASK;
6179 size = round_page(offset + size);
6182 * Only supported on kernel virtual addresses, including the direct
6183 * map.
6185 if (base < DMAP_MIN_ADDRESS)
6188 cache_bits = pmap_cache_bits(mode);
6192 * Pages that aren't mapped aren't supported. Also break down 2MB pages
6193 * into 4KB pages if required.
6195 for (tmpva = base; tmpva < base + size; ) {
6196 l2e = pmap_pml2e(kernel_pmap, tmpva);
6197 if (l2e == NULL || *l2e == 0)
6199 if (be64toh(*l2e) & RPTE_LEAF) {
6201 * If the current 1GB page already has the required
6202 * memory type, then we need not demote this page. Just
6203 * increment tmpva to the next 1GB page frame.
6205 if ((be64toh(*l2e) & RPTE_ATTR_MASK) == cache_bits) {
6206 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6211 * If the current offset aligns with a 1GB page frame
6212 * and there is at least 1GB left within the range, then
6213 * we need not break down this page into 2MB pages.
6215 if ((tmpva & L2_PAGE_MASK) == 0 &&
6216 tmpva + L2_PAGE_MASK < base + size) {
6217 tmpva += L2_PAGE_SIZE;
6220 if (!pmap_demote_l2e(kernel_pmap, l2e, tmpva))
6223 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6224 KASSERT(l3e != NULL, ("no l3e entry for %#lx in %p\n",
6228 if (be64toh(*l3e) & RPTE_LEAF) {
6230 * If the current 2MB page already has the required
6231 * memory type, then we need not demote this page. Just
6232 * increment tmpva to the next 2MB page frame.
6234 if ((be64toh(*l3e) & RPTE_ATTR_MASK) == cache_bits) {
6235 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6240 * If the current offset aligns with a 2MB page frame
6241 * and there is at least 2MB left within the range, then
6242 * we need not break down this page into 4KB pages.
6244 if ((tmpva & L3_PAGE_MASK) == 0 &&
6245 tmpva + L3_PAGE_MASK < base + size) {
6246 tmpva += L3_PAGE_SIZE;
6249 if (!pmap_demote_l3e(kernel_pmap, l3e, tmpva))
6252 pte = pmap_l3e_to_pte(l3e, tmpva);
6260 * Ok, all the pages exist, so run through them updating their
6261 * cache mode if required.
6263 pa_start = pa_end = 0;
6264 for (tmpva = base; tmpva < base + size; ) {
6265 l2e = pmap_pml2e(kernel_pmap, tmpva);
6266 if (be64toh(*l2e) & RPTE_LEAF) {
6267 if ((be64toh(*l2e) & RPTE_ATTR_MASK) != cache_bits) {
6268 pmap_pte_attr(l2e, cache_bits,
6272 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6273 (be64toh(*l2e) & PG_PS_FRAME) < dmaplimit) {
6274 if (pa_start == pa_end) {
6275 /* Start physical address run. */
6276 pa_start = be64toh(*l2e) & PG_PS_FRAME;
6277 pa_end = pa_start + L2_PAGE_SIZE;
6278 } else if (pa_end == (be64toh(*l2e) & PG_PS_FRAME))
6279 pa_end += L2_PAGE_SIZE;
6281 /* Run ended, update direct map. */
6282 error = pmap_change_attr_locked(
6283 PHYS_TO_DMAP(pa_start),
6284 pa_end - pa_start, mode, flush);
6287 /* Start physical address run. */
6288 pa_start = be64toh(*l2e) & PG_PS_FRAME;
6289 pa_end = pa_start + L2_PAGE_SIZE;
6292 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6295 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6296 if (be64toh(*l3e) & RPTE_LEAF) {
6297 if ((be64toh(*l3e) & RPTE_ATTR_MASK) != cache_bits) {
6298 pmap_pte_attr(l3e, cache_bits,
6302 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6303 (be64toh(*l3e) & PG_PS_FRAME) < dmaplimit) {
6304 if (pa_start == pa_end) {
6305 /* Start physical address run. */
6306 pa_start = be64toh(*l3e) & PG_PS_FRAME;
6307 pa_end = pa_start + L3_PAGE_SIZE;
6308 } else if (pa_end == (be64toh(*l3e) & PG_PS_FRAME))
6309 pa_end += L3_PAGE_SIZE;
6311 /* Run ended, update direct map. */
6312 error = pmap_change_attr_locked(
6313 PHYS_TO_DMAP(pa_start),
6314 pa_end - pa_start, mode, flush);
6317 /* Start physical address run. */
6318 pa_start = be64toh(*l3e) & PG_PS_FRAME;
6319 pa_end = pa_start + L3_PAGE_SIZE;
6322 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6324 pte = pmap_l3e_to_pte(l3e, tmpva);
6325 if ((be64toh(*pte) & RPTE_ATTR_MASK) != cache_bits) {
6326 pmap_pte_attr(pte, cache_bits,
6330 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6331 (be64toh(*pte) & PG_FRAME) < dmaplimit) {
6332 if (pa_start == pa_end) {
6333 /* Start physical address run. */
6334 pa_start = be64toh(*pte) & PG_FRAME;
6335 pa_end = pa_start + PAGE_SIZE;
6336 } else if (pa_end == (be64toh(*pte) & PG_FRAME))
6337 pa_end += PAGE_SIZE;
6339 /* Run ended, update direct map. */
6340 error = pmap_change_attr_locked(
6341 PHYS_TO_DMAP(pa_start),
6342 pa_end - pa_start, mode, flush);
6345 /* Start physical address run. */
6346 pa_start = be64toh(*pte) & PG_FRAME;
6347 pa_end = pa_start + PAGE_SIZE;
6353 if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
6354 pa_end1 = MIN(pa_end, dmaplimit);
6355 if (pa_start != pa_end1)
6356 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
6357 pa_end1 - pa_start, mode, flush);
6361 * Flush CPU caches if required so that no data remains cached with
6362 * the old memory attribute for the range that was just changed.
6365 pmap_invalidate_all(kernel_pmap);
6368 pmap_invalidate_cache_range(base, tmpva);
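/*
 * Illustrative sketch (hypothetical caller): switching a kernel buffer to
 * an uncacheable memory attribute.  pmap_change_attr_locked() also walks
 * the direct map alias of any physical range it touches, so no stale
 * cacheable mapping of the same memory is left behind.
 */
static __unused int
example_make_uncacheable(vm_offset_t va, vm_size_t len)
{
	return (mmu_radix_change_attr(va, len, VM_MEMATTR_UNCACHEABLE));
}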
6374 * Allocate physical memory for the vm_page array and map it into KVA,
6375 * attempting to back the vm_pages with domain-local memory.
6378 mmu_radix_page_array_startup(long pages)
6389 vm_offset_t start, end;
6391 vm_page_array_size = pages;
6393 start = VM_MIN_KERNEL_ADDRESS;
6394 end = start + pages * sizeof(struct vm_page);
6396 pa = vm_phys_early_alloc(0, end - start);
6398 start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
6400 /* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */
6401 for (va = start; va < end; va += L3_PAGE_SIZE) {
6402 pfn = first_page + (va - start) / sizeof(struct vm_page);
6403 domain = vm_phys_domain(ptoa(pfn));
6404 l2e = pmap_pml2e(kernel_pmap, va);
6405 if ((be64toh(*l2e) & PG_V) == 0) {
6406 pa = vm_phys_early_alloc(domain, PAGE_SIZE);
6408 pagezero(PHYS_TO_DMAP(pa));
6409 pde_store(l2e, (pml2_entry_t)pa);
6411 pde = pmap_l2e_to_l3e(l2e, va);
6412 if ((be64toh(*pde) & PG_V) != 0)
6413 panic("Unexpected pde %p", pde);
6414 pa = vm_phys_early_alloc(domain, L3_PAGE_SIZE);
6415 for (i = 0; i < NPDEPG; i++)
6416 dump_add_page(pa + i * PAGE_SIZE);
6417 newl3 = (pml3_entry_t)(pa | RPTE_EAA_P | RPTE_EAA_R | RPTE_EAA_W);
6418 pte_store(pde, newl3);
6421 vm_page_array = (vm_page_t)start;
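/*
 * Illustrative sketch (hypothetical helper): each 2MB slice of the
 * vm_page array above is backed by memory allocated from the NUMA domain
 * that owns the pages the slice will describe, computed from the first
 * page frame number the slice covers.
 */
static __unused int
example_page_array_domain(vm_offset_t start, vm_offset_t va)
{
	long pfn;

	pfn = first_page + (va - start) / sizeof(struct vm_page);
	return (vm_phys_domain(ptoa(pfn)));
}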
6425 #include <sys/kdb.h>
6426 #include <ddb/ddb.h>
6429 pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va)
6436 l1e = &l1[pmap_pml1e_index(va)];
6437 db_printf("VA %#016lx l1e %#016lx", va, be64toh(*l1e));
6438 if ((be64toh(*l1e) & PG_V) == 0) {
6442 l2e = pmap_l1e_to_l2e(l1e, va);
6443 db_printf(" l2e %#016lx", be64toh(*l2e));
6444 if ((be64toh(*l2e) & PG_V) == 0 || (be64toh(*l2e) & RPTE_LEAF) != 0) {
6448 l3e = pmap_l2e_to_l3e(l2e, va);
6449 db_printf(" l3e %#016lx", be64toh(*l3e));
6450 if ((be64toh(*l3e) & PG_V) == 0 || (be64toh(*l3e) & RPTE_LEAF) != 0) {
6454 pte = pmap_l3e_to_pte(l3e, va);
6455 db_printf(" pte %#016lx\n", be64toh(*pte));
6459 pmap_page_print_mappings(vm_page_t m)
6464 db_printf("page %p(%lx)\n", m, m->phys_addr);
6465 /* need to elide locks if running in ddb */
6466 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
6467 db_printf("pv: %p ", pv);
6468 db_printf("va: %#016lx ", pv->pv_va);
6470 db_printf("pmap %p ", pmap);
6472 db_printf("asid: %lu\n", pmap->pm_pid);
6473 pmap_pte_walk(pmap->pm_pml1, pv->pv_va);
6478 DB_SHOW_COMMAND(pte, pmap_print_pte)
6484 db_printf("show pte addr\n");
6487 va = (vm_offset_t)addr;
6489 if (va >= DMAP_MIN_ADDRESS)
6491 else if (kdb_thread != NULL)
6492 pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
6494 pmap = vmspace_pmap(curthread->td_proc->p_vmspace);
6496 pmap_pte_walk(pmap->pm_pml1, va);