/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Matthew Macy
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/systm.h>
37 #include <sys/bitstring.h>
38 #include <sys/queue.h>
39 #include <sys/cpuset.h>
40 #include <sys/endian.h>
41 #include <sys/kerneldump.h>
44 #include <sys/syslog.h>
45 #include <sys/msgbuf.h>
46 #include <sys/malloc.h>
48 #include <sys/mutex.h>
50 #include <sys/rwlock.h>
51 #include <sys/sched.h>
52 #include <sys/sysctl.h>
53 #include <sys/systm.h>
55 #include <sys/vmmeter.h>
60 #include <dev/ofw/openfirm.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_extern.h>
70 #include <vm/vm_pageout.h>
71 #include <vm/vm_phys.h>
72 #include <vm/vm_reserv.h>
75 #include <machine/_inttypes.h>
76 #include <machine/cpu.h>
77 #include <machine/platform.h>
78 #include <machine/frame.h>
79 #include <machine/md_var.h>
80 #include <machine/psl.h>
81 #include <machine/bat.h>
82 #include <machine/hid.h>
83 #include <machine/pte.h>
84 #include <machine/sr.h>
85 #include <machine/trap.h>
86 #include <machine/mmuvar.h>
89 #include <vm/uma_dbg.h>
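/*
 * Helpers for IBM (MSB-0) bit numbering: PPC_BIT(0) is the most significant
 * bit of a 64-bit long and PPC_BIT(63) the least significant, matching the
 * bit numbers used in the POWER ISA documentation.
 */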
92 #define PPC_BITLSHIFT(bit) (sizeof(long)*NBBY - 1 - (bit))
93 #define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
94 #define PPC_BITLSHIFT_VAL(val, bit) ((val) << PPC_BITLSHIFT(bit))
98 static void pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va);
101 #define PG_W RPTE_WIRED
102 #define PG_V RPTE_VALID
103 #define PG_MANAGED RPTE_MANAGED
104 #define PG_PROMOTED RPTE_PROMOTED
107 #define PG_X RPTE_EAA_X
108 #define PG_RW RPTE_EAA_W
109 #define PG_PTE_CACHE RPTE_ATTR_MASK
112 #define NLS_MASK ((1UL<<5)-1)
113 #define RPTE_ENTRIES (1UL<<RPTE_SHIFT)
114 #define RPTE_MASK (RPTE_ENTRIES-1)
117 #define NLB_MASK (((1UL<<52)-1) << 8)
120 extern caddr_t crashdumpmap;
122 #define RIC_FLUSH_TLB 0
123 #define RIC_FLUSH_PWC 1
124 #define RIC_FLUSH_ALL 2
126 #define POWER9_TLB_SETS_RADIX 128 /* # sets in POWER9 TLB Radix mode */
128 #define PPC_INST_TLBIE 0x7c000264
129 #define PPC_INST_TLBIEL 0x7c000224
130 #define PPC_INST_SLBIA 0x7c0003e4
132 #define ___PPC_RA(a) (((a) & 0x1f) << 16)
133 #define ___PPC_RB(b) (((b) & 0x1f) << 11)
134 #define ___PPC_RS(s) (((s) & 0x1f) << 21)
135 #define ___PPC_RT(t) ___PPC_RS(t)
136 #define ___PPC_R(r) (((r) & 0x1) << 16)
137 #define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
138 #define ___PPC_RIC(ric) (((ric) & 0x3) << 18)
#define PPC_SLBIA(IH)	__XSTRING(.long PPC_INST_SLBIA | \
				       ((IH & 0x7) << 21))
#define PPC_TLBIE_5(rb,rs,ric,prs,r)				\
	__XSTRING(.long PPC_INST_TLBIE |			\
			  ___PPC_RB(rb) | ___PPC_RS(rs) |	\
			  ___PPC_RIC(ric) | ___PPC_PRS(prs) |	\
			  ___PPC_R(r))
#define PPC_TLBIEL(rb,rs,ric,prs,r)				\
	__XSTRING(.long PPC_INST_TLBIEL |			\
			  ___PPC_RB(rb) | ___PPC_RS(rs) |	\
			  ___PPC_RIC(ric) | ___PPC_PRS(prs) |	\
			  ___PPC_R(r))
154 #define PPC_INVALIDATE_ERAT PPC_SLBIA(7)
static __always_inline void
ttusync(void)
{
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
}
162 #define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */
163 #define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */
164 #define TLBIEL_INVAL_SET_PID 0x400 /* invalidate a set for the current PID */
165 #define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */
166 #define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */
168 #define TLBIE_ACTUAL_PAGE_MASK 0xe0
169 #define TLBIE_ACTUAL_PAGE_4K 0x00
170 #define TLBIE_ACTUAL_PAGE_64K 0xa0
171 #define TLBIE_ACTUAL_PAGE_2M 0x20
172 #define TLBIE_ACTUAL_PAGE_1G 0x40
174 #define TLBIE_PRS_PARTITION_SCOPE 0x0
175 #define TLBIE_PRS_PROCESS_SCOPE 0x1
177 #define TLBIE_RIC_INVALIDATE_TLB 0x0 /* Invalidate just TLB */
178 #define TLBIE_RIC_INVALIDATE_PWC 0x1 /* Invalidate just PWC */
#define TLBIE_RIC_INVALIDATE_ALL	0x2	/* Invalidate TLB, PWC,
						 * cached {proc, part}tab entries
						 */
#define TLBIE_RIC_INVALIDATE_SEQ	0x3	/* HPT - only:
						 * Invalidate a range of translations
						 */
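/*
 * Issue a single tlbie: "ric" selects what to invalidate (TLB, PWC, or
 * everything), "prs" selects process- vs. partition-scoped translations,
 * "is" is the invalidation selector, "ap" encodes the actual page size,
 * and the PID/LPID pair is passed in RS while the page-aligned VA goes in RB.
 */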
static __inline void
radix_tlbie(uint8_t ric, uint8_t prs, uint16_t is, uint32_t pid, uint32_t lpid,
	vm_offset_t va, uint16_t ap)
{
	uint64_t rb, rs;

	MPASS((va & PAGE_MASK) == 0);

	rs = ((uint64_t)pid << 32) | lpid;
	rb = va | is | ap;
	__asm __volatile(PPC_TLBIE_5(%0, %1, %2, %3, 1) : :
		"r" (rb), "r" (rs), "i" (ric), "i" (prs));
}
201 radix_tlbie_invlpg_user_4k(uint32_t pid, vm_offset_t va)
204 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
205 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_4K);
209 radix_tlbie_invlpg_user_2m(uint32_t pid, vm_offset_t va)
212 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
213 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_2M);
217 radix_tlbie_invlpwc_user(uint32_t pid)
220 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
221 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
225 radix_tlbie_flush_user(uint32_t pid)
228 radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
229 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
233 radix_tlbie_invlpg_kernel_4k(vm_offset_t va)
236 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
237 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_4K);
241 radix_tlbie_invlpg_kernel_2m(vm_offset_t va)
244 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
245 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_2M);
248 /* 1GB pages aren't currently supported. */
249 static __inline __unused void
250 radix_tlbie_invlpg_kernel_1g(vm_offset_t va)
253 radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
254 TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_1G);
258 radix_tlbie_invlpwc_kernel(void)
261 radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
262 TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
static __inline void
radix_tlbie_flush_kernel(void)
{
	radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
	    TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
}
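/*
 * Index helpers: compute the slot that a virtual address occupies at each
 * level of the four-level radix tree (L1 -> L2 -> L3 -> PTE).
 */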
273 static __inline vm_pindex_t
274 pmap_l3e_pindex(vm_offset_t va)
276 return ((va & PG_FRAME) >> L3_PAGE_SIZE_SHIFT);
279 static __inline vm_pindex_t
280 pmap_pml3e_index(vm_offset_t va)
283 return ((va >> L3_PAGE_SIZE_SHIFT) & RPTE_MASK);
286 static __inline vm_pindex_t
287 pmap_pml2e_index(vm_offset_t va)
289 return ((va >> L2_PAGE_SIZE_SHIFT) & RPTE_MASK);
292 static __inline vm_pindex_t
293 pmap_pml1e_index(vm_offset_t va)
295 return ((va & PG_FRAME) >> L1_PAGE_SIZE_SHIFT);
298 /* Return various clipped indexes for a given VA */
299 static __inline vm_pindex_t
300 pmap_pte_index(vm_offset_t va)
303 return ((va >> PAGE_SHIFT) & RPTE_MASK);
306 /* Return a pointer to the PT slot that corresponds to a VA */
307 static __inline pt_entry_t *
308 pmap_l3e_to_pte(pt_entry_t *l3e, vm_offset_t va)
313 ptepa = (*l3e & NLB_MASK);
314 pte = (pt_entry_t *)PHYS_TO_DMAP(ptepa);
315 return (&pte[pmap_pte_index(va)]);
318 /* Return a pointer to the PD slot that corresponds to a VA */
319 static __inline pt_entry_t *
320 pmap_l2e_to_l3e(pt_entry_t *l2e, vm_offset_t va)
325 l3pa = (*l2e & NLB_MASK);
326 l3e = (pml3_entry_t *)PHYS_TO_DMAP(l3pa);
327 return (&l3e[pmap_pml3e_index(va)]);
330 /* Return a pointer to the PD slot that corresponds to a VA */
331 static __inline pt_entry_t *
332 pmap_l1e_to_l2e(pt_entry_t *l1e, vm_offset_t va)
337 l2pa = (*l1e & NLB_MASK);
339 l2e = (pml2_entry_t *)PHYS_TO_DMAP(l2pa);
340 return (&l2e[pmap_pml2e_index(va)]);
static __inline pml1_entry_t *
pmap_pml1e(pmap_t pmap, vm_offset_t va)
{
	return (&pmap->pm_pml1[pmap_pml1e_index(va)]);
}

static __inline pt_entry_t *
pmap_pml2e(pmap_t pmap, vm_offset_t va)
{
	pml1_entry_t *l1e;

	l1e = pmap_pml1e(pmap, va);
	if (l1e == NULL || (*l1e & RPTE_VALID) == 0)
		return (NULL);
	return (pmap_l1e_to_l2e(l1e, va));
}

static __inline pt_entry_t *
pmap_pml3e(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *l2e;

	l2e = pmap_pml2e(pmap, va);
	if (l2e == NULL || (*l2e & RPTE_VALID) == 0)
		return (NULL);
	return (pmap_l2e_to_l3e(l2e, va));
}

static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *l3e;

	l3e = pmap_pml3e(pmap, va);
	if (l3e == NULL || (*l3e & RPTE_VALID) == 0)
		return (NULL);
	return (pmap_l3e_to_pte(l3e, va));
}
static int nkpt = 64;
SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
385 "Number of kernel page table pages allocated on bootup");
387 vm_paddr_t dmaplimit;
389 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
391 static int pg_ps_enabled = 1;
392 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
393 &pg_ps_enabled, 0, "Are large page mappings enabled?");
395 #define VERBOSE_PMAP 0
396 #define VERBOSE_PROTECT 0
397 static int pmap_logging;
398 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_logging, CTLFLAG_RWTUN,
399 &pmap_logging, 0, "verbose debug logging");
402 static u_int64_t KPTphys; /* phys addr of kernel level 1 */
404 //static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */
406 static vm_offset_t qframe = 0;
407 static struct mtx qframe_mtx;
409 void mmu_radix_activate(struct thread *);
410 void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int);
void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
    vm_size_t);
413 void mmu_radix_clear_modify(vm_page_t);
414 void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
415 int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *);
416 int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
419 void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
420 vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va);
421 vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
422 void mmu_radix_kenter(vm_offset_t, vm_paddr_t);
423 vm_paddr_t mmu_radix_kextract(vm_offset_t);
424 void mmu_radix_kremove(vm_offset_t);
425 boolean_t mmu_radix_is_modified(vm_page_t);
426 boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
427 boolean_t mmu_radix_is_referenced(vm_page_t);
428 void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t,
429 vm_pindex_t, vm_size_t);
430 boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t);
431 void mmu_radix_page_init(vm_page_t);
432 boolean_t mmu_radix_page_is_mapped(vm_page_t m);
433 void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t);
434 int mmu_radix_page_wired_mappings(vm_page_t);
435 int mmu_radix_pinit(pmap_t);
436 void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
437 bool mmu_radix_ps_enabled(pmap_t);
438 void mmu_radix_qenter(vm_offset_t, vm_page_t *, int);
439 void mmu_radix_qremove(vm_offset_t, int);
440 vm_offset_t mmu_radix_quick_enter_page(vm_page_t);
441 void mmu_radix_quick_remove_page(vm_offset_t);
442 boolean_t mmu_radix_ts_referenced(vm_page_t);
443 void mmu_radix_release(pmap_t);
444 void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t);
445 void mmu_radix_remove_all(vm_page_t);
446 void mmu_radix_remove_pages(pmap_t);
447 void mmu_radix_remove_write(vm_page_t);
448 void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t);
449 void mmu_radix_zero_page(vm_page_t);
450 void mmu_radix_zero_page_area(vm_page_t, int, int);
451 int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
452 void mmu_radix_page_array_startup(long pages);
454 #include "mmu_oea64.h"
/*
 * Kernel MMU interface
 */
460 static void mmu_radix_bootstrap(vm_offset_t, vm_offset_t);
462 static void mmu_radix_copy_page(vm_page_t, vm_page_t);
463 static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
464 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
465 static void mmu_radix_growkernel(vm_offset_t);
466 static void mmu_radix_init(void);
467 static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
468 static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
469 static void mmu_radix_pinit0(pmap_t);
471 static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t);
472 static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
473 static void mmu_radix_unmapdev(vm_offset_t, vm_size_t);
474 static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
475 static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t);
476 static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
477 static void mmu_radix_scan_init(void);
478 static void mmu_radix_cpu_bootstrap(int ap);
479 static void mmu_radix_tlbie_all(void);
481 static struct pmap_funcs mmu_radix_methods = {
482 .bootstrap = mmu_radix_bootstrap,
483 .copy_page = mmu_radix_copy_page,
484 .copy_pages = mmu_radix_copy_pages,
485 .cpu_bootstrap = mmu_radix_cpu_bootstrap,
486 .growkernel = mmu_radix_growkernel,
487 .init = mmu_radix_init,
488 .map = mmu_radix_map,
489 .mincore = mmu_radix_mincore,
490 .pinit = mmu_radix_pinit,
491 .pinit0 = mmu_radix_pinit0,
493 .mapdev = mmu_radix_mapdev,
494 .mapdev_attr = mmu_radix_mapdev_attr,
495 .unmapdev = mmu_radix_unmapdev,
496 .kenter_attr = mmu_radix_kenter_attr,
497 .dev_direct_mapped = mmu_radix_dev_direct_mapped,
498 .dumpsys_pa_init = mmu_radix_scan_init,
499 .dumpsys_map_chunk = mmu_radix_dumpsys_map,
500 .page_is_mapped = mmu_radix_page_is_mapped,
501 .ps_enabled = mmu_radix_ps_enabled,
502 .object_init_pt = mmu_radix_object_init_pt,
503 .protect = mmu_radix_protect,
504 /* pmap dispatcher interface */
505 .clear_modify = mmu_radix_clear_modify,
506 .copy = mmu_radix_copy,
507 .enter = mmu_radix_enter,
508 .enter_object = mmu_radix_enter_object,
509 .enter_quick = mmu_radix_enter_quick,
510 .extract = mmu_radix_extract,
511 .extract_and_hold = mmu_radix_extract_and_hold,
512 .is_modified = mmu_radix_is_modified,
513 .is_prefaultable = mmu_radix_is_prefaultable,
514 .is_referenced = mmu_radix_is_referenced,
515 .ts_referenced = mmu_radix_ts_referenced,
516 .page_exists_quick = mmu_radix_page_exists_quick,
517 .page_init = mmu_radix_page_init,
518 .page_wired_mappings = mmu_radix_page_wired_mappings,
519 .qenter = mmu_radix_qenter,
520 .qremove = mmu_radix_qremove,
521 .release = mmu_radix_release,
522 .remove = mmu_radix_remove,
523 .remove_all = mmu_radix_remove_all,
524 .remove_write = mmu_radix_remove_write,
525 .unwire = mmu_radix_unwire,
526 .zero_page = mmu_radix_zero_page,
527 .zero_page_area = mmu_radix_zero_page_area,
528 .activate = mmu_radix_activate,
529 .quick_enter_page = mmu_radix_quick_enter_page,
530 .quick_remove_page = mmu_radix_quick_remove_page,
531 .page_set_memattr = mmu_radix_page_set_memattr,
532 .page_array_startup = mmu_radix_page_array_startup,
534 /* Internal interfaces */
535 .kenter = mmu_radix_kenter,
536 .kextract = mmu_radix_kextract,
537 .kremove = mmu_radix_kremove,
538 .change_attr = mmu_radix_change_attr,
539 .decode_kernel_ptr = mmu_radix_decode_kernel_ptr,
	.tlbie_all = mmu_radix_tlbie_all,
};
544 MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
546 static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
547 struct rwlock **lockp);
548 static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va);
549 static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *);
550 static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
551 struct spglist *free, struct rwlock **lockp);
552 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
553 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
554 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
555 static bool pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *pde,
556 struct spglist *free);
557 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
558 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp);
560 static bool pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e,
561 u_int flags, struct rwlock **lockp);
562 #if VM_NRESERVLEVEL > 0
563 static void pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    struct rwlock **lockp);
#endif
566 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
567 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
568 static vm_page_t mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
569 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate);
571 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
572 vm_prot_t prot, struct rwlock **lockp);
573 static int pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde,
574 u_int flags, vm_page_t m, struct rwlock **lockp);
576 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
577 static void free_pv_chunk(struct pv_chunk *pc);
578 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp);
579 static vm_page_t pmap_allocl3e(pmap_t pmap, vm_offset_t va,
580 struct rwlock **lockp);
581 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
582 struct rwlock **lockp);
583 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
584 struct spglist *free);
585 static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free);
587 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start);
588 static void pmap_invalidate_all(pmap_t pmap);
589 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush);
592 * Internal flags for pmap_enter()'s helper functions.
594 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
595 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
597 #define UNIMPLEMENTED() panic("%s not implemented", __func__)
598 #define UNTESTED() panic("%s not yet tested", __func__)
602 /* Number of supported PID bits */
603 static unsigned int isa3_pid_bits;
605 /* PID to start allocating from */
606 static unsigned int isa3_base_pid;
608 #define PROCTAB_SIZE_SHIFT (isa3_pid_bits + 4)
609 #define PROCTAB_ENTRIES (1ul << isa3_pid_bits)
613 * Map of physical memory regions.
615 static struct mem_region *regions, *pregions;
616 static struct numa_mem_region *numa_pregions;
617 static u_int phys_avail_count;
618 static int regions_sz, pregions_sz, numa_pregions_sz;
619 static struct pate *isa3_parttab;
620 static struct prte *isa3_proctab;
621 static vmem_t *asid_arena;
623 extern void bs_remap_earlyboot(void);
625 #define RADIX_PGD_SIZE_SHIFT 16
626 #define RADIX_PGD_SIZE (1UL << RADIX_PGD_SIZE_SHIFT)
628 #define RADIX_PGD_INDEX_SHIFT (RADIX_PGD_SIZE_SHIFT-3)
629 #define NL2EPG (PAGE_SIZE/sizeof(pml2_entry_t))
630 #define NL3EPG (PAGE_SIZE/sizeof(pml3_entry_t))
632 #define NUPML1E (RADIX_PGD_SIZE/sizeof(uint64_t)) /* number of userland PML1 pages */
633 #define NUPDPE (NUPML1E * NL2EPG)/* number of userland PDP pages */
634 #define NUPDE (NUPDPE * NL3EPG) /* number of userland PD entries */
636 /* POWER9 only permits a 64k partition table size. */
637 #define PARTTAB_SIZE_SHIFT 16
638 #define PARTTAB_SIZE (1UL << PARTTAB_SIZE_SHIFT)
640 #define PARTTAB_HR (1UL << 63) /* host uses radix */
641 #define PARTTAB_GR (1UL << 63) /* guest uses radix must match host */
/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_LPID = 0,	/* invalidate TLBs for current LPID */
	TLB_INVAL_SCOPE_GLOBAL = 1,	/* invalidate all TLBs */
};
649 #define NPV_LIST_LOCKS MAXCPU
650 static int pmap_initialized;
651 static vm_paddr_t proctab0pa;
652 static vm_paddr_t parttab_phys;
653 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
656 * Data for the pv entry allocation mechanism.
657 * Updates to pv_invl_gen are protected by the pv_list_locks[]
658 * elements, but reads are not.
660 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
661 static struct mtx __exclusive_cache_line pv_chunks_mutex;
662 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
663 static struct md_page *pv_table;
664 static struct md_page pv_dummy;
#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif
672 #define pa_radix_index(pa) ((pa) >> L3_PAGE_SIZE_SHIFT)
673 #define pa_to_pvh(pa) (&pv_table[pa_radix_index(pa)])
675 #define PHYS_TO_PV_LIST_LOCK(pa) \
676 (&pv_list_locks[pa_radix_index(pa) % NPV_LIST_LOCKS])
678 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
679 struct rwlock **_lockp = (lockp); \
680 struct rwlock *_new_lock; \
682 _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
683 if (_new_lock != *_lockp) { \
684 if (*_lockp != NULL) \
685 rw_wunlock(*_lockp); \
		*_lockp = _new_lock;				\
		rw_wlock(_new_lock);				\
	}							\
} while (0)
691 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
692 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
694 #define RELEASE_PV_LIST_LOCK(lockp) do { \
695 struct rwlock **_lockp = (lockp); \
697 if (*_lockp != NULL) { \
		rw_wunlock(*_lockp);				\
		*_lockp = NULL;					\
	}							\
} while (0)
703 #define VM_PAGE_TO_PV_LIST_LOCK(m) \
704 PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
/*
 * We support 52 bits, hence:
 * bits 52 - 31 = 21, 0b10101
 * RTS encoding details
 * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
 * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
 */
713 #define RTS_SIZE ((0x2UL << 61) | (0x5UL << 5))
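/*
 * Worked out: RTS = 21 = 0b10101 selects a 2^(31 + 21) = 2^52 byte address
 * space.  The upper two bits (0b10) are placed at bits 62:61 and the lower
 * three bits (0b101) at bits 7:5, giving (0x2UL << 61) | (0x5UL << 5).
 */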
716 static int powernv_enabled = 1;
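/*
 * Invalidate one congruence class ("set") of the local TLB with tlbiel,
 * using the ISA 3.0 encoding: the set number and IS field are passed in RB,
 * the PID in RS.
 */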
719 tlbiel_radix_set_isa300(uint32_t set, uint32_t is,
720 uint32_t pid, uint32_t ric, uint32_t prs)
725 rb = PPC_BITLSHIFT_VAL(set, 51) | PPC_BITLSHIFT_VAL(is, 53);
726 rs = PPC_BITLSHIFT_VAL((uint64_t)pid, 31);
728 __asm __volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
729 : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
734 tlbiel_flush_isa3(uint32_t num_sets, uint32_t is)
738 __asm __volatile("ptesync": : :"memory");
	/*
	 * Flush the first set of the TLB, and the entire Page Walk Cache
	 * and partition table entries.  Then flush the remaining sets of the
	 * TLB.
	 */
745 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
746 for (set = 1; set < num_sets; set++)
747 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
749 /* Do the same for process scoped entries. */
750 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
751 for (set = 1; set < num_sets; set++)
752 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
	__asm __volatile("ptesync": : :"memory");
}
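/*
 * Flush the local CPU's entire TLB (and page walk cache) with tlbiel,
 * either for the current logical partition or globally, then discard the
 * ERAT so no stale translations remain.
 */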
758 mmu_radix_tlbiel_flush(int scope)
762 MPASS(scope == TLB_INVAL_SCOPE_LPID ||
763 scope == TLB_INVAL_SCOPE_GLOBAL);
766 tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, is);
767 __asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
static void
mmu_radix_tlbie_all()
{
	/* TODO: LPID invalidate */
	mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
}

static void
mmu_radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPR_AMOR, (3ul << 62));
}

static void
mmu_radix_init_iamr(void)
{
	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetches.
	 */
	mtspr(SPR_IAMR, (1ul << 62));
}
802 mmu_radix_pid_set(pmap_t pmap)
805 mtspr(SPR_PID, pmap->pm_pid);
809 /* Quick sort callout for comparing physical addresses. */
static int
pa_cmp(const void *a, const void *b)
{
	const vm_paddr_t *pa = a, *pb = b;

	if (*pa < *pb)
		return (-1);
	else if (*pa > *pb)
		return (1);
	else
		return (0);
}
823 #define pte_load_store(ptep, pte) atomic_swap_long(ptep, pte)
824 #define pte_load_clear(ptep) atomic_swap_long(ptep, 0)
#define pte_store(ptep, pte) do {				\
	MPASS((pte) & (RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_X));	\
	*(u_long *)(ptep) = (u_long)((pte) | PG_V | RPTE_LEAF);	\
} while (0)
/*
 * NB: should only be used for adding directories - not for direct mappings
 */
#define pde_store(ptep, pa) do {				\
	*(u_long *)(ptep) = (u_long)(pa|RPTE_VALID|RPTE_SHIFT);	\
} while (0)

#define pte_clear(ptep) do {					\
	*(u_long *)(ptep) = (u_long)(0);			\
} while (0)
840 #define PMAP_PDE_SUPERPAGE (1 << 8) /* supports 2MB superpages */
/*
 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
 * (PTE) page mappings have identical settings for the following fields:
 */
846 #define PG_PTE_PROMOTE (PG_X | PG_MANAGED | PG_W | PG_PTE_CACHE | \
847 PG_M | PG_A | RPTE_EAA_MASK | PG_V)
851 pmap_resident_count_inc(pmap_t pmap, int count)
854 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
855 pmap->pm_stats.resident_count += count;
859 pmap_resident_count_dec(pmap_t pmap, int count)
862 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
863 KASSERT(pmap->pm_stats.resident_count >= count,
864 ("pmap %p resident count underflow %ld %d", pmap,
865 pmap->pm_stats.resident_count, count));
866 pmap->pm_stats.resident_count -= count;
static __inline void
pagezero(vm_offset_t va)
{

	bzero((void *)va, PAGE_SIZE);
}

static vm_paddr_t
allocpages(int n)
{
	vm_paddr_t ret;

	ret = moea64_bootstrap_alloc(n * PAGE_SIZE, PAGE_SIZE);
	for (int i = 0; i < n; i++)
		pagezero(PHYS_TO_DMAP(ret + i * PAGE_SIZE));
	return (ret);
}
static pt_entry_t *
kvtopte(vm_offset_t va)
{
	pt_entry_t *l3e;

	l3e = pmap_pml3e(kernel_pmap, va);
	if ((*l3e & RPTE_VALID) == 0)
		return (NULL);
	return (pmap_l3e_to_pte(l3e, va));
}
void
mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa)
{
	pt_entry_t *pte;

	pte = kvtopte(va);
	MPASS(pte != NULL);
	*pte = pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | RPTE_EAA_W | \
	    RPTE_EAA_P | PG_M | PG_A;
}
bool
mmu_radix_ps_enabled(pmap_t pmap)
{
	return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
}
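/*
 * Referenced/changed bit fault handling: look up the PTE (or leaf L3 entry)
 * backing "va" and, when the access is permitted, set PG_A/PG_M in place so
 * the fault can be resolved without a full VM fault.
 */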
917 pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e)
923 l3e = pmap_pml3e(pmap, va);
924 if (l3e == NULL || (*l3e & PG_V) == 0)
927 if (*l3e & RPTE_LEAF) {
933 pte = pmap_l3e_to_pte(l3e, va);
934 if (pte == NULL || (*pte & PG_V) == 0)
940 pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags)
943 pt_entry_t startpte, origpte, newpte;
949 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL)
950 return (KERN_INVALID_ADDRESS);
951 origpte = newpte = *pte;
954 if (((flags & VM_PROT_WRITE) && (startpte & PG_M)) ||
955 ((flags & VM_PROT_READ) && (startpte & PG_A))) {
956 pmap_invalidate_all(pmap);
958 if (VERBOSE_PMAP || pmap_logging)
959 printf("%s(%p, %#lx, %#x) (%#lx) -- invalidate all\n",
960 __func__, pmap, va, flags, origpte);
962 return (KERN_FAILURE);
966 if (VERBOSE_PMAP || pmap_logging)
967 printf("%s(%p, %#lx, %#x) (%#lx)\n", __func__, pmap, va,
971 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL ||
974 return (KERN_FAILURE);
976 m = PHYS_TO_VM_PAGE(newpte & PG_FRAME);
980 if ((newpte & (RPTE_EAA_R|RPTE_EAA_X)) == 0)
983 vm_page_aflag_set(m, PGA_REFERENCED);
986 if ((newpte & RPTE_EAA_W) == 0)
993 case VM_PROT_EXECUTE:
994 if ((newpte & RPTE_EAA_X) == 0)
997 vm_page_aflag_set(m, PGA_REFERENCED);
1001 if (!atomic_cmpset_long(pte, origpte, newpte))
1005 if (startpte == newpte)
1006 return (KERN_FAILURE);
1010 return (KERN_PROTECTION_FAILURE);
/*
 * Returns TRUE if the given page is mapped individually or as part of
 * a 2mpage.  Otherwise, returns FALSE.
 */
boolean_t
mmu_radix_page_is_mapped(vm_page_t m)
1020 struct rwlock *lock;
1023 if ((m->oflags & VPO_UNMANAGED) != 0)
1025 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
1027 rv = !TAILQ_EMPTY(&m->md.pv_list) ||
1028 ((m->flags & PG_FICTITIOUS) == 0 &&
1029 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
/*
 * Determine the appropriate bits to set in a PTE or PDE for a specified
 * caching mode.
 */
static int
pmap_cache_bits(vm_memattr_t ma)
{
	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (RPTE_ATTR_GUARDEDIO);
		case VM_MEMATTR_CACHEABLE:
			return (RPTE_ATTR_MEM);
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
		case VM_MEMATTR_WRITE_COMBINING:
			return (RPTE_ATTR_UNGUARDEDIO);
		}
	}
	return (0);
}
static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t start)
{
	if (pmap == kernel_pmap)
		radix_tlbie_invlpg_kernel_4k(start);
	else
		radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
	ttusync();
}

static void
pmap_invalidate_page_2m(pmap_t pmap, vm_offset_t start)
{
	if (pmap == kernel_pmap)
		radix_tlbie_invlpg_kernel_2m(start);
	else
		radix_tlbie_invlpg_user_2m(pmap->pm_pid, start);
	ttusync();
}

static void
pmap_invalidate_pwc(pmap_t pmap)
{
	if (pmap == kernel_pmap)
		radix_tlbie_invlpwc_kernel();
	else
		radix_tlbie_invlpwc_user(pmap->pm_pid);
	ttusync();
}

static void
pmap_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{
	if (((end - start) >> PAGE_SHIFT) > 8) {
		pmap_invalidate_all(pmap);
		return;
	}
	if (pmap == kernel_pmap) {
		while (start < end) {
			radix_tlbie_invlpg_kernel_4k(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
			start += PAGE_SIZE;
		}
	}
	ttusync();
}

static void
pmap_invalidate_all(pmap_t pmap)
{
	if (pmap == kernel_pmap)
		radix_tlbie_flush_kernel();
	else
		radix_tlbie_flush_user(pmap->pm_pid);
	ttusync();
}
static __inline void
pmap_invalidate_l3e_page(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e)
{

	/*
	 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
	 * by a promotion that did not invalidate the 512 4KB page mappings
	 * that might exist in the TLB.  Consequently, at this point, the TLB
	 * may hold both 4KB and 2MB page mappings for the address range [va,
	 * va + L3_PAGE_SIZE).  Therefore, the entire range must be invalidated
	 * here.  In contrast, when PG_PROMOTED is clear, the TLB will not hold
	 * any 4KB page mappings for the address range [va, va + L3_PAGE_SIZE),
	 * and so a single INVLPG suffices to invalidate the 2MB page mapping
	 * from the TLB.
	 */
1138 if ((l3e & PG_PROMOTED) != 0)
1139 pmap_invalidate_range(pmap, va, va + L3_PAGE_SIZE - 1);
	else
		pmap_invalidate_page_2m(pmap, va);
1143 pmap_invalidate_pwc(pmap);
1146 static __inline struct pv_chunk *
1147 pv_to_chunk(pv_entry_t pv)
1150 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1153 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1155 #define PC_FREE0 0xfffffffffffffffful
1156 #define PC_FREE1 0x3ffffffffffffffful
1158 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1 };
1161 * Ensure that the number of spare PV entries in the specified pmap meets or
1162 * exceeds the given count, "needed".
1164 * The given PV list lock may be released.
1167 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1169 struct pch new_tail;
1170 struct pv_chunk *pc;
1175 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1176 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
1179 * Newly allocated PV chunks must be stored in a private list until
1180 * the required number of PV chunks have been allocated. Otherwise,
1181 * reclaim_pv_chunk() could recycle one of these chunks. In
1182 * contrast, these chunks must be added to the pmap upon allocation.
1184 TAILQ_INIT(&new_tail);
1187 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1188 // if ((cpu_feature2 & CPUID2_POPCNT) == 0)
1189 bit_count((bitstr_t *)pc->pc_map, 0,
1190 sizeof(pc->pc_map) * NBBY, &free);
1192 free = popcnt_pc_map_pq(pc->pc_map);
1197 if (avail >= needed)
1200 for (reclaimed = false; avail < needed; avail += _NPCPV) {
1201 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
1204 m = reclaim_pv_chunk(pmap, lockp);
1209 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1210 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1211 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1213 pc->pc_map[0] = PC_FREE0;
1214 pc->pc_map[1] = PC_FREE1;
1215 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1216 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
1217 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
1220 * The reclaim might have freed a chunk from the current pmap.
1221 * If that chunk contained available entries, we need to
1222 * re-count the number of available entries.
1227 if (!TAILQ_EMPTY(&new_tail)) {
1228 mtx_lock(&pv_chunks_mutex);
1229 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
1230 mtx_unlock(&pv_chunks_mutex);
1235 * First find and then remove the pv entry for the specified pmap and virtual
1236 * address from the specified pv list. Returns the pv entry if found and NULL
1237 * otherwise. This operation can be performed on pv lists for either 4KB or
1238 * 2MB page mappings.
1240 static __inline pv_entry_t
1241 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1245 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
1247 if (PV_PMAP(pv) == NULL) {
1248 printf("corrupted pv_chunk/pv %p\n", pv);
1249 printf("pv_chunk: %64D\n", pv_to_chunk(pv), ":");
1251 MPASS(PV_PMAP(pv) != NULL);
1252 MPASS(pv->pv_va != 0);
1254 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1255 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
1264 * After demotion from a 2MB page mapping to 512 4KB page mappings,
1265 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
1266 * entries for each of the 4KB page mappings.
1269 pmap_pv_demote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1270 struct rwlock **lockp)
1272 struct md_page *pvh;
1273 struct pv_chunk *pc;
1275 vm_offset_t va_last;
1279 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1280 KASSERT((pa & L3_PAGE_MASK) == 0,
1281 ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
1282 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1285 * Transfer the 2mpage's pv entry for this mapping to the first
1286 * page's pv list. Once this transfer begins, the pv list lock
1287 * must not be released until the last pv entry is reinstantiated.
1289 pvh = pa_to_pvh(pa);
1290 va = trunc_2mpage(va);
1291 pv = pmap_pvh_remove(pvh, pmap, va);
1292 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
1293 m = PHYS_TO_VM_PAGE(pa);
1294 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1297 /* Instantiate the remaining NPTEPG - 1 pv entries. */
1298 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
1299 va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1301 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1302 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0
1303 , ("pmap_pv_demote_pde: missing spare"));
1304 for (field = 0; field < _NPCM; field++) {
1305 while (pc->pc_map[field]) {
1306 bit = cnttzd(pc->pc_map[field]);
1307 pc->pc_map[field] &= ~(1ul << bit);
1308 pv = &pc->pc_pventry[field * 64 + bit];
1312 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1313 ("pmap_pv_demote_pde: page %p is not managed", m));
1314 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1321 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1322 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1325 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1326 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1327 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1329 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
1330 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
1334 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap)
1339 pmap_invalidate_all(pmap);
1340 if (pmap != locked_pmap)
1345 * We are in a serious low memory condition. Resort to
1346 * drastic measures to free some pages so we can allocate
1347 * another pv entry chunk.
1349 * Returns NULL if PV entries were reclaimed from the specified pmap.
1351 * We do not, however, unmap 2mpages because subsequent accesses will
1352 * allocate per-page pv entries until repromotion occurs, thereby
1353 * exacerbating the shortage of free pv entries.
1355 static int active_reclaims = 0;
1357 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1359 struct pv_chunk *pc, *pc_marker, *pc_marker_end;
1360 struct pv_chunk_header pc_marker_b, pc_marker_end_b;
1361 struct md_page *pvh;
1363 pmap_t next_pmap, pmap;
1364 pt_entry_t *pte, tpte;
1368 struct spglist free;
1370 int bit, field, freed;
1372 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1373 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
1377 bzero(&pc_marker_b, sizeof(pc_marker_b));
1378 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
1379 pc_marker = (struct pv_chunk *)&pc_marker_b;
1380 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
1382 mtx_lock(&pv_chunks_mutex);
1384 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
1385 TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
1386 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
1387 SLIST_EMPTY(&free)) {
1388 next_pmap = pc->pc_pmap;
1389 if (next_pmap == NULL) {
1391 * The next chunk is a marker. However, it is
1392 * not our marker, so active_reclaims must be
1393 * > 1. Consequently, the next_chunk code
1394 * will not rotate the pv_chunks list.
1398 mtx_unlock(&pv_chunks_mutex);
1401 * A pv_chunk can only be removed from the pc_lru list
1402 * when both pc_chunks_mutex is owned and the
1403 * corresponding pmap is locked.
1405 if (pmap != next_pmap) {
1406 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1408 /* Avoid deadlock and lock recursion. */
1409 if (pmap > locked_pmap) {
1410 RELEASE_PV_LIST_LOCK(lockp);
1412 mtx_lock(&pv_chunks_mutex);
1414 } else if (pmap != locked_pmap) {
1415 if (PMAP_TRYLOCK(pmap)) {
1416 mtx_lock(&pv_chunks_mutex);
1419 pmap = NULL; /* pmap is not locked */
1420 mtx_lock(&pv_chunks_mutex);
1421 pc = TAILQ_NEXT(pc_marker, pc_lru);
1423 pc->pc_pmap != next_pmap)
1431 * Destroy every non-wired, 4 KB page mapping in the chunk.
1434 for (field = 0; field < _NPCM; field++) {
1435 for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1436 inuse != 0; inuse &= ~(1UL << bit)) {
1437 bit = cnttzd(inuse);
1438 pv = &pc->pc_pventry[field * 64 + bit];
1440 l3e = pmap_pml3e(pmap, va);
1441 if ((*l3e & RPTE_LEAF) != 0)
1443 pte = pmap_l3e_to_pte(l3e, va);
1444 if ((*pte & PG_W) != 0)
1446 tpte = pte_load_clear(pte);
1447 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
1448 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
1450 if ((tpte & PG_A) != 0)
1451 vm_page_aflag_set(m, PGA_REFERENCED);
1452 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1453 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
1456 if (TAILQ_EMPTY(&m->md.pv_list) &&
1457 (m->flags & PG_FICTITIOUS) == 0) {
1458 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
1459 if (TAILQ_EMPTY(&pvh->pv_list)) {
1460 vm_page_aflag_clear(m,
1464 pc->pc_map[field] |= 1UL << bit;
1465 pmap_unuse_pt(pmap, va, *l3e, &free);
1470 mtx_lock(&pv_chunks_mutex);
1473 /* Every freed mapping is for a 4 KB page. */
1474 pmap_resident_count_dec(pmap, freed);
1475 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
1476 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
1477 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
1478 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1479 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1) {
1480 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1481 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1482 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1483 /* Entire chunk is free; return it. */
1484 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1485 mtx_lock(&pv_chunks_mutex);
1486 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1489 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1490 mtx_lock(&pv_chunks_mutex);
1491 /* One freed pv entry in locked_pmap is sufficient. */
1492 if (pmap == locked_pmap)
1495 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1496 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
1497 if (active_reclaims == 1 && pmap != NULL) {
1499 * Rotate the pv chunks list so that we do not
1500 * scan the same pv chunks that could not be
1501 * freed (because they contained a wired
1502 * and/or superpage mapping) on every
1503 * invocation of reclaim_pv_chunk().
1505 while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
1506 MPASS(pc->pc_pmap != NULL);
1507 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1508 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1512 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1513 TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
1515 mtx_unlock(&pv_chunks_mutex);
1516 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1517 if (m_pc == NULL && !SLIST_EMPTY(&free)) {
1518 m_pc = SLIST_FIRST(&free);
1519 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
1520 /* Recycle a freed page table page. */
1521 m_pc->ref_count = 1;
1523 vm_page_free_pages_toq(&free, true);
1528 * free the pv_entry back to the free list
1531 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1533 struct pv_chunk *pc;
1534 int idx, field, bit;
1537 if (pmap != kernel_pmap)
1538 printf("%s(%p, %p)\n", __func__, pmap, pv);
1540 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1541 PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1542 PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1543 PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1544 pc = pv_to_chunk(pv);
1545 idx = pv - &pc->pc_pventry[0];
1548 pc->pc_map[field] |= 1ul << bit;
1549 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1) {
1550 /* 98% of the time, pc is already at the head of the list. */
1551 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1552 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1553 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1557 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1562 free_pv_chunk(struct pv_chunk *pc)
1566 mtx_lock(&pv_chunks_mutex);
1567 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1568 mtx_unlock(&pv_chunks_mutex);
1569 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1570 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1571 PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1572 /* entire chunk is free, return it */
1573 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1574 vm_page_unwire_noq(m);
1579 * Returns a new PV entry, allocating a new PV chunk from the system when
1580 * needed. If this PV chunk allocation fails and a PV list lock pointer was
1581 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
1584 * The given PV list lock may be released.
1587 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1591 struct pv_chunk *pc;
1594 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1595 PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
1597 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1599 for (field = 0; field < _NPCM; field++) {
1600 if (pc->pc_map[field]) {
1601 bit = cnttzd(pc->pc_map[field]);
1605 if (field < _NPCM) {
1606 pv = &pc->pc_pventry[field * 64 + bit];
1607 pc->pc_map[field] &= ~(1ul << bit);
1608 /* If this was the last item, move it to tail */
1609 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1610 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1611 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1614 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1615 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
1616 MPASS(PV_PMAP(pv) != NULL);
1620 /* No free items, allocate another chunk */
1621 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
1624 if (lockp == NULL) {
1625 PV_STAT(pc_chunk_tryfail++);
1628 m = reclaim_pv_chunk(pmap, lockp);
1632 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1633 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1634 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1636 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
1637 pc->pc_map[1] = PC_FREE1;
1638 mtx_lock(&pv_chunks_mutex);
1639 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1640 mtx_unlock(&pv_chunks_mutex);
1641 pv = &pc->pc_pventry[0];
1642 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1643 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1644 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1645 MPASS(PV_PMAP(pv) != NULL);
1649 #if VM_NRESERVLEVEL > 0
1651 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
1652 * replace the many pv entries for the 4KB page mappings by a single pv entry
1653 * for the 2MB page mapping.
1656 pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1657 struct rwlock **lockp)
1659 struct md_page *pvh;
1661 vm_offset_t va_last;
1664 KASSERT((pa & L3_PAGE_MASK) == 0,
1665 ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
1666 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1669 * Transfer the first page's pv entry for this mapping to the 2mpage's
1670 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
1671 * a transfer avoids the possibility that get_pv_entry() calls
1672 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
1673 * mappings that is being promoted.
1675 m = PHYS_TO_VM_PAGE(pa);
1676 va = trunc_2mpage(va);
1677 pv = pmap_pvh_remove(&m->md, pmap, va);
1678 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
1679 pvh = pa_to_pvh(pa);
1680 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
1682 /* Free the remaining NPTEPG - 1 pv entries. */
1683 va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1687 pmap_pvh_free(&m->md, pmap, va);
1688 } while (va < va_last);
1690 #endif /* VM_NRESERVLEVEL > 0 */
1693 * First find and then destroy the pv entry for the specified pmap and virtual
1694 * address. This operation can be performed on pv lists for either 4KB or 2MB
1698 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1702 pv = pmap_pvh_remove(pvh, pmap, va);
1703 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
1704 free_pv_entry(pmap, pv);
1708 * Conditionally create the PV entry for a 4KB page mapping if the required
1709 * memory can be allocated without resorting to reclamation.
1712 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1713 struct rwlock **lockp)
1717 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1718 /* Pass NULL instead of the lock pointer to disable reclamation. */
1719 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1721 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1722 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1729 vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX];
1732 validate_addr(vm_paddr_t addr, vm_size_t size)
1734 vm_paddr_t end = addr + size;
1737 for (int i = 0; i < 2 * phys_avail_count; i += 2) {
1738 if (addr >= phys_avail_debug[i] &&
1739 end <= phys_avail_debug[i + 1]) {
1744 KASSERT(found, ("%#lx-%#lx outside of initial phys_avail array",
1748 static void validate_addr(vm_paddr_t addr, vm_size_t size) {}
1750 #define DMAP_PAGE_BITS (RPTE_VALID | RPTE_LEAF | RPTE_EAA_MASK | PG_M | PG_A)
static vm_paddr_t
alloc_pt_page(void)
{
	vm_paddr_t page;

	page = allocpages(1);
	pagezero(PHYS_TO_DMAP(page));
	return (page);
}
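/*
 * Map the physical range [start, end) into the DMAP, using 1GB (L2) and
 * 2MB (L3) leaf entries where alignment and remaining size allow, and
 * falling back to 4KB PTEs otherwise.
 */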
static void
mmu_radix_dmap_range(vm_paddr_t start, vm_paddr_t end)
{
	pt_entry_t *pte, pteval;
	vm_paddr_t page;

	printf("%s %lx -> %lx\n", __func__, start, end);
	while (start < end) {
		pteval = start | DMAP_PAGE_BITS;
		pte = pmap_pml1e(kernel_pmap, PHYS_TO_DMAP(start));
		if ((*pte & RPTE_VALID) == 0) {
			page = alloc_pt_page();
			pde_store(pte, page);
		}
		pte = pmap_l1e_to_l2e(pte, PHYS_TO_DMAP(start));
		if ((start & L2_PAGE_MASK) == 0 &&
		    end - start >= L2_PAGE_SIZE) {
			start += L2_PAGE_SIZE;
			goto done;
		} else if ((*pte & RPTE_VALID) == 0) {
			page = alloc_pt_page();
			pde_store(pte, page);
		}

		pte = pmap_l2e_to_l3e(pte, PHYS_TO_DMAP(start));
		if ((start & L3_PAGE_MASK) == 0 &&
		    end - start >= L3_PAGE_SIZE) {
			start += L3_PAGE_SIZE;
			goto done;
		} else if ((*pte & RPTE_VALID) == 0) {
			page = alloc_pt_page();
			pde_store(pte, page);
		}
		pte = pmap_l3e_to_pte(pte, PHYS_TO_DMAP(start));
		start += PAGE_SIZE;
	done:
		pte_store(pte, pteval);
	}
}
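/*
 * Walk the firmware-provided physical memory regions and enter each one
 * into the DMAP, clamped to the hw.physmem limit when one is set.
 */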
static void
mmu_radix_dmap_populate(vm_size_t hwphyssz)
{
	vm_paddr_t start, end;

	for (int i = 0; i < pregions_sz; i++) {
		start = pregions[i].mr_start;
		end = start + pregions[i].mr_size;
		if (hwphyssz && start >= hwphyssz)
			break;
		if (hwphyssz && hwphyssz < end)
			end = hwphyssz;
		mmu_radix_dmap_range(start, end);
	}
}
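/*
 * Allocate and zero the kernel radix root (PML1), populate the DMAP, and
 * pre-allocate page table pages covering the first nkpt 2MB slots of KVA.
 */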
1820 mmu_radix_setup_pagetables(vm_size_t hwphyssz)
1822 vm_paddr_t ptpages, pages;
1826 bzero(kernel_pmap, sizeof(struct pmap));
1827 PMAP_LOCK_INIT(kernel_pmap);
1829 ptpages = allocpages(2);
1830 l1phys = moea64_bootstrap_alloc(RADIX_PGD_SIZE, RADIX_PGD_SIZE);
1831 validate_addr(l1phys, RADIX_PGD_SIZE);
1833 printf("l1phys=%lx\n", l1phys);
1834 MPASS((l1phys & (RADIX_PGD_SIZE-1)) == 0);
1835 for (int i = 0; i < RADIX_PGD_SIZE/PAGE_SIZE; i++)
1836 pagezero(PHYS_TO_DMAP(l1phys + i * PAGE_SIZE));
1837 kernel_pmap->pm_pml1 = (pml1_entry_t *)PHYS_TO_DMAP(l1phys);
1839 mmu_radix_dmap_populate(hwphyssz);
1842 * Create page tables for first 128MB of KVA
1845 pte = pmap_pml1e(kernel_pmap, VM_MIN_KERNEL_ADDRESS);
1846 *pte = (pages | RPTE_VALID | RPTE_SHIFT);
1848 pte = pmap_l1e_to_l2e(pte, VM_MIN_KERNEL_ADDRESS);
1849 *pte = (pages | RPTE_VALID | RPTE_SHIFT);
1851 pte = pmap_l2e_to_l3e(pte, VM_MIN_KERNEL_ADDRESS);
1853 * the kernel page table pages need to be preserved in
1854 * phys_avail and not overlap with previous allocations
1856 pages = allocpages(nkpt);
1858 printf("phys_avail after dmap populate and nkpt allocation\n");
1859 for (int j = 0; j < 2 * phys_avail_count; j+=2)
1860 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
1861 j, phys_avail[j], j + 1, phys_avail[j + 1]);
1864 for (int i = 0; i < nkpt; i++, pte++, pages += PAGE_SIZE)
1865 *pte = (pages | RPTE_VALID | RPTE_SHIFT);
1866 kernel_vm_end = VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE;
1868 printf("kernel_pmap pml1 %p\n", kernel_pmap->pm_pml1);
1870 * Add a physical memory segment (vm_phys_seg) corresponding to the
1871 * preallocated kernel page table pages so that vm_page structures
1872 * representing these pages will be created. The vm_page structures
1873 * are required for promotion of the corresponding kernel virtual
1874 * addresses to superpage mappings.
1876 vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
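/*
 * Early bootstrap: discover physical memory from firmware, build the
 * phys_avail/dump_avail arrays while excluding the kernel image and
 * exception vectors, and allocate the partition and process tables.
 */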
1880 mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end)
1882 vm_paddr_t kpstart, kpend;
1883 vm_size_t physsz, hwphyssz;
1885 int rm_pavail, proctab_size;
1888 kpstart = start & ~DMAP_BASE_ADDRESS;
1889 kpend = end & ~DMAP_BASE_ADDRESS;
1891 /* Get physical memory regions from firmware */
1892 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz);
1893 CTR0(KTR_PMAP, "mmu_radix_early_bootstrap: physical memory");
1895 if (2 * VM_PHYSSEG_MAX < regions_sz)
1896 panic("mmu_radix_early_bootstrap: phys_avail too small");
1899 for (int i = 0; i < regions_sz; i++)
1900 printf("regions[%d].mr_start=%lx regions[%d].mr_size=%lx\n",
1901 i, regions[i].mr_start, i, regions[i].mr_size);
1903 * XXX workaround a simulator bug
1905 for (int i = 0; i < regions_sz; i++)
1906 if (regions[i].mr_start & PAGE_MASK) {
1907 regions[i].mr_start += PAGE_MASK;
1908 regions[i].mr_start &= ~PAGE_MASK;
1909 regions[i].mr_size &= ~PAGE_MASK;
1912 for (int i = 0; i < pregions_sz; i++)
1913 printf("pregions[%d].mr_start=%lx pregions[%d].mr_size=%lx\n",
1914 i, pregions[i].mr_start, i, pregions[i].mr_size);
1916 phys_avail_count = 0;
1919 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1920 for (i = 0, j = 0; i < regions_sz; i++) {
1922 printf("regions[%d].mr_start=%016lx regions[%d].mr_size=%016lx\n",
1923 i, regions[i].mr_start, i, regions[i].mr_size);
1925 if (regions[i].mr_size < PAGE_SIZE)
1928 if (hwphyssz != 0 &&
1929 (physsz + regions[i].mr_size) >= hwphyssz) {
1930 if (physsz < hwphyssz) {
1931 phys_avail[j] = regions[i].mr_start;
1932 phys_avail[j + 1] = regions[i].mr_start +
1933 (hwphyssz - physsz);
1936 dump_avail[j] = phys_avail[j];
1937 dump_avail[j + 1] = phys_avail[j + 1];
1941 phys_avail[j] = regions[i].mr_start;
1942 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
1943 dump_avail[j] = phys_avail[j];
1944 dump_avail[j + 1] = phys_avail[j + 1];
1947 physsz += regions[i].mr_size;
1951 /* Check for overlap with the kernel and exception vectors */
1953 for (j = 0; j < 2 * phys_avail_count; j+=2) {
1954 if (phys_avail[j] < EXC_LAST)
1955 phys_avail[j] += EXC_LAST;
1957 if (phys_avail[j] >= kpstart &&
1958 phys_avail[j + 1] <= kpend) {
1959 phys_avail[j] = phys_avail[j + 1] = ~0;
1964 if (kpstart >= phys_avail[j] &&
1965 kpstart < phys_avail[j + 1]) {
1966 if (kpend < phys_avail[j + 1]) {
1967 phys_avail[2 * phys_avail_count] =
1968 (kpend & ~PAGE_MASK) + PAGE_SIZE;
1969 phys_avail[2 * phys_avail_count + 1] =
1974 phys_avail[j + 1] = kpstart & ~PAGE_MASK;
1977 if (kpend >= phys_avail[j] &&
1978 kpend < phys_avail[j + 1]) {
1979 if (kpstart > phys_avail[j]) {
1980 phys_avail[2 * phys_avail_count] = phys_avail[j];
1981 phys_avail[2 * phys_avail_count + 1] =
1982 kpstart & ~PAGE_MASK;
1986 phys_avail[j] = (kpend & ~PAGE_MASK) +
1990 qsort(phys_avail, 2 * phys_avail_count, sizeof(phys_avail[0]), pa_cmp);
1991 for (i = 0; i < 2 * phys_avail_count; i++)
1992 phys_avail_debug[i] = phys_avail[i];
1994 /* Remove physical available regions marked for removal (~0) */
1996 phys_avail_count -= rm_pavail;
1997 for (i = 2 * phys_avail_count;
1998 i < 2*(phys_avail_count + rm_pavail); i+=2)
1999 phys_avail[i] = phys_avail[i + 1] = 0;
2002 printf("phys_avail ranges after filtering:\n");
2003 for (j = 0; j < 2 * phys_avail_count; j+=2)
2004 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
2005 j, phys_avail[j], j + 1, phys_avail[j + 1]);
2007 physmem = btoc(physsz);
2009 /* XXX assume we're running non-virtualized and
2010 * we don't support BHYVE
2012 if (isa3_pid_bits == 0)
2014 parttab_phys = moea64_bootstrap_alloc(PARTTAB_SIZE, PARTTAB_SIZE);
2015 validate_addr(parttab_phys, PARTTAB_SIZE);
2016 for (int i = 0; i < PARTTAB_SIZE/PAGE_SIZE; i++)
2017 pagezero(PHYS_TO_DMAP(parttab_phys + i * PAGE_SIZE));
2019 proctab_size = 1UL << PROCTAB_SIZE_SHIFT;
2020 proctab0pa = moea64_bootstrap_alloc(proctab_size, proctab_size);
2021 validate_addr(proctab0pa, proctab_size);
2022 for (int i = 0; i < proctab_size/PAGE_SIZE; i++)
2023 pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE));
2025 mmu_radix_setup_pagetables(hwphyssz);
2029 mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end)
2037 * Set up the Open Firmware pmap and add its mappings if not in real
2041 printf("%s enter\n", __func__);
2044 * Calculate the last available physical address, and reserve the
2045 * vm_page_array (upper bound).
2048 for (i = 0; phys_avail[i + 2] != 0; i += 2)
2049 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));
2052 * Set the start and end of kva.
2054 virtual_avail = VM_MIN_KERNEL_ADDRESS;
2055 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
2058 * Remap any early IO mappings (console framebuffer, etc.)
2060 bs_remap_earlyboot();
2063 * Allocate a kernel stack with a guard page for thread0 and map it
2064 * into the kernel page map.
2066 pa = allocpages(kstack_pages);
2067 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2068 virtual_avail = va + kstack_pages * PAGE_SIZE;
2069 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
2070 thread0.td_kstack = va;
2071 for (i = 0; i < kstack_pages; i++) {
2072 mmu_radix_kenter(va, pa);
2076 thread0.td_kstack_pages = kstack_pages;
2079 * Allocate virtual address space for the message buffer.
2081 pa = msgbuf_phys = allocpages((msgbufsize + PAGE_MASK) >> PAGE_SHIFT);
2082 msgbufp = (struct msgbuf *)PHYS_TO_DMAP(pa);
2085 * Allocate virtual address space for the dynamic percpu area.
2087 pa = allocpages(DPCPU_SIZE >> PAGE_SHIFT);
2088 dpcpu = (void *)PHYS_TO_DMAP(pa);
2089 dpcpu_init(dpcpu, curcpu);
2091 * Reserve some special page table entries/VA space for temporary
2097 mmu_parttab_init(void)
2101 isa3_parttab = (struct pate *)PHYS_TO_DMAP(parttab_phys);
2104 printf("%s parttab: %p\n", __func__, isa3_parttab);
2105 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2107 printf("setting ptcr %lx\n", ptcr);
2108 mtspr(SPR_PTCR, ptcr);
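/*
 * Install the radix page table root and process table pointer for "lpid"
 * in the partition table, then flush any translations that may have been
 * cached under the previous entry.
 */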
2112 mmu_parttab_update(uint64_t lpid, uint64_t pagetab, uint64_t proctab)
2117 printf("%s isa3_parttab %p lpid %lx pagetab %lx proctab %lx\n", __func__, isa3_parttab,
2118 lpid, pagetab, proctab);
2119 prev = be64toh(isa3_parttab[lpid].pagetab);
2120 isa3_parttab[lpid].pagetab = htobe64(pagetab);
2121 isa3_parttab[lpid].proctab = htobe64(proctab);
2123 if (prev & PARTTAB_HR) {
2124 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
2125 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2126 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2127 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2129 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
2130 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2136 mmu_radix_parttab_init(void)
2141 pagetab = RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) | \
2142 RADIX_PGD_INDEX_SHIFT | PARTTAB_HR;
2143 mmu_parttab_update(0, pagetab, 0);
2147 mmu_radix_proctab_register(vm_paddr_t proctabpa, uint64_t table_size)
2149 uint64_t pagetab, proctab;
2151 pagetab = be64toh(isa3_parttab[0].pagetab);
2152 proctab = proctabpa | table_size | PARTTAB_GR;
2153 mmu_parttab_update(0, pagetab, proctab);
2157 mmu_radix_proctab_init(void)
2162 isa3_proctab = (void*)PHYS_TO_DMAP(proctab0pa);
2163 isa3_proctab->proctab0 =
2164 htobe64(RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
2165 RADIX_PGD_INDEX_SHIFT);
2167 mmu_radix_proctab_register(proctab0pa, PROCTAB_SIZE_SHIFT - 12);
2169 __asm __volatile("ptesync" : : : "memory");
2170 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2171 "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
2172 __asm __volatile("eieio; tlbsync; ptesync" : : : "memory");
2174 printf("process table %p and kernel radix PDE: %p\n",
2175 isa3_proctab, kernel_pmap->pm_pml1);
2176 mtmsr(mfmsr() | PSL_DR );
2177 mtmsr(mfmsr() & ~PSL_DR);
2178 kernel_pmap->pm_pid = isa3_base_pid;
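/*
 * Apply MADV_DONTNEED or MADV_FREE advice to the given address range:
 * demote managed 2MB mappings so only the advised pages are affected, then
 * clear the accessed bit (and the modified bit for MADV_DONTNEED) on each
 * resident 4KB mapping.
 */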
2183 mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2186 struct rwlock *lock;
2189 pml3_entry_t oldl3e, *l3e;
2191 vm_offset_t va, va_next;
2193 boolean_t anychanged;
2195 if (advice != MADV_DONTNEED && advice != MADV_FREE)
2199 for (; sva < eva; sva = va_next) {
2200 l1e = pmap_pml1e(pmap, sva);
2201 if ((*l1e & PG_V) == 0) {
2202 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2207 l2e = pmap_l1e_to_l2e(l1e, sva);
2208 if ((*l2e & PG_V) == 0) {
2209 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2214 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2217 l3e = pmap_l2e_to_l3e(l2e, sva);
2219 if ((oldl3e & PG_V) == 0)
2221 else if ((oldl3e & RPTE_LEAF) != 0) {
2222 if ((oldl3e & PG_MANAGED) == 0)
2225 if (!pmap_demote_l3e_locked(pmap, l3e, sva, &lock)) {
2230 * The large page mapping was destroyed.
2236 * Unless the page mappings are wired, remove the
2237 * mapping to a single page so that a subsequent
2238 * access may repromote. Since the underlying page
2239 * table page is fully populated, this removal never
2240 * frees a page table page.
2242 if ((oldl3e & PG_W) == 0) {
2243 pte = pmap_l3e_to_pte(l3e, sva);
2244 KASSERT((*pte & PG_V) != 0,
2245 ("pmap_advise: invalid PTE"));
2246 pmap_remove_pte(pmap, pte, sva, *l3e, NULL,
2256 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next;
2257 pte++, sva += PAGE_SIZE) {
2258 MPASS(pte == pmap_pte(pmap, sva));
2260 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
2262 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2263 if (advice == MADV_DONTNEED) {
2265 * Future calls to pmap_is_modified()
2266 * can be avoided by making the page dirty now.
2269 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
2272 atomic_clear_long(pte, PG_M | PG_A);
2273 } else if ((*pte & PG_A) != 0)
2274 atomic_clear_long(pte, PG_A);
2280 if (va != va_next) {
2289 pmap_invalidate_all(pmap);
2294 * Routines used in machine-dependent code
2297 mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end)
2302 printf("%s\n", __func__);
2304 mmu_radix_early_bootstrap(start, end);
2306 printf("early bootstrap complete\n");
2307 if (powernv_enabled) {
2308 lpcr = mfspr(SPR_LPCR);
2309 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2310 mmu_radix_parttab_init();
2311 mmu_radix_init_amor();
2313 printf("powernv init complete\n");
2315 mmu_radix_init_iamr();
2316 mmu_radix_proctab_init();
2317 mmu_radix_pid_set(kernel_pmap);
2318 /* XXX assume CPU_FTR_HVMODE */
2319 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2321 mmu_radix_late_bootstrap(start, end);
2322 numa_mem_regions(&numa_pregions, &numa_pregions_sz);
2324 printf("%s done\n", __func__);
2325 pmap_bootstrapped = 1;
2326 dmaplimit = roundup2(powerpc_ptob(Maxmem), L2_PAGE_SIZE);
2327 PCPU_SET(flags, PCPU_GET(flags) | PC_FLAG_NOSRS);
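/*
 * Per-CPU MMU setup for application processors: on PowerNV, enable radix
 * mode in the LPCR, point the PTCR at the partition table, and program the
 * AMOR; then program the IAMR, switch to the kernel PID, and flush the TLB.
 */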
2331 mmu_radix_cpu_bootstrap(int ap)
2336 if (powernv_enabled) {
2337 lpcr = mfspr(SPR_LPCR);
2338 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2340 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2341 mtspr(SPR_PTCR, ptcr);
2342 mmu_radix_init_amor();
2344 mmu_radix_init_iamr();
2345 mmu_radix_pid_set(kernel_pmap);
2346 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2349 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3e, CTLFLAG_RD, 0,
2350 "2MB page mapping counters");
2352 static u_long pmap_l3e_demotions;
2353 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, demotions, CTLFLAG_RD,
2354 &pmap_l3e_demotions, 0, "2MB page demotions");
2356 static u_long pmap_l3e_mappings;
2357 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, mappings, CTLFLAG_RD,
2358 &pmap_l3e_mappings, 0, "2MB page mappings");
2360 static u_long pmap_l3e_p_failures;
2361 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, p_failures, CTLFLAG_RD,
2362 &pmap_l3e_p_failures, 0, "2MB page promotion failures");
2364 static u_long pmap_l3e_promotions;
2365 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, promotions, CTLFLAG_RD,
2366 &pmap_l3e_promotions, 0, "2MB page promotions");
2368 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0,
2369 "1GB page mapping counters");
2371 static u_long pmap_l2e_demotions;
2372 SYSCTL_ULONG(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD,
2373 &pmap_l2e_demotions, 0, "1GB page demotions");
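/*
 * Clear the modify bits on the specified physical page. A writeable 2MB
 * mapping is demoted and the 4KB mapping of this page is write-protected so
 * that a later write access may repromote; 4KB mappings simply have PG_M
 * cleared and the TLB entry invalidated.
 */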
2376 mmu_radix_clear_modify(vm_page_t m)
2378 struct md_page *pvh;
2380 pv_entry_t next_pv, pv;
2381 pml3_entry_t oldl3e, *l3e;
2382 pt_entry_t oldpte, *pte;
2383 struct rwlock *lock;
2385 int md_gen, pvh_gen;
2387 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2388 ("pmap_clear_modify: page %p is not managed", m));
2389 vm_page_assert_busied(m);
2390 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
2393 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
2394 * If the object containing the page is locked and the page is not
2395 * exclusively busied, then PGA_WRITEABLE cannot be concurrently set.
2397 if ((m->a.flags & PGA_WRITEABLE) == 0)
2399 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2400 pa_to_pvh(VM_PAGE_TO_PHYS(m));
2401 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2404 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
2406 if (!PMAP_TRYLOCK(pmap)) {
2407 pvh_gen = pvh->pv_gen;
2411 if (pvh_gen != pvh->pv_gen) {
2417 l3e = pmap_pml3e(pmap, va);
2419 if ((oldl3e & PG_RW) != 0) {
2420 if (pmap_demote_l3e_locked(pmap, l3e, va, &lock)) {
2421 if ((oldl3e & PG_W) == 0) {
2423 * Write protect the mapping to a
2424 * single page so that a subsequent
2425 * write access may repromote.
2427 va += VM_PAGE_TO_PHYS(m) - (oldl3e &
2429 pte = pmap_l3e_to_pte(l3e, va);
2431 if ((oldpte & PG_V) != 0) {
2432 while (!atomic_cmpset_long(pte,
2434 (oldpte | RPTE_EAA_R) & ~(PG_M | PG_RW)))
2437 pmap_invalidate_page(pmap, va);
2444 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2446 if (!PMAP_TRYLOCK(pmap)) {
2447 md_gen = m->md.pv_gen;
2448 pvh_gen = pvh->pv_gen;
2452 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2457 l3e = pmap_pml3e(pmap, pv->pv_va);
2458 KASSERT((*l3e & RPTE_LEAF) == 0, ("pmap_clear_modify: found"
2459 " a 2mpage in page %p's pv list", m));
2460 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
2461 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2462 atomic_clear_long(pte, PG_M);
2463 pmap_invalidate_page(pmap, pv->pv_va);
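/*
 * Copy valid mappings in the range [src_addr, src_addr + len) from src_pmap
 * into dst_pmap. This is an optional optimization (typically used at fork
 * time): aligned 2MB leaf mappings are copied wholesale, 4KB mappings of
 * managed pages are copied with the wired, modified, and accessed bits
 * cleared, and the copy may stop early if page table pages or PV entries
 * cannot be allocated.
 */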
2471 mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2472 vm_size_t len, vm_offset_t src_addr)
2474 struct rwlock *lock;
2475 struct spglist free;
2477 vm_offset_t end_addr = src_addr + len;
2478 vm_offset_t va_next;
2479 vm_page_t dst_pdpg, dstmpte, srcmpte;
2480 bool invalidate_all;
2483 "%s(dst_pmap=%p, src_pmap=%p, dst_addr=%lx, len=%lu, src_addr=%lx)\n",
2484 __func__, dst_pmap, src_pmap, dst_addr, len, src_addr);
2486 if (dst_addr != src_addr)
2489 invalidate_all = false;
2490 if (dst_pmap < src_pmap) {
2491 PMAP_LOCK(dst_pmap);
2492 PMAP_LOCK(src_pmap);
2494 PMAP_LOCK(src_pmap);
2495 PMAP_LOCK(dst_pmap);
2498 for (addr = src_addr; addr < end_addr; addr = va_next) {
2501 pml3_entry_t srcptepaddr, *l3e;
2502 pt_entry_t *src_pte, *dst_pte;
2504 l1e = pmap_pml1e(src_pmap, addr);
2505 if ((*l1e & PG_V) == 0) {
2506 va_next = (addr + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2512 l2e = pmap_l1e_to_l2e(l1e, addr);
2513 if ((*l2e & PG_V) == 0) {
2514 va_next = (addr + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2520 va_next = (addr + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2524 l3e = pmap_l2e_to_l3e(l2e, addr);
2526 if (srcptepaddr == 0)
2529 if (srcptepaddr & RPTE_LEAF) {
2530 if ((addr & L3_PAGE_MASK) != 0 ||
2531 addr + L3_PAGE_SIZE > end_addr)
2533 dst_pdpg = pmap_allocl3e(dst_pmap, addr, NULL);
2534 if (dst_pdpg == NULL)
2536 l3e = (pml3_entry_t *)
2537 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
2538 l3e = &l3e[pmap_pml3e_index(addr)];
2539 if (*l3e == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
2540 pmap_pv_insert_l3e(dst_pmap, addr, srcptepaddr,
2541 PMAP_ENTER_NORECLAIM, &lock))) {
2542 *l3e = srcptepaddr & ~PG_W;
2543 pmap_resident_count_inc(dst_pmap,
2544 L3_PAGE_SIZE / PAGE_SIZE);
2545 atomic_add_long(&pmap_l3e_mappings, 1);
2547 dst_pdpg->ref_count--;
2551 srcptepaddr &= PG_FRAME;
2552 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
2553 KASSERT(srcmpte->ref_count > 0,
2554 ("pmap_copy: source page table page is unused"));
2556 if (va_next > end_addr)
2559 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
2560 src_pte = &src_pte[pmap_pte_index(addr)];
2562 while (addr < va_next) {
2566 * We only virtual-copy managed pages.
2568 if ((ptetemp & PG_MANAGED) != 0) {
2569 if (dstmpte != NULL &&
2570 dstmpte->pindex == pmap_l3e_pindex(addr))
2571 dstmpte->ref_count++;
2572 else if ((dstmpte = pmap_allocpte(dst_pmap,
2573 addr, NULL)) == NULL)
2575 dst_pte = (pt_entry_t *)
2576 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
2577 dst_pte = &dst_pte[pmap_pte_index(addr)];
2578 if (*dst_pte == 0 &&
2579 pmap_try_insert_pv_entry(dst_pmap, addr,
2580 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
2583 * Clear the wired, modified, and
2584 * accessed (referenced) bits
2587 *dst_pte = ptetemp & ~(PG_W | PG_M |
2589 pmap_resident_count_inc(dst_pmap, 1);
2592 if (pmap_unwire_ptp(dst_pmap, addr,
2595 * Although "addr" is not
2596 * mapped, paging-structure
2597 * caches could nonetheless
2598 * have entries that refer to
2599 * the freed page table pages.
2600 * Invalidate those entries.
2602 invalidate_all = true;
2603 vm_page_free_pages_toq(&free,
2608 if (dstmpte->ref_count >= srcmpte->ref_count)
2612 if (__predict_false((addr & L3_PAGE_MASK) == 0))
2613 src_pte = pmap_pte(src_pmap, addr);
2620 pmap_invalidate_all(dst_pmap);
2623 PMAP_UNLOCK(src_pmap);
2624 PMAP_UNLOCK(dst_pmap);
2628 mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst)
2630 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2631 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2633 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
2637 bcopy((void *)src, (void *)dst, PAGE_SIZE);
2641 mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2642 vm_offset_t b_offset, int xfersize)
2645 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
2646 a_offset, mb, b_offset, xfersize);
2650 #if VM_NRESERVLEVEL > 0
2652 * Tries to promote the 512 contiguous 4KB page mappings that are within a
2653 * single page table page (PTP) to a single 2MB page mapping. For promotion
2654 * to occur, two conditions must be met: (1) the 4KB page mappings must map
2655 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
2656 * identical characteristics.
2659 pmap_promote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va,
2660 struct rwlock **lockp)
2662 pml3_entry_t newpde;
2663 pt_entry_t *firstpte, oldpte, pa, *pte;
2666 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2669 * Examine the first PTE in the specified PTP. Abort if this PTE is
2670 * either invalid, unused, or does not map the first 4KB physical page
2671 * within a 2MB page.
2673 firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
2676 if ((newpde & ((PG_FRAME & L3_PAGE_MASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
2677 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2678 " in pmap %p", va, pmap);
2681 if ((newpde & (PG_M | PG_RW)) == PG_RW) {
2683 * When PG_M is already clear, PG_RW can be cleared without
2684 * a TLB invalidation.
2686 if (!atomic_cmpset_long(firstpte, newpde, (newpde | RPTE_EAA_R) & ~RPTE_EAA_W))
2688 newpde &= ~RPTE_EAA_W;
2692 * Examine each of the other PTEs in the specified PTP. Abort if this
2693 * PTE maps an unexpected 4KB physical page or does not have identical
2694 * characteristics to the first PTE.
2696 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + L3_PAGE_SIZE - PAGE_SIZE;
2697 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
2700 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
2701 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2702 " in pmap %p", va, pmap);
2705 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
2707 * When PG_M is already clear, PG_RW can be cleared
2708 * without a TLB invalidation.
2710 if (!atomic_cmpset_long(pte, oldpte, (oldpte | RPTE_EAA_R) & ~RPTE_EAA_W))
2712 oldpte &= ~RPTE_EAA_W;
2713 CTR2(KTR_PMAP, "pmap_promote_l3e: protect for va %#lx"
2714 " in pmap %p", (oldpte & PG_FRAME & L3_PAGE_MASK) |
2715 (va & ~L3_PAGE_MASK), pmap);
2717 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
2718 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2719 " in pmap %p", va, pmap);
2726 * Save the page table page in its current state until the PDE
2727 * mapping the superpage is demoted by pmap_demote_l3e() or
2728 * destroyed by pmap_remove_l3e().
2730 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
2731 KASSERT(mpte >= vm_page_array &&
2732 mpte < &vm_page_array[vm_page_array_size],
2733 ("pmap_promote_l3e: page table page is out of range"));
2734 KASSERT(mpte->pindex == pmap_l3e_pindex(va),
2735 ("pmap_promote_l3e: page table page's pindex is wrong"));
2736 if (pmap_insert_pt_page(pmap, mpte)) {
2738 "pmap_promote_l3e: failure for va %#lx in pmap %p", va,
2744 * Promote the pv entries.
2746 if ((newpde & PG_MANAGED) != 0)
2747 pmap_pv_promote_l3e(pmap, va, newpde & PG_PS_FRAME, lockp);
2749 pte_store(pde, PG_PROMOTED | newpde);
2750 atomic_add_long(&pmap_l3e_promotions, 1);
2751 CTR2(KTR_PMAP, "pmap_promote_l3e: success for va %#lx"
2752 " in pmap %p", va, pmap);
2755 atomic_add_long(&pmap_l3e_p_failures, 1);
2756 return (KERN_FAILURE);
2758 #endif /* VM_NRESERVLEVEL > 0 */
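/*
 * Insert the given physical page at the specified virtual address in the
 * target pmap with the requested protection. If "psind" is non-zero, a 2MB
 * mapping is created; otherwise a 4KB mapping is entered, replacing any
 * existing mapping at "va", and promotion to a 2MB mapping is attempted once
 * the page table page and reservation are fully populated.
 */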
2761 mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
2762 vm_prot_t prot, u_int flags, int8_t psind)
2764 struct rwlock *lock;
2767 pt_entry_t newpte, origpte;
2772 boolean_t nosleep, invalidate_all, invalidate_page;
2774 va = trunc_page(va);
2776 invalidate_page = invalidate_all = false;
2777 CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va,
2778 m, prot, flags, psind);
2779 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
2780 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
2781 va >= kmi.clean_eva,
2782 ("pmap_enter: managed mapping within the clean submap"));
2783 if ((m->oflags & VPO_UNMANAGED) == 0)
2784 VM_PAGE_OBJECT_BUSY_ASSERT(m);
2786 KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
2787 ("pmap_enter: flags %u has reserved bits set", flags));
2788 pa = VM_PAGE_TO_PHYS(m);
2789 newpte = (pt_entry_t)(pa | PG_A | PG_V | RPTE_LEAF);
2790 if ((flags & VM_PROT_WRITE) != 0)
2792 if ((flags & VM_PROT_READ) != 0)
2794 if (prot & VM_PROT_READ)
2795 newpte |= RPTE_EAA_R;
2796 if ((prot & VM_PROT_WRITE) != 0)
2797 newpte |= RPTE_EAA_W;
2798 KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
2799 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
2801 if (prot & VM_PROT_EXECUTE)
2803 if ((flags & PMAP_ENTER_WIRED) != 0)
2805 if (va >= DMAP_MIN_ADDRESS)
2806 newpte |= RPTE_EAA_P;
2807 newpte |= pmap_cache_bits(m->md.mdpg_cache_attrs);
2809 * Set modified bit gratuitously for writeable mappings if
2810 * the page is unmanaged. We do not want to take a fault
2811 * to do the dirty bit accounting for these mappings.
2813 if ((m->oflags & VPO_UNMANAGED) != 0) {
2814 if ((newpte & PG_RW) != 0)
2817 newpte |= PG_MANAGED;
2822 /* Assert the required virtual and physical alignment. */
2823 KASSERT((va & L3_PAGE_MASK) == 0, ("pmap_enter: va unaligned"));
2824 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
2825 rv = pmap_enter_l3e(pmap, va, newpte | RPTE_LEAF, flags, m, &lock);
2831 * In the case that a page table page is not
2832 * resident, we are creating it here.
2835 l3e = pmap_pml3e(pmap, va);
2836 if (l3e != NULL && (*l3e & PG_V) != 0 && ((*l3e & RPTE_LEAF) == 0 ||
2837 pmap_demote_l3e_locked(pmap, l3e, va, &lock))) {
2838 pte = pmap_l3e_to_pte(l3e, va);
2839 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
2840 mpte = PHYS_TO_VM_PAGE(*l3e & PG_FRAME);
2843 } else if (va < VM_MAXUSER_ADDRESS) {
2845 * Here if the pte page isn't mapped, or if it has been deallocated.
2848 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2849 mpte = _pmap_allocpte(pmap, pmap_l3e_pindex(va),
2850 nosleep ? NULL : &lock);
2851 if (mpte == NULL && nosleep) {
2852 rv = KERN_RESOURCE_SHORTAGE;
2855 if (__predict_false(retrycount++ == 6))
2856 panic("too many retries");
2857 invalidate_all = true;
2860 panic("pmap_enter: invalid page directory va=%#lx", va);
2866 * Is the specified virtual address already mapped?
2868 if ((origpte & PG_V) != 0) {
2870 if (VERBOSE_PMAP || pmap_logging) {
2871 printf("cow fault pmap_enter(%p, %#lx, %p, %#x, %x, %d) --"
2872 " asid=%lu curpid=%d name=%s origpte0x%lx\n",
2873 pmap, va, m, prot, flags, psind, pmap->pm_pid,
2874 curproc->p_pid, curproc->p_comm, origpte);
2875 pmap_pte_walk(pmap->pm_pml1, va);
2879 * Wiring change, just update stats. We don't worry about
2880 * wiring PT pages as they remain resident as long as there
2881 * are valid mappings in them. Hence, if a user page is wired,
2882 * the PT page will be also.
2884 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
2885 pmap->pm_stats.wired_count++;
2886 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
2887 pmap->pm_stats.wired_count--;
2890 * Remove the extra PT page reference.
2894 KASSERT(mpte->ref_count > 0,
2895 ("pmap_enter: missing reference to page table page,"
2900 * Has the physical page changed?
2902 opa = origpte & PG_FRAME;
2905 * No, might be a protection or wiring change.
2907 if ((origpte & PG_MANAGED) != 0 &&
2908 (newpte & PG_RW) != 0)
2909 vm_page_aflag_set(m, PGA_WRITEABLE);
2910 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) {
2911 if ((newpte & (PG_A|PG_M)) != (origpte & (PG_A|PG_M))) {
2912 if (!atomic_cmpset_long(pte, origpte, newpte))
2914 if ((newpte & PG_M) != (origpte & PG_M))
2916 if ((newpte & PG_A) != (origpte & PG_A))
2917 vm_page_aflag_set(m, PGA_REFERENCED);
2920 invalidate_all = true;
2921 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
2928 * The physical page has changed. Temporarily invalidate
2929 * the mapping. This ensures that all threads sharing the
2930 * pmap keep a consistent view of the mapping, which is
2931 * necessary for the correct handling of COW faults. It
2932 * also permits reuse of the old mapping's PV entry,
2933 * avoiding an allocation.
2935 * For consistency, handle unmanaged mappings the same way.
2937 origpte = pte_load_clear(pte);
2938 KASSERT((origpte & PG_FRAME) == opa,
2939 ("pmap_enter: unexpected pa update for %#lx", va));
2940 if ((origpte & PG_MANAGED) != 0) {
2941 om = PHYS_TO_VM_PAGE(opa);
2944 * The pmap lock is sufficient to synchronize with
2945 * concurrent calls to pmap_page_test_mappings() and
2946 * pmap_ts_referenced().
2948 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2950 if ((origpte & PG_A) != 0)
2951 vm_page_aflag_set(om, PGA_REFERENCED);
2952 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
2953 pv = pmap_pvh_remove(&om->md, pmap, va);
2954 if ((newpte & PG_MANAGED) == 0)
2955 free_pv_entry(pmap, pv);
2957 else if (origpte & PG_MANAGED) {
2959 pmap_page_print_mappings(om);
2964 if ((om->a.flags & PGA_WRITEABLE) != 0 &&
2965 TAILQ_EMPTY(&om->md.pv_list) &&
2966 ((om->flags & PG_FICTITIOUS) != 0 ||
2967 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
2968 vm_page_aflag_clear(om, PGA_WRITEABLE);
2970 if ((origpte & PG_A) != 0)
2971 invalidate_page = true;
2974 if (pmap != kernel_pmap) {
2976 if (VERBOSE_PMAP || pmap_logging)
2977 printf("pmap_enter(%p, %#lx, %p, %#x, %x, %d) -- asid=%lu curpid=%d name=%s\n",
2978 pmap, va, m, prot, flags, psind,
2979 pmap->pm_pid, curproc->p_pid,
2985 * Increment the counters.
2987 if ((newpte & PG_W) != 0)
2988 pmap->pm_stats.wired_count++;
2989 pmap_resident_count_inc(pmap, 1);
2993 * Enter on the PV list if part of our managed memory.
2995 if ((newpte & PG_MANAGED) != 0) {
2997 pv = get_pv_entry(pmap, &lock);
3002 printf("reassigning pv: %p to pmap: %p\n",
3005 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3006 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3008 if ((newpte & PG_RW) != 0)
3009 vm_page_aflag_set(m, PGA_WRITEABLE);
3015 if ((origpte & PG_V) != 0) {
3017 origpte = pte_load_store(pte, newpte);
3018 KASSERT((origpte & PG_FRAME) == pa,
3019 ("pmap_enter: unexpected pa update for %#lx", va));
3020 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
3022 if ((origpte & PG_MANAGED) != 0)
3024 invalidate_page = true;
3027 * Although the PTE may still have PG_RW set, TLB
3028 * invalidation may nonetheless be required because
3029 * the PTE no longer has PG_M set.
3031 } else if ((origpte & PG_X) != 0 || (newpte & PG_X) == 0) {
3033 * Removing capabilities requires invalidation on POWER
3035 invalidate_page = true;
3038 if ((origpte & PG_A) != 0)
3039 invalidate_page = true;
3041 pte_store(pte, newpte);
3046 #if VM_NRESERVLEVEL > 0
3048 * If both the page table page and the reservation are fully
3049 * populated, then attempt promotion.
3051 if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
3052 mmu_radix_ps_enabled(pmap) &&
3053 (m->flags & PG_FICTITIOUS) == 0 &&
3054 vm_reserv_level_iffullpop(m) == 0 &&
3055 pmap_promote_l3e(pmap, l3e, va, &lock) == 0)
3056 invalidate_all = true;
3059 pmap_invalidate_all(pmap);
3060 else if (invalidate_page)
3061 pmap_invalidate_page(pmap, va);
3074 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true
3075 * if successful. Returns false if (1) a page table page cannot be allocated
3076 * without sleeping, (2) a mapping already exists at the specified virtual
3077 * address, or (3) a PV entry cannot be allocated without reclaiming another PV entry.
3081 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3082 struct rwlock **lockp)
3084 pml3_entry_t newpde;
3086 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3087 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs) |
3089 if ((m->oflags & VPO_UNMANAGED) == 0)
3090 newpde |= PG_MANAGED;
3091 if (prot & VM_PROT_EXECUTE)
3093 if (prot & VM_PROT_READ)
3094 newpde |= RPTE_EAA_R;
3095 if (va >= DMAP_MIN_ADDRESS)
3096 newpde |= RPTE_EAA_P;
3097 return (pmap_enter_l3e(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
3098 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3103 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
3104 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3105 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3106 * a mapping already exists at the specified virtual address. Returns
3107 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3108 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if
3109 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3111 * The parameter "m" is only used when creating a managed, writeable mapping.
3114 pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
3115 vm_page_t m, struct rwlock **lockp)
3117 struct spglist free;
3118 pml3_entry_t oldl3e, *l3e;
3121 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW,
3122 ("pmap_enter_pde: newpde is missing PG_M"));
3123 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3125 if ((pdpg = pmap_allocl3e(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
3126 NULL : lockp)) == NULL) {
3127 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3128 " in pmap %p", va, pmap);
3129 return (KERN_RESOURCE_SHORTAGE);
3131 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
3132 l3e = &l3e[pmap_pml3e_index(va)];
3134 if ((oldl3e & PG_V) != 0) {
3135 KASSERT(pdpg->ref_count > 1,
3136 ("pmap_enter_pde: pdpg's wire count is too low"));
3137 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
3139 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3140 " in pmap %p", va, pmap);
3141 return (KERN_FAILURE);
3143 /* Break the existing mapping(s). */
3145 if ((oldl3e & RPTE_LEAF) != 0) {
3147 * The reference to the PD page that was acquired by
3148 * pmap_allocl3e() ensures that it won't be freed.
3149 * However, if the PDE resulted from a promotion, then
3150 * a reserved PT page could be freed.
3152 (void)pmap_remove_l3e(pmap, l3e, va, &free, lockp);
3154 if (pmap_remove_ptes(pmap, va, va + L3_PAGE_SIZE, l3e,
3156 pmap_invalidate_all(pmap);
3158 vm_page_free_pages_toq(&free, true);
3159 if (va >= VM_MAXUSER_ADDRESS) {
3160 mt = PHYS_TO_VM_PAGE(*l3e & PG_FRAME);
3161 if (pmap_insert_pt_page(pmap, mt)) {
3163 * XXX Currently, this can't happen because
3164 * we do not perform pmap_enter(psind == 1)
3165 * on the kernel pmap.
3167 panic("pmap_enter_pde: trie insert failed");
3170 KASSERT(*l3e == 0, ("pmap_enter_pde: non-zero pde %p",
3173 if ((newpde & PG_MANAGED) != 0) {
3175 * Abort this mapping if its PV entry could not be created.
3177 if (!pmap_pv_insert_l3e(pmap, va, newpde, flags, lockp)) {
3179 if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
3181 * Although "va" is not mapped, paging-
3182 * structure caches could nonetheless have
3183 * entries that refer to the freed page table
3184 * pages. Invalidate those entries.
3186 pmap_invalidate_page(pmap, va);
3187 vm_page_free_pages_toq(&free, true);
3189 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3190 " in pmap %p", va, pmap);
3191 return (KERN_RESOURCE_SHORTAGE);
3193 if ((newpde & PG_RW) != 0) {
3194 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
3195 vm_page_aflag_set(mt, PGA_WRITEABLE);
3200 * Increment counters.
3202 if ((newpde & PG_W) != 0)
3203 pmap->pm_stats.wired_count += L3_PAGE_SIZE / PAGE_SIZE;
3204 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
3207 * Map the superpage. (This is not a promoted mapping; there will not
3208 * be any lingering 4KB page mappings in the TLB.)
3210 pte_store(l3e, newpde);
3212 atomic_add_long(&pmap_l3e_mappings, 1);
3213 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
3214 " in pmap %p", va, pmap);
3215 return (KERN_SUCCESS);
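/*
 * Map a range of pages from a VM object into the given pmap with the
 * requested protection, for prefaulting. 2MB mappings are used when the
 * virtual address and page run are suitably aligned and superpages are
 * enabled; otherwise pages are entered one at a time with
 * mmu_radix_enter_quick_locked().
 */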
3219 mmu_radix_enter_object(pmap_t pmap, vm_offset_t start,
3220 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
3223 struct rwlock *lock;
3226 vm_pindex_t diff, psize;
3228 VM_OBJECT_ASSERT_LOCKED(m_start->object);
3230 CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
3231 end, m_start, prot);
3234 psize = atop(end - start);
3239 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3240 va = start + ptoa(diff);
3241 if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end &&
3242 m->psind == 1 && mmu_radix_ps_enabled(pmap) &&
3243 pmap_enter_2mpage(pmap, va, m, prot, &lock))
3244 m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1];
3246 mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot,
3247 mpte, &lock, &invalidate);
3248 m = TAILQ_NEXT(m, listq);
3254 pmap_invalidate_all(pmap);
3259 mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3260 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate)
3262 struct spglist free;
3266 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3267 (m->oflags & VPO_UNMANAGED) != 0,
3268 ("mmu_radix_enter_quick_locked: managed mapping within the clean submap"));
3269 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3272 * In the case that a page table page is not
3273 * resident, we are creating it here.
3275 if (va < VM_MAXUSER_ADDRESS) {
3276 vm_pindex_t ptepindex;
3277 pml3_entry_t *ptepa;
3280 * Calculate pagetable page index
3282 ptepindex = pmap_l3e_pindex(va);
3283 if (mpte && (mpte->pindex == ptepindex)) {
3287 * Get the page directory entry
3289 ptepa = pmap_pml3e(pmap, va);
3292 * If the page table page is mapped, we just increment
3293 * the hold count, and activate it. Otherwise, we
3294 * attempt to allocate a page table page. If this
3295 * attempt fails, we don't retry. Instead, we give up.
3297 if (ptepa && (*ptepa & PG_V) != 0) {
3298 if (*ptepa & RPTE_LEAF)
3300 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
3304 * Pass NULL instead of the PV list lock
3305 * pointer, because we don't intend to sleep.
3307 mpte = _pmap_allocpte(pmap, ptepindex, NULL);
3312 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3313 pte = &pte[pmap_pte_index(va)];
3316 pte = pmap_pte(pmap, va);
3327 * Enter on the PV list if part of our managed memory.
3329 if ((m->oflags & VPO_UNMANAGED) == 0 &&
3330 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3333 if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
3335 * Although "va" is not mapped, paging-
3336 * structure caches could nonetheless have
3337 * entries that refer to the freed page table
3338 * pages. Invalidate those entries.
3341 vm_page_free_pages_toq(&free, true);
3349 * Increment counters
3351 pmap_resident_count_inc(pmap, 1);
3353 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs);
3354 if (prot & VM_PROT_EXECUTE)
3358 if ((m->oflags & VPO_UNMANAGED) == 0)
3366 mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
3369 struct rwlock *lock;
3375 mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock,
3381 pmap_invalidate_all(pmap);
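/*
 * Return the physical address to which the given virtual address maps in
 * this pmap, or 0 if there is no valid mapping. Handles both 2MB leaf
 * entries and 4KB PTEs.
 */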
3386 mmu_radix_extract(pmap_t pmap, vm_offset_t va)
3392 l3e = pmap_pml3e(pmap, va);
3393 if (__predict_false(l3e == NULL))
3395 if (*l3e & RPTE_LEAF) {
3396 pa = (*l3e & PG_PS_FRAME) | (va & L3_PAGE_MASK);
3397 pa |= (va & L3_PAGE_MASK);
3400 * Beware of a concurrent promotion that changes the
3401 * PDE at this point! For example, vtopte() must not
3402 * be used to access the PTE because it would use the
3403 * new PDE. It is, however, safe to use the old PDE
3404 * because the page table page is preserved by the promotion.
3407 pte = pmap_l3e_to_pte(l3e, va);
3408 if (__predict_false(pte == NULL))
3411 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
3412 pa |= (va & PAGE_MASK);
3418 mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3420 pml3_entry_t l3e, *l3ep;
3427 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
3429 l3ep = pmap_pml3e(pmap, va);
3430 if (l3ep != NULL && (l3e = *l3ep)) {
3431 if (l3e & RPTE_LEAF) {
3432 if ((l3e & PG_RW) || (prot & VM_PROT_WRITE) == 0)
3433 m = PHYS_TO_VM_PAGE((l3e & PG_PS_FRAME) |
3434 (va & L3_PAGE_MASK));
3436 pte = *pmap_l3e_to_pte(l3ep, va);
3438 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0))
3439 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3441 if (m != NULL && !vm_page_wire_mapped(m))
3449 mmu_radix_growkernel(vm_offset_t addr)
3456 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
3457 if (VM_MIN_KERNEL_ADDRESS < addr &&
3458 addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE))
3461 addr = roundup2(addr, L3_PAGE_SIZE);
3462 if (addr - 1 >= vm_map_max(kernel_map))
3463 addr = vm_map_max(kernel_map);
3464 while (kernel_vm_end < addr) {
3465 l2e = pmap_pml2e(kernel_pmap, kernel_vm_end);
3466 if ((*l2e & PG_V) == 0) {
3467 /* We need a new PDP entry */
3468 nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_PAGE_SIZE_SHIFT,
3469 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
3470 VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3472 panic("pmap_growkernel: no memory to grow kernel");
3473 if ((nkpg->flags & PG_ZERO) == 0)
3474 mmu_radix_zero_page(nkpg);
3475 paddr = VM_PAGE_TO_PHYS(nkpg);
3476 pde_store(l2e, paddr);
3477 continue; /* try again */
3479 l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end);
3480 if ((*l3e & PG_V) != 0) {
3481 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3482 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3483 kernel_vm_end = vm_map_max(kernel_map);
3489 nkpg = vm_page_alloc(NULL, pmap_l3e_pindex(kernel_vm_end),
3490 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
3493 panic("pmap_growkernel: no memory to grow kernel");
3494 if ((nkpg->flags & PG_ZERO) == 0)
3495 mmu_radix_zero_page(nkpg);
3496 paddr = VM_PAGE_TO_PHYS(nkpg);
3497 pde_store(l3e, paddr);
3499 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3500 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3501 kernel_vm_end = vm_map_max(kernel_map);
3508 static MALLOC_DEFINE(M_RADIX_PGD, "radix_pgd", "radix page table root directory");
3509 static uma_zone_t zone_radix_pgd;
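/*
 * UMA cache-zone import/release callbacks for radix page directory roots.
 * Each root is a physically contiguous, naturally aligned RADIX_PGD_SIZE
 * allocation accessed through the DMAP; releasing a root unwires its pages
 * and returns them to the physical allocator.
 */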
3512 radix_pgd_import(void *arg __unused, void **store, int count, int domain __unused,
3516 for (int i = 0; i < count; i++) {
3517 vm_page_t m = vm_page_alloc_contig(NULL, 0,
3518 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
3519 VM_ALLOC_ZERO | VM_ALLOC_WAITOK, RADIX_PGD_SIZE/PAGE_SIZE,
3520 0, (vm_paddr_t)-1, RADIX_PGD_SIZE, L1_PAGE_SIZE,
3521 VM_MEMATTR_DEFAULT);
3522 /* XXX zero on alloc here so we don't have to later */
3523 store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3529 radix_pgd_release(void *arg __unused, void **store, int count)
3532 struct spglist free;
3536 page_count = RADIX_PGD_SIZE/PAGE_SIZE;
3538 for (int i = 0; i < count; i++) {
3540 * XXX selectively remove dmap and KVA entries so we don't
3543 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
3544 for (int j = page_count-1; j >= 0; j--) {
3545 vm_page_unwire_noq(&m[j]);
3546 SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss);
3548 vm_page_free_pages_toq(&free, false);
3557 int error, i, pv_npg;
3559 /* L1TF, reserve page @0 unconditionally */
3560 vm_page_excludelist_add(0, bootverbose);
3562 zone_radix_pgd = uma_zcache_create("radix_pgd_cache",
3563 RADIX_PGD_SIZE, NULL, NULL,
3565 trash_init, trash_fini,
3569 radix_pgd_import, radix_pgd_release,
3570 NULL, UMA_ZONE_NOBUCKET);
3573 * Initialize the vm page array entries for the kernel pmap's page table pages.
3576 PMAP_LOCK(kernel_pmap);
3577 for (i = 0; i < nkpt; i++) {
3578 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
3579 KASSERT(mpte >= vm_page_array &&
3580 mpte < &vm_page_array[vm_page_array_size],
3581 ("pmap_init: page table page is out of range size: %lu",
3582 vm_page_array_size));
3583 mpte->pindex = pmap_l3e_pindex(VM_MIN_KERNEL_ADDRESS) + i;
3584 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
3585 MPASS(PHYS_TO_VM_PAGE(mpte->phys_addr) == mpte);
3586 //pmap_insert_pt_page(kernel_pmap, mpte);
3587 mpte->ref_count = 1;
3589 PMAP_UNLOCK(kernel_pmap);
3592 CTR1(KTR_PMAP, "%s()", __func__);
3593 TAILQ_INIT(&pv_dummy.pv_list);
3596 * Are large page mappings enabled?
3598 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
3599 if (pg_ps_enabled) {
3600 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
3601 ("pmap_init: can't assign to pagesizes[1]"));
3602 pagesizes[1] = L3_PAGE_SIZE;
3606 * Initialize the pv chunk list mutex.
3608 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
3611 * Initialize the pool of pv list locks.
3613 for (i = 0; i < NPV_LIST_LOCKS; i++)
3614 rw_init(&pv_list_locks[i], "pmap pv list");
3617 * Calculate the size of the pv head table for superpages.
3619 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L3_PAGE_SIZE);
3622 * Allocate memory for the pv head table for superpages.
3624 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
3626 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
3627 for (i = 0; i < pv_npg; i++)
3628 TAILQ_INIT(&pv_table[i].pv_list);
3629 TAILQ_INIT(&pv_dummy.pv_list);
3631 pmap_initialized = 1;
3632 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
3633 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3634 (vmem_addr_t *)&qframe);
3637 panic("qframe allocation failed");
3638 asid_arena = vmem_create("ASID", isa3_base_pid + 1, (1<<isa3_pid_bits),
3643 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3645 struct rwlock *lock;
3647 struct md_page *pvh;
3648 pt_entry_t *pte, mask;
3650 int md_gen, pvh_gen;
3654 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3657 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3659 if (!PMAP_TRYLOCK(pmap)) {
3660 md_gen = m->md.pv_gen;
3664 if (md_gen != m->md.pv_gen) {
3669 pte = pmap_pte(pmap, pv->pv_va);
3672 mask |= PG_RW | PG_M;
3674 mask |= PG_V | PG_A;
3675 rv = (*pte & mask) == mask;
3680 if ((m->flags & PG_FICTITIOUS) == 0) {
3681 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3682 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
3684 if (!PMAP_TRYLOCK(pmap)) {
3685 md_gen = m->md.pv_gen;
3686 pvh_gen = pvh->pv_gen;
3690 if (md_gen != m->md.pv_gen ||
3691 pvh_gen != pvh->pv_gen) {
3696 pte = pmap_pml3e(pmap, pv->pv_va);
3699 mask |= PG_RW | PG_M;
3701 mask |= PG_V | PG_A;
3702 rv = (*pte & mask) == mask;
3716 * Return whether or not the specified physical page was modified
3717 * in any physical maps.
3720 mmu_radix_is_modified(vm_page_t m)
3723 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3724 ("pmap_is_modified: page %p is not managed", m));
3726 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3728 * If the page is not busied then this check is racy.
3730 if (!pmap_page_is_write_mapped(m))
3732 return (pmap_page_test_mappings(m, FALSE, TRUE));
3736 mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3742 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
3745 l3e = pmap_pml3e(pmap, addr);
3746 if (l3e != NULL && (*l3e & (RPTE_LEAF | PG_V)) == PG_V) {
3747 pte = pmap_l3e_to_pte(l3e, addr);
3748 rv = (*pte & PG_V) == 0;
3755 mmu_radix_is_referenced(vm_page_t m)
3757 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3758 ("pmap_is_referenced: page %p is not managed", m));
3759 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3760 return (pmap_page_test_mappings(m, TRUE, FALSE));
3764 * pmap_ts_referenced:
3766 * Return a count of reference bits for a page, clearing those bits.
3767 * It is not necessary for every reference bit to be cleared, but it
3768 * is necessary that 0 only be returned when there are truly no
3769 * reference bits set.
3771 * As an optimization, update the page's dirty field if a modified bit is
3772 * found while counting reference bits. This opportunistic update can be
3773 * performed at low cost and can eliminate the need for some future calls
3774 * to pmap_is_modified(). However, since this function stops after
3775 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3776 * dirty pages. Those dirty pages will only be detected by a future call
3777 * to pmap_is_modified().
3779 * A DI block is not needed within this function, because
3780 * invalidations are performed before the PV list lock is acquired.
3784 mmu_radix_ts_referenced(vm_page_t m)
3786 struct md_page *pvh;
3789 struct rwlock *lock;
3790 pml3_entry_t oldl3e, *l3e;
3793 int cleared, md_gen, not_cleared, pvh_gen;
3794 struct spglist free;
3796 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3797 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3798 ("pmap_ts_referenced: page %p is not managed", m));
3801 pa = VM_PAGE_TO_PHYS(m);
3802 lock = PHYS_TO_PV_LIST_LOCK(pa);
3803 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
3807 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
3808 goto small_mappings;
3814 if (!PMAP_TRYLOCK(pmap)) {
3815 pvh_gen = pvh->pv_gen;
3819 if (pvh_gen != pvh->pv_gen) {
3824 l3e = pmap_pml3e(pmap, pv->pv_va);
3826 if ((oldl3e & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
3828 * Although "oldl3e" is mapping a 2MB page, because
3829 * this function is called at a 4KB page granularity,
3830 * we only update the 4KB page under test.
3834 if ((oldl3e & PG_A) != 0) {
3836 * Since this reference bit is shared by 512 4KB
3837 * pages, it should not be cleared every time it is
3838 * tested. Apply a simple "hash" function on the
3839 * physical page number, the virtual superpage number,
3840 * and the pmap address to select one 4KB page out of
3841 * the 512 on which testing the reference bit will
3842 * result in clearing that reference bit. This
3843 * function is designed to avoid the selection of the
3844 * same 4KB page for every 2MB page mapping.
3846 * On demotion, a mapping that hasn't been referenced
3847 * is simply destroyed. To avoid the possibility of a
3848 * subsequent page fault on a demoted wired mapping,
3849 * always leave its reference bit set. Moreover,
3850 * since the superpage is wired, the current state of
3851 * its reference bit won't affect page replacement.
3853 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L3_PAGE_SIZE_SHIFT) ^
3854 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
3855 (oldl3e & PG_W) == 0) {
3856 atomic_clear_long(l3e, PG_A);
3857 pmap_invalidate_page(pmap, pv->pv_va);
3859 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3860 ("inconsistent pv lock %p %p for page %p",
3861 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3866 /* Rotate the PV list if it has more than one entry. */
3867 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3868 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
3869 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
3872 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
3874 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
3876 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
3883 if (!PMAP_TRYLOCK(pmap)) {
3884 pvh_gen = pvh->pv_gen;
3885 md_gen = m->md.pv_gen;
3889 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3894 l3e = pmap_pml3e(pmap, pv->pv_va);
3895 KASSERT((*l3e & RPTE_LEAF) == 0,
3896 ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
3898 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
3899 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3901 if ((*pte & PG_A) != 0) {
3902 atomic_clear_long(pte, PG_A);
3903 pmap_invalidate_page(pmap, pv->pv_va);
3907 /* Rotate the PV list if it has more than one entry. */
3908 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3909 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
3910 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3913 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
3914 not_cleared < PMAP_TS_REFERENCED_MAX);
3917 vm_page_free_pages_toq(&free, true);
3918 return (cleared + not_cleared);
3922 mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
3923 vm_paddr_t end, int prot __unused)
3926 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
3928 return (PHYS_TO_DMAP(start));
3932 mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
3933 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3936 vm_paddr_t pa, ptepa;
3940 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
3941 object, pindex, size);
3942 VM_OBJECT_ASSERT_WLOCKED(object);
3943 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3944 ("pmap_object_init_pt: non-device object"));
3945 /* NB: size can be logically ored with addr here */
3946 if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) {
3947 if (!mmu_radix_ps_enabled(pmap))
3949 if (!vm_object_populate(object, pindex, pindex + atop(size)))
3951 p = vm_page_lookup(object, pindex);
3952 KASSERT(p->valid == VM_PAGE_BITS_ALL,
3953 ("pmap_object_init_pt: invalid page %p", p));
3954 ma = p->md.mdpg_cache_attrs;
3957 * Abort the mapping if the first page is not physically
3958 * aligned to a 2MB page boundary.
3960 ptepa = VM_PAGE_TO_PHYS(p);
3961 if (ptepa & L3_PAGE_MASK)
3965 * Skip the first page. Abort the mapping if the rest of
3966 * the pages are not physically contiguous or have differing
3967 * memory attributes.
3969 p = TAILQ_NEXT(p, listq);
3970 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
3972 KASSERT(p->valid == VM_PAGE_BITS_ALL,
3973 ("pmap_object_init_pt: invalid page %p", p));
3974 if (pa != VM_PAGE_TO_PHYS(p) ||
3975 ma != p->md.mdpg_cache_attrs)
3977 p = TAILQ_NEXT(p, listq);
3981 for (pa = ptepa | pmap_cache_bits(ma);
3982 pa < ptepa + size; pa += L3_PAGE_SIZE) {
3983 pdpg = pmap_allocl3e(pmap, addr, NULL);
3986 * The creation of mappings below is only an
3987 * optimization. If a page directory page
3988 * cannot be allocated without blocking,
3989 * continue on to the next mapping rather than blocking.
3992 addr += L3_PAGE_SIZE;
3995 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
3996 l3e = &l3e[pmap_pml3e_index(addr)];
3997 if ((*l3e & PG_V) == 0) {
3998 pa |= PG_M | PG_A | PG_RW;
4000 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
4001 atomic_add_long(&pmap_l3e_mappings, 1);
4003 /* Continue on if the PDE is already valid. */
4005 KASSERT(pdpg->ref_count > 0,
4006 ("pmap_object_init_pt: missing reference "
4007 "to page directory page, va: 0x%lx", addr));
4009 addr += L3_PAGE_SIZE;
4017 mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
4019 struct md_page *pvh;
4020 struct rwlock *lock;
4025 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4026 ("pmap_page_exists_quick: page %p is not managed", m));
4027 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
4029 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4031 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4032 if (PV_PMAP(pv) == pmap) {
4040 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4041 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4042 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4043 if (PV_PMAP(pv) == pmap) {
4057 mmu_radix_page_init(vm_page_t m)
4060 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4061 TAILQ_INIT(&m->md.pv_list);
4062 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
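/*
 * Return the number of wired mappings of the given physical page, counting
 * both 4KB mappings on the page's pv list and 2MB mappings on the
 * corresponding superpage pv head list.
 */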
4066 mmu_radix_page_wired_mappings(vm_page_t m)
4068 struct rwlock *lock;
4069 struct md_page *pvh;
4073 int count, md_gen, pvh_gen;
4075 if ((m->oflags & VPO_UNMANAGED) != 0)
4077 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4078 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4082 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4084 if (!PMAP_TRYLOCK(pmap)) {
4085 md_gen = m->md.pv_gen;
4089 if (md_gen != m->md.pv_gen) {
4094 pte = pmap_pte(pmap, pv->pv_va);
4095 if ((*pte & PG_W) != 0)
4099 if ((m->flags & PG_FICTITIOUS) == 0) {
4100 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4101 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4103 if (!PMAP_TRYLOCK(pmap)) {
4104 md_gen = m->md.pv_gen;
4105 pvh_gen = pvh->pv_gen;
4109 if (md_gen != m->md.pv_gen ||
4110 pvh_gen != pvh->pv_gen) {
4115 pte = pmap_pml3e(pmap, pv->pv_va);
4116 if ((*pte & PG_W) != 0)
4126 mmu_radix_update_proctab(int pid, pml1_entry_t l1pa)
4128 isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT);
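/*
 * Initialize a user pmap: allocate and zero a page directory root from the
 * radix_pgd cache, reset the pmap's radix trie and statistics, allocate a
 * PID from the ASID arena, and publish the new root in the process table so
 * the hardware can walk it.
 */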
4132 mmu_radix_pinit(pmap_t pmap)
4137 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4140 * allocate the page directory page
4142 pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK);
4144 for (int j = 0; j < RADIX_PGD_SIZE_SHIFT; j++)
4145 pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE);
4146 pmap->pm_radix.rt_root = 0;
4147 TAILQ_INIT(&pmap->pm_pvchunk);
4148 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4149 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4150 vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid);
4153 l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1);
4154 mmu_radix_update_proctab(pid, l1pa);
4155 __asm __volatile("ptesync;isync" : : : "memory");
4161 * This routine is called if the desired page table page does not exist.
4163 * If page table page allocation fails, this routine may sleep before
4164 * returning NULL. It sleeps only if a lock pointer was given.
4166 * Note: If a page allocation fails at page table level two or three,
4167 * one or two pages may be held during the wait, only to be released
4168 * afterwards. This conservative approach is easily argued to avoid race conditions.
4172 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
4174 vm_page_t m, pdppg, pdpg;
4176 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4179 * Allocate a page table page.
4181 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
4182 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
4183 if (lockp != NULL) {
4184 RELEASE_PV_LIST_LOCK(lockp);
4190 * Indicate the need to retry. While waiting, the page table
4191 * page may have been allocated.
4195 if ((m->flags & PG_ZERO) == 0)
4196 mmu_radix_zero_page(m);
4199 * Map the pagetable page into the process address space, if
4200 * it isn't already there.
4203 if (ptepindex >= (NUPDE + NUPDPE)) {
4205 vm_pindex_t pml1index;
4207 /* Wire up a new PDPE page */
4208 pml1index = ptepindex - (NUPDE + NUPDPE);
4209 l1e = &pmap->pm_pml1[pml1index];
4210 pde_store(l1e, VM_PAGE_TO_PHYS(m));
4212 } else if (ptepindex >= NUPDE) {
4213 vm_pindex_t pml1index;
4214 vm_pindex_t pdpindex;
4218 /* Wire up a new l2e page */
4219 pdpindex = ptepindex - NUPDE;
4220 pml1index = pdpindex >> RPTE_SHIFT;
4222 l1e = &pmap->pm_pml1[pml1index];
4223 if ((*l1e & PG_V) == 0) {
4224 /* Have to allocate a new pdp, recurse */
4225 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml1index,
4227 vm_page_unwire_noq(m);
4228 vm_page_free_zero(m);
4232 /* Add reference to l2e page */
4233 pdppg = PHYS_TO_VM_PAGE(*l1e & PG_FRAME);
4236 l2e = (pml2_entry_t *)PHYS_TO_DMAP(*l1e & PG_FRAME);
4238 /* Now find the pdp page */
4239 l2e = &l2e[pdpindex & RPTE_MASK];
4240 pde_store(l2e, VM_PAGE_TO_PHYS(m));
4243 vm_pindex_t pml1index;
4244 vm_pindex_t pdpindex;
4249 /* Wire up a new PTE page */
4250 pdpindex = ptepindex >> RPTE_SHIFT;
4251 pml1index = pdpindex >> RPTE_SHIFT;
4253 /* First, find the pdp and check that it's valid. */
4254 l1e = &pmap->pm_pml1[pml1index];
4255 if ((*l1e & PG_V) == 0) {
4256 /* Have to allocate a new pd, recurse */
4257 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4259 vm_page_unwire_noq(m);
4260 vm_page_free_zero(m);
4263 l2e = (pml2_entry_t *)PHYS_TO_DMAP(*l1e & PG_FRAME);
4264 l2e = &l2e[pdpindex & RPTE_MASK];
4266 l2e = (pml2_entry_t *)PHYS_TO_DMAP(*l1e & PG_FRAME);
4267 l2e = &l2e[pdpindex & RPTE_MASK];
4268 if ((*l2e & PG_V) == 0) {
4269 /* Have to allocate a new pd, recurse */
4270 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4272 vm_page_unwire_noq(m);
4273 vm_page_free_zero(m);
4277 /* Add reference to the pd page */
4278 pdpg = PHYS_TO_VM_PAGE(*l2e & PG_FRAME);
4282 l3e = (pml3_entry_t *)PHYS_TO_DMAP(*l2e & PG_FRAME);
4284 /* Now we know where the page directory page is */
4285 l3e = &l3e[ptepindex & RPTE_MASK];
4286 pde_store(l3e, VM_PAGE_TO_PHYS(m));
4289 pmap_resident_count_inc(pmap, 1);
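/*
 * Return the page table page holding the L3 (PDE-level) entries for "va",
 * allocating intermediate levels as needed. The returned page's reference
 * count is incremented; NULL may be returned if an allocation fails and
 * sleeping is not permitted.
 */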
4293 pmap_allocl3e(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4295 vm_pindex_t pdpindex, ptepindex;
4300 pdpe = pmap_pml2e(pmap, va);
4301 if (pdpe != NULL && (*pdpe & PG_V) != 0) {
4302 /* Add a reference to the pd page. */
4303 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
4306 /* Allocate a pd page. */
4307 ptepindex = pmap_l3e_pindex(va);
4308 pdpindex = ptepindex >> RPTE_SHIFT;
4309 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
4310 if (pdpg == NULL && lockp != NULL)
4317 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4319 vm_pindex_t ptepindex;
4324 * Calculate pagetable page index
4326 ptepindex = pmap_l3e_pindex(va);
4329 * Get the page directory entry
4331 pd = pmap_pml3e(pmap, va);
4334 * This supports switching from a 2MB page to a 4KB page.
4337 if (pd != NULL && (*pd & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V)) {
4338 if (!pmap_demote_l3e_locked(pmap, pd, va, lockp)) {
4340 * Invalidation of the 2MB page mapping may have caused
4341 * the deallocation of the underlying PD page.
4348 * If the page table page is mapped, we just increment the
4349 * hold count, and activate it.
4351 if (pd != NULL && (*pd & PG_V) != 0) {
4352 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
4356 * Here if the pte page isn't mapped, or if it has been deallocated.
4359 m = _pmap_allocpte(pmap, ptepindex, lockp);
4360 if (m == NULL && lockp != NULL)
4367 mmu_radix_pinit0(pmap_t pmap)
4370 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4371 PMAP_LOCK_INIT(pmap);
4372 pmap->pm_pml1 = kernel_pmap->pm_pml1;
4373 pmap->pm_pid = kernel_pmap->pm_pid;
4375 pmap->pm_radix.rt_root = 0;
4376 TAILQ_INIT(&pmap->pm_pvchunk);
4377 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4378 kernel_pmap->pm_flags =
4379 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4382 * pmap_protect_l3e: apply the requested protection to a 2MB page mapping in a process
4385 pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot)
4387 pt_entry_t newpde, oldpde;
4388 vm_offset_t eva, va;
4390 boolean_t anychanged;
4392 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4393 KASSERT((sva & L3_PAGE_MASK) == 0,
4394 ("pmap_protect_l3e: sva is not 2mpage aligned"));
4397 oldpde = newpde = *l3e;
4398 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
4399 (PG_MANAGED | PG_M | PG_RW)) {
4400 eva = sva + L3_PAGE_SIZE;
4401 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4402 va < eva; va += PAGE_SIZE, m++)
4405 if ((prot & VM_PROT_WRITE) == 0) {
4406 newpde &= ~(PG_RW | PG_M);
4407 newpde |= RPTE_EAA_R;
4409 if (prot & VM_PROT_EXECUTE)
4411 if (newpde != oldpde) {
4413 * As an optimization to future operations on this PDE, clear
4414 * PG_PROMOTED. The impending invalidation will remove any
4415 * lingering 4KB page mappings from the TLB.
4417 if (!atomic_cmpset_long(l3e, oldpde, newpde & ~PG_PROMOTED))
4421 return (anychanged);
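/*
 * Set the protection on the specified range of this pmap. Write and/or
 * execute permission is removed as requested; VM_PROT_NONE is handled by
 * removing the mappings entirely. A 2MB mapping is adjusted in place when
 * the whole superpage is covered, otherwise it is demoted first.
 */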
4425 mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
4428 vm_offset_t va_next;
4431 pml3_entry_t ptpaddr, *l3e;
4433 boolean_t anychanged;
4435 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva,
4438 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4439 if (prot == VM_PROT_NONE) {
4440 mmu_radix_remove(pmap, sva, eva);
4444 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
4445 (VM_PROT_WRITE|VM_PROT_EXECUTE))
4449 if (VERBOSE_PROTECT || pmap_logging)
4450 printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n",
4451 pmap, sva, eva, prot, pmap->pm_pid);
4456 for (; sva < eva; sva = va_next) {
4457 l1e = pmap_pml1e(pmap, sva);
4458 if ((*l1e & PG_V) == 0) {
4459 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
4465 l2e = pmap_l1e_to_l2e(l1e, sva);
4466 if ((*l2e & PG_V) == 0) {
4467 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
4473 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
4477 l3e = pmap_l2e_to_l3e(l2e, sva);
4481 * Weed out invalid mappings.
4487 * Check for large page.
4489 if ((ptpaddr & RPTE_LEAF) != 0) {
4491 * Are we protecting the entire large page? If not,
4492 * demote the mapping and fall through.
4494 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
4495 if (pmap_protect_l3e(pmap, l3e, sva, prot))
4498 } else if (!pmap_demote_l3e(pmap, l3e, sva)) {
4500 * The large page mapping was destroyed.
4509 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
4511 pt_entry_t obits, pbits;
4515 MPASS(pte == pmap_pte(pmap, sva));
4516 obits = pbits = *pte;
4517 if ((pbits & PG_V) == 0)
4520 if ((prot & VM_PROT_WRITE) == 0) {
4521 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
4522 (PG_MANAGED | PG_M | PG_RW)) {
4523 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
4526 pbits &= ~(PG_RW | PG_M);
4527 pbits |= RPTE_EAA_R;
4529 if (prot & VM_PROT_EXECUTE)
4532 if (pbits != obits) {
4533 if (!atomic_cmpset_long(pte, obits, pbits))
4535 if (obits & (PG_A|PG_M)) {
4538 if (VERBOSE_PROTECT || pmap_logging)
4539 printf("%#lx %#lx -> %#lx\n",
4547 pmap_invalidate_all(pmap);
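/*
 * Map a run of "count" physical pages at consecutive kernel virtual
 * addresses starting at "sva". The mappings are read/write and use each
 * page's cache attributes; if any previously valid PTE is replaced, the
 * affected range is invalidated.
 */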
4552 mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count)
4555 CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count);
4556 pt_entry_t oldpte, pa, *pte;
4558 uint64_t cache_bits, attr_bits;
4562 attr_bits = RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
4565 while (va < sva + PAGE_SIZE * count) {
4566 if (__predict_false((va & L3_PAGE_MASK) == 0))
4568 MPASS(pte == pmap_pte(kernel_pmap, va));
4571 * XXX there has to be a more efficient way than traversing
4572 * the page table every time - but go for correctness for now.
4577 cache_bits = pmap_cache_bits(m->md.mdpg_cache_attrs);
4578 pa = VM_PAGE_TO_PHYS(m) | cache_bits | attr_bits;
4586 if (__predict_false((oldpte & RPTE_VALID) != 0))
4587 pmap_invalidate_range(kernel_pmap, sva, sva + count *
4594 mmu_radix_qremove(vm_offset_t sva, int count)
4599 CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count);
4600 KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva));
4604 while (va < sva + PAGE_SIZE * count) {
4605 if (__predict_false((va & L3_PAGE_MASK) == 0))
4611 pmap_invalidate_range(kernel_pmap, sva, va);
4614 /***************************************************
4615 * Page table page management routines.....
4616 ***************************************************/
4618 * Schedule the specified unused page table page to be freed. Specifically,
4619 * add the page to the specified list of pages that will be released to the
4620 * physical memory manager after the TLB has been updated.
4622 static __inline void
4623 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
4624 boolean_t set_PG_ZERO)
4628 m->flags |= PG_ZERO;
4630 m->flags &= ~PG_ZERO;
4631 SLIST_INSERT_HEAD(free, m, plinks.s.ss);
4635 * Inserts the specified page table page into the specified pmap's collection
4636 * of idle page table pages. Each of a pmap's page table pages is responsible
4637 * for mapping a distinct range of virtual addresses. The pmap's collection is
4638 * ordered by this virtual address range.
4641 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
4644 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4645 return (vm_radix_insert(&pmap->pm_radix, mpte));
4649 * Removes the page table page mapping the specified virtual address from the
4650 * specified pmap's collection of idle page table pages, and returns it.
4651 * Otherwise, returns NULL if there is no page table page corresponding to the
4652 * specified virtual address.
4654 static __inline vm_page_t
4655 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4658 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4659 return (vm_radix_remove(&pmap->pm_radix, pmap_l3e_pindex(va)));
4663 * Decrements a page table page's wire count, which is used to record the
4664 * number of valid page table entries within the page. If the wire count
4665 * drops to zero, then the page table page is unmapped. Returns TRUE if the
4666 * page table page was unmapped and FALSE otherwise.
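 * Dropping the last reference also releases the reference that this page
 * table page held on its parent, so an entire chain of newly empty page
 * table pages can be unwound in a single call (see _pmap_unwire_ptp()).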
4668 static inline boolean_t
4669 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4673 if (m->ref_count == 0) {
4674 _pmap_unwire_ptp(pmap, va, m, free);
4681 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4684 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4686 * unmap the page table page
4688 if (m->pindex >= (NUPDE + NUPDPE)) {
4691 pml1 = pmap_pml1e(pmap, va);
4693 } else if (m->pindex >= NUPDE) {
4696 l2e = pmap_pml2e(pmap, va);
4701 l3e = pmap_pml3e(pmap, va);
4704 pmap_resident_count_dec(pmap, 1);
4705 if (m->pindex < NUPDE) {
4706 /* We just released a PT, unhold the matching PD */
4709 pdpg = PHYS_TO_VM_PAGE(*pmap_pml2e(pmap, va) & PG_FRAME);
4710 pmap_unwire_ptp(pmap, va, pdpg, free);
4712 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
4713 /* We just released a PD, unhold the matching PDP */
4716 pdppg = PHYS_TO_VM_PAGE(*pmap_pml1e(pmap, va) & PG_FRAME);
4717 pmap_unwire_ptp(pmap, va, pdppg, free);
4721 * Put page on a list so that it is released after
4722 * *ALL* TLB shootdown is done
4724 pmap_add_delayed_free_list(m, free, TRUE);
4728 * After removing a page table entry, this routine is used to
4729 * conditionally free the page, and manage the hold/wire counts.
4732 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde,
4733 struct spglist *free)
4737 if (va >= VM_MAXUSER_ADDRESS)
4739 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4740 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4741 return (pmap_unwire_ptp(pmap, va, mpte, free));
4745 mmu_radix_release(pmap_t pmap)
4748 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4749 KASSERT(pmap->pm_stats.resident_count == 0,
4750 ("pmap_release: pmap resident count %ld != 0",
4751 pmap->pm_stats.resident_count));
4752 KASSERT(vm_radix_is_empty(&pmap->pm_radix),
4753 ("pmap_release: pmap has reserved page table page(s)"));
4755 pmap_invalidate_all(pmap);
4756 isa3_proctab[pmap->pm_pid].proctab0 = 0;
4757 uma_zfree(zone_radix_pgd, pmap->pm_pml1);
4758 vmem_free(asid_arena, pmap->pm_pid, 1);
4762 * Create the PV entry for a 2MB page mapping. Always returns true unless the
4763 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns
4764 * false if the PV entry cannot be allocated without resorting to reclamation.
4767 pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t pde, u_int flags,
4768 struct rwlock **lockp)
4770 struct md_page *pvh;
4774 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4775 /* Pass NULL instead of the lock pointer to disable reclamation. */
4776 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
4777 NULL : lockp)) == NULL)
4780 pa = pde & PG_PS_FRAME;
4781 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4782 pvh = pa_to_pvh(pa);
4783 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
4789 * Fills a page table page with mappings to consecutive physical pages.
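 * "newpte" supplies both the starting physical address and the attribute
 * bits; each successive PTE receives the same attributes with the physical
 * address advanced by another PAGE_SIZE, so the resulting 4KB mappings
 * collectively cover the same range as the 2MB leaf being demoted.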
4792 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
4796 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
4798 newpte += PAGE_SIZE;
4803 pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va)
4805 struct rwlock *lock;
4809 rv = pmap_demote_l3e_locked(pmap, pde, va, &lock);
4816 pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
4817 struct rwlock **lockp)
4819 pml3_entry_t oldpde;
4820 pt_entry_t *firstpte;
4823 struct spglist free;
4826 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4828 KASSERT((oldpde & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
4829 ("pmap_demote_l3e: oldpde is missing RPTE_LEAF and/or PG_V %lx",
4831 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
4833 KASSERT((oldpde & PG_W) == 0,
4834 ("pmap_demote_l3e: page table page for a wired mapping"
4838 * Invalidate the 2MB page mapping and return "failure" if the
4839 * mapping was never accessed or the allocation of the new
4840 * page table page fails. If the 2MB page mapping belongs to
4841 * the direct map region of the kernel's address space, then
4842 * the page allocation request specifies the highest possible
4843 * priority (VM_ALLOC_INTERRUPT). Otherwise, the priority is
4844 * normal. Page table pages are preallocated for every other
4845 * part of the kernel address space, so the direct map region
4846 * is the only part of the kernel address space that must be handled here.
4849 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
4850 pmap_l3e_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
4851 DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
4852 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
4854 sva = trunc_2mpage(va);
4855 pmap_remove_l3e(pmap, l3e, sva, &free, lockp);
4856 pmap_invalidate_l3e_page(pmap, sva, oldpde);
4857 vm_page_free_pages_toq(&free, true);
4858 CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx"
4859 " in pmap %p", va, pmap);
4862 if (va < VM_MAXUSER_ADDRESS)
4863 pmap_resident_count_inc(pmap, 1);
4865 mptepa = VM_PAGE_TO_PHYS(mpte);
4866 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
4867 KASSERT((oldpde & PG_A) != 0,
4868 ("pmap_demote_l3e: oldpde is missing PG_A"));
4869 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
4870 ("pmap_demote_l3e: oldpde is missing PG_M"));
4873 * If the page table page is new, initialize it.
4875 if (mpte->ref_count == 1) {
4876 mpte->ref_count = NPTEPG;
4877 pmap_fill_ptp(firstpte, oldpde);
4880 KASSERT((*firstpte & PG_FRAME) == (oldpde & PG_FRAME),
4881 ("pmap_demote_l3e: firstpte and newpte map different physical"
4885 * If the mapping has changed attributes, update the page table
4888 if ((*firstpte & PG_PTE_PROMOTE) != (oldpde & PG_PTE_PROMOTE))
4889 pmap_fill_ptp(firstpte, oldpde);
4892 * The spare PV entries must be reserved prior to demoting the
4893 * mapping, that is, prior to changing the PDE. Otherwise, the state
4894 * of the PDE and the PV lists will be inconsistent, which can result
4895 * in reclaim_pv_chunk() attempting to remove a PV entry from the
4896 * wrong PV list and pmap_pv_demote_l3e() failing to find the expected
4897 * PV entry for the 2MB page mapping that is being demoted.
4899 if ((oldpde & PG_MANAGED) != 0)
4900 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
4903 * Demote the mapping. This pmap is locked. The old PDE has
4904 * PG_A set. If the old PDE has PG_RW set, it also has PG_M
4905 * set. Thus, there is no danger of a race with another
4906 * processor changing the setting of PG_A and/or PG_M between
4907 * the read above and the store below.
4909 pde_store(l3e, mptepa);
4912 * Demote the PV entry.
4914 if ((oldpde & PG_MANAGED) != 0)
4915 pmap_pv_demote_l3e(pmap, va, oldpde & PG_PS_FRAME, lockp);
4918 atomic_add_long(&pmap_l3e_demotions, 1);
4919 CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx"
4920 " in pmap %p", va, pmap);
4925 * pmap_remove_kernel_pde: Remove a kernel superpage mapping.
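 * The replacement page table page must already be present in the pmap's
 * collection (hence the panic below if it is not); it is zeroed and
 * re-installed in place of the 2MB leaf rather than released.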
4928 pmap_remove_kernel_l3e(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va)
4933 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
4934 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4935 mpte = pmap_remove_pt_page(pmap, va);
4937 panic("pmap_remove_kernel_pde: Missing pt page.");
4939 mptepa = VM_PAGE_TO_PHYS(mpte);
4942 * Initialize the page table page.
4944 pagezero(PHYS_TO_DMAP(mptepa));
4947 * Demote the mapping.
4949 pde_store(l3e, mptepa);
4954 * pmap_remove_l3e: do the things to unmap a superpage in a process
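 * When the leaf is managed, its dirty and referenced bits are propagated
 * to every constituent vm_page and the PV entry is freed; any backing page
 * table page is queued on "free" for release after the TLB shootdown.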
4957 pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
4958 struct spglist *free, struct rwlock **lockp)
4960 struct md_page *pvh;
4961 pml3_entry_t oldpde;
4962 vm_offset_t eva, va;
4965 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4966 KASSERT((sva & L3_PAGE_MASK) == 0,
4967 ("pmap_remove_l3e: sva is not 2mpage aligned"));
4968 oldpde = pte_load_clear(pdq);
4970 pmap->pm_stats.wired_count -= (L3_PAGE_SIZE / PAGE_SIZE);
4971 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
4972 if (oldpde & PG_MANAGED) {
4973 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
4974 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
4975 pmap_pvh_free(pvh, pmap, sva);
4976 eva = sva + L3_PAGE_SIZE;
4977 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4978 va < eva; va += PAGE_SIZE, m++) {
4979 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
4982 vm_page_aflag_set(m, PGA_REFERENCED);
4983 if (TAILQ_EMPTY(&m->md.pv_list) &&
4984 TAILQ_EMPTY(&pvh->pv_list))
4985 vm_page_aflag_clear(m, PGA_WRITEABLE);
4988 if (pmap == kernel_pmap) {
4989 pmap_remove_kernel_l3e(pmap, pdq, sva);
4991 mpte = pmap_remove_pt_page(pmap, sva);
4993 pmap_resident_count_dec(pmap, 1);
4994 KASSERT(mpte->ref_count == NPTEPG,
4995 ("pmap_remove_l3e: pte page wire count error"));
4996 mpte->ref_count = 0;
4997 pmap_add_delayed_free_list(mpte, free, FALSE);
5000 return (pmap_unuse_pt(pmap, sva, *pmap_pml2e(pmap, sva), free));
5005 * pmap_remove_pte: do the things to unmap a page in a process
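 * Returns TRUE (via pmap_unuse_pt()) when removing this PTE also freed the
 * containing page table page; callers then perform a full invalidation
 * rather than a single-page one (see pmap_remove_page()).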
5008 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
5009 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
5011 struct md_page *pvh;
5015 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5016 oldpte = pte_load_clear(ptq);
5017 if (oldpte & RPTE_WIRED)
5018 pmap->pm_stats.wired_count -= 1;
5019 pmap_resident_count_dec(pmap, 1);
5020 if (oldpte & RPTE_MANAGED) {
5021 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
5022 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5025 vm_page_aflag_set(m, PGA_REFERENCED);
5026 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5027 pmap_pvh_free(&m->md, pmap, va);
5028 if (TAILQ_EMPTY(&m->md.pv_list) &&
5029 (m->flags & PG_FICTITIOUS) == 0) {
5030 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5031 if (TAILQ_EMPTY(&pvh->pv_list))
5032 vm_page_aflag_clear(m, PGA_WRITEABLE);
5035 return (pmap_unuse_pt(pmap, va, ptepde, free));
5039 * Remove a single page from a process address space
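 * This is the single-page fast path used by mmu_radix_remove() when the
 * range covers exactly one page and the containing l3e is not a 2MB leaf.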
5042 pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *l3e,
5043 struct spglist *free)
5045 struct rwlock *lock;
5047 bool invalidate_all;
5049 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5050 if ((*l3e & RPTE_VALID) == 0) {
5053 pte = pmap_l3e_to_pte(l3e, va);
5054 if ((*pte & RPTE_VALID) == 0) {
5059 invalidate_all = pmap_remove_pte(pmap, pte, va, *l3e, free, &lock);
5062 if (!invalidate_all)
5063 pmap_invalidate_page(pmap, va);
5064 return (invalidate_all);
5068 * Removes the specified range of addresses from the page table page.
5071 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
5072 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp)
5078 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5081 for (pte = pmap_l3e_to_pte(l3e, sva); sva != eva; pte++,
5083 MPASS(pte == pmap_pte(pmap, sva));
5093 if (pmap_remove_pte(pmap, pte, sva, *l3e, free, lockp)) {
5100 pmap_invalidate_all(pmap);
5102 pmap_invalidate_range(pmap, va, sva);
5108 mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5110 struct rwlock *lock;
5111 vm_offset_t va_next;
5114 pml3_entry_t ptpaddr, *l3e;
5115 struct spglist free;
5118 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5121 * Perform an unsynchronized read. This is, however, safe.
5123 if (pmap->pm_stats.resident_count == 0)
5129 /* XXX something fishy here */
5130 sva = (sva + PAGE_MASK) & ~PAGE_MASK;
5131 eva = (eva + PAGE_MASK) & ~PAGE_MASK;
5136 * special handling of removing one page. a very
5137 * common operation and easy to short circuit some
5140 if (sva + PAGE_SIZE == eva) {
5141 l3e = pmap_pml3e(pmap, sva);
5142 if (l3e && (*l3e & RPTE_LEAF) == 0) {
5143 anyvalid = pmap_remove_page(pmap, sva, l3e, &free);
5149 for (; sva < eva; sva = va_next) {
5151 if (pmap->pm_stats.resident_count == 0)
5153 l1e = pmap_pml1e(pmap, sva);
5154 if (l1e == NULL || (*l1e & PG_V) == 0) {
5155 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5161 l2e = pmap_l1e_to_l2e(l1e, sva);
5162 if (l2e == NULL || (*l2e & PG_V) == 0) {
5163 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5170 * Calculate index for next page table.
5172 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5176 l3e = pmap_l2e_to_l3e(l2e, sva);
5180 * Weed out invalid mappings.
5186 * Check for large page.
5188 if ((ptpaddr & RPTE_LEAF) != 0) {
5190 * Are we removing the entire large page? If not,
5191 * demote the mapping and fall through.
5193 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5194 pmap_remove_l3e(pmap, l3e, sva, &free, &lock);
5196 } else if (!pmap_demote_l3e_locked(pmap, l3e, sva,
5198 /* The large page mapping was destroyed. */
5205 * Limit our scan to either the end of the va represented
5206 * by the current page table page, or to the end of the
5207 * range being removed.
5212 if (pmap_remove_ptes(pmap, sva, va_next, l3e, &free, &lock))
5219 pmap_invalidate_all(pmap);
5221 vm_page_free_pages_toq(&free, true);
5225 mmu_radix_remove_all(vm_page_t m)
5227 struct md_page *pvh;
5230 struct rwlock *lock;
5231 pt_entry_t *pte, tpte;
5234 struct spglist free;
5235 int pvh_gen, md_gen;
5237 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5238 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5239 ("pmap_remove_all: page %p is not managed", m));
5241 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5242 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5243 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5246 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
5248 if (!PMAP_TRYLOCK(pmap)) {
5249 pvh_gen = pvh->pv_gen;
5253 if (pvh_gen != pvh->pv_gen) {
5260 l3e = pmap_pml3e(pmap, va);
5261 (void)pmap_demote_l3e_locked(pmap, l3e, va, &lock);
5264 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
5266 if (!PMAP_TRYLOCK(pmap)) {
5267 pvh_gen = pvh->pv_gen;
5268 md_gen = m->md.pv_gen;
5272 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5278 pmap_resident_count_dec(pmap, 1);
5279 l3e = pmap_pml3e(pmap, pv->pv_va);
5280 KASSERT((*l3e & RPTE_LEAF) == 0, ("pmap_remove_all: found"
5281 " a 2mpage in page %p's pv list", m));
5282 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5283 tpte = pte_load_clear(pte);
5285 pmap->pm_stats.wired_count--;
5287 vm_page_aflag_set(m, PGA_REFERENCED);
5290 * Update the vm_page_t clean and reference bits.
5292 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5294 pmap_unuse_pt(pmap, pv->pv_va, *l3e, &free);
5295 pmap_invalidate_page(pmap, pv->pv_va);
5296 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5298 free_pv_entry(pmap, pv);
5301 vm_page_aflag_clear(m, PGA_WRITEABLE);
5303 vm_page_free_pages_toq(&free, true);
5307 * Destroy all managed, non-wired mappings in the given user-space
5308 * pmap. This pmap cannot be active on any processor besides the current one.
5311 * This function cannot be applied to the kernel pmap. Moreover, it
5312 * is not intended for general use. It is only to be used during
5313 * process termination. Consequently, it can be implemented in ways
5314 * that make it faster than pmap_remove(). First, it can more quickly
5315 * destroy mappings by iterating over the pmap's collection of PV
5316 * entries, rather than searching the page table. Second, it doesn't
5317 * have to test and clear the page table entries atomically, because
5318 * no processor is currently accessing the user address space. In
5319 * particular, a page table entry's dirty bit won't change state once
5320 * this function starts.
5322 * Although this function destroys all of the pmap's managed,
5323 * non-wired mappings, it can delay and batch the invalidation of TLB
5324 * entries without calling pmap_delayed_invl_started() and
5325 * pmap_delayed_invl_finished(). Because the pmap is not active on
5326 * any other processor, none of these TLB entries will ever be used
5327 * before their eventual invalidation. Consequently, there is no need
5328 * for either pmap_remove_all() or pmap_remove_write() to wait for
5329 * that eventual TLB invalidation.
5333 mmu_radix_remove_pages(pmap_t pmap)
5336 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
5337 pml3_entry_t ptel3e;
5338 pt_entry_t *pte, tpte;
5339 struct spglist free;
5340 vm_page_t m, mpte, mt;
5342 struct md_page *pvh;
5343 struct pv_chunk *pc, *npc;
5344 struct rwlock *lock;
5346 uint64_t inuse, bitmask;
5347 int allfree, field, freed, idx;
5348 boolean_t superpage;
5352 * Assert that the given pmap is only active on the current
5353 * CPU. Unfortunately, we cannot block another CPU from
5354 * activating the pmap while this function is executing.
5356 KASSERT(pmap->pm_pid == mfspr(SPR_PID),
5357 ("non-current asid %lu - expected %lu", pmap->pm_pid,
5364 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
5367 for (field = 0; field < _NPCM; field++) {
5368 inuse = ~pc->pc_map[field] & pc_freemask[field];
5369 while (inuse != 0) {
5370 bit = cnttzd(inuse);
5371 bitmask = 1UL << bit;
5372 idx = field * 64 + bit;
5373 pv = &pc->pc_pventry[idx];
5376 pte = pmap_pml2e(pmap, pv->pv_va);
5378 pte = pmap_l2e_to_l3e(pte, pv->pv_va);
5380 if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) {
5383 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
5385 pte = &pte[pmap_pte_index(pv->pv_va)];
5389 * Keep track whether 'tpte' is a
5390 * superpage explicitly instead of
5391 * relying on RPTE_LEAF being set.
5393 * This is because RPTE_LEAF is numerically
5394 * identical to PG_PTE_PAT and thus a
5395 * regular page could be mistaken for a superpage.
5401 if ((tpte & PG_V) == 0) {
5402 panic("bad pte va %lx pte %lx",
5407 * We cannot remove wired pages from a process' mapping at this time
5415 pa = tpte & PG_PS_FRAME;
5417 pa = tpte & PG_FRAME;
5419 m = PHYS_TO_VM_PAGE(pa);
5420 KASSERT(m->phys_addr == pa,
5421 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
5422 m, (uintmax_t)m->phys_addr,
5425 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
5426 m < &vm_page_array[vm_page_array_size],
5427 ("pmap_remove_pages: bad tpte %#jx",
5433 * Update the vm_page_t clean/reference bits.
5435 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5437 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5443 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5446 pc->pc_map[field] |= bitmask;
5448 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5449 pvh = pa_to_pvh(tpte & PG_PS_FRAME);
5450 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
5452 if (TAILQ_EMPTY(&pvh->pv_list)) {
5453 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5454 if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
5455 TAILQ_EMPTY(&mt->md.pv_list))
5456 vm_page_aflag_clear(mt, PGA_WRITEABLE);
5458 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
5460 pmap_resident_count_dec(pmap, 1);
5461 KASSERT(mpte->ref_count == NPTEPG,
5462 ("pmap_remove_pages: pte page wire count error"));
5463 mpte->ref_count = 0;
5464 pmap_add_delayed_free_list(mpte, &free, FALSE);
5467 pmap_resident_count_dec(pmap, 1);
5469 printf("freeing pv (%p, %p)\n",
5472 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5474 if ((m->a.flags & PGA_WRITEABLE) != 0 &&
5475 TAILQ_EMPTY(&m->md.pv_list) &&
5476 (m->flags & PG_FICTITIOUS) == 0) {
5477 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5478 if (TAILQ_EMPTY(&pvh->pv_list))
5479 vm_page_aflag_clear(m, PGA_WRITEABLE);
5482 pmap_unuse_pt(pmap, pv->pv_va, ptel3e, &free);
5486 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5487 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5488 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5490 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5496 pmap_invalidate_all(pmap);
5498 vm_page_free_pages_toq(&free, true);
5502 mmu_radix_remove_write(vm_page_t m)
5504 struct md_page *pvh;
5506 struct rwlock *lock;
5507 pv_entry_t next_pv, pv;
5509 pt_entry_t oldpte, *pte;
5510 int pvh_gen, md_gen;
5512 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5513 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5514 ("pmap_remove_write: page %p is not managed", m));
5515 vm_page_assert_busied(m);
5517 if (!pmap_page_is_write_mapped(m))
5519 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5520 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5521 pa_to_pvh(VM_PAGE_TO_PHYS(m));
5524 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
5526 if (!PMAP_TRYLOCK(pmap)) {
5527 pvh_gen = pvh->pv_gen;
5531 if (pvh_gen != pvh->pv_gen) {
5537 l3e = pmap_pml3e(pmap, pv->pv_va);
5538 if ((*l3e & PG_RW) != 0)
5539 (void)pmap_demote_l3e_locked(pmap, l3e, pv->pv_va, &lock);
5540 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
5541 ("inconsistent pv lock %p %p for page %p",
5542 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
5545 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
5547 if (!PMAP_TRYLOCK(pmap)) {
5548 pvh_gen = pvh->pv_gen;
5549 md_gen = m->md.pv_gen;
5553 if (pvh_gen != pvh->pv_gen ||
5554 md_gen != m->md.pv_gen) {
5560 l3e = pmap_pml3e(pmap, pv->pv_va);
5561 KASSERT((*l3e & RPTE_LEAF) == 0,
5562 ("pmap_remove_write: found a 2mpage in page %p's pv list",
5564 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5567 if (oldpte & PG_RW) {
5568 if (!atomic_cmpset_long(pte, oldpte,
5569 (oldpte | RPTE_EAA_R) & ~(PG_RW | PG_M)))
5571 if ((oldpte & PG_M) != 0)
5573 pmap_invalidate_page(pmap, pv->pv_va);
5578 vm_page_aflag_clear(m, PGA_WRITEABLE);
5582 * Clear the wired attribute from the mappings for the specified range of
5583 * addresses in the given pmap. Every valid mapping within that range
5584 * must have the wired attribute set. In contrast, invalid mappings
5585 * cannot have the wired attribute set, so they are ignored.
5587 * The wired attribute of the page table entry is not a hardware
5588 * feature, so there is no need to invalidate any TLB entries.
5589 * Since pmap_demote_l3e() for the wired entry must never fail,
5590 * pmap_delayed_invl_started()/finished() calls around the
5591 * function are not needed.
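 * Any valid mapping found within the range that does not have PG_W set
 * indicates a caller error and triggers a panic below.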
5594 mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5596 vm_offset_t va_next;
5602 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5604 for (; sva < eva; sva = va_next) {
5605 l1e = pmap_pml1e(pmap, sva);
5606 if ((*l1e & PG_V) == 0) {
5607 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5612 l2e = pmap_l1e_to_l2e(l1e, sva);
5613 if ((*l2e & PG_V) == 0) {
5614 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5619 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5622 l3e = pmap_l2e_to_l3e(l2e, sva);
5623 if ((*l3e & PG_V) == 0)
5625 if ((*l3e & RPTE_LEAF) != 0) {
5626 if ((*l3e & PG_W) == 0)
5627 panic("pmap_unwire: pde %#jx is missing PG_W",
5631 * Are we unwiring the entire large page? If not,
5632 * demote the mapping and fall through.
5634 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5635 atomic_clear_long(l3e, PG_W);
5636 pmap->pm_stats.wired_count -= L3_PAGE_SIZE /
5639 } else if (!pmap_demote_l3e(pmap, l3e, sva))
5640 panic("pmap_unwire: demotion failed");
5644 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
5646 MPASS(pte == pmap_pte(pmap, sva));
5647 if ((*pte & PG_V) == 0)
5649 if ((*pte & PG_W) == 0)
5650 panic("pmap_unwire: pte %#jx is missing PG_W",
5654 * PG_W must be cleared atomically. Although the pmap
5655 * lock synchronizes access to PG_W, another processor
5656 * could be setting PG_M and/or PG_A concurrently.
5658 atomic_clear_long(pte, PG_W);
5659 pmap->pm_stats.wired_count--;
5666 mmu_radix_zero_page(vm_page_t m)
5670 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5671 addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5676 mmu_radix_zero_page_area(vm_page_t m, int off, int size)
5680 CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
5681 MPASS(off + size <= PAGE_SIZE);
5682 addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5683 memset(addr + off, 0, size);
5690 mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5697 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
5700 l3ep = pmap_pml3e(pmap, addr);
5701 if (l3ep != NULL && (*l3ep & PG_V)) {
5702 if (*l3ep & RPTE_LEAF) {
5704 /* Compute the physical address of the 4KB page. */
5705 pa = ((*l3ep & PG_PS_FRAME) | (addr & L3_PAGE_MASK)) &
5707 val = MINCORE_SUPER;
5709 pte = *pmap_l3e_to_pte(l3ep, addr);
5710 pa = pte & PG_FRAME;
5718 if ((pte & PG_V) != 0) {
5719 val |= MINCORE_INCORE;
5720 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5721 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5722 if ((pte & PG_A) != 0)
5723 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5725 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5726 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
5727 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
5735 mmu_radix_activate(struct thread *td)
5740 CTR2(KTR_PMAP, "%s(%p)", __func__, td);
5742 pmap = vmspace_pmap(td->td_proc->p_vmspace);
5743 curpid = mfspr(SPR_PID);
5744 if (pmap->pm_pid > isa3_base_pid &&
5745 curpid != pmap->pm_pid) {
5746 mmu_radix_pid_set(pmap);
5752 * Increase the starting virtual address of the given mapping if a
5753 * different alignment might result in more superpage mappings.
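 * Rough example (2MB superpages): if the mapping's object-relative offset
 * is 0x130000 into its 2MB block, *addr is adjusted so that
 * (*addr & L3_PAGE_MASK) is also 0x130000.  Virtual address and offset then
 * agree modulo L3_PAGE_SIZE, which is what makes later promotion of the
 * range to 2MB mappings possible.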
5756 mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset,
5757 vm_offset_t *addr, vm_size_t size)
5760 CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
5762 vm_offset_t superpage_offset;
5764 if (size < L3_PAGE_SIZE)
5766 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5767 offset += ptoa(object->pg_color);
5768 superpage_offset = offset & L3_PAGE_MASK;
5769 if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) < L3_PAGE_SIZE ||
5770 (*addr & L3_PAGE_MASK) == superpage_offset)
5772 if ((*addr & L3_PAGE_MASK) < superpage_offset)
5773 *addr = (*addr & ~L3_PAGE_MASK) + superpage_offset;
5775 *addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset;
5779 mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
5781 vm_offset_t va, tmpva, ppa, offset;
5783 ppa = trunc_page(pa);
5784 offset = pa & PAGE_MASK;
5785 size = roundup2(offset + size, PAGE_SIZE);
5786 if (pa < powerpc_ptob(Maxmem))
5787 panic("bad pa: %#lx less than Maxmem %#lx\n",
5788 pa, powerpc_ptob(Maxmem));
5789 va = kva_alloc(size);
5791 printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr);
5792 KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr));
5795 panic("%s: Couldn't alloc kernel virtual memory", __func__);
5797 for (tmpva = va; size > 0;) {
5798 mmu_radix_kenter_attr(tmpva, ppa, attr);
5805 return ((void *)(va + offset));
5809 mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
5812 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
5814 return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
5818 mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5821 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
5822 m->md.mdpg_cache_attrs = ma;
5825 * If "m" is a normal page, update its direct mapping. This update
5826 * can be relied upon to perform any cache operations that are
5827 * required for data coherence.
5829 if ((m->flags & PG_FICTITIOUS) == 0 &&
5830 mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
5831 PAGE_SIZE, m->md.mdpg_cache_attrs))
5832 panic("memory attribute change on the direct map failed");
5836 mmu_radix_unmapdev(vm_offset_t va, vm_size_t size)
5840 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
5841 /* If we gave a direct map region in pmap_mapdev, do nothing */
5842 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
5845 offset = va & PAGE_MASK;
5846 size = round_page(offset + size);
5847 va = trunc_page(va);
5849 if (pmap_initialized)
5853 static __inline void
5854 pmap_pte_attr(pt_entry_t *pte, uint64_t cache_bits, uint64_t mask)
5856 uint64_t opte, npte;
5859 * The cache mode bits are updated with a compare-and-swap loop so
5860 * that a concurrent hardware update of PG_A/PG_M is not lost.
5864 npte = opte & ~mask;
5866 } while (npte != opte && !atomic_cmpset_long(pte, opte, npte));
5870 * Tries to demote a 1GB page mapping.
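 * On success the 1GB leaf in *l2e is replaced by a newly allocated page
 * directory page filled with 2MB leaf entries covering the same range; the
 * page walk cache is then flushed with pmap_invalidate_all() (see the
 * "Flush PWC" XXX below).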
5873 pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
5875 pml2_entry_t oldpdpe;
5876 pml3_entry_t *firstpde, newpde, *pde;
5880 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5882 KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
5883 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
5884 pdpg = vm_page_alloc(NULL, va >> L2_PAGE_SIZE_SHIFT,
5885 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
5887 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
5888 " in pmap %p", va, pmap);
5891 pdpgpa = VM_PAGE_TO_PHYS(pdpg);
5892 firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
5893 KASSERT((oldpdpe & PG_A) != 0,
5894 ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
5895 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
5896 ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
5900 * Initialize the page directory page.
5902 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
5904 newpde += L3_PAGE_SIZE;
5908 * Demote the mapping.
5910 pde_store(l2e, pdpgpa);
5913 * Flush PWC --- XXX revisit
5915 pmap_invalidate_all(pmap);
5917 pmap_l2e_demotions++;
5918 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
5919 " in pmap %p", va, pmap);
5924 mmu_radix_kextract(vm_offset_t va)
5929 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
5930 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
5931 pa = DMAP_TO_PHYS(va);
5933 l3e = *pmap_pml3e(kernel_pmap, va);
5934 if (l3e & RPTE_LEAF) {
5935 pa = (l3e & PG_PS_FRAME) | (va & L3_PAGE_MASK);
5939 * Beware of a concurrent promotion that changes the
5940 * PDE at this point! For example, vtopte() must not
5941 * be used to access the PTE because it would use the
5942 * new PDE. It is, however, safe to use the old PDE
5943 * because the page table page is preserved by the
5946 pa = *pmap_l3e_to_pte(&l3e, va);
5947 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
5955 mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
5958 if (ma != VM_MEMATTR_DEFAULT) {
5959 return pmap_cache_bits(ma);
5963 * Assume the page is cache inhibited and access is guarded unless
5964 * it's in our available memory array.
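 * In other words, physical addresses that fall within one of the
 * pregions[] entries (real memory) get ordinary cacheable attributes
 * (RPTE_ATTR_MEM); everything else is treated as device space and mapped
 * cache-inhibited and guarded (RPTE_ATTR_GUARDEDIO).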
5966 for (int i = 0; i < pregions_sz; i++) {
5967 if ((pa >= pregions[i].mr_start) &&
5968 (pa < (pregions[i].mr_start + pregions[i].mr_size)))
5969 return (RPTE_ATTR_MEM);
5971 return (RPTE_ATTR_GUARDEDIO);
5975 mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
5977 pt_entry_t *pte, pteval;
5978 uint64_t cache_bits;
5982 pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
5983 cache_bits = mmu_radix_calc_wimg(pa, ma);
5984 pte_store(pte, pteval | cache_bits);
5988 mmu_radix_kremove(vm_offset_t va)
5992 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
5999 mmu_radix_decode_kernel_ptr(vm_offset_t addr,
6000 int *is_user, vm_offset_t *decoded)
6003 CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
6005 *is_user = (addr < VM_MAXUSER_ADDRESS);
6010 mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
6013 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
6014 return (mem_valid(pa, size));
6018 mmu_radix_scan_init()
6021 CTR1(KTR_PMAP, "%s()", __func__);
6026 mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz,
6029 CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
6034 mmu_radix_quick_enter_page(vm_page_t m)
6038 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
6039 paddr = VM_PAGE_TO_PHYS(m);
6040 return (PHYS_TO_DMAP(paddr));
6044 mmu_radix_quick_remove_page(vm_offset_t addr __unused)
6046 /* no work to do here */
6047 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
6051 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
6053 cpu_flush_dcache((void *)sva, eva - sva);
6057 mmu_radix_change_attr(vm_offset_t va, vm_size_t size,
6062 CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode);
6063 PMAP_LOCK(kernel_pmap);
6064 error = pmap_change_attr_locked(va, size, mode, true);
6065 PMAP_UNLOCK(kernel_pmap);
6070 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
6072 vm_offset_t base, offset, tmpva;
6073 vm_paddr_t pa_start, pa_end, pa_end1;
6077 int cache_bits, error;
6080 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
6081 base = trunc_page(va);
6082 offset = va & PAGE_MASK;
6083 size = round_page(offset + size);
6086 * Only supported on kernel virtual addresses, including the direct
6087 * map.
6089 if (base < DMAP_MIN_ADDRESS)
6092 cache_bits = pmap_cache_bits(mode);
6096 * Pages that aren't mapped aren't supported. Also break down 2MB pages
6097 * into 4KB pages if required.
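 * The work is done in two passes: the loop below demotes any 1GB or 2MB
 * leaf that needs a different attribute but is not wholly contained in the
 * range, and a second loop further down rewrites the attribute bits and,
 * for kernel VAs backed by the direct map, recursively applies the same
 * change to the DMAP alias of each physical run.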
6099 for (tmpva = base; tmpva < base + size; ) {
6100 l2e = pmap_pml2e(kernel_pmap, tmpva);
6101 if (l2e == NULL || *l2e == 0)
6103 if (*l2e & RPTE_LEAF) {
6105 * If the current 1GB page already has the required
6106 * memory type, then we need not demote this page. Just
6107 * increment tmpva to the next 1GB page frame.
6109 if ((*l2e & RPTE_ATTR_MASK) == cache_bits) {
6110 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6115 * If the current offset aligns with a 1GB page frame
6116 * and there is at least 1GB left within the range, then
6117 * we need not break down this page into 2MB pages.
6119 if ((tmpva & L2_PAGE_MASK) == 0 &&
6120 tmpva + L2_PAGE_MASK < base + size) {
6121 tmpva += L2_PAGE_SIZE;
6124 if (!pmap_demote_l2e(kernel_pmap, l2e, tmpva))
6127 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6128 KASSERT(l3e != NULL, ("no l3e entry for %#lx in %p\n",
6132 if (*l3e & RPTE_LEAF) {
6134 * If the current 2MB page already has the required
6135 * memory type, then we need not demote this page. Just
6136 * increment tmpva to the next 2MB page frame.
6138 if ((*l3e & RPTE_ATTR_MASK) == cache_bits) {
6139 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6144 * If the current offset aligns with a 2MB page frame
6145 * and there is at least 2MB left within the range, then
6146 * we need not break down this page into 4KB pages.
6148 if ((tmpva & L3_PAGE_MASK) == 0 &&
6149 tmpva + L3_PAGE_MASK < base + size) {
6150 tmpva += L3_PAGE_SIZE;
6153 if (!pmap_demote_l3e(kernel_pmap, l3e, tmpva))
6156 pte = pmap_l3e_to_pte(l3e, tmpva);
6164 * Ok, all the pages exist, so run through them updating their
6165 * cache mode if required.
6167 pa_start = pa_end = 0;
6168 for (tmpva = base; tmpva < base + size; ) {
6169 l2e = pmap_pml2e(kernel_pmap, tmpva);
6170 if (*l2e & RPTE_LEAF) {
6171 if ((*l2e & RPTE_ATTR_MASK) != cache_bits) {
6172 pmap_pte_attr(l2e, cache_bits,
6176 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6177 (*l2e & PG_PS_FRAME) < dmaplimit) {
6178 if (pa_start == pa_end) {
6179 /* Start physical address run. */
6180 pa_start = *l2e & PG_PS_FRAME;
6181 pa_end = pa_start + L2_PAGE_SIZE;
6182 } else if (pa_end == (*l2e & PG_PS_FRAME))
6183 pa_end += L2_PAGE_SIZE;
6185 /* Run ended, update direct map. */
6186 error = pmap_change_attr_locked(
6187 PHYS_TO_DMAP(pa_start),
6188 pa_end - pa_start, mode, flush);
6191 /* Start physical address run. */
6192 pa_start = *l2e & PG_PS_FRAME;
6193 pa_end = pa_start + L2_PAGE_SIZE;
6196 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6199 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6200 if (*l3e & RPTE_LEAF) {
6201 if ((*l3e & RPTE_ATTR_MASK) != cache_bits) {
6202 pmap_pte_attr(l3e, cache_bits,
6206 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6207 (*l3e & PG_PS_FRAME) < dmaplimit) {
6208 if (pa_start == pa_end) {
6209 /* Start physical address run. */
6210 pa_start = *l3e & PG_PS_FRAME;
6211 pa_end = pa_start + L3_PAGE_SIZE;
6212 } else if (pa_end == (*l3e & PG_PS_FRAME))
6213 pa_end += L3_PAGE_SIZE;
6215 /* Run ended, update direct map. */
6216 error = pmap_change_attr_locked(
6217 PHYS_TO_DMAP(pa_start),
6218 pa_end - pa_start, mode, flush);
6221 /* Start physical address run. */
6222 pa_start = *l3e & PG_PS_FRAME;
6223 pa_end = pa_start + L3_PAGE_SIZE;
6226 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6228 pte = pmap_l3e_to_pte(l3e, tmpva);
6229 if ((*pte & RPTE_ATTR_MASK) != cache_bits) {
6230 pmap_pte_attr(pte, cache_bits,
6234 if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6235 (*pte & PG_FRAME) < dmaplimit) {
6236 if (pa_start == pa_end) {
6237 /* Start physical address run. */
6238 pa_start = *pte & PG_FRAME;
6239 pa_end = pa_start + PAGE_SIZE;
6240 } else if (pa_end == (*pte & PG_FRAME))
6241 pa_end += PAGE_SIZE;
6243 /* Run ended, update direct map. */
6244 error = pmap_change_attr_locked(
6245 PHYS_TO_DMAP(pa_start),
6246 pa_end - pa_start, mode, flush);
6249 /* Start physical address run. */
6250 pa_start = *pte & PG_FRAME;
6251 pa_end = pa_start + PAGE_SIZE;
6257 if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
6258 pa_end1 = MIN(pa_end, dmaplimit);
6259 if (pa_start != pa_end1)
6260 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
6261 pa_end1 - pa_start, mode, flush);
6265 * Flush CPU caches if required to make sure any data isn't cached that
6266 * shouldn't be, etc.
6269 pmap_invalidate_all(kernel_pmap);
6272 pmap_invalidate_cache_range(base, tmpva);
6279 * Allocate physical memory for the vm_page array and map it into KVA,
6280 * attempting to back the vm_pages with domain-local memory.
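 * The loop below walks the array's KVA in L3_PAGE_SIZE (2MB) steps,
 * chooses the NUMA domain from the pfns that each chunk will describe,
 * allocates the backing memory with vm_phys_early_alloc() from that
 * domain, and registers the pages with dump_add_page() so they appear in
 * kernel dumps.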
6283 mmu_radix_page_array_startup(long pages)
6294 vm_offset_t start, end;
6296 vm_page_array_size = pages;
6298 start = VM_MIN_KERNEL_ADDRESS;
6299 end = start + pages * sizeof(struct vm_page);
6301 pa = vm_phys_early_alloc(0, end - start);
6303 start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
6305 /* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */
6306 for (va = start; va < end; va += L3_PAGE_SIZE) {
6307 pfn = first_page + (va - start) / sizeof(struct vm_page);
6308 domain = _vm_phys_domain(ptoa(pfn));
6309 l2e = pmap_pml2e(kernel_pmap, va);
6310 if ((*l2e & PG_V) == 0) {
6311 pa = vm_phys_early_alloc(domain, PAGE_SIZE);
6313 pagezero(PHYS_TO_DMAP(pa));
6314 pde_store(l2e, (pml2_entry_t)pa);
6316 pde = pmap_l2e_to_l3e(l2e, va);
6317 if ((*pde & PG_V) != 0)
6318 panic("Unexpected pde %p", pde);
6319 pa = vm_phys_early_alloc(domain, L3_PAGE_SIZE);
6320 for (i = 0; i < NPDEPG; i++)
6321 dump_add_page(pa + i * PAGE_SIZE);
6322 newl3 = (pml3_entry_t)(pa | RPTE_EAA_P | RPTE_EAA_R | RPTE_EAA_W);
6323 pte_store(pde, newl3);
6326 vm_page_array = (vm_page_t)start;
6330 #include <sys/kdb.h>
6331 #include <ddb/ddb.h>
6334 pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va)
6341 l1e = &l1[pmap_pml1e_index(va)];
6342 db_printf("VA %#016lx l1e %#016lx", va, *l1e);
6343 if ((*l1e & PG_V) == 0) {
6347 l2e = pmap_l1e_to_l2e(l1e, va);
6348 db_printf(" l2e %#016lx", *l2e);
6349 if ((*l2e & PG_V) == 0 || (*l2e & RPTE_LEAF) != 0) {
6353 l3e = pmap_l2e_to_l3e(l2e, va);
6354 db_printf(" l3e %#016lx", *l3e);
6355 if ((*l3e & PG_V) == 0 || (*l3e & RPTE_LEAF) != 0) {
6359 pte = pmap_l3e_to_pte(l3e, va);
6360 db_printf(" pte %#016lx\n", *pte);
6364 pmap_page_print_mappings(vm_page_t m)
6369 db_printf("page %p(%lx)\n", m, m->phys_addr);
6370 /* need to elide locks if running in ddb */
6371 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
6372 db_printf("pv: %p ", pv);
6373 db_printf("va: %#016lx ", pv->pv_va);
6375 db_printf("pmap %p ", pmap);
6377 db_printf("asid: %lu\n", pmap->pm_pid);
6378 pmap_pte_walk(pmap->pm_pml1, pv->pv_va);
6383 DB_SHOW_COMMAND(pte, pmap_print_pte)
6389 db_printf("show pte addr\n");
6392 va = (vm_offset_t)addr;
6394 if (va >= DMAP_MIN_ADDRESS)
6396 else if (kdb_thread != NULL)
6397 pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
6399 pmap = vmspace_pmap(curthread->td_proc->p_vmspace);
6401 pmap_pte_walk(pmap->pm_pml1, va);