2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2008-2015 Nathan Whitehorn
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 * Manages physical address maps.
35 * Since the information managed by this module is also stored by the
36 * logical address mapping module, this module may throw away valid virtual
37 * to physical mappings at almost any time. However, invalidations of
38 * mappings must be done as requested.
40 * In order to cope with hardware architectures which make virtual to
41 * physical map invalidates expensive, this module may delay invalidate or
42 * reduced protection operations until such time as they are actually
43 * necessary. This module is given full information as to which processors
44 * are currently using which maps, and to when physical maps must be made
48 #include "opt_kstack_pages.h"
50 #include <sys/param.h>
51 #include <sys/kernel.h>
53 #include <sys/queue.h>
54 #include <sys/cpuset.h>
55 #include <sys/kerneldump.h>
58 #include <sys/msgbuf.h>
59 #include <sys/malloc.h>
61 #include <sys/mutex.h>
63 #include <sys/rwlock.h>
64 #include <sys/sched.h>
65 #include <sys/sysctl.h>
66 #include <sys/systm.h>
67 #include <sys/vmmeter.h>
69 #include <sys/reboot.h>
73 #include <dev/ofw/openfirm.h>
77 #include <vm/vm_param.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_phys.h>
81 #include <vm/vm_map.h>
82 #include <vm/vm_object.h>
83 #include <vm/vm_extern.h>
84 #include <vm/vm_pageout.h>
85 #include <vm/vm_dumpset.h>
86 #include <vm/vm_reserv.h>
89 #include <machine/_inttypes.h>
90 #include <machine/cpu.h>
91 #include <machine/ifunc.h>
92 #include <machine/platform.h>
93 #include <machine/frame.h>
94 #include <machine/md_var.h>
95 #include <machine/psl.h>
96 #include <machine/bat.h>
97 #include <machine/hid.h>
98 #include <machine/pte.h>
99 #include <machine/sr.h>
100 #include <machine/trap.h>
101 #include <machine/mmuvar.h>
103 #include "mmu_oea64.h"
105 void moea64_release_vsid(uint64_t vsid);
106 uintptr_t moea64_get_unique_vsid(void);
108 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR)
109 #define ENABLE_TRANS(msr) mtmsr(msr)
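/*
 * Usage sketch for the pair above (illustrative; callers in this file follow
 * the same shape). DISABLE_TRANS() saves the current MSR and clears PSL_DR,
 * turning off data relocation; ENABLE_TRANS() restores the saved value:
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	... access physical addresses untranslated ...
 *	ENABLE_TRANS(msr);
 */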
111 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
112 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
113 #define VSID_HASH_MASK 0x0000007fffffffffULL
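/*
 * Illustrative round trip through the macros above: with sr = 0 (a small
 * segment-register index is assumed here), VSID_MAKE(0, 0x12345) places the
 * 20-bit hash in bits 4..23, and VSID_TO_HASH() then recovers 0x12345.
 */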
118 * There are two locks of interest: the page locks and the pmap locks, which
119 * protect their individual PVO lists and are locked in that order. The contents
120 * of all PVO entries are protected by the locks of their respective pmaps.
121 * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
126 #define PV_LOCK_COUNT PA_LOCK_COUNT
127 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
130 * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
131 * NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the
132 * index at (N << 45).
135 #define PV_LOCK_IDX(pa) ((pa_index(pa) * (((pa) >> 45) + 1)) % PV_LOCK_COUNT)
137 #define PV_LOCK_IDX(pa) (pa_index(pa) % PV_LOCK_COUNT)
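/*
 * The two PV_LOCK_IDX() definitions above are the NUMA-aware and plain
 * variants. Worked example for the NUMA-aware form: a page at
 * pa = (1UL << 45) + 0x2000 sits in domain 1, so its page index is multiplied
 * by 2 before the modulo, skewing the hash so that different domains do not
 * contend on the same subset of pv_lock[] entries.
 */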
139 #define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)]))
140 #define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa))
141 #define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa))
142 #define PV_LOCKASSERT(pa) mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
143 #define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m))
144 #define PV_PAGE_UNLOCK(m) PV_UNLOCK(VM_PAGE_TO_PHYS(m))
145 #define PV_PAGE_LOCKASSERT(m) PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
147 /* Superpage PV lock */
149 #define PV_LOCK_SIZE (1<<PDRSHIFT)
151 static __always_inline void
152 moea64_sp_pv_lock(vm_paddr_t pa)
156 /* Note: breaking when pa_end is reached to avoid overflows */
157 pa_end = pa + (HPT_SP_SIZE - PV_LOCK_SIZE);
159 mtx_lock_flags(PV_LOCKPTR(pa), MTX_DUPOK);
166 static __always_inline void
167 moea64_sp_pv_unlock(vm_paddr_t pa)
171 /* Note: breaking when pa_end is reached to avoid overflows */
173 pa += HPT_SP_SIZE - PV_LOCK_SIZE;
175 mtx_unlock_flags(PV_LOCKPTR(pa), MTX_DUPOK);
182 #define SP_PV_LOCK_ALIGNED(pa) moea64_sp_pv_lock(pa)
183 #define SP_PV_UNLOCK_ALIGNED(pa) moea64_sp_pv_unlock(pa)
184 #define SP_PV_LOCK(pa) moea64_sp_pv_lock((pa) & ~HPT_SP_MASK)
185 #define SP_PV_UNLOCK(pa) moea64_sp_pv_unlock((pa) & ~HPT_SP_MASK)
186 #define SP_PV_PAGE_LOCK(m) SP_PV_LOCK(VM_PAGE_TO_PHYS(m))
187 #define SP_PV_PAGE_UNLOCK(m) SP_PV_UNLOCK(VM_PAGE_TO_PHYS(m))
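/*
 * The superpage helpers above acquire every pv lock covering an HPT_SP_SIZE
 * (16 MB superpage) region, stepping PV_LOCK_SIZE bytes at a time; MTX_DUPOK
 * is needed because several of those addresses may hash to the same
 * pv_lock[] entry.
 */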
196 extern unsigned char _etext[];
197 extern unsigned char _end[];
199 extern void *slbtrap, *slbtrapend;
202 * Map of physical memory regions.
204 static struct mem_region *regions;
205 static struct mem_region *pregions;
206 static struct numa_mem_region *numa_pregions;
207 static u_int phys_avail_count;
208 static int regions_sz, pregions_sz, numapregions_sz;
210 extern void bs_remap_earlyboot(void);
213 * Lock for the SLB tables.
215 struct mtx moea64_slb_mutex;
220 u_long moea64_pteg_count;
221 u_long moea64_pteg_mask;
227 uma_zone_t moea64_pvo_zone; /* zone for pvo entries */
229 static struct pvo_entry *moea64_bpvo_pool;
230 static int moea64_bpvo_pool_index = 0;
231 static int moea64_bpvo_pool_size = 0;
232 SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
233 &moea64_bpvo_pool_index, 0, "");
235 #define BPVO_POOL_SIZE 327680 /* Sensible historical default value */
236 #define BPVO_POOL_EXPANSION_FACTOR 3
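/*
 * The bootstrap PVO pool defaults either to BPVO_POOL_SIZE entries or to a
 * value scaled from physical memory by BPVO_POOL_EXPANSION_FACTOR (see
 * moea64_mid_bootstrap()). It can also be set explicitly at boot, e.g. via
 * loader.conf (the value below is purely illustrative):
 *
 *	machdep.moea64_bpvo_pool_size="655360"
 */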
237 #define VSID_NBPW (sizeof(u_int32_t) * 8)
239 #define NVSIDS (NPMAPS * 16)
240 #define VSID_HASHMASK 0xffffffffUL
242 #define NVSIDS NPMAPS
243 #define VSID_HASHMASK 0xfffffUL
245 static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
247 static boolean_t moea64_initialized = FALSE;
253 u_int moea64_pte_valid = 0;
254 u_int moea64_pte_overflow = 0;
255 u_int moea64_pvo_entries = 0;
256 u_int moea64_pvo_enter_calls = 0;
257 u_int moea64_pvo_remove_calls = 0;
258 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
259 &moea64_pte_valid, 0, "");
260 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
261 &moea64_pte_overflow, 0, "");
262 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
263 &moea64_pvo_entries, 0, "");
264 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
265 &moea64_pvo_enter_calls, 0, "");
266 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
267 &moea64_pvo_remove_calls, 0, "");
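/*
 * The counters above are exported read-only under the machdep sysctl tree;
 * e.g. "sysctl machdep.moea64_pvo_enter_calls" (illustrative) reports how
 * many times moea64_pvo_enter() has run since boot.
 */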
270 vm_offset_t moea64_scratchpage_va[2];
271 struct pvo_entry *moea64_scratchpage_pvo[2];
272 struct mtx moea64_scratchpage_mtx;
274 uint64_t moea64_large_page_mask = 0;
275 uint64_t moea64_large_page_size = 0;
276 int moea64_large_page_shift = 0;
277 bool moea64_has_lp_4k_16m = false;
282 static int moea64_pvo_enter(struct pvo_entry *pvo,
283 struct pvo_head *pvo_head, struct pvo_entry **oldpvo);
284 static void moea64_pvo_remove_from_pmap(struct pvo_entry *pvo);
285 static void moea64_pvo_remove_from_page(struct pvo_entry *pvo);
286 static void moea64_pvo_remove_from_page_locked(
287 struct pvo_entry *pvo, vm_page_t m);
288 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
293 static boolean_t moea64_query_bit(vm_page_t, uint64_t);
294 static u_int moea64_clear_bit(vm_page_t, uint64_t);
295 static void moea64_kremove(vm_offset_t);
296 static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
297 vm_paddr_t pa, vm_size_t sz);
298 static void moea64_pmap_init_qpages(void);
299 static void moea64_remove_locked(pmap_t, vm_offset_t,
300 vm_offset_t, struct pvo_dlist *);
303 * Superpages data and routines.
307 * PVO flags (in vaddr) that must match for promotion to succeed.
308 * Note that protection bits are checked separately, as they reside in
311 #define PVO_FLAGS_PROMOTE (PVO_WIRED | PVO_MANAGED | PVO_PTEGIDX_VALID)
313 #define PVO_IS_SP(pvo) (((pvo)->pvo_vaddr & PVO_LARGE) && \
314 (pvo)->pvo_pmap != kernel_pmap)
316 /* Get physical address from PVO. */
317 #define PVO_PADDR(pvo) moea64_pvo_paddr(pvo)
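/*
 * Example for PVO_PADDR() on a superpage mapping (see moea64_pvo_paddr()
 * below): only the superpage-aligned frame is usable from pvo_pte.pa, so the
 * low HPT_SP_MASK bits of the physical address are taken from the mapping's
 * virtual address instead.
 */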
319 /* MD page flag indicating that the page is a superpage. */
320 #define MDPG_ATTR_SP 0x40000000
322 SYSCTL_DECL(_vm_pmap);
324 static SYSCTL_NODE(_vm_pmap, OID_AUTO, sp, CTLFLAG_RD, 0,
325 "SP page mapping counters");
327 static u_long sp_demotions;
328 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, demotions, CTLFLAG_RD,
329 &sp_demotions, 0, "SP page demotions");
331 static u_long sp_mappings;
332 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, mappings, CTLFLAG_RD,
333 &sp_mappings, 0, "SP page mappings");
335 static u_long sp_p_failures;
336 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_failures, CTLFLAG_RD,
337 &sp_p_failures, 0, "SP page promotion failures");
339 static u_long sp_p_fail_pa;
340 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_pa, CTLFLAG_RD,
341 &sp_p_fail_pa, 0, "SP page promotion failure: PAs don't match");
343 static u_long sp_p_fail_flags;
344 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_flags, CTLFLAG_RD,
345 &sp_p_fail_flags, 0, "SP page promotion failure: page flags don't match");
347 static u_long sp_p_fail_prot;
348 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_prot, CTLFLAG_RD,
350 "SP page promotion failure: page protections don't match");
352 static u_long sp_p_fail_wimg;
353 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_wimg, CTLFLAG_RD,
354 &sp_p_fail_wimg, 0, "SP page promotion failure: WIMG bits don't match");
356 static u_long sp_promotions;
357 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, promotions, CTLFLAG_RD,
358 &sp_promotions, 0, "SP page promotions");
360 static bool moea64_ps_enabled(pmap_t);
361 static void moea64_align_superpage(vm_object_t, vm_ooffset_t,
362 vm_offset_t *, vm_size_t);
364 static int moea64_sp_enter(pmap_t pmap, vm_offset_t va,
365 vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind);
366 static struct pvo_entry *moea64_sp_remove(struct pvo_entry *sp,
367 struct pvo_dlist *tofree);
369 static void moea64_sp_promote(pmap_t pmap, vm_offset_t va, vm_page_t m);
370 static void moea64_sp_demote_aligned(struct pvo_entry *sp);
371 static void moea64_sp_demote(struct pvo_entry *pvo);
373 static struct pvo_entry *moea64_sp_unwire(struct pvo_entry *sp);
374 static struct pvo_entry *moea64_sp_protect(struct pvo_entry *sp,
377 static int64_t moea64_sp_query(struct pvo_entry *pvo, uint64_t ptebit);
378 static int64_t moea64_sp_clear(struct pvo_entry *pvo, vm_page_t m,
381 static __inline bool moea64_sp_pvo_in_range(struct pvo_entry *pvo,
382 vm_offset_t sva, vm_offset_t eva);
385 * Kernel MMU interface
387 void moea64_clear_modify(vm_page_t);
388 void moea64_copy_page(vm_page_t, vm_page_t);
389 void moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
390 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
391 int moea64_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
392 u_int flags, int8_t psind);
393 void moea64_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
395 void moea64_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
396 vm_paddr_t moea64_extract(pmap_t, vm_offset_t);
397 vm_page_t moea64_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
398 void moea64_init(void);
399 boolean_t moea64_is_modified(vm_page_t);
400 boolean_t moea64_is_prefaultable(pmap_t, vm_offset_t);
401 boolean_t moea64_is_referenced(vm_page_t);
402 int moea64_ts_referenced(vm_page_t);
403 vm_offset_t moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
404 boolean_t moea64_page_exists_quick(pmap_t, vm_page_t);
405 void moea64_page_init(vm_page_t);
406 int moea64_page_wired_mappings(vm_page_t);
407 int moea64_pinit(pmap_t);
408 void moea64_pinit0(pmap_t);
409 void moea64_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
410 void moea64_qenter(vm_offset_t, vm_page_t *, int);
411 void moea64_qremove(vm_offset_t, int);
412 void moea64_release(pmap_t);
413 void moea64_remove(pmap_t, vm_offset_t, vm_offset_t);
414 void moea64_remove_pages(pmap_t);
415 void moea64_remove_all(vm_page_t);
416 void moea64_remove_write(vm_page_t);
417 void moea64_unwire(pmap_t, vm_offset_t, vm_offset_t);
418 void moea64_zero_page(vm_page_t);
419 void moea64_zero_page_area(vm_page_t, int, int);
420 void moea64_activate(struct thread *);
421 void moea64_deactivate(struct thread *);
422 void *moea64_mapdev(vm_paddr_t, vm_size_t);
423 void *moea64_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
424 void moea64_unmapdev(vm_offset_t, vm_size_t);
425 vm_paddr_t moea64_kextract(vm_offset_t);
426 void moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma);
427 void moea64_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
428 void moea64_kenter(vm_offset_t, vm_paddr_t);
429 boolean_t moea64_dev_direct_mapped(vm_paddr_t, vm_size_t);
430 static void moea64_sync_icache(pmap_t, vm_offset_t, vm_size_t);
431 void moea64_dumpsys_map(vm_paddr_t pa, size_t sz,
433 void moea64_scan_init(void);
434 vm_offset_t moea64_quick_enter_page(vm_page_t m);
435 void moea64_quick_remove_page(vm_offset_t addr);
436 boolean_t moea64_page_is_mapped(vm_page_t m);
437 static int moea64_map_user_ptr(pmap_t pm,
438 volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
439 static int moea64_decode_kernel_ptr(vm_offset_t addr,
440 int *is_user, vm_offset_t *decoded_addr);
441 static size_t moea64_scan_pmap(void);
442 static void *moea64_dump_pmap_init(unsigned blkpgs);
444 static void moea64_page_array_startup(long);
446 static int moea64_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
448 static struct pmap_funcs moea64_methods = {
449 .clear_modify = moea64_clear_modify,
450 .copy_page = moea64_copy_page,
451 .copy_pages = moea64_copy_pages,
452 .enter = moea64_enter,
453 .enter_object = moea64_enter_object,
454 .enter_quick = moea64_enter_quick,
455 .extract = moea64_extract,
456 .extract_and_hold = moea64_extract_and_hold,
458 .is_modified = moea64_is_modified,
459 .is_prefaultable = moea64_is_prefaultable,
460 .is_referenced = moea64_is_referenced,
461 .ts_referenced = moea64_ts_referenced,
463 .mincore = moea64_mincore,
464 .page_exists_quick = moea64_page_exists_quick,
465 .page_init = moea64_page_init,
466 .page_wired_mappings = moea64_page_wired_mappings,
467 .pinit = moea64_pinit,
468 .pinit0 = moea64_pinit0,
469 .protect = moea64_protect,
470 .qenter = moea64_qenter,
471 .qremove = moea64_qremove,
472 .release = moea64_release,
473 .remove = moea64_remove,
474 .remove_pages = moea64_remove_pages,
475 .remove_all = moea64_remove_all,
476 .remove_write = moea64_remove_write,
477 .sync_icache = moea64_sync_icache,
478 .unwire = moea64_unwire,
479 .zero_page = moea64_zero_page,
480 .zero_page_area = moea64_zero_page_area,
481 .activate = moea64_activate,
482 .deactivate = moea64_deactivate,
483 .page_set_memattr = moea64_page_set_memattr,
484 .quick_enter_page = moea64_quick_enter_page,
485 .quick_remove_page = moea64_quick_remove_page,
486 .page_is_mapped = moea64_page_is_mapped,
488 .page_array_startup = moea64_page_array_startup,
490 .ps_enabled = moea64_ps_enabled,
491 .align_superpage = moea64_align_superpage,
493 /* Internal interfaces */
494 .mapdev = moea64_mapdev,
495 .mapdev_attr = moea64_mapdev_attr,
496 .unmapdev = moea64_unmapdev,
497 .kextract = moea64_kextract,
498 .kenter = moea64_kenter,
499 .kenter_attr = moea64_kenter_attr,
500 .dev_direct_mapped = moea64_dev_direct_mapped,
501 .dumpsys_pa_init = moea64_scan_init,
502 .dumpsys_scan_pmap = moea64_scan_pmap,
503 .dumpsys_dump_pmap_init = moea64_dump_pmap_init,
504 .dumpsys_map_chunk = moea64_dumpsys_map,
505 .map_user_ptr = moea64_map_user_ptr,
506 .decode_kernel_ptr = moea64_decode_kernel_ptr,
509 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods);
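/*
 * moea64_methods, registered above as the "mmu_oea64_base" class, supplies
 * the generic OEA64 pmap entry points; the page-table-specific back ends are
 * expected to build on this base (hence the "_base" name).
 */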
512 * Get physical address from PVO.
514 * For superpages, the lower bits are not stored on pvo_pte.pa and must be
517 static __always_inline vm_paddr_t
518 moea64_pvo_paddr(struct pvo_entry *pvo)
522 pa = (pvo)->pvo_pte.pa & LPTE_RPGN;
524 if (PVO_IS_SP(pvo)) {
525 pa &= ~HPT_SP_MASK; /* This is needed to clear LPTE_LP bits. */
526 pa |= PVO_VADDR(pvo) & HPT_SP_MASK;
531 static struct pvo_head *
532 vm_page_to_pvoh(vm_page_t m)
535 mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
536 return (&m->md.mdpg_pvoh);
539 static struct pvo_entry *
540 alloc_pvo_entry(int bootstrap)
542 struct pvo_entry *pvo;
544 if (!moea64_initialized || bootstrap) {
545 if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
546 panic("%s: bpvo pool exhausted, index=%d, size=%d, bytes=%zd."
547 "Try setting machdep.moea64_bpvo_pool_size tunable",
548 __func__, moea64_bpvo_pool_index,
549 moea64_bpvo_pool_size,
550 moea64_bpvo_pool_size * sizeof(struct pvo_entry));
552 pvo = &moea64_bpvo_pool[
553 atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
554 bzero(pvo, sizeof(*pvo));
555 pvo->pvo_vaddr = PVO_BOOTSTRAP;
557 pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT | M_ZERO);
563 init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
569 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
571 pvo->pvo_pmap = pmap;
573 pvo->pvo_vaddr |= va;
574 vsid = va_to_vsid(pmap, va);
575 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
578 if (pmap == kernel_pmap && (pvo->pvo_vaddr & PVO_LARGE) != 0)
579 shift = moea64_large_page_shift;
581 shift = ADDR_PIDX_SHFT;
582 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
583 pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
587 free_pvo_entry(struct pvo_entry *pvo)
590 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
591 uma_zfree(moea64_pvo_zone, pvo);
595 moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
598 lpte->pte_hi = moea64_pte_vpn_from_pvo_vpn(pvo);
599 lpte->pte_hi |= LPTE_VALID;
601 if (pvo->pvo_vaddr & PVO_LARGE)
602 lpte->pte_hi |= LPTE_BIG;
603 if (pvo->pvo_vaddr & PVO_WIRED)
604 lpte->pte_hi |= LPTE_WIRED;
605 if (pvo->pvo_vaddr & PVO_HID)
606 lpte->pte_hi |= LPTE_HID;
608 lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
609 if (pvo->pvo_pte.prot & VM_PROT_WRITE)
610 lpte->pte_lo |= LPTE_BW;
612 lpte->pte_lo |= LPTE_BR;
614 if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
615 lpte->pte_lo |= LPTE_NOEXEC;
618 static __inline uint64_t
619 moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
624 if (ma != VM_MEMATTR_DEFAULT) {
626 case VM_MEMATTR_UNCACHEABLE:
627 return (LPTE_I | LPTE_G);
628 case VM_MEMATTR_CACHEABLE:
630 case VM_MEMATTR_WRITE_COMBINING:
631 case VM_MEMATTR_WRITE_BACK:
632 case VM_MEMATTR_PREFETCHABLE:
634 case VM_MEMATTR_WRITE_THROUGH:
635 return (LPTE_W | LPTE_M);
640 * Assume the page is cache inhibited and access is guarded unless
641 * it's in our available memory array.
643 pte_lo = LPTE_I | LPTE_G;
644 for (i = 0; i < pregions_sz; i++) {
645 if ((pa >= pregions[i].mr_start) &&
646 (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
647 pte_lo &= ~(LPTE_I | LPTE_G);
657 * Quick sort callout for comparing memory regions.
659 static int om_cmp(const void *a, const void *b);
662 om_cmp(const void *a, const void *b)
664 const struct ofw_map *mapa;
665 const struct ofw_map *mapb;
669 if (mapa->om_pa < mapb->om_pa)
671 else if (mapa->om_pa > mapb->om_pa)
678 moea64_add_ofw_mappings(phandle_t mmu, size_t sz)
680 struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
681 pcell_t acells, trans_cells[sz/sizeof(cell_t)];
682 struct pvo_entry *pvo;
688 bzero(translations, sz);
689 OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
691 if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1)
692 panic("moea64_bootstrap: can't get ofw translations");
694 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
695 sz /= sizeof(cell_t);
696 for (i = 0, j = 0; i < sz; j++) {
697 translations[j].om_va = trans_cells[i++];
698 translations[j].om_len = trans_cells[i++];
699 translations[j].om_pa = trans_cells[i++];
701 translations[j].om_pa <<= 32;
702 translations[j].om_pa |= trans_cells[i++];
704 translations[j].om_mode = trans_cells[i++];
706 KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
710 qsort(translations, sz, sizeof (*translations), om_cmp);
712 for (i = 0; i < sz; i++) {
713 pa_base = translations[i].om_pa;
714 #ifndef __powerpc64__
715 if ((translations[i].om_pa >> 32) != 0)
716 panic("OFW translations above 32-bit boundary!");
719 if (pa_base % PAGE_SIZE)
720 panic("OFW translation not page-aligned (phys)!");
721 if (translations[i].om_va % PAGE_SIZE)
722 panic("OFW translation not page-aligned (virt)!");
724 CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
725 pa_base, translations[i].om_va, translations[i].om_len);
727 /* Now enter the pages for this mapping */
730 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
731 /* If this address is direct-mapped, skip remapping */
733 translations[i].om_va == PHYS_TO_DMAP(pa_base) &&
734 moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
738 PMAP_LOCK(kernel_pmap);
739 pvo = moea64_pvo_find_va(kernel_pmap,
740 translations[i].om_va + off);
741 PMAP_UNLOCK(kernel_pmap);
745 moea64_kenter(translations[i].om_va + off,
754 moea64_probe_large_page(void)
756 uint16_t pvr = mfpvr() >> 16;
762 powerpc_sync(); isync();
763 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
764 powerpc_sync(); isync();
768 if (moea64_large_page_size == 0) {
769 moea64_large_page_size = 0x1000000; /* 16 MB */
770 moea64_large_page_shift = 24;
774 moea64_large_page_mask = moea64_large_page_size - 1;
778 moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
785 cache = PCPU_GET(aim.slb);
786 esid = va >> ADDR_SR_SHFT;
787 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
789 for (i = 0; i < 64; i++) {
790 if (cache[i].slbe == (slbe | i))
795 entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
797 entry.slbv |= SLBV_L;
799 slb_insert_kernel(entry.slbe, entry.slbv);
804 moea64_kenter_large(vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap)
806 struct pvo_entry *pvo;
813 pvo = alloc_pvo_entry(bootstrap);
814 pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
815 init_pvo_entry(pvo, kernel_pmap, va);
817 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
819 pvo->pvo_pte.pa = pa | pte_lo;
820 error = moea64_pvo_enter(pvo, NULL, NULL);
822 panic("Error %d inserting large page\n", error);
827 moea64_setup_direct_map(vm_offset_t kernelstart,
828 vm_offset_t kernelend)
831 vm_paddr_t pa, pkernelstart, pkernelend;
832 vm_offset_t size, off;
836 if (moea64_large_page_size == 0)
841 PMAP_LOCK(kernel_pmap);
842 for (i = 0; i < pregions_sz; i++) {
843 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
844 pregions[i].mr_size; pa += moea64_large_page_size) {
846 if (pa & moea64_large_page_mask) {
847 pa &= moea64_large_page_mask;
850 if (pa + moea64_large_page_size >
851 pregions[i].mr_start + pregions[i].mr_size)
854 moea64_kenter_large(PHYS_TO_DMAP(pa), pa, pte_lo, 1);
857 PMAP_UNLOCK(kernel_pmap);
861 * Make sure the kernel and BPVO pool stay mapped on systems either
862 * without a direct map or on which the kernel is not already executing
863 * out of the direct-mapped region.
865 if (kernelstart < DMAP_BASE_ADDRESS) {
867 * For pre-dmap execution, we need to use identity mapping
868 * because we will be operating with the mmu on but in the
869 * wrong address configuration until we __restartkernel().
871 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
873 moea64_kenter(pa, pa);
874 } else if (!hw_direct_map) {
875 pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS;
876 pkernelend = kernelend & ~DMAP_BASE_ADDRESS;
877 for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend;
879 moea64_kenter(pa | DMAP_BASE_ADDRESS, pa);
882 if (!hw_direct_map) {
883 size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
884 off = (vm_offset_t)(moea64_bpvo_pool);
885 for (pa = off; pa < off + size; pa += PAGE_SIZE)
886 moea64_kenter(pa, pa);
888 /* Map exception vectors */
889 for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE)
890 moea64_kenter(pa | DMAP_BASE_ADDRESS, pa);
895 * Allow user to override unmapped_buf_allowed for testing.
896 * XXXKIB Only direct map implementation was tested.
898 if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
899 &unmapped_buf_allowed))
900 unmapped_buf_allowed = hw_direct_map;
903 /* Quick sort callout for comparing physical addresses. */
905 pa_cmp(const void *a, const void *b)
907 const vm_paddr_t *pa = a, *pb = b;
918 moea64_early_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
921 vm_size_t physsz, hwphyssz;
922 vm_paddr_t kernelphysstart, kernelphysend;
925 /* Level 0 reservations consist of 4096 pages (16MB superpage). */
926 vm_level_0_order = 12;
928 #ifndef __powerpc64__
929 /* We don't have a direct map since there is no BAT */
932 /* Make sure battable is zero, since we have no BAT */
933 for (i = 0; i < 16; i++) {
934 battable[i].batu = 0;
935 battable[i].batl = 0;
938 moea64_probe_large_page();
940 /* Use a direct map if we have large page support */
941 if (moea64_large_page_size > 0)
946 /* Install trap handlers for SLBs */
947 bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap);
948 bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap);
949 __syncicache((void *)EXC_DSE, 0x80);
950 __syncicache((void *)EXC_ISE, 0x80);
953 kernelphysstart = kernelstart & ~DMAP_BASE_ADDRESS;
954 kernelphysend = kernelend & ~DMAP_BASE_ADDRESS;
956 /* Get physical memory regions from firmware */
957 mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
958 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
960 if (PHYS_AVAIL_ENTRIES < regions_sz)
961 panic("moea64_bootstrap: phys_avail too small");
963 phys_avail_count = 0;
966 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
967 for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
968 CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
969 regions[i].mr_start, regions[i].mr_start +
970 regions[i].mr_size, regions[i].mr_size);
972 (physsz + regions[i].mr_size) >= hwphyssz) {
973 if (physsz < hwphyssz) {
974 phys_avail[j] = regions[i].mr_start;
975 phys_avail[j + 1] = regions[i].mr_start +
979 dump_avail[j] = phys_avail[j];
980 dump_avail[j + 1] = phys_avail[j + 1];
984 phys_avail[j] = regions[i].mr_start;
985 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
987 physsz += regions[i].mr_size;
988 dump_avail[j] = phys_avail[j];
989 dump_avail[j + 1] = phys_avail[j + 1];
992 /* Check for overlap with the kernel and exception vectors */
994 for (j = 0; j < 2*phys_avail_count; j+=2) {
995 if (phys_avail[j] < EXC_LAST)
996 phys_avail[j] += EXC_LAST;
998 if (phys_avail[j] >= kernelphysstart &&
999 phys_avail[j+1] <= kernelphysend) {
1000 phys_avail[j] = phys_avail[j+1] = ~0;
1005 if (kernelphysstart >= phys_avail[j] &&
1006 kernelphysstart < phys_avail[j+1]) {
1007 if (kernelphysend < phys_avail[j+1]) {
1008 phys_avail[2*phys_avail_count] =
1009 (kernelphysend & ~PAGE_MASK) + PAGE_SIZE;
1010 phys_avail[2*phys_avail_count + 1] =
1015 phys_avail[j+1] = kernelphysstart & ~PAGE_MASK;
1018 if (kernelphysend >= phys_avail[j] &&
1019 kernelphysend < phys_avail[j+1]) {
1020 if (kernelphysstart > phys_avail[j]) {
1021 phys_avail[2*phys_avail_count] = phys_avail[j];
1022 phys_avail[2*phys_avail_count + 1] =
1023 kernelphysstart & ~PAGE_MASK;
1027 phys_avail[j] = (kernelphysend & ~PAGE_MASK) +
1032 /* Remove physical available regions marked for removal (~0) */
1034 qsort(phys_avail, 2*phys_avail_count, sizeof(phys_avail[0]),
1036 phys_avail_count -= rm_pavail;
1037 for (i = 2*phys_avail_count;
1038 i < 2*(phys_avail_count + rm_pavail); i+=2)
1039 phys_avail[i] = phys_avail[i+1] = 0;
1042 physmem = btoc(physsz);
1045 moea64_pteg_count = PTEGCOUNT;
1047 moea64_pteg_count = 0x1000;
1049 while (moea64_pteg_count < physmem)
1050 moea64_pteg_count <<= 1;
1052 moea64_pteg_count >>= 1;
1053 #endif /* PTEGCOUNT */
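/*
 * Worked example for the sizing loop above: with physmem = 0x100000 pages
 * (4 GB of 4 KB pages) and no PTEGCOUNT override, moea64_pteg_count doubles
 * from 0x1000 until it reaches 0x100000 and is then halved, leaving 0x80000
 * PTE groups.
 */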
1057 moea64_mid_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
1064 moea64_pteg_mask = moea64_pteg_count - 1;
1067 * Initialize SLB table lock and page locks
1069 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
1070 for (i = 0; i < PV_LOCK_COUNT; i++)
1071 mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
1074 * Initialise the bootstrap pvo pool.
1076 TUNABLE_INT_FETCH("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size);
1077 if (moea64_bpvo_pool_size == 0) {
1079 moea64_bpvo_pool_size = ((ptoa((uintmax_t)physmem) * sizeof(struct vm_page)) /
1080 (PAGE_SIZE * PAGE_SIZE)) * BPVO_POOL_EXPANSION_FACTOR;
1082 moea64_bpvo_pool_size = BPVO_POOL_SIZE;
1085 if (boothowto & RB_VERBOSE) {
1086 printf("mmu_oea64: bpvo pool entries = %d, bpvo pool size = %zu MB\n",
1087 moea64_bpvo_pool_size,
1088 moea64_bpvo_pool_size*sizeof(struct pvo_entry) / 1048576);
1091 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
1092 moea64_bpvo_pool_size*sizeof(struct pvo_entry), PAGE_SIZE);
1093 moea64_bpvo_pool_index = 0;
1095 /* Place at address usable through the direct map */
1097 moea64_bpvo_pool = (struct pvo_entry *)
1098 PHYS_TO_DMAP((uintptr_t)moea64_bpvo_pool);
1101 * Make sure kernel vsid is allocated as well as VSID 0.
1103 #ifndef __powerpc64__
1104 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
1105 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
1106 moea64_vsid_bitmap[0] |= 1;
1110 * Initialize the kernel pmap (which is statically allocated).
1112 #ifdef __powerpc64__
1113 for (i = 0; i < 64; i++) {
1114 pcpup->pc_aim.slb[i].slbv = 0;
1115 pcpup->pc_aim.slb[i].slbe = 0;
1118 for (i = 0; i < 16; i++)
1119 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
1122 kernel_pmap->pmap_phys = kernel_pmap;
1123 CPU_FILL(&kernel_pmap->pm_active);
1124 RB_INIT(&kernel_pmap->pmap_pvo);
1126 PMAP_LOCK_INIT(kernel_pmap);
1129 * Now map in all the other buffers we allocated earlier
1132 moea64_setup_direct_map(kernelstart, kernelend);
1136 moea64_late_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
1147 * Set up the Open Firmware pmap and add its mappings if not in real
1151 chosen = OF_finddevice("/chosen");
1152 if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) {
1153 mmu = OF_instance_to_package(mmui);
1155 (sz = OF_getproplen(mmu, "translations")) == -1)
1157 if (sz > 6144 /* tmpstksz - 2 KB headroom */)
1158 panic("moea64_bootstrap: too many ofw translations");
1161 moea64_add_ofw_mappings(mmu, sz);
1165 * Calculate the last available physical address.
1168 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1169 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));
1174 pmap_cpu_bootstrap(0);
1175 mtmsr(mfmsr() | PSL_DR | PSL_IR);
1176 pmap_bootstrapped++;
1179 * Set the start and end of kva.
1181 virtual_avail = VM_MIN_KERNEL_ADDRESS;
1182 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
1185 * Map the entire KVA range into the SLB. We must not fault there.
1187 #ifdef __powerpc64__
1188 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
1189 moea64_bootstrap_slb_prefault(va, 0);
1193 * Remap any early IO mappings (console framebuffer, etc.)
1195 bs_remap_earlyboot();
1198 * Figure out how far we can extend virtual_end into segment 16
1199 * without running into existing mappings. Segment 16 is guaranteed
1200 * to contain neither RAM nor devices (at least on Apple hardware),
1201 * but will generally contain some OFW mappings we should not
1205 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */
1206 PMAP_LOCK(kernel_pmap);
1207 while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
1208 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
1209 virtual_end += PAGE_SIZE;
1210 PMAP_UNLOCK(kernel_pmap);
1214 * Allocate a kernel stack with a guard page for thread0 and map it
1215 * into the kernel page map.
1217 pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
1218 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1219 virtual_avail = va + kstack_pages * PAGE_SIZE;
1220 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
1221 thread0.td_kstack = va;
1222 thread0.td_kstack_pages = kstack_pages;
1223 for (i = 0; i < kstack_pages; i++) {
1224 moea64_kenter(va, pa);
1230 * Allocate virtual address space for the message buffer.
1232 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
1233 msgbufp = (struct msgbuf *)virtual_avail;
1235 virtual_avail += round_page(msgbufsize);
1236 while (va < virtual_avail) {
1237 moea64_kenter(va, pa);
1243 * Allocate virtual address space for the dynamic percpu area.
1245 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
1246 dpcpu = (void *)virtual_avail;
1248 virtual_avail += DPCPU_SIZE;
1249 while (va < virtual_avail) {
1250 moea64_kenter(va, pa);
1254 dpcpu_init(dpcpu, curcpu);
1256 crashdumpmap = (caddr_t)virtual_avail;
1257 virtual_avail += MAXDUMPPGS * PAGE_SIZE;
1260 * Allocate some things for page zeroing. We put this directly
1261 * in the page table and use MOEA64_PTE_REPLACE to keep any
1262 * of the PVO book-keeping or other parts of the VM system
1263 * from even knowing that this hack exists.
1266 if (!hw_direct_map) {
1267 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
1269 for (i = 0; i < 2; i++) {
1270 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
1271 virtual_end -= PAGE_SIZE;
1273 moea64_kenter(moea64_scratchpage_va[i], 0);
1275 PMAP_LOCK(kernel_pmap);
1276 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
1277 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
1278 PMAP_UNLOCK(kernel_pmap);
1282 numa_mem_regions(&numa_pregions, &numapregions_sz);
1286 moea64_pmap_init_qpages(void)
1296 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
1297 if (pc->pc_qmap_addr == 0)
1298 panic("pmap_init_qpages: unable to allocate KVA");
1299 PMAP_LOCK(kernel_pmap);
1300 pc->pc_aim.qmap_pvo =
1301 moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
1302 PMAP_UNLOCK(kernel_pmap);
1303 mtx_init(&pc->pc_aim.qmap_lock, "qmap lock", NULL, MTX_DEF);
1307 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);
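/*
 * The per-CPU qmap_addr/qmap_pvo pairs set up above are retargeted later by
 * moea64_quick_enter_page(): the PVO's physical address is rewritten and its
 * PTE replaced, providing a per-CPU temporary mapping window on systems
 * without a direct map.
 */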
1310 * Activate a user pmap. This mostly involves setting some non-CPU
1314 moea64_activate(struct thread *td)
1318 pm = &td->td_proc->p_vmspace->vm_pmap;
1319 CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
1321 #ifdef __powerpc64__
1322 PCPU_SET(aim.userslb, pm->pm_slb);
1323 __asm __volatile("slbmte %0, %1; isync" ::
1324 "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
1326 PCPU_SET(curpmap, pm->pmap_phys);
1327 mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
1332 moea64_deactivate(struct thread *td)
1336 __asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));
1338 pm = &td->td_proc->p_vmspace->vm_pmap;
1339 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
1340 #ifdef __powerpc64__
1341 PCPU_SET(aim.userslb, NULL);
1343 PCPU_SET(curpmap, NULL);
1348 moea64_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1350 struct pvo_entry key, *pvo;
1354 key.pvo_vaddr = sva;
1356 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1357 pvo != NULL && PVO_VADDR(pvo) < eva;
1358 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
1359 if (PVO_IS_SP(pvo)) {
1360 if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
1361 pvo = moea64_sp_unwire(pvo);
1364 CTR1(KTR_PMAP, "%s: demote before unwire",
1366 moea64_sp_demote(pvo);
1370 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1371 panic("moea64_unwire: pvo %p is missing PVO_WIRED",
1373 pvo->pvo_vaddr &= ~PVO_WIRED;
1374 refchg = moea64_pte_replace(pvo, 0 /* No invalidation */);
1375 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1376 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1379 m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
1381 refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
1382 if (refchg & LPTE_CHG)
1384 if (refchg & LPTE_REF)
1385 vm_page_aflag_set(m, PGA_REFERENCED);
1387 pm->pm_stats.wired_count--;
1393 moea64_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
1395 struct pvo_entry *pvo;
1403 /* XXX Add support for superpages */
1404 pvo = moea64_pvo_find_va(pmap, addr);
1406 pa = PVO_PADDR(pvo);
1407 m = PHYS_TO_VM_PAGE(pa);
1408 managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED;
1409 val = MINCORE_INCORE;
1421 if (moea64_is_modified(m))
1422 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
1424 if (moea64_is_referenced(m))
1425 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
1428 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
1429 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
1438 * This goes through and sets the physical address of our
1439 * special scratch PTE to the PA we want to zero or copy. Because
1440 * of locking issues (this can get called in pvo_enter() by
1441 * the UMA allocator), we can't use most other utility functions here
1445 void moea64_set_scratchpage_pa(int which, vm_paddr_t pa)
1447 struct pvo_entry *pvo;
1449 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1450 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1452 pvo = moea64_scratchpage_pvo[which];
1453 PMAP_LOCK(pvo->pvo_pmap);
1455 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1456 moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
1457 PMAP_UNLOCK(pvo->pvo_pmap);
1462 moea64_copy_page(vm_page_t msrc, vm_page_t mdst)
1467 dst = VM_PAGE_TO_PHYS(mdst);
1468 src = VM_PAGE_TO_PHYS(msrc);
1470 if (hw_direct_map) {
1471 bcopy((void *)PHYS_TO_DMAP(src), (void *)PHYS_TO_DMAP(dst),
1474 mtx_lock(&moea64_scratchpage_mtx);
1476 moea64_set_scratchpage_pa(0, src);
1477 moea64_set_scratchpage_pa(1, dst);
1479 bcopy((void *)moea64_scratchpage_va[0],
1480 (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1482 mtx_unlock(&moea64_scratchpage_mtx);
1487 moea64_copy_pages_dmap(vm_page_t *ma, vm_offset_t a_offset,
1488 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1491 vm_offset_t a_pg_offset, b_pg_offset;
1494 while (xfersize > 0) {
1495 a_pg_offset = a_offset & PAGE_MASK;
1496 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1497 a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
1498 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
1500 b_pg_offset = b_offset & PAGE_MASK;
1501 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1502 b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
1503 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
1505 bcopy(a_cp, b_cp, cnt);
1513 moea64_copy_pages_nodmap(vm_page_t *ma, vm_offset_t a_offset,
1514 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1517 vm_offset_t a_pg_offset, b_pg_offset;
1520 mtx_lock(&moea64_scratchpage_mtx);
1521 while (xfersize > 0) {
1522 a_pg_offset = a_offset & PAGE_MASK;
1523 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1524 moea64_set_scratchpage_pa(0,
1525 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
1526 a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
1527 b_pg_offset = b_offset & PAGE_MASK;
1528 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1529 moea64_set_scratchpage_pa(1,
1530 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
1531 b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
1532 bcopy(a_cp, b_cp, cnt);
1537 mtx_unlock(&moea64_scratchpage_mtx);
1541 moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
1542 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1545 if (hw_direct_map) {
1546 moea64_copy_pages_dmap(ma, a_offset, mb, b_offset,
1549 moea64_copy_pages_nodmap(ma, a_offset, mb, b_offset,
1555 moea64_zero_page_area(vm_page_t m, int off, int size)
1557 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1559 if (size + off > PAGE_SIZE)
1560 panic("moea64_zero_page: size + off > PAGE_SIZE");
1562 if (hw_direct_map) {
1563 bzero((caddr_t)(uintptr_t)PHYS_TO_DMAP(pa) + off, size);
1565 mtx_lock(&moea64_scratchpage_mtx);
1566 moea64_set_scratchpage_pa(0, pa);
1567 bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1568 mtx_unlock(&moea64_scratchpage_mtx);
1573 * Zero a page of physical memory by temporarily mapping it
1576 moea64_zero_page(vm_page_t m)
1578 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1579 vm_offset_t va, off;
1581 if (!hw_direct_map) {
1582 mtx_lock(&moea64_scratchpage_mtx);
1584 moea64_set_scratchpage_pa(0, pa);
1585 va = moea64_scratchpage_va[0];
1587 va = PHYS_TO_DMAP(pa);
1590 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1591 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
1594 mtx_unlock(&moea64_scratchpage_mtx);
1598 moea64_quick_enter_page(vm_page_t m)
1600 struct pvo_entry *pvo;
1601 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1604 return (PHYS_TO_DMAP(pa));
1607 * MOEA64_PTE_REPLACE does some locking, so we can't just grab
1608 * a critical section and access the PCPU data like on i386.
1609 * Instead, pin the thread and grab the PCPU lock to prevent
1610 * a preempting thread from using the same PCPU data.
1614 mtx_assert(PCPU_PTR(aim.qmap_lock), MA_NOTOWNED);
1615 pvo = PCPU_GET(aim.qmap_pvo);
1617 mtx_lock(PCPU_PTR(aim.qmap_lock));
1618 pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
1620 moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
1623 return (PCPU_GET(qmap_addr));
1627 moea64_quick_remove_page(vm_offset_t addr)
1632 mtx_assert(PCPU_PTR(aim.qmap_lock), MA_OWNED);
1633 KASSERT(PCPU_GET(qmap_addr) == addr,
1634 ("moea64_quick_remove_page: invalid address"));
1635 mtx_unlock(PCPU_PTR(aim.qmap_lock));
1640 moea64_page_is_mapped(vm_page_t m)
1642 return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
1646 * Map the given physical page at the specified virtual address in the
1647 * target pmap with the protection requested. If specified the page
1648 * will be wired down.
1652 moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
1653 vm_prot_t prot, u_int flags, int8_t psind)
1655 struct pvo_entry *pvo, *oldpvo, *tpvo;
1656 struct pvo_head *pvo_head;
1661 if ((m->oflags & VPO_UNMANAGED) == 0) {
1662 if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
1663 VM_PAGE_OBJECT_BUSY_ASSERT(m);
1665 VM_OBJECT_ASSERT_LOCKED(m->object);
1669 return (moea64_sp_enter(pmap, va, m, prot, flags, psind));
1671 pvo = alloc_pvo_entry(0);
1673 return (KERN_RESOURCE_SHORTAGE);
1674 pvo->pvo_pmap = NULL; /* to be filled in later */
1675 pvo->pvo_pte.prot = prot;
1677 pa = VM_PAGE_TO_PHYS(m);
1678 pte_lo = moea64_calc_wimg(pa, pmap_page_get_memattr(m));
1679 pvo->pvo_pte.pa = pa | pte_lo;
1681 if ((flags & PMAP_ENTER_WIRED) != 0)
1682 pvo->pvo_vaddr |= PVO_WIRED;
1684 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
1687 pvo_head = &m->md.mdpg_pvoh;
1688 pvo->pvo_vaddr |= PVO_MANAGED;
1693 if (pvo->pvo_pmap == NULL)
1694 init_pvo_entry(pvo, pmap, va);
1696 if (moea64_ps_enabled(pmap) &&
1697 (tpvo = moea64_pvo_find_va(pmap, va & ~HPT_SP_MASK)) != NULL &&
1699 /* Demote SP before entering a regular page */
1700 CTR2(KTR_PMAP, "%s: demote before enter: va=%#jx",
1701 __func__, (uintmax_t)va);
1702 moea64_sp_demote_aligned(tpvo);
1705 if (prot & VM_PROT_WRITE)
1706 if (pmap_bootstrapped &&
1707 (m->oflags & VPO_UNMANAGED) == 0)
1708 vm_page_aflag_set(m, PGA_WRITEABLE);
1710 error = moea64_pvo_enter(pvo, pvo_head, &oldpvo);
1711 if (error == EEXIST) {
1712 if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
1713 oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
1714 oldpvo->pvo_pte.prot == prot) {
1715 /* Identical mapping already exists */
1718 /* If not in page table, reinsert it */
1719 if (moea64_pte_synch(oldpvo) < 0) {
1720 STAT_MOEA64(moea64_pte_overflow--);
1721 moea64_pte_insert(oldpvo);
1724 /* Then just clean up and go home */
1727 free_pvo_entry(pvo);
1731 /* Otherwise, need to kill it first */
1732 KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
1733 "mapping does not match new mapping"));
1734 moea64_pvo_remove_from_pmap(oldpvo);
1735 moea64_pvo_enter(pvo, pvo_head, NULL);
1741 /* Free any dead pages */
1742 if (error == EEXIST) {
1743 moea64_pvo_remove_from_page(oldpvo);
1744 free_pvo_entry(oldpvo);
1749 * Flush the page from the instruction cache if this page is
1750 * mapped executable and cacheable.
1752 if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 &&
1753 (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1754 vm_page_aflag_set(m, PGA_EXECUTABLE);
1755 moea64_syncicache(pmap, va, pa, PAGE_SIZE);
1758 #if VM_NRESERVLEVEL > 0
1760 * Try to promote pages.
1762 * If the VA of the entered page is not aligned with its PA,
1763 * don't try page promotion as it is not possible.
1764 * This reduces the number of promotion failures dramatically.
1766 if (moea64_ps_enabled(pmap) && pmap != kernel_pmap && pvo != NULL &&
1767 (pvo->pvo_vaddr & PVO_MANAGED) != 0 &&
1768 (va & HPT_SP_MASK) == (pa & HPT_SP_MASK) &&
1769 (m->flags & PG_FICTITIOUS) == 0 &&
1770 vm_reserv_level_iffullpop(m) == 0)
1771 moea64_sp_promote(pmap, va, m);
1774 return (KERN_SUCCESS);
1778 moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1783 * This is much trickier than on older systems because
1784 * we can't sync the icache on physical addresses directly
1785 * without a direct map. Instead we check a couple of cases
1786 * where the memory is already mapped in and, failing that,
1787 * use the same trick we use for page zeroing to create
1788 * a temporary mapping for this physical address.
1791 if (!pmap_bootstrapped) {
1793 * If PMAP is not bootstrapped, we are likely to be
1796 __syncicache((void *)(uintptr_t)pa, sz);
1797 } else if (pmap == kernel_pmap) {
1798 __syncicache((void *)va, sz);
1799 } else if (hw_direct_map) {
1800 __syncicache((void *)(uintptr_t)PHYS_TO_DMAP(pa), sz);
1802 /* Use the scratch page to set up a temp mapping */
1804 mtx_lock(&moea64_scratchpage_mtx);
1806 moea64_set_scratchpage_pa(1, pa & ~ADDR_POFF);
1807 __syncicache((void *)(moea64_scratchpage_va[1] +
1808 (va & ADDR_POFF)), sz);
1810 mtx_unlock(&moea64_scratchpage_mtx);
1815 * Maps a sequence of resident pages belonging to the same object.
1816 * The sequence begins with the given page m_start. This page is
1817 * mapped at the given virtual address start. Each subsequent page is
1818 * mapped at a virtual address that is offset from start by the same
1819 * amount as the page is offset from m_start within the object. The
1820 * last page in the sequence is the page with the largest offset from
1821 * m_start that can be mapped at a virtual address less than the given
1822 * virtual address end. Not every virtual page between start and end
1823 * is mapped; only those for which a resident page exists with the
1824 * corresponding offset from m_start are mapped.
1827 moea64_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
1828 vm_page_t m_start, vm_prot_t prot)
1831 vm_pindex_t diff, psize;
1835 VM_OBJECT_ASSERT_LOCKED(m_start->object);
1837 psize = atop(end - start);
1839 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1840 va = start + ptoa(diff);
1841 if ((va & HPT_SP_MASK) == 0 && va + HPT_SP_SIZE <= end &&
1842 m->psind == 1 && moea64_ps_enabled(pm))
1846 moea64_enter(pm, va, m, prot &
1847 (VM_PROT_READ | VM_PROT_EXECUTE),
1848 PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, psind);
1850 m = &m[HPT_SP_SIZE / PAGE_SIZE - 1];
1851 m = TAILQ_NEXT(m, listq);
1856 moea64_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
1860 moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1861 PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
1865 moea64_extract(pmap_t pm, vm_offset_t va)
1867 struct pvo_entry *pvo;
1871 pvo = moea64_pvo_find_va(pm, va);
1875 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo));
1882 * Atomically extract and hold the physical page with the given
1883 * pmap and virtual address pair if that mapping permits the given
1887 moea64_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1889 struct pvo_entry *pvo;
1894 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1895 if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
1896 m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
1897 if (!vm_page_wire_mapped(m))
1905 moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
1906 uint8_t *flags, int wait)
1908 struct pvo_entry *pvo;
1914 * This entire routine is a horrible hack to avoid bothering kmem
1915 * for new KVA addresses. Because this can get called from inside
1916 * kmem allocation routines, calling kmem for a new address here
1917 * can lead to multiply locking non-recursive mutexes.
1920 *flags = UMA_SLAB_PRIV;
1921 needed_lock = !PMAP_LOCKED(kernel_pmap);
1923 m = vm_page_alloc_domain(NULL, 0, domain,
1924 malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1928 va = VM_PAGE_TO_PHYS(m);
1930 pvo = alloc_pvo_entry(1 /* bootstrap */);
1932 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
1933 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;
1936 PMAP_LOCK(kernel_pmap);
1938 init_pvo_entry(pvo, kernel_pmap, va);
1939 pvo->pvo_vaddr |= PVO_WIRED;
1941 moea64_pvo_enter(pvo, NULL, NULL);
1944 PMAP_UNLOCK(kernel_pmap);
1946 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1947 bzero((void *)va, PAGE_SIZE);
1952 extern int elf32_nxstack;
1958 CTR0(KTR_PMAP, "moea64_init");
1960 moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1961 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1962 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1965 * Are large page mappings enabled?
1967 * Until HPT superpages are better tested, leave them disabled by default.
1970 superpages_enabled = 0;
1971 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
1972 if (superpages_enabled) {
1973 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1974 ("moea64_init: can't assign to pagesizes[1]"));
1976 if (moea64_large_page_size == 0) {
1977 printf("mmu_oea64: HW does not support large pages. "
1978 "Disabling superpages...\n");
1979 superpages_enabled = 0;
1980 } else if (!moea64_has_lp_4k_16m) {
1981 printf("mmu_oea64: "
1982 "HW does not support mixed 4KB/16MB page sizes. "
1983 "Disabling superpages...\n");
1984 superpages_enabled = 0;
1986 pagesizes[1] = HPT_SP_SIZE;
1989 if (!hw_direct_map) {
1990 uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc);
1993 #ifdef COMPAT_FREEBSD32
1997 moea64_initialized = TRUE;
2001 moea64_is_referenced(vm_page_t m)
2004 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2005 ("moea64_is_referenced: page %p is not managed", m));
2007 return (moea64_query_bit(m, LPTE_REF));
2011 moea64_is_modified(vm_page_t m)
2014 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2015 ("moea64_is_modified: page %p is not managed", m));
2018 * If the page is not busied then this check is racy.
2020 if (!pmap_page_is_write_mapped(m))
2023 return (moea64_query_bit(m, LPTE_CHG));
2027 moea64_is_prefaultable(pmap_t pmap, vm_offset_t va)
2029 struct pvo_entry *pvo;
2030 boolean_t rv = TRUE;
2033 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
2041 moea64_clear_modify(vm_page_t m)
2044 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2045 ("moea64_clear_modify: page %p is not managed", m));
2046 vm_page_assert_busied(m);
2048 if (!pmap_page_is_write_mapped(m))
2050 moea64_clear_bit(m, LPTE_CHG);
2054 * Clear the write and modified bits in each of the given page's mappings.
2057 moea64_remove_write(vm_page_t m)
2059 struct pvo_entry *pvo;
2060 int64_t refchg, ret;
2063 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2064 ("moea64_remove_write: page %p is not managed", m));
2065 vm_page_assert_busied(m);
2067 if (!pmap_page_is_write_mapped(m))
2073 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2074 pmap = pvo->pvo_pmap;
2076 if (!(pvo->pvo_vaddr & PVO_DEAD) &&
2077 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2078 if (PVO_IS_SP(pvo)) {
2079 CTR1(KTR_PMAP, "%s: demote before remwr",
2081 moea64_sp_demote(pvo);
2083 pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
2084 ret = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE);
2088 if (pvo->pvo_pmap == kernel_pmap)
2093 if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
2095 vm_page_aflag_clear(m, PGA_WRITEABLE);
2100 * moea64_ts_referenced:
2102 * Return a count of reference bits for a page, clearing those bits.
2103 * It is not necessary for every reference bit to be cleared, but it
2104 * is necessary that 0 only be returned when there are truly no
2105 * reference bits set.
2107 * XXX: The exact number of bits to check and clear is a matter that
2108 * should be tested and standardized at some point in the future for
2109 * optimal aging of shared pages.
2112 moea64_ts_referenced(vm_page_t m)
2115 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2116 ("moea64_ts_referenced: page %p is not managed", m));
2117 return (moea64_clear_bit(m, LPTE_REF));
2121 * Modify the WIMG settings of all mappings for a page.
2124 moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma)
2126 struct pvo_entry *pvo;
2131 CTR3(KTR_PMAP, "%s: pa=%#jx, ma=%#x",
2132 __func__, (uintmax_t)VM_PAGE_TO_PHYS(m), ma);
2134 if ((m->oflags & VPO_UNMANAGED) != 0) {
2135 m->md.mdpg_cache_attrs = ma;
2139 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
2142 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2143 pmap = pvo->pvo_pmap;
2145 if (!(pvo->pvo_vaddr & PVO_DEAD)) {
2146 if (PVO_IS_SP(pvo)) {
2148 "%s: demote before set_memattr", __func__);
2149 moea64_sp_demote(pvo);
2151 pvo->pvo_pte.pa &= ~LPTE_WIMG;
2152 pvo->pvo_pte.pa |= lo;
2153 refchg = moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
2155 refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
2157 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
2158 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2160 atomic_readandclear_32(&m->md.mdpg_attrs);
2161 if (refchg & LPTE_CHG)
2163 if (refchg & LPTE_REF)
2164 vm_page_aflag_set(m, PGA_REFERENCED);
2166 if (pvo->pvo_pmap == kernel_pmap)
2171 m->md.mdpg_cache_attrs = ma;
2176 * Map a wired page into kernel virtual address space.
2179 moea64_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
2182 struct pvo_entry *pvo, *oldpvo;
2185 pvo = alloc_pvo_entry(0);
2188 } while (pvo == NULL);
2189 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
2190 pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
2191 pvo->pvo_vaddr |= PVO_WIRED;
2193 PMAP_LOCK(kernel_pmap);
2194 oldpvo = moea64_pvo_find_va(kernel_pmap, va);
2196 moea64_pvo_remove_from_pmap(oldpvo);
2197 init_pvo_entry(pvo, kernel_pmap, va);
2198 error = moea64_pvo_enter(pvo, NULL, NULL);
2199 PMAP_UNLOCK(kernel_pmap);
2201 /* Free any dead pages */
2202 if (oldpvo != NULL) {
2203 moea64_pvo_remove_from_page(oldpvo);
2204 free_pvo_entry(oldpvo);
2208 panic("moea64_kenter: failed to enter va %#zx pa %#jx: %d", va,
2209 (uintmax_t)pa, error);
2213 moea64_kenter(vm_offset_t va, vm_paddr_t pa)
2216 moea64_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
2220 * Extract the physical page address associated with the given kernel virtual
2224 moea64_kextract(vm_offset_t va)
2226 struct pvo_entry *pvo;
2230 * Shortcut the direct-mapped case when applicable. We never put
2231 * anything but 1:1 (or 62-bit aliased) mappings below
2232 * VM_MIN_KERNEL_ADDRESS.
2234 if (va < VM_MIN_KERNEL_ADDRESS)
2235 return (va & ~DMAP_BASE_ADDRESS);
2237 PMAP_LOCK(kernel_pmap);
2238 pvo = moea64_pvo_find_va(kernel_pmap, va);
2239 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
2241 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo));
2242 PMAP_UNLOCK(kernel_pmap);
2247 * Remove a wired page from kernel virtual address space.
2250 moea64_kremove(vm_offset_t va)
2252 moea64_remove(kernel_pmap, va, va + PAGE_SIZE);
2256 * Provide a kernel pointer corresponding to a given userland pointer.
2257 * The returned pointer is valid until the next time this function is
2258 * called in this thread. This is used internally in copyin/copyout.
2261 moea64_map_user_ptr(pmap_t pm, volatile const void *uaddr,
2262 void **kaddr, size_t ulen, size_t *klen)
2265 #ifdef __powerpc64__
2270 *kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
2271 l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
2279 #ifdef __powerpc64__
2280 /* Try lockless look-up first */
2281 slb = user_va_to_slb_entry(pm, (vm_offset_t)uaddr);
2284 /* If it isn't there, we need to pre-fault the VSID */
2286 slbv = va_to_vsid(pm, (vm_offset_t)uaddr) << SLBV_VSID_SHIFT;
2292 /* Mark segment no-execute */
2295 slbv = va_to_vsid(pm, (vm_offset_t)uaddr);
2297 /* Mark segment no-execute */
2301 /* If we have already set this VSID, we can just return */
2302 if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
2305 __asm __volatile("isync");
2306 curthread->td_pcb->pcb_cpu.aim.usr_segm =
2307 (uintptr_t)uaddr >> ADDR_SR_SHFT;
2308 curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
2309 #ifdef __powerpc64__
2310 __asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
2311 "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
2313 __asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv));
2320 * Figure out where a given kernel pointer (usually in a fault) points
2321 * to from the VM's perspective, potentially remapping into userland's
2322 * address space.
2325 moea64_decode_kernel_ptr(vm_offset_t addr, int *is_user,
2326 vm_offset_t *decoded_addr)
2328 vm_offset_t user_sr;
2330 if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
2331 user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
2332 addr &= ADDR_PIDX | ADDR_POFF;
2333 addr |= user_sr << ADDR_SR_SHFT;
2334 *decoded_addr = addr;
2337 *decoded_addr = addr;
2345 * Map a range of physical addresses into kernel virtual address space.
2347 * The value passed in *virt is a suggested virtual address for the mapping.
2348 * Architectures which can support a direct-mapped physical to virtual region
2349 * can return the appropriate address within that region, leaving '*virt'
2350 * unchanged. Other architectures should map the pages starting at '*virt' and
2351 * update '*virt' with the first usable address after the mapped region.
2354 moea64_map(vm_offset_t *virt, vm_paddr_t pa_start,
2355 vm_paddr_t pa_end, int prot)
2357 vm_offset_t sva, va;
2359 if (hw_direct_map) {
2361 * Check if every page in the region is covered by the direct
2362 * map. The direct map covers all of physical memory. Use
2363 * moea64_calc_wimg() as a shortcut to check whether each page is
2364 * in physical memory and therefore covered by the direct map.
2366 for (va = pa_start; va < pa_end; va += PAGE_SIZE)
2367 if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
2370 return (PHYS_TO_DMAP(pa_start));
2374 /* XXX respect prot argument */
2375 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
2376 moea64_kenter(va, pa_start);
2383 * Returns true if the pmap's pv is one of the first
2384 * 16 pvs linked to from this page. This count may
2385 * be changed upwards or downwards in the future; it
2386 * is only necessary that true be returned for a small
2387 * subset of pmaps for proper page aging.
2390 moea64_page_exists_quick(pmap_t pmap, vm_page_t m)
2393 struct pvo_entry *pvo;
2396 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2397 ("moea64_page_exists_quick: page %p is not managed", m));
2401 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2402 if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
2414 moea64_page_init(vm_page_t m)
2417 m->md.mdpg_attrs = 0;
2418 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
2419 LIST_INIT(&m->md.mdpg_pvoh);
2423 * Return the number of managed mappings to the given physical page
2424 * that are wired.
2427 moea64_page_wired_mappings(vm_page_t m)
2429 struct pvo_entry *pvo;
2433 if ((m->oflags & VPO_UNMANAGED) != 0)
2436 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
2437 if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
2443 static uintptr_t moea64_vsidcontext;
2446 moea64_get_unique_vsid(void) {
2453 __asm __volatile("mftb %0" : "=r"(entropy));
2455 mtx_lock(&moea64_slb_mutex);
2456 for (i = 0; i < NVSIDS; i += VSID_NBPW) {
2460 * Create a new value by multiplying by a prime and adding in
2461 * entropy from the timebase register. This is to make the
2462 * VSID more random so that the PT hash function collides
2463 * less often. (Note that the prime causes gcc to do shifts
2464 * instead of a multiply.)
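* For example (illustrative): on a collision the code below first checks
* whether the bitmap word for the chosen bucket is completely full
* (0xffffffff); if so it re-rolls with fresh timebase entropy, otherwise
* it claims the first free bit in that bucket via ffs().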
2466 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
2467 hash = moea64_vsidcontext & (NVSIDS - 1);
2468 if (hash == 0) /* 0 is special, avoid it */
2471 mask = 1 << (hash & (VSID_NBPW - 1));
2472 hash = (moea64_vsidcontext & VSID_HASHMASK);
2473 if (moea64_vsid_bitmap[n] & mask) { /* collision? */
2474 /* anything free in this bucket? */
2475 if (moea64_vsid_bitmap[n] == 0xffffffff) {
2476 entropy = (moea64_vsidcontext >> 20);
2479 i = ffs(~moea64_vsid_bitmap[n]) - 1;
2481 hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
2484 if (hash == VSID_VRMA) /* also special, avoid this too */
2486 KASSERT(!(moea64_vsid_bitmap[n] & mask),
2487 ("Allocating in-use VSID %#zx\n", hash));
2488 moea64_vsid_bitmap[n] |= mask;
2489 mtx_unlock(&moea64_slb_mutex);
2493 mtx_unlock(&moea64_slb_mutex);
2494 panic("%s: out of segments",__func__);
2497 #ifdef __powerpc64__
2499 moea64_pinit(pmap_t pmap)
2502 RB_INIT(&pmap->pmap_pvo);
2504 pmap->pm_slb_tree_root = slb_alloc_tree();
2505 pmap->pm_slb = slb_alloc_user_cache();
2506 pmap->pm_slb_len = 0;
2512 moea64_pinit(pmap_t pmap)
2517 RB_INIT(&pmap->pmap_pvo);
2519 if (pmap_bootstrapped)
2520 pmap->pmap_phys = (pmap_t)moea64_kextract((vm_offset_t)pmap);
2522 pmap->pmap_phys = pmap;
2525 * Allocate some segment registers for this pmap.
2527 hash = moea64_get_unique_vsid();
2529 for (i = 0; i < 16; i++)
2530 pmap->pm_sr[i] = VSID_MAKE(i, hash);
2532 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
2539 * Initialize the pmap associated with process 0.
2542 moea64_pinit0(pmap_t pm)
2547 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
2551 * Set the physical protection on the specified range of this map as requested.
2554 moea64_pvo_protect( pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
2560 PMAP_LOCK_ASSERT(pm, MA_OWNED);
2563 * Change the protection of the page.
2565 oldprot = pvo->pvo_pte.prot;
2566 pvo->pvo_pte.prot = prot;
2567 pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
2570 * If the PVO is in the page table, update mapping
2572 refchg = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE);
2574 refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
2576 if (pm != kernel_pmap && pg != NULL &&
2577 (pg->a.flags & PGA_EXECUTABLE) == 0 &&
2578 (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
2579 if ((pg->oflags & VPO_UNMANAGED) == 0)
2580 vm_page_aflag_set(pg, PGA_EXECUTABLE);
2581 moea64_syncicache(pm, PVO_VADDR(pvo),
2582 PVO_PADDR(pvo), PAGE_SIZE);
2586 * Update vm about the REF/CHG bits if the page is managed and we have
2587 * removed write access.
2589 if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
2590 (oldprot & VM_PROT_WRITE)) {
2591 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2592 if (refchg & LPTE_CHG)
2593 vm_page_dirty(pg);
2594 if (refchg & LPTE_REF)
2595 vm_page_aflag_set(pg, PGA_REFERENCED);
2600 moea64_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2603 struct pvo_entry *pvo, key;
2605 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
2608 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
2609 ("moea64_protect: non current pmap"));
2611 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2612 moea64_remove(pm, sva, eva);
2617 key.pvo_vaddr = sva;
2618 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2619 pvo != NULL && PVO_VADDR(pvo) < eva;
2620 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
2621 if (PVO_IS_SP(pvo)) {
2622 if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
2623 pvo = moea64_sp_protect(pvo, prot);
2626 CTR1(KTR_PMAP, "%s: demote before protect",
2628 moea64_sp_demote(pvo);
2631 moea64_pvo_protect(pm, pvo, prot);
2637 * Map a list of wired pages into kernel virtual address space. This is
2638 * intended for temporary mappings which do not need page modification or
2639 * references recorded. Existing mappings in the region are overwritten.
2642 moea64_qenter(vm_offset_t va, vm_page_t *m, int count)
2644 while (count-- > 0) {
2645 moea64_kenter(va, VM_PAGE_TO_PHYS(*m));
2652 * Remove page mappings from kernel virtual address space. Intended for
2653 * temporary mappings entered by moea64_qenter.
2656 moea64_qremove(vm_offset_t va, int count)
2658 while (count-- > 0) {
2665 moea64_release_vsid(uint64_t vsid)
2669 mtx_lock(&moea64_slb_mutex);
2670 idx = vsid & (NVSIDS-1);
2671 mask = 1 << (idx % VSID_NBPW);
2673 KASSERT(moea64_vsid_bitmap[idx] & mask,
2674 ("Freeing unallocated VSID %#jx", vsid));
2675 moea64_vsid_bitmap[idx] &= ~mask;
2676 mtx_unlock(&moea64_slb_mutex);
2680 moea64_release(pmap_t pmap)
2684 * Free segment registers' VSIDs
2686 #ifdef __powerpc64__
2687 slb_free_tree(pmap);
2688 slb_free_user_cache(pmap->pm_slb);
2690 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2692 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2697 * Remove all pages mapped by the specified pmap
2700 moea64_remove_pages(pmap_t pm)
2702 struct pvo_entry *pvo, *tpvo;
2703 struct pvo_dlist tofree;
2705 SLIST_INIT(&tofree);
2708 RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2709 if (pvo->pvo_vaddr & PVO_WIRED)
2713 * For locking reasons, remove this from the page table and
2714 * pmap, but save delinking from the vm_page for a second pass.
2717 moea64_pvo_remove_from_pmap(pvo);
2718 SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink);
2722 while (!SLIST_EMPTY(&tofree)) {
2723 pvo = SLIST_FIRST(&tofree);
2724 SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
2725 moea64_pvo_remove_from_page(pvo);
2726 free_pvo_entry(pvo);
2731 moea64_remove_locked(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2732 struct pvo_dlist *tofree)
2734 struct pvo_entry *pvo, *tpvo, key;
2736 PMAP_LOCK_ASSERT(pm, MA_OWNED);
2738 key.pvo_vaddr = sva;
2739 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2740 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2741 if (PVO_IS_SP(pvo)) {
2742 if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
2743 tpvo = moea64_sp_remove(pvo, tofree);
2746 CTR1(KTR_PMAP, "%s: demote before remove",
2748 moea64_sp_demote(pvo);
2751 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2754 * For locking reasons, remove this from the page table and
2755 * pmap, but save delinking from the vm_page for a second pass.
2758 moea64_pvo_remove_from_pmap(pvo);
2759 SLIST_INSERT_HEAD(tofree, pvo, pvo_dlink);
2764 * Remove the given range of addresses from the specified map.
2767 moea64_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2769 struct pvo_entry *pvo;
2770 struct pvo_dlist tofree;
2773 * Perform an unsynchronized read. This is, however, safe.
2775 if (pm->pm_stats.resident_count == 0)
2778 SLIST_INIT(&tofree);
2780 moea64_remove_locked(pm, sva, eva, &tofree);
2783 while (!SLIST_EMPTY(&tofree)) {
2784 pvo = SLIST_FIRST(&tofree);
2785 SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
2786 moea64_pvo_remove_from_page(pvo);
2787 free_pvo_entry(pvo);
2792 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2793 * will reflect changes in pte's back to the vm_page.
2796 moea64_remove_all(vm_page_t m)
2798 struct pvo_entry *pvo, *next_pvo;
2799 struct pvo_head freequeue;
2803 LIST_INIT(&freequeue);
2806 LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2807 pmap = pvo->pvo_pmap;
2809 wasdead = (pvo->pvo_vaddr & PVO_DEAD);
2811 if (PVO_IS_SP(pvo)) {
2812 CTR1(KTR_PMAP, "%s: demote before remove_all",
2814 moea64_sp_demote(pvo);
2816 moea64_pvo_remove_from_pmap(pvo);
2818 moea64_pvo_remove_from_page_locked(pvo, m);
2820 LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
2824 KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
2825 KASSERT((m->a.flags & PGA_WRITEABLE) == 0, ("Page still writable"));
2828 /* Clean up UMA allocations */
2829 LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
2830 free_pvo_entry(pvo);
2834 * Allocate a physical page of memory directly from the phys_avail map.
2835 * Can only be called from moea64_bootstrap before avail start and end are
2836 * calculated.
2839 moea64_bootstrap_alloc(vm_size_t size, vm_size_t align)
2844 size = round_page(size);
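/*
 * Walk phys_avail[] for a suitably aligned chunk. Carving from the start
 * or end of a range just shrinks that range; carving from the middle
 * splits it in two, shifting the remaining entries up to make room for
 * the new pair.
 */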
2845 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2847 s = roundup2(phys_avail[i], align);
2852 if (s < phys_avail[i] || e > phys_avail[i + 1])
2855 if (s + size > platform_real_maxaddr())
2858 if (s == phys_avail[i]) {
2859 phys_avail[i] += size;
2860 } else if (e == phys_avail[i + 1]) {
2861 phys_avail[i + 1] -= size;
2863 for (j = phys_avail_count * 2; j > i; j -= 2) {
2864 phys_avail[j] = phys_avail[j - 2];
2865 phys_avail[j + 1] = phys_avail[j - 1];
2868 phys_avail[i + 3] = phys_avail[i + 1];
2869 phys_avail[i + 1] = s;
2870 phys_avail[i + 2] = e;
2876 panic("moea64_bootstrap_alloc: could not allocate memory");
2880 moea64_pvo_enter(struct pvo_entry *pvo, struct pvo_head *pvo_head,
2881 struct pvo_entry **oldpvop)
2883 struct pvo_entry *old_pvo;
2886 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2888 STAT_MOEA64(moea64_pvo_enter_calls++);
2893 old_pvo = RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2895 if (old_pvo != NULL) {
2896 if (oldpvop != NULL)
2901 if (pvo_head != NULL) {
2902 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2905 if (pvo->pvo_vaddr & PVO_WIRED)
2906 pvo->pvo_pmap->pm_stats.wired_count++;
2907 pvo->pvo_pmap->pm_stats.resident_count++;
2910 * Insert it into the hardware page table
2912 err = moea64_pte_insert(pvo);
2914 panic("moea64_pvo_enter: overflow");
2917 STAT_MOEA64(moea64_pvo_entries++);
2919 if (pvo->pvo_pmap == kernel_pmap)
2922 #ifdef __powerpc64__
2924 * Make sure all our bootstrap mappings are in the SLB as soon
2925 * as virtual memory is switched on.
2927 if (!pmap_bootstrapped)
2928 moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
2929 pvo->pvo_vaddr & PVO_LARGE);
2936 moea64_pvo_remove_from_pmap(struct pvo_entry *pvo)
2941 KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
2942 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2943 KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
2946 * If there is an active pte entry, we need to deactivate it
2948 refchg = moea64_pte_unset(pvo);
2951 * If it was evicted from the page table, be pessimistic and
2952 * dirty the page.
2954 if (pvo->pvo_pte.prot & VM_PROT_WRITE)
2961 * Update our statistics.
2963 pvo->pvo_pmap->pm_stats.resident_count--;
2964 if (pvo->pvo_vaddr & PVO_WIRED)
2965 pvo->pvo_pmap->pm_stats.wired_count--;
2968 * Remove this PVO from the pmap list.
2970 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2973 * Mark this for the next sweep
2975 pvo->pvo_vaddr |= PVO_DEAD;
2977 /* Send RC bits to VM */
2978 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
2979 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2980 pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
2982 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2983 if (refchg & LPTE_CHG)
2984 vm_page_dirty(pg);
2985 if (refchg & LPTE_REF)
2986 vm_page_aflag_set(pg, PGA_REFERENCED);
2992 moea64_pvo_remove_from_page_locked(struct pvo_entry *pvo,
2996 KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
2998 /* Use NULL pmaps as a sentinel for races in page deletion */
2999 if (pvo->pvo_pmap == NULL)
3001 pvo->pvo_pmap = NULL;
3004 * Update vm about page writeability/executability if managed
3006 PV_LOCKASSERT(PVO_PADDR(pvo));
3007 if (pvo->pvo_vaddr & PVO_MANAGED) {
3009 LIST_REMOVE(pvo, pvo_vlink);
3010 if (LIST_EMPTY(vm_page_to_pvoh(m)))
3011 vm_page_aflag_clear(m,
3012 PGA_WRITEABLE | PGA_EXECUTABLE);
3016 STAT_MOEA64(moea64_pvo_entries--);
3017 STAT_MOEA64(moea64_pvo_remove_calls++);
3021 moea64_pvo_remove_from_page(struct pvo_entry *pvo)
3023 vm_page_t pg = NULL;
3025 if (pvo->pvo_vaddr & PVO_MANAGED)
3026 pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
3028 PV_LOCK(PVO_PADDR(pvo));
3029 moea64_pvo_remove_from_page_locked(pvo, pg);
3030 PV_UNLOCK(PVO_PADDR(pvo));
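/*
 * Look up the PVO for a virtual address in a pmap. Lookups are keyed on
 * the page-aligned VA; because the per-pmap RB tree is ordered by VA, the
 * same tree also supports the RB_NFIND() range walks used by
 * moea64_protect() and moea64_remove_locked().
 */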
3033 static struct pvo_entry *
3034 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
3036 struct pvo_entry key;
3038 PMAP_LOCK_ASSERT(pm, MA_OWNED);
3040 key.pvo_vaddr = va & ~ADDR_POFF;
3041 return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
3045 moea64_query_bit(vm_page_t m, uint64_t ptebit)
3047 struct pvo_entry *pvo;
3053 * See if this bit is stored in the page already.
3055 * For superpages, the bit is stored in the first vm page.
3057 if ((m->md.mdpg_attrs & ptebit) != 0 ||
3058 ((sp = PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(m) & ~HPT_SP_MASK)) != NULL &&
3059 (sp->md.mdpg_attrs & (ptebit | MDPG_ATTR_SP)) ==
3060 (ptebit | MDPG_ATTR_SP)))
3064 * Examine each PTE. Sync so that any pending REF/CHG bits are
3065 * flushed to the PTEs.
3070 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
3071 if (PVO_IS_SP(pvo)) {
3072 ret = moea64_sp_query(pvo, ptebit);
3074 * If SP was not demoted, check its REF/CHG bits here.
3077 if ((ret & ptebit) != 0) {
3083 /* else, fallthrough */
3089 * See if this pvo has a valid PTE. If so, fetch the
3090 * REF/CHG bits from the valid PTE. If the appropriate
3091 * ptebit is set, return success.
3093 PMAP_LOCK(pvo->pvo_pmap);
3094 if (!(pvo->pvo_vaddr & PVO_DEAD))
3095 ret = moea64_pte_synch(pvo);
3096 PMAP_UNLOCK(pvo->pvo_pmap);
3099 atomic_set_32(&m->md.mdpg_attrs,
3100 ret & (LPTE_CHG | LPTE_REF));
3113 moea64_clear_bit(vm_page_t m, u_int64_t ptebit)
3116 struct pvo_entry *pvo;
3120 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
3121 * we can reset the right ones).
3126 * For each pvo entry, clear the pte's ptebit.
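* The page's cached attribute word (mdpg_attrs) is cleared afterwards as
* well, so that a later moea64_query_bit() does not report stale bits.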
3130 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
3131 if (PVO_IS_SP(pvo)) {
3132 if ((ret = moea64_sp_clear(pvo, m, ptebit)) != -1) {
3139 PMAP_LOCK(pvo->pvo_pmap);
3140 if (!(pvo->pvo_vaddr & PVO_DEAD))
3141 ret = moea64_pte_clear(pvo, ptebit);
3142 PMAP_UNLOCK(pvo->pvo_pmap);
3144 if (ret > 0 && (ret & ptebit))
3147 atomic_clear_32(&m->md.mdpg_attrs, ptebit);
3154 moea64_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
3156 struct pvo_entry *pvo, key;
3160 if (hw_direct_map && mem_valid(pa, size) == 0)
3163 PMAP_LOCK(kernel_pmap);
3164 ppa = pa & ~ADDR_POFF;
3165 key.pvo_vaddr = DMAP_BASE_ADDRESS + ppa;
3166 for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
3167 ppa < pa + size; ppa += PAGE_SIZE,
3168 pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
3169 if (pvo == NULL || PVO_PADDR(pvo) != ppa) {
3174 PMAP_UNLOCK(kernel_pmap);
3180 * Map a set of physical memory pages into the kernel virtual
3181 * address space. Return a pointer to where it is mapped. This
3182 * routine is intended to be used for mapping device memory,
3183 * not real memory.
3186 moea64_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3188 vm_offset_t va, tmpva, ppa, offset;
3190 ppa = trunc_page(pa);
3191 offset = pa & PAGE_MASK;
3192 size = roundup2(offset + size, PAGE_SIZE);
3194 va = kva_alloc(size);
3197 panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
3199 for (tmpva = va; size > 0;) {
3200 moea64_kenter_attr(tmpva, ppa, ma);
3206 return ((void *)(va + offset));
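/*
 * Illustrative usage (not from this file): a driver mapping device
 * registers would normally go through the MI pmap_mapdev*() wrappers,
 * which dispatch here on this MMU, roughly:
 *
 *	regs = pmap_mapdev_attr(pa, len, VM_MEMATTR_UNCACHEABLE);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, len);
 */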
3210 moea64_mapdev(vm_paddr_t pa, vm_size_t size)
3213 return (moea64_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
3217 moea64_unmapdev(vm_offset_t va, vm_size_t size)
3219 vm_offset_t base, offset;
3221 base = trunc_page(va);
3222 offset = va & PAGE_MASK;
3223 size = roundup2(offset + size, PAGE_SIZE);
3225 moea64_qremove(base, atop(size));
3226 kva_free(base, size);
3230 moea64_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
3232 struct pvo_entry *pvo;
3237 if (__predict_false(pm == NULL))
3238 pm = &curthread->td_proc->p_vmspace->vm_pmap;
3242 lim = round_page(va+1);
3243 len = MIN(lim - va, sz);
3244 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
3245 if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
3246 pa = PVO_PADDR(pvo) | (va & ADDR_POFF);
3247 moea64_syncicache(pm, va, pa, len);
3256 moea64_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
3259 *va = (void *)(uintptr_t)pa;
3262 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
3267 struct pvo_entry *pvo;
3272 /* Initialize phys. segments for dumpsys(). */
3273 memset(&dump_map, 0, sizeof(dump_map));
3274 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz);
3275 for (i = 0; i < pregions_sz; i++) {
3276 dump_map[i].pa_start = pregions[i].mr_start;
3277 dump_map[i].pa_size = pregions[i].mr_size;
3282 /* Virtual segments for minidumps: */
3283 memset(&dump_map, 0, sizeof(dump_map));
3285 /* 1st: kernel .data and .bss. */
3286 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
3287 dump_map[0].pa_size = round_page((uintptr_t)_end) -
3288 dump_map[0].pa_start;
3290 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
3291 dump_map[1].pa_start = (vm_paddr_t)(uintptr_t)msgbufp->msg_ptr;
3292 dump_map[1].pa_size = round_page(msgbufp->msg_size);
3294 /* 3rd: kernel VM. */
3295 va = dump_map[1].pa_start + dump_map[1].pa_size;
3296 /* Find start of next chunk (from va). */
3297 while (va < virtual_end) {
3298 /* Don't dump the buffer cache. */
3299 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
3300 va = kmi.buffer_eva;
3303 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
3304 if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
3308 if (va < virtual_end) {
3309 dump_map[2].pa_start = va;
3311 /* Find last page in chunk. */
3312 while (va < virtual_end) {
3313 /* Don't run into the buffer cache. */
3314 if (va == kmi.buffer_sva)
3316 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
3317 if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD))
3321 dump_map[2].pa_size = va - dump_map[2].pa_start;
3325 #ifdef __powerpc64__
3330 struct pvo_entry *pvo;
3331 vm_paddr_t pa, pa_end;
3332 vm_offset_t va, pgva, kstart, kend, kstart_lp, kend_lp;
3335 lpsize = moea64_large_page_size;
3336 kstart = trunc_page((vm_offset_t)_etext);
3337 kend = round_page((vm_offset_t)_end);
3338 kstart_lp = kstart & ~moea64_large_page_mask;
3339 kend_lp = (kend + moea64_large_page_mask) & ~moea64_large_page_mask;
3341 CTR4(KTR_PMAP, "moea64_scan_pmap: kstart=0x%016lx, kend=0x%016lx, "
3342 "kstart_lp=0x%016lx, kend_lp=0x%016lx",
3343 kstart, kend, kstart_lp, kend_lp);
3345 PMAP_LOCK(kernel_pmap);
3346 RB_FOREACH(pvo, pvo_tree, &kernel_pmap->pmap_pvo) {
3347 va = pvo->pvo_vaddr;
3352 /* Skip DMAP (except kernel area) */
3353 if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS) {
3354 if (va & PVO_LARGE) {
3355 pgva = va & ~moea64_large_page_mask;
3356 if (pgva < kstart_lp || pgva >= kend_lp)
3359 pgva = trunc_page(va);
3360 if (pgva < kstart || pgva >= kend)
3365 pa = PVO_PADDR(pvo);
3367 if (va & PVO_LARGE) {
3368 pa_end = pa + lpsize;
3369 for (; pa < pa_end; pa += PAGE_SIZE) {
3370 if (is_dumpable(pa))
3374 if (is_dumpable(pa))
3378 PMAP_UNLOCK(kernel_pmap);
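/*
 * The value returned is the size of the hashed page table itself
 * (8 PTEs per PTEG), which the minidump accounts for in addition to the
 * pages marked above.
 */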
3380 return (sizeof(struct lpte) * moea64_pteg_count * 8);
3383 static struct dump_context dump_ctx;
3386 moea64_dump_pmap_init(unsigned blkpgs)
3389 dump_ctx.ptex_end = moea64_pteg_count * 8;
3390 dump_ctx.blksz = blkpgs * PAGE_SIZE;
3403 moea64_dump_pmap_init(unsigned blkpgs)
3410 #ifdef __powerpc64__
3412 moea64_map_range(vm_offset_t va, vm_paddr_t pa, vm_size_t npages)
3415 for (; npages > 0; --npages) {
3416 if (moea64_large_page_size != 0 &&
3417 (pa & moea64_large_page_mask) == 0 &&
3418 (va & moea64_large_page_mask) == 0 &&
3419 npages >= (moea64_large_page_size >> PAGE_SHIFT)) {
3420 PMAP_LOCK(kernel_pmap);
3421 moea64_kenter_large(va, pa, 0, 0);
3422 PMAP_UNLOCK(kernel_pmap);
3423 pa += moea64_large_page_size;
3424 va += moea64_large_page_size;
3425 npages -= (moea64_large_page_size >> PAGE_SHIFT) - 1;
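/*
 * Only "pages per large page - 1" are subtracted here; the loop's
 * own --npages decrement accounts for the remaining one.
 */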
3427 moea64_kenter(va, pa);
3435 moea64_page_array_startup(long pages)
3437 long dom_pages[MAXMEMDOM];
3439 vm_offset_t va, vm_page_base;
3440 vm_size_t needed, size;
3445 vm_page_base = 0xd000000000000000ULL;
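/*
 * On multi-domain systems vm_page_array is placed in its own KVA region
 * starting at the fixed base above, so each domain's slice can be backed
 * with 16MB pages by moea64_map_range() below.
 */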
3447 /* Short-circuit single-domain systems. */
3448 if (vm_ndomains == 1) {
3449 size = round_page(pages * sizeof(struct vm_page));
3450 pa = vm_phys_early_alloc(0, size);
3451 vm_page_base = moea64_map(&vm_page_base,
3452 pa, pa + size, VM_PROT_READ | VM_PROT_WRITE);
3453 vm_page_array_size = pages;
3454 vm_page_array = (vm_page_t)vm_page_base;
3459 for (i = 0; i < MAXMEMDOM; i++)
3462 /* Now get the number of pages required per domain. */
3463 for (i = 0; i < vm_phys_nsegs; i++) {
3464 domain = vm_phys_segs[i].domain;
3465 KASSERT(domain < MAXMEMDOM,
3466 ("Invalid vm_phys_segs NUMA domain %d!\n", domain));
3467 /* Get size of vm_page_array needed for this segment. */
3468 size = btoc(vm_phys_segs[i].end - vm_phys_segs[i].start);
3469 dom_pages[domain] += size;
3472 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
3473 domain = vm_phys_domain(phys_avail[i]);
3474 KASSERT(domain < MAXMEMDOM,
3475 ("Invalid phys_avail NUMA domain %d!\n", domain));
3476 size = btoc(phys_avail[i + 1] - phys_avail[i]);
3477 dom_pages[domain] += size;
3481 * Map in chunks that can get us all 16MB pages. There will be some
3482 * overlap between domains, but that's acceptable for now.
3484 vm_page_array_size = 0;
3486 for (i = 0; i < MAXMEMDOM && vm_page_array_size < pages; i++) {
3487 if (dom_pages[i] == 0)
3489 size = ulmin(pages - vm_page_array_size, dom_pages[i]);
3490 size = round_page(size * sizeof(struct vm_page));
3492 size = roundup2(size, moea64_large_page_size);
3493 pa = vm_phys_early_alloc(i, size);
3494 vm_page_array_size += size / sizeof(struct vm_page);
3495 moea64_map_range(va, pa, size >> PAGE_SHIFT);
3496 /* Scoot up domain 0, to reduce the domain page overlap. */
3498 vm_page_base += size - needed;
3501 vm_page_array = (vm_page_t)vm_page_base;
3502 vm_page_array_size = pages;
3507 moea64_null_method(void)
3512 static int64_t moea64_pte_replace_default(struct pvo_entry *pvo, int flags)
3516 refchg = moea64_pte_unset(pvo);
3517 moea64_pte_insert(pvo);
3522 struct moea64_funcs *moea64_ops;
3524 #define DEFINE_OEA64_IFUNC(ret, func, args, def) \
3525 DEFINE_IFUNC(, ret, moea64_##func, args) { \
3526 moea64_##func##_t f; \
3527 if (moea64_ops == NULL) \
3528 return ((moea64_##func##_t)def); \
3529 f = moea64_ops->func; \
3530 return (f != NULL ? f : (moea64_##func##_t)def);\
3533 DEFINE_OEA64_IFUNC(int64_t, pte_replace, (struct pvo_entry *, int),
3534 moea64_pte_replace_default)
3535 DEFINE_OEA64_IFUNC(int64_t, pte_insert, (struct pvo_entry *), moea64_null_method)
3536 DEFINE_OEA64_IFUNC(int64_t, pte_unset, (struct pvo_entry *), moea64_null_method)
3537 DEFINE_OEA64_IFUNC(int64_t, pte_clear, (struct pvo_entry *, uint64_t),
3539 DEFINE_OEA64_IFUNC(int64_t, pte_synch, (struct pvo_entry *), moea64_null_method)
3540 DEFINE_OEA64_IFUNC(int64_t, pte_insert_sp, (struct pvo_entry *), moea64_null_method)
3541 DEFINE_OEA64_IFUNC(int64_t, pte_unset_sp, (struct pvo_entry *), moea64_null_method)
3542 DEFINE_OEA64_IFUNC(int64_t, pte_replace_sp, (struct pvo_entry *), moea64_null_method)
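/*
 * These ifuncs are resolved once at boot: if the active MMU implementation
 * installed a hook in moea64_ops it is used; otherwise the stated default
 * (moea64_null_method for most, moea64_pte_replace_default for pte_replace)
 * is called.
 */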
3544 /* Superpage functions */
3549 moea64_ps_enabled(pmap_t pmap)
3551 return (superpages_enabled);
3555 moea64_align_superpage(vm_object_t object, vm_ooffset_t offset,
3556 vm_offset_t *addr, vm_size_t size)
3558 vm_offset_t sp_offset;
3560 if (size < HPT_SP_SIZE)
3563 CTR4(KTR_PMAP, "%s: offs=%#jx, addr=%p, size=%#jx",
3564 __func__, (uintmax_t)offset, addr, (uintmax_t)size);
3566 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
3567 offset += ptoa(object->pg_color);
3568 sp_offset = offset & HPT_SP_MASK;
3569 if (size - ((HPT_SP_SIZE - sp_offset) & HPT_SP_MASK) < HPT_SP_SIZE ||
3570 (*addr & HPT_SP_MASK) == sp_offset)
3572 if ((*addr & HPT_SP_MASK) < sp_offset)
3573 *addr = (*addr & ~HPT_SP_MASK) + sp_offset;
3575 *addr = ((*addr + HPT_SP_MASK) & ~HPT_SP_MASK) + sp_offset;
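/*
 * Example (illustrative): if the (color-adjusted) object offset falls
 * 0x123000 bytes into a 16MB superpage, *addr is advanced so that
 * (*addr & HPT_SP_MASK) is also 0x123000; matching offsets let the
 * reservation system hand out physically aligned runs that
 * moea64_sp_promote() can later promote.
 */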
3580 static __inline void
3581 moea64_pvo_cleanup(struct pvo_dlist *tofree)
3583 struct pvo_entry *pvo;
3586 while (!SLIST_EMPTY(tofree)) {
3587 pvo = SLIST_FIRST(tofree);
3588 SLIST_REMOVE_HEAD(tofree, pvo_dlink);
3589 if (pvo->pvo_vaddr & PVO_DEAD)
3590 moea64_pvo_remove_from_page(pvo);
3591 free_pvo_entry(pvo);
3595 static __inline uint16_t
3596 pvo_to_vmpage_flags(struct pvo_entry *pvo)
3601 if ((pvo->pvo_pte.prot & VM_PROT_WRITE) != 0)
3602 flags |= PGA_WRITEABLE;
3603 if ((pvo->pvo_pte.prot & VM_PROT_EXECUTE) != 0)
3604 flags |= PGA_EXECUTABLE;
3610 * Check if the given pvo and its superpage are in sva-eva range.
3612 static __inline bool
3613 moea64_sp_pvo_in_range(struct pvo_entry *pvo, vm_offset_t sva, vm_offset_t eva)
3617 spva = PVO_VADDR(pvo) & ~HPT_SP_MASK;
3618 if (spva >= sva && spva + HPT_SP_SIZE <= eva) {
3620 * Because this function is intended to be called from loops
3621 * that iterate over ordered pvo entries, if the condition
3622 * above is true then the pvo must be the first of its
3623 * superpage.
3625 KASSERT(PVO_VADDR(pvo) == spva,
3626 ("%s: unexpected unaligned superpage pvo", __func__));
3633 * Update vm about the REF/CHG bits if the superpage is managed and
3634 * has (or had) write access.
3637 moea64_sp_refchg_process(struct pvo_entry *sp, vm_page_t m,
3638 int64_t sp_refchg, vm_prot_t prot)
3643 if ((sp->pvo_vaddr & PVO_MANAGED) != 0 && (prot & VM_PROT_WRITE) != 0) {
3644 for (m_end = &m[HPT_SP_PAGES]; m < m_end; m++) {
3645 refchg = sp_refchg |
3646 atomic_readandclear_32(&m->md.mdpg_attrs);
3647 if (refchg & LPTE_CHG)
3648 vm_page_dirty(m);
3649 if (refchg & LPTE_REF)
3650 vm_page_aflag_set(m, PGA_REFERENCED);
3658 moea64_sp_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
3659 vm_prot_t prot, u_int flags, int8_t psind)
3661 struct pvo_entry *pvo, **pvos;
3662 struct pvo_head *pvo_head;
3667 struct pvo_dlist tofree;
3671 KASSERT((va & HPT_SP_MASK) == 0, ("%s: va %#jx unaligned",
3672 __func__, (uintmax_t)va));
3673 KASSERT(psind == 1, ("%s: invalid psind: %d", __func__, psind));
3674 KASSERT(m->psind == 1, ("%s: invalid m->psind: %d",
3675 __func__, m->psind));
3676 KASSERT(pmap != kernel_pmap,
3677 ("%s: function called with kernel pmap", __func__));
3679 CTR5(KTR_PMAP, "%s: va=%#jx, pa=%#jx, prot=%#x, flags=%#x, psind=1",
3680 __func__, (uintmax_t)va, (uintmax_t)VM_PAGE_TO_PHYS(m),
3683 SLIST_INIT(&tofree);
3687 spa = pa = VM_PAGE_TO_PHYS(sm);
3689 /* Try to allocate all PVOs first, to make failure handling easier. */
3690 pvos = malloc(HPT_SP_PAGES * sizeof(struct pvo_entry *), M_TEMP,
3693 CTR1(KTR_PMAP, "%s: failed to alloc pvo array", __func__);
3694 return (KERN_RESOURCE_SHORTAGE);
3697 for (i = 0; i < HPT_SP_PAGES; i++) {
3698 pvos[i] = alloc_pvo_entry(0);
3699 if (pvos[i] == NULL) {
3700 CTR1(KTR_PMAP, "%s: failed to alloc pvo", __func__);
3701 for (i = i - 1; i >= 0; i--)
3702 free_pvo_entry(pvos[i]);
3704 return (KERN_RESOURCE_SHORTAGE);
3708 SP_PV_LOCK_ALIGNED(spa);
3711 /* Note: moea64_remove_locked() also clears cached REF/CHG bits. */
3712 moea64_remove_locked(pmap, va, va + HPT_SP_SIZE, &tofree);
3715 for (i = 0; i < HPT_SP_PAGES;
3716 i++, va += PAGE_SIZE, pa += PAGE_SIZE, m++) {
3719 pvo->pvo_pte.prot = prot;
3720 pvo->pvo_pte.pa = (pa & ~LPTE_LP_MASK) | LPTE_LP_4K_16M |
3721 moea64_calc_wimg(pa, pmap_page_get_memattr(m));
3723 if ((flags & PMAP_ENTER_WIRED) != 0)
3724 pvo->pvo_vaddr |= PVO_WIRED;
3725 pvo->pvo_vaddr |= PVO_LARGE;
3727 if ((m->oflags & VPO_UNMANAGED) != 0)
3730 pvo_head = &m->md.mdpg_pvoh;
3731 pvo->pvo_vaddr |= PVO_MANAGED;
3734 init_pvo_entry(pvo, pmap, va);
3736 error = moea64_pvo_enter(pvo, pvo_head, NULL);
3738 * All superpage PVOs were previously removed, so no errors
3739 * should occur while inserting the new ones.
3741 KASSERT(error == 0, ("%s: unexpected error "
3742 "when inserting superpage PVO: %d",
3747 SP_PV_UNLOCK_ALIGNED(spa);
3749 sync = (sm->a.flags & PGA_EXECUTABLE) == 0;
3750 /* Note: moea64_pvo_cleanup() also clears page prot. flags. */
3751 moea64_pvo_cleanup(&tofree);
3754 /* Set vm page flags */
3755 aflags = pvo_to_vmpage_flags(pvo);
3757 for (m = sm; m < &sm[HPT_SP_PAGES]; m++)
3758 vm_page_aflag_set(m, aflags);
3761 * Flush the page from the instruction cache if this page is
3762 * mapped executable and cacheable.
3764 if (sync && (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0)
3765 moea64_syncicache(pmap, sva, spa, HPT_SP_SIZE);
3767 atomic_add_long(&sp_mappings, 1);
3768 CTR3(KTR_PMAP, "%s: SP success for va %#jx in pmap %p",
3769 __func__, (uintmax_t)sva, pmap);
3772 return (KERN_SUCCESS);
3776 moea64_sp_promote(pmap_t pmap, vm_offset_t va, vm_page_t m)
3778 struct pvo_entry *first, *pvo;
3779 vm_paddr_t pa, pa_end;
3780 vm_offset_t sva, va_end;
3783 /* This CTR may generate a lot of output. */
3784 /* CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)va); */
3789 pa = VM_PAGE_TO_PHYS(m) & ~HPT_SP_MASK;
3790 m = PHYS_TO_VM_PAGE(pa);
3795 * Check if all pages meet promotion criteria.
3797 * XXX In some cases the loop below may be executed for each or most
3798 * of the entered pages of a superpage, which can be expensive
3799 * (although it was not profiled) and may need some optimization.
3801 * Some cases where this seems to happen are:
3802 * - When a superpage is first entered read-only and later becomes
3803 * read-write.
3804 * - When some of the superpage's virtual addresses map to previously
3805 * wired/cached pages while others map to pages allocated from a
3806 * different physical address range. A common scenario where this
3807 * happens is when mmap'ing a file that is already present in FS
3808 * block cache and doesn't fill a superpage.
3810 first = pvo = moea64_pvo_find_va(pmap, sva);
3811 for (pa_end = pa + HPT_SP_SIZE;
3812 pa < pa_end; pa += PAGE_SIZE, va += PAGE_SIZE) {
3813 if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD) != 0) {
3815 "%s: NULL or dead PVO: pmap=%p, va=%#jx",
3816 __func__, pmap, (uintmax_t)va);
3819 if (PVO_PADDR(pvo) != pa) {
3820 CTR5(KTR_PMAP, "%s: PAs don't match: "
3821 "pmap=%p, va=%#jx, pvo_pa=%#jx, exp_pa=%#jx",
3822 __func__, pmap, (uintmax_t)va,
3823 (uintmax_t)PVO_PADDR(pvo), (uintmax_t)pa);
3824 atomic_add_long(&sp_p_fail_pa, 1);
3827 if ((first->pvo_vaddr & PVO_FLAGS_PROMOTE) !=
3828 (pvo->pvo_vaddr & PVO_FLAGS_PROMOTE)) {
3829 CTR5(KTR_PMAP, "%s: PVO flags don't match: "
3830 "pmap=%p, va=%#jx, pvo_flags=%#jx, exp_flags=%#jx",
3831 __func__, pmap, (uintmax_t)va,
3832 (uintmax_t)(pvo->pvo_vaddr & PVO_FLAGS_PROMOTE),
3833 (uintmax_t)(first->pvo_vaddr & PVO_FLAGS_PROMOTE));
3834 atomic_add_long(&sp_p_fail_flags, 1);
3837 if (first->pvo_pte.prot != pvo->pvo_pte.prot) {
3838 CTR5(KTR_PMAP, "%s: PVO protections don't match: "
3839 "pmap=%p, va=%#jx, pvo_prot=%#x, exp_prot=%#x",
3840 __func__, pmap, (uintmax_t)va,
3841 pvo->pvo_pte.prot, first->pvo_pte.prot);
3842 atomic_add_long(&sp_p_fail_prot, 1);
3845 if ((first->pvo_pte.pa & LPTE_WIMG) !=
3846 (pvo->pvo_pte.pa & LPTE_WIMG)) {
3847 CTR5(KTR_PMAP, "%s: WIMG bits don't match: "
3848 "pmap=%p, va=%#jx, pvo_wimg=%#jx, exp_wimg=%#jx",
3849 __func__, pmap, (uintmax_t)va,
3850 (uintmax_t)(pvo->pvo_pte.pa & LPTE_WIMG),
3851 (uintmax_t)(first->pvo_pte.pa & LPTE_WIMG));
3852 atomic_add_long(&sp_p_fail_wimg, 1);
3856 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo);
3859 /* All OK, promote. */
3862 * Handle superpage REF/CHG bits. If REF or CHG is set in
3863 * any page, then it must be set in the superpage.
3865 * Instead of querying each page, we take advantage of two facts:
3866 * 1- If a page is being promoted, it was referenced.
3867 * 2- If promoted pages are writable, they were modified.
3869 sp_refchg = LPTE_REF |
3870 ((first->pvo_pte.prot & VM_PROT_WRITE) != 0 ? LPTE_CHG : 0);
3874 for (pvo = first, va_end = PVO_VADDR(pvo) + HPT_SP_SIZE;
3875 pvo != NULL && PVO_VADDR(pvo) < va_end;
3876 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
3877 pvo->pvo_pte.pa &= ~LPTE_LP_MASK;
3878 pvo->pvo_pte.pa |= LPTE_LP_4K_16M;
3879 pvo->pvo_vaddr |= PVO_LARGE;
3881 moea64_pte_replace_sp(first);
3883 /* Send REF/CHG bits to VM */
3884 moea64_sp_refchg_process(first, m, sp_refchg, first->pvo_pte.prot);
3886 /* Use first page to cache REF/CHG bits */
3887 atomic_set_32(&m->md.mdpg_attrs, sp_refchg | MDPG_ATTR_SP);
3891 atomic_add_long(&sp_mappings, 1);
3892 atomic_add_long(&sp_promotions, 1);
3893 CTR3(KTR_PMAP, "%s: success for va %#jx in pmap %p",
3894 __func__, (uintmax_t)sva, pmap);
3898 atomic_add_long(&sp_p_failures, 1);
3903 moea64_sp_demote_aligned(struct pvo_entry *sp)
3905 struct pvo_entry *pvo;
3906 vm_offset_t va, va_end;
3912 CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)PVO_VADDR(sp));
3914 pmap = sp->pvo_pmap;
3915 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3921 va = PVO_VADDR(pvo);
3922 pa = PVO_PADDR(pvo);
3923 m = PHYS_TO_VM_PAGE(pa);
3925 for (pvo = sp, va_end = va + HPT_SP_SIZE;
3926 pvo != NULL && PVO_VADDR(pvo) < va_end;
3927 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo),
3928 va += PAGE_SIZE, pa += PAGE_SIZE) {
3929 KASSERT(pvo && PVO_VADDR(pvo) == va,
3930 ("%s: missing PVO for va %#jx", __func__, (uintmax_t)va));
3932 pvo->pvo_vaddr &= ~PVO_LARGE;
3933 pvo->pvo_pte.pa &= ~LPTE_RPGN;
3934 pvo->pvo_pte.pa |= pa;
3937 refchg = moea64_pte_replace_sp(sp);
3942 * XXX It is possible that another pmap has this page mapped as
3943 * part of a superpage, but since the SP flag is used only to
3944 * cache SP REF/CHG bits, which are queried directly when not
3945 * cached, it should be safe to clear it here.
3947 atomic_clear_32(&m->md.mdpg_attrs, MDPG_ATTR_SP);
3950 * Handle superpage REF/CHG bits. A bit set in the superpage
3951 * means all pages should consider it set.
3953 moea64_sp_refchg_process(sp, m, refchg, sp->pvo_pte.prot);
3955 atomic_add_long(&sp_demotions, 1);
3956 CTR3(KTR_PMAP, "%s: success for va %#jx in pmap %p",
3957 __func__, (uintmax_t)PVO_VADDR(sp), pmap);
3961 moea64_sp_demote(struct pvo_entry *pvo)
3963 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
3965 if ((PVO_VADDR(pvo) & HPT_SP_MASK) != 0) {
3966 pvo = moea64_pvo_find_va(pvo->pvo_pmap,
3967 PVO_VADDR(pvo) & ~HPT_SP_MASK);
3968 KASSERT(pvo != NULL, ("%s: missing PVO for va %#jx",
3969 __func__, (uintmax_t)(PVO_VADDR(pvo) & ~HPT_SP_MASK)));
3971 moea64_sp_demote_aligned(pvo);
3974 static struct pvo_entry *
3975 moea64_sp_unwire(struct pvo_entry *sp)
3977 struct pvo_entry *pvo, *prev;
3980 int64_t ret, refchg;
3982 CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)PVO_VADDR(sp));
3985 PMAP_LOCK_ASSERT(pm, MA_OWNED);
3987 eva = PVO_VADDR(sp) + HPT_SP_SIZE;
3989 for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
3990 prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
3991 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
3992 panic("%s: pvo %p is missing PVO_WIRED",
3994 pvo->pvo_vaddr &= ~PVO_WIRED;
3996 ret = moea64_pte_replace(pvo, 0 /* No invalidation */);
4002 pm->pm_stats.wired_count--;
4005 /* Send REF/CHG bits to VM */
4006 moea64_sp_refchg_process(sp, PHYS_TO_VM_PAGE(PVO_PADDR(sp)),
4007 refchg, sp->pvo_pte.prot);
4012 static struct pvo_entry *
4013 moea64_sp_protect(struct pvo_entry *sp, vm_prot_t prot)
4015 struct pvo_entry *pvo, *prev;
4019 int64_t ret, refchg;
4022 CTR3(KTR_PMAP, "%s: va=%#jx, prot=%x",
4023 __func__, (uintmax_t)PVO_VADDR(sp), prot);
4026 PMAP_LOCK_ASSERT(pm, MA_OWNED);
4028 oldprot = sp->pvo_pte.prot;
4029 m = PHYS_TO_VM_PAGE(PVO_PADDR(sp));
4030 KASSERT(m != NULL, ("%s: missing vm page for pa %#jx",
4031 __func__, (uintmax_t)PVO_PADDR(sp)));
4032 eva = PVO_VADDR(sp) + HPT_SP_SIZE;
4035 for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
4036 prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
4037 pvo->pvo_pte.prot = prot;
4039 * If the PVO is in the page table, update mapping
4041 ret = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE);
4048 /* Send REF/CHG bits to VM */
4049 moea64_sp_refchg_process(sp, m, refchg, oldprot);
4051 /* Handle pages that became executable */
4052 if ((m->a.flags & PGA_EXECUTABLE) == 0 &&
4053 (sp->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
4054 if ((m->oflags & VPO_UNMANAGED) == 0)
4055 for (m_end = &m[HPT_SP_PAGES]; m < m_end; m++)
4056 vm_page_aflag_set(m, PGA_EXECUTABLE);
4057 moea64_syncicache(pm, PVO_VADDR(sp), PVO_PADDR(sp),
4064 static struct pvo_entry *
4065 moea64_sp_remove(struct pvo_entry *sp, struct pvo_dlist *tofree)
4067 struct pvo_entry *pvo, *tpvo;
4071 CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)PVO_VADDR(sp));
4074 PMAP_LOCK_ASSERT(pm, MA_OWNED);
4076 eva = PVO_VADDR(sp) + HPT_SP_SIZE;
4077 for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
4078 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
4081 * For locking reasons, remove this from the page table and
4082 * pmap, but save delinking from the vm_page for a second pass.
4085 moea64_pvo_remove_from_pmap(pvo);
4086 SLIST_INSERT_HEAD(tofree, pvo, pvo_dlink);
4092 * XXX See comment in moea64_sp_demote_aligned() for why it's
4093 * ok to always clear the SP bit on remove/demote.
4095 atomic_clear_32(&PHYS_TO_VM_PAGE(PVO_PADDR(sp))->md.mdpg_attrs,
4102 moea64_sp_query_locked(struct pvo_entry *pvo, uint64_t ptebit)
4104 int64_t refchg, ret;
4108 struct pvo_entry *sp;
4110 pmap = pvo->pvo_pmap;
4111 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4113 /* Get first SP PVO */
4114 if ((PVO_VADDR(pvo) & HPT_SP_MASK) != 0) {
4115 sp = moea64_pvo_find_va(pmap, PVO_VADDR(pvo) & ~HPT_SP_MASK);
4116 KASSERT(sp != NULL, ("%s: missing PVO for va %#jx",
4117 __func__, (uintmax_t)(PVO_VADDR(pvo) & ~HPT_SP_MASK)));
4120 eva = PVO_VADDR(sp) + HPT_SP_SIZE;
4123 for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
4124 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
4125 ret = moea64_pte_synch(pvo);
4127 refchg |= ret & (LPTE_CHG | LPTE_REF);
4128 if ((refchg & ptebit) != 0)
4135 m = PHYS_TO_VM_PAGE(PVO_PADDR(sp));
4136 atomic_set_32(&m->md.mdpg_attrs, refchg | MDPG_ATTR_SP);
4143 moea64_sp_query(struct pvo_entry *pvo, uint64_t ptebit)
4148 pmap = pvo->pvo_pmap;
4152 * Check if SP was demoted/removed before pmap lock was acquired.
4154 if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) {
4155 CTR2(KTR_PMAP, "%s: demoted/removed: pa=%#jx",
4156 __func__, (uintmax_t)PVO_PADDR(pvo));
4161 refchg = moea64_sp_query_locked(pvo, ptebit);
4164 CTR4(KTR_PMAP, "%s: va=%#jx, pa=%#jx: refchg=%#jx",
4165 __func__, (uintmax_t)PVO_VADDR(pvo),
4166 (uintmax_t)PVO_PADDR(pvo), (uintmax_t)refchg);
4172 moea64_sp_pvo_clear(struct pvo_entry *pvo, uint64_t ptebit)
4174 int64_t refchg, ret;
4176 struct pvo_entry *sp;
4180 pmap = pvo->pvo_pmap;
4184 * Check if SP was demoted/removed before pmap lock was acquired.
4186 if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) {
4187 CTR2(KTR_PMAP, "%s: demoted/removed: pa=%#jx",
4188 __func__, (uintmax_t)PVO_PADDR(pvo));
4193 /* Get first SP PVO */
4194 if ((PVO_VADDR(pvo) & HPT_SP_MASK) != 0) {
4195 sp = moea64_pvo_find_va(pmap, PVO_VADDR(pvo) & ~HPT_SP_MASK);
4196 KASSERT(sp != NULL, ("%s: missing PVO for va %#jx",
4197 __func__, (uintmax_t)(PVO_VADDR(pvo) & ~HPT_SP_MASK)));
4200 eva = PVO_VADDR(sp) + HPT_SP_SIZE;
4203 for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
4204 pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
4205 ret = moea64_pte_clear(pvo, ptebit);
4207 refchg |= ret & (LPTE_CHG | LPTE_REF);
4210 m = PHYS_TO_VM_PAGE(PVO_PADDR(sp));
4211 atomic_clear_32(&m->md.mdpg_attrs, ptebit);
4214 CTR4(KTR_PMAP, "%s: va=%#jx, pa=%#jx: refchg=%#jx",
4215 __func__, (uintmax_t)PVO_VADDR(sp),
4216 (uintmax_t)PVO_PADDR(sp), (uintmax_t)refchg);
4222 moea64_sp_clear(struct pvo_entry *pvo, vm_page_t m, uint64_t ptebit)
4228 pmap = pvo->pvo_pmap;
4231 * Since this reference bit is shared by 4096 4KB pages, it
4232 * should not be cleared every time it is tested. Apply a
4233 * simple "hash" function on the physical page number, the
4234 * virtual superpage number, and the pmap address to select
4235 * one 4KB page out of the 4096 on which testing the
4236 * reference bit will result in clearing that reference bit.
4237 * This function is designed to avoid the selection of the
4238 * same 4KB page for every 16MB page mapping.
4240 * Always leave the reference bit of a wired mapping set, as
4241 * the current state of its reference bit won't affect page
4244 if (ptebit == LPTE_REF && (((VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) ^
4245 (PVO_VADDR(pvo) >> HPT_SP_SHIFT) ^ (uintptr_t)pmap) &
4246 (HPT_SP_PAGES - 1)) == 0 && (pvo->pvo_vaddr & PVO_WIRED) == 0) {
4247 if ((ret = moea64_sp_pvo_clear(pvo, ptebit)) == -1)
4250 if ((ret & ptebit) != 0)
4254 * If this page was not selected by the hash function, then assume
4255 * its REF bit was set.
4257 } else if (ptebit == LPTE_REF) {
4261 * To clear the CHG bit of a single SP page, first it must be demoted.
4262 * But if no CHG bit is set, there is nothing to clear and thus no SP
4263 * demotion is needed.
4266 CTR4(KTR_PMAP, "%s: ptebit=%#jx, va=%#jx, pa=%#jx",
4267 __func__, (uintmax_t)ptebit, (uintmax_t)PVO_VADDR(pvo),
4268 (uintmax_t)PVO_PADDR(pvo));
4273 * Make sure SP wasn't demoted/removed before pmap lock
4274 * was acquired.
4276 if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) {
4277 CTR2(KTR_PMAP, "%s: demoted/removed: pa=%#jx",
4278 __func__, (uintmax_t)PVO_PADDR(pvo));
4283 ret = moea64_sp_query_locked(pvo, ptebit);
4284 if ((ret & ptebit) != 0)
4291 moea64_sp_demote(pvo);
4292 moea64_pte_clear(pvo, ptebit);
4295 * Write protect the mapping to a single page so that a
4296 * subsequent write access may repromote.
4298 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
4299 moea64_pvo_protect(pmap, pvo,
4300 pvo->pvo_pte.prot & ~VM_PROT_WRITE);