2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2008-2015 Nathan Whitehorn
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 * Manages physical address maps.
35 * Since the information managed by this module is also stored by the
36 * logical address mapping module, this module may throw away valid virtual
37 * to physical mappings at almost any time. However, invalidations of
38 * mappings must be done as requested.
40 * In order to cope with hardware architectures which make virtual to
41 * physical map invalidates expensive, this module may delay invalidate
42 * or reduced protection operations until such time as they are actually
43 * necessary. This module is given full information as to which processors
44 * are currently using which maps, and to when physical maps must be made
48 #include "opt_kstack_pages.h"
50 #include <sys/param.h>
51 #include <sys/kernel.h>
53 #include <sys/queue.h>
54 #include <sys/cpuset.h>
55 #include <sys/kerneldump.h>
58 #include <sys/msgbuf.h>
59 #include <sys/malloc.h>
60 #include <sys/mutex.h>
62 #include <sys/rwlock.h>
63 #include <sys/sched.h>
64 #include <sys/sysctl.h>
65 #include <sys/systm.h>
66 #include <sys/vmmeter.h>
71 #include <dev/ofw/openfirm.h>
74 #include <vm/vm_param.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_phys.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_extern.h>
81 #include <vm/vm_pageout.h>
84 #include <machine/_inttypes.h>
85 #include <machine/cpu.h>
86 #include <machine/platform.h>
87 #include <machine/frame.h>
88 #include <machine/md_var.h>
89 #include <machine/psl.h>
90 #include <machine/bat.h>
91 #include <machine/hid.h>
92 #include <machine/pte.h>
93 #include <machine/sr.h>
94 #include <machine/trap.h>
95 #include <machine/mmuvar.h>
97 #include "mmu_oea64.h"
99 #include "moea64_if.h"
101 void moea64_release_vsid(uint64_t vsid);
102 uintptr_t moea64_get_unique_vsid(void);
104 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR)
105 #define ENABLE_TRANS(msr) mtmsr(msr)
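/*
 * Usage sketch (illustrative only): callers bracket a window of real-mode
 * accesses as
 *	DISABLE_TRANS(msr);
 *	... touch physical addresses with data translation off ...
 *	ENABLE_TRANS(msr);
 * Note that DISABLE_TRANS() expands to two statements, so it is only safe
 * as a standalone statement, never as an unbraced if/else body.
 */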
107 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
108 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
109 #define VSID_HASH_MASK 0x0000007fffffffffULL
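/*
 * Worked example (illustrative values): VSID_MAKE(3, 0x12345) is
 * (0x12345 << 4) | 3 == 0x123453, and VSID_TO_HASH(0x123453) recovers
 * (0x123453 >> 4) & 0xfffff == 0x12345; the segment register number sits
 * in the low four bits and the per-pmap hash in the bits above it.
 */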
114 * There are two locks of interest: the page locks and the pmap locks, which
115 * protect their individual PVO lists and are locked in that order. The contents
116 * of all PVO entries are protected by the locks of their respective pmaps.
117 * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
122 #define PV_LOCK_PER_DOM (PA_LOCK_COUNT * 3)
123 #define PV_LOCK_COUNT (PV_LOCK_PER_DOM * MAXMEMDOM)
124 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
127 * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
128 * NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the
129 * index at (N << 45).
132 #define PV_LOCK_IDX(pa) (pa_index(pa) % PV_LOCK_PER_DOM + \
133 (((pa) >> 45) % MAXMEMDOM) * PV_LOCK_PER_DOM)
135 #define PV_LOCK_IDX(pa) (pa_index(pa) % PV_LOCK_COUNT)
137 #define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)]))
138 #define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa))
139 #define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa))
140 #define PV_LOCKASSERT(pa) mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
141 #define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m))
142 #define PV_PAGE_UNLOCK(m) PV_UNLOCK(VM_PAGE_TO_PHYS(m))
143 #define PV_PAGE_LOCKASSERT(m) PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
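/*
 * Usage sketch following the lock order described above (page lock first,
 * then pmap lock); the names stand in for any caller:
 *
 *	PV_PAGE_LOCK(m);
 *	PMAP_LOCK(pmap);
 *	... inspect or modify the PVO lists ...
 *	PMAP_UNLOCK(pmap);
 *	PV_PAGE_UNLOCK(m);
 *
 * The NUMA-aware variant of PV_LOCK_IDX() above adds a per-domain offset
 * derived from physical-address bits at bit 45 and up, so pages on
 * different domains fall into disjoint banks of PV_LOCK_PER_DOM locks.
 */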
152 extern unsigned char _etext[];
153 extern unsigned char _end[];
155 extern void *slbtrap, *slbtrapend;
158 * Map of physical memory regions.
160 static struct mem_region *regions;
161 static struct mem_region *pregions;
162 static struct numa_mem_region *numa_pregions;
163 static u_int phys_avail_count;
164 static int regions_sz, pregions_sz, numapregions_sz;
166 extern void bs_remap_earlyboot(void);
169 * Lock for the SLB tables.
171 struct mtx moea64_slb_mutex;
176 u_long moea64_pteg_count;
177 u_long moea64_pteg_mask;
183 uma_zone_t moea64_pvo_zone; /* zone for pvo entries */
185 static struct pvo_entry *moea64_bpvo_pool;
186 static int moea64_bpvo_pool_index = 0;
187 static int moea64_bpvo_pool_size = 327680;
188 SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
189 &moea64_bpvo_pool_index, 0, "");
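/*
 * The pool size can be raised at boot time; e.g. (illustrative value)
 * adding machdep.moea64_bpvo_pool_size="655360" to /boot/loader.conf
 * doubles the default, matching the TUNABLE_INT_FETCH() done in
 * moea64_mid_bootstrap().
 */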
191 #define VSID_NBPW (sizeof(u_int32_t) * 8)
193 #define NVSIDS (NPMAPS * 16)
194 #define VSID_HASHMASK 0xffffffffUL
196 #define NVSIDS NPMAPS
197 #define VSID_HASHMASK 0xfffffUL
199 static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
201 static boolean_t moea64_initialized = FALSE;
207 u_int moea64_pte_valid = 0;
208 u_int moea64_pte_overflow = 0;
209 u_int moea64_pvo_entries = 0;
210 u_int moea64_pvo_enter_calls = 0;
211 u_int moea64_pvo_remove_calls = 0;
212 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
213 &moea64_pte_valid, 0, "");
214 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
215 &moea64_pte_overflow, 0, "");
216 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
217 &moea64_pvo_entries, 0, "");
218 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
219 &moea64_pvo_enter_calls, 0, "");
220 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
221 &moea64_pvo_remove_calls, 0, "");
224 vm_offset_t moea64_scratchpage_va[2];
225 struct pvo_entry *moea64_scratchpage_pvo[2];
226 struct mtx moea64_scratchpage_mtx;
228 uint64_t moea64_large_page_mask = 0;
229 uint64_t moea64_large_page_size = 0;
230 int moea64_large_page_shift = 0;
235 static int moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
236 struct pvo_head *pvo_head, struct pvo_entry **oldpvo);
237 static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
238 static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
239 static void moea64_pvo_remove_from_page_locked(mmu_t mmu,
240 struct pvo_entry *pvo, vm_page_t m);
241 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
246 static boolean_t moea64_query_bit(mmu_t, vm_page_t, uint64_t);
247 static u_int moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
248 static void moea64_kremove(mmu_t, vm_offset_t);
249 static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
250 vm_paddr_t pa, vm_size_t sz);
251 static void moea64_pmap_init_qpages(void);
254 * Kernel MMU interface
256 void moea64_clear_modify(mmu_t, vm_page_t);
257 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
258 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
259 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
260 int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
261 u_int flags, int8_t psind);
262 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
264 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
265 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
266 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
267 void moea64_init(mmu_t);
268 boolean_t moea64_is_modified(mmu_t, vm_page_t);
269 boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
270 boolean_t moea64_is_referenced(mmu_t, vm_page_t);
271 int moea64_ts_referenced(mmu_t, vm_page_t);
272 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
273 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
274 void moea64_page_init(mmu_t, vm_page_t);
275 int moea64_page_wired_mappings(mmu_t, vm_page_t);
276 void moea64_pinit(mmu_t, pmap_t);
277 void moea64_pinit0(mmu_t, pmap_t);
278 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
279 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
280 void moea64_qremove(mmu_t, vm_offset_t, int);
281 void moea64_release(mmu_t, pmap_t);
282 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
283 void moea64_remove_pages(mmu_t, pmap_t);
284 void moea64_remove_all(mmu_t, vm_page_t);
285 void moea64_remove_write(mmu_t, vm_page_t);
286 void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
287 void moea64_zero_page(mmu_t, vm_page_t);
288 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
289 void moea64_activate(mmu_t, struct thread *);
290 void moea64_deactivate(mmu_t, struct thread *);
291 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
292 void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
293 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
294 vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
295 void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
296 void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
297 void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
298 boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
299 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
300 void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
302 void moea64_scan_init(mmu_t mmu);
303 vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
304 void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
305 static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm,
306 volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
307 static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
308 int *is_user, vm_offset_t *decoded_addr);
309 static size_t moea64_scan_pmap(mmu_t mmu);
310 static void *moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs);
312 static void moea64_page_array_startup(mmu_t, long);
316 static mmu_method_t moea64_methods[] = {
317 MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
318 MMUMETHOD(mmu_copy_page, moea64_copy_page),
319 MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
320 MMUMETHOD(mmu_enter, moea64_enter),
321 MMUMETHOD(mmu_enter_object, moea64_enter_object),
322 MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
323 MMUMETHOD(mmu_extract, moea64_extract),
324 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
325 MMUMETHOD(mmu_init, moea64_init),
326 MMUMETHOD(mmu_is_modified, moea64_is_modified),
327 MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable),
328 MMUMETHOD(mmu_is_referenced, moea64_is_referenced),
329 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
330 MMUMETHOD(mmu_map, moea64_map),
331 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
332 MMUMETHOD(mmu_page_init, moea64_page_init),
333 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
334 MMUMETHOD(mmu_pinit, moea64_pinit),
335 MMUMETHOD(mmu_pinit0, moea64_pinit0),
336 MMUMETHOD(mmu_protect, moea64_protect),
337 MMUMETHOD(mmu_qenter, moea64_qenter),
338 MMUMETHOD(mmu_qremove, moea64_qremove),
339 MMUMETHOD(mmu_release, moea64_release),
340 MMUMETHOD(mmu_remove, moea64_remove),
341 MMUMETHOD(mmu_remove_pages, moea64_remove_pages),
342 MMUMETHOD(mmu_remove_all, moea64_remove_all),
343 MMUMETHOD(mmu_remove_write, moea64_remove_write),
344 MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
345 MMUMETHOD(mmu_unwire, moea64_unwire),
346 MMUMETHOD(mmu_zero_page, moea64_zero_page),
347 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
348 MMUMETHOD(mmu_activate, moea64_activate),
349 MMUMETHOD(mmu_deactivate, moea64_deactivate),
350 MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
351 MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
352 MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),
354 MMUMETHOD(mmu_page_array_startup, moea64_page_array_startup),
357 /* Internal interfaces */
358 MMUMETHOD(mmu_mapdev, moea64_mapdev),
359 MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr),
360 MMUMETHOD(mmu_unmapdev, moea64_unmapdev),
361 MMUMETHOD(mmu_kextract, moea64_kextract),
362 MMUMETHOD(mmu_kenter, moea64_kenter),
363 MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr),
364 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
365 MMUMETHOD(mmu_scan_init, moea64_scan_init),
366 MMUMETHOD(mmu_scan_pmap, moea64_scan_pmap),
367 MMUMETHOD(mmu_dump_pmap_init, moea64_dump_pmap_init),
368 MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map),
369 MMUMETHOD(mmu_map_user_ptr, moea64_map_user_ptr),
370 MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr),
375 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
377 static struct pvo_head *
378 vm_page_to_pvoh(vm_page_t m)
381 mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
382 return (&m->md.mdpg_pvoh);
385 static struct pvo_entry *
386 alloc_pvo_entry(int bootstrap)
388 struct pvo_entry *pvo;
390 if (!moea64_initialized || bootstrap) {
391 if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
392 panic("%s: bpvo pool exhausted, index=%d, size=%d, bytes=%zd. "
393 "Try setting machdep.moea64_bpvo_pool_size tunable",
394 __func__, moea64_bpvo_pool_index,
395 moea64_bpvo_pool_size,
396 moea64_bpvo_pool_size * sizeof(struct pvo_entry));
398 pvo = &moea64_bpvo_pool[
399 atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
400 bzero(pvo, sizeof(*pvo));
401 pvo->pvo_vaddr = PVO_BOOTSTRAP;
403 pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT | M_ZERO);
410 init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
416 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
418 pvo->pvo_pmap = pmap;
420 pvo->pvo_vaddr |= va;
421 vsid = va_to_vsid(pmap, va);
422 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
425 shift = (pvo->pvo_vaddr & PVO_LARGE) ? moea64_large_page_shift :
427 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
428 pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
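/*
 * Each PTE group (PTEG) holds eight PTEs, so the slot above is the primary
 * group index scaled by eight; e.g. (illustrative numbers) with
 * moea64_pteg_mask == 0xfff, hash == 0x1005 selects group 0x005 and
 * slot 0x28.
 */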
432 free_pvo_entry(struct pvo_entry *pvo)
435 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
436 uma_zfree(moea64_pvo_zone, pvo);
440 moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
443 lpte->pte_hi = moea64_pte_vpn_from_pvo_vpn(pvo);
444 lpte->pte_hi |= LPTE_VALID;
446 if (pvo->pvo_vaddr & PVO_LARGE)
447 lpte->pte_hi |= LPTE_BIG;
448 if (pvo->pvo_vaddr & PVO_WIRED)
449 lpte->pte_hi |= LPTE_WIRED;
450 if (pvo->pvo_vaddr & PVO_HID)
451 lpte->pte_hi |= LPTE_HID;
453 lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
454 if (pvo->pvo_pte.prot & VM_PROT_WRITE)
455 lpte->pte_lo |= LPTE_BW;
457 lpte->pte_lo |= LPTE_BR;
459 if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
460 lpte->pte_lo |= LPTE_NOEXEC;
463 static __inline uint64_t
464 moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
469 if (ma != VM_MEMATTR_DEFAULT) {
471 case VM_MEMATTR_UNCACHEABLE:
472 return (LPTE_I | LPTE_G);
473 case VM_MEMATTR_CACHEABLE:
475 case VM_MEMATTR_WRITE_COMBINING:
476 case VM_MEMATTR_WRITE_BACK:
477 case VM_MEMATTR_PREFETCHABLE:
479 case VM_MEMATTR_WRITE_THROUGH:
480 return (LPTE_W | LPTE_M);
485 * Assume the page is cache inhibited and access is guarded unless
486 * it's in our available memory array.
488 pte_lo = LPTE_I | LPTE_G;
489 for (i = 0; i < pregions_sz; i++) {
490 if ((pa >= pregions[i].mr_start) &&
491 (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
492 pte_lo &= ~(LPTE_I | LPTE_G);
502 * Quick sort callout for comparing memory regions.
504 static int om_cmp(const void *a, const void *b);
507 om_cmp(const void *a, const void *b)
509 const struct ofw_map *mapa;
510 const struct ofw_map *mapb;
514 if (mapa->om_pa < mapb->om_pa)
516 else if (mapa->om_pa > mapb->om_pa)
523 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
525 struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
526 pcell_t acells, trans_cells[sz/sizeof(cell_t)];
527 struct pvo_entry *pvo;
533 bzero(translations, sz);
534 OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
536 if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1)
537 panic("moea64_bootstrap: can't get ofw translations");
539 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
540 sz /= sizeof(cell_t);
541 for (i = 0, j = 0; i < sz; j++) {
542 translations[j].om_va = trans_cells[i++];
543 translations[j].om_len = trans_cells[i++];
544 translations[j].om_pa = trans_cells[i++];
546 translations[j].om_pa <<= 32;
547 translations[j].om_pa |= trans_cells[i++];
549 translations[j].om_mode = trans_cells[i++];
551 KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
555 qsort(translations, sz, sizeof (*translations), om_cmp);
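/*
 * Illustrative layout of one record handled by the parsing loop above when
 * the root reports two address cells (values hypothetical):
 *	{ om_va, om_len, pa_hi, pa_lo, om_mode }
 * with om_pa reassembled as (pa_hi << 32) | pa_lo; with a single address
 * cell the pa_hi word is absent.
 */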
557 for (i = 0; i < sz; i++) {
558 pa_base = translations[i].om_pa;
559 #ifndef __powerpc64__
560 if ((translations[i].om_pa >> 32) != 0)
561 panic("OFW translations above 32-bit boundary!");
564 if (pa_base % PAGE_SIZE)
565 panic("OFW translation not page-aligned (phys)!");
566 if (translations[i].om_va % PAGE_SIZE)
567 panic("OFW translation not page-aligned (virt)!");
569 CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
570 pa_base, translations[i].om_va, translations[i].om_len);
572 /* Now enter the pages for this mapping */
575 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
576 /* If this address is direct-mapped, skip remapping */
578 translations[i].om_va == PHYS_TO_DMAP(pa_base) &&
579 moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
583 PMAP_LOCK(kernel_pmap);
584 pvo = moea64_pvo_find_va(kernel_pmap,
585 translations[i].om_va + off);
586 PMAP_UNLOCK(kernel_pmap);
590 moea64_kenter(mmup, translations[i].om_va + off,
599 moea64_probe_large_page(void)
601 uint16_t pvr = mfpvr() >> 16;
607 powerpc_sync(); isync();
608 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
609 powerpc_sync(); isync();
613 if (moea64_large_page_size == 0) {
614 moea64_large_page_size = 0x1000000; /* 16 MB */
615 moea64_large_page_shift = 24;
619 moea64_large_page_mask = moea64_large_page_size - 1;
623 moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
630 cache = PCPU_GET(aim.slb);
631 esid = va >> ADDR_SR_SHFT;
632 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
634 for (i = 0; i < 64; i++) {
635 if (cache[i].slbe == (slbe | i))
640 entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
642 entry.slbv |= SLBV_L;
644 slb_insert_kernel(entry.slbe, entry.slbv);
649 moea64_kenter_large(mmu_t mmup, vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap)
651 struct pvo_entry *pvo;
658 pvo = alloc_pvo_entry(bootstrap);
659 pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
660 init_pvo_entry(pvo, kernel_pmap, va);
662 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
664 pvo->pvo_pte.pa = pa | pte_lo;
665 error = moea64_pvo_enter(mmup, pvo, NULL, NULL);
667 panic("Error %d inserting large page\n", error);
672 moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
673 vm_offset_t kernelend)
676 vm_paddr_t pa, pkernelstart, pkernelend;
677 vm_offset_t size, off;
681 if (moea64_large_page_size == 0)
686 PMAP_LOCK(kernel_pmap);
687 for (i = 0; i < pregions_sz; i++) {
688 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
689 pregions[i].mr_size; pa += moea64_large_page_size) {
691 if (pa & moea64_large_page_mask) {
692 pa &= moea64_large_page_mask;
695 if (pa + moea64_large_page_size >
696 pregions[i].mr_start + pregions[i].mr_size)
699 moea64_kenter_large(mmup, PHYS_TO_DMAP(pa), pa, pte_lo, 1);
702 PMAP_UNLOCK(kernel_pmap);
706 * Make sure the kernel and BPVO pool stay mapped on systems either
707 * without a direct map or on which the kernel is not already executing
708 * out of the direct-mapped region.
710 if (kernelstart < DMAP_BASE_ADDRESS) {
712 * For pre-dmap execution, we need to use identity mapping
713 * because we will be operating with the mmu on but in the
714 * wrong address configuration until we __restartkernel().
716 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
718 moea64_kenter(mmup, pa, pa);
719 } else if (!hw_direct_map) {
720 pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS;
721 pkernelend = kernelend & ~DMAP_BASE_ADDRESS;
722 for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend;
724 moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
727 if (!hw_direct_map) {
728 size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
729 off = (vm_offset_t)(moea64_bpvo_pool);
730 for (pa = off; pa < off + size; pa += PAGE_SIZE)
731 moea64_kenter(mmup, pa, pa);
733 /* Map exception vectors */
734 for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE)
735 moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
740 * Allow user to override unmapped_buf_allowed for testing.
741 * XXXKIB Only direct map implementation was tested.
743 if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
744 &unmapped_buf_allowed))
745 unmapped_buf_allowed = hw_direct_map;
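/*
 * For example (illustrative), setting vfs.unmapped_buf_allowed="0" in
 * /boot/loader.conf forces mapped buffer I/O even when the direct map
 * would otherwise allow unmapped buffers.
 */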
748 /* Quick sort callout for comparing physical addresses. */
750 pa_cmp(const void *a, const void *b)
752 const vm_paddr_t *pa = a, *pb = b;
763 moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
766 vm_size_t physsz, hwphyssz;
767 vm_paddr_t kernelphysstart, kernelphysend;
770 #ifndef __powerpc64__
771 /* We don't have a direct map since there is no BAT */
774 /* Make sure battable is zero, since we have no BAT */
775 for (i = 0; i < 16; i++) {
776 battable[i].batu = 0;
777 battable[i].batl = 0;
780 moea64_probe_large_page();
782 /* Use a direct map if we have large page support */
783 if (moea64_large_page_size > 0)
788 /* Install trap handlers for SLBs */
789 bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap);
790 bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap);
791 __syncicache((void *)EXC_DSE, 0x80);
792 __syncicache((void *)EXC_ISE, 0x80);
795 kernelphysstart = kernelstart & ~DMAP_BASE_ADDRESS;
796 kernelphysend = kernelend & ~DMAP_BASE_ADDRESS;
798 /* Get physical memory regions from firmware */
799 mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
800 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
802 if (PHYS_AVAIL_ENTRIES < regions_sz)
803 panic("moea64_bootstrap: phys_avail too small");
805 phys_avail_count = 0;
808 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
809 for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
810 CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
811 regions[i].mr_start, regions[i].mr_start +
812 regions[i].mr_size, regions[i].mr_size);
814 (physsz + regions[i].mr_size) >= hwphyssz) {
815 if (physsz < hwphyssz) {
816 phys_avail[j] = regions[i].mr_start;
817 phys_avail[j + 1] = regions[i].mr_start +
821 dump_avail[j] = phys_avail[j];
822 dump_avail[j + 1] = phys_avail[j + 1];
826 phys_avail[j] = regions[i].mr_start;
827 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
829 physsz += regions[i].mr_size;
830 dump_avail[j] = phys_avail[j];
831 dump_avail[j + 1] = phys_avail[j + 1];
834 /* Check for overlap with the kernel and exception vectors */
836 for (j = 0; j < 2*phys_avail_count; j+=2) {
837 if (phys_avail[j] < EXC_LAST)
838 phys_avail[j] += EXC_LAST;
840 if (phys_avail[j] >= kernelphysstart &&
841 phys_avail[j+1] <= kernelphysend) {
842 phys_avail[j] = phys_avail[j+1] = ~0;
847 if (kernelphysstart >= phys_avail[j] &&
848 kernelphysstart < phys_avail[j+1]) {
849 if (kernelphysend < phys_avail[j+1]) {
850 phys_avail[2*phys_avail_count] =
851 (kernelphysend & ~PAGE_MASK) + PAGE_SIZE;
852 phys_avail[2*phys_avail_count + 1] =
857 phys_avail[j+1] = kernelphysstart & ~PAGE_MASK;
860 if (kernelphysend >= phys_avail[j] &&
861 kernelphysend < phys_avail[j+1]) {
862 if (kernelphysstart > phys_avail[j]) {
863 phys_avail[2*phys_avail_count] = phys_avail[j];
864 phys_avail[2*phys_avail_count + 1] =
865 kernelphysstart & ~PAGE_MASK;
869 phys_avail[j] = (kernelphysend & ~PAGE_MASK) +
874 /* Remove physical available regions marked for removal (~0) */
876 qsort(phys_avail, 2*phys_avail_count, sizeof(phys_avail[0]),
878 phys_avail_count -= rm_pavail;
879 for (i = 2*phys_avail_count;
880 i < 2*(phys_avail_count + rm_pavail); i+=2)
881 phys_avail[i] = phys_avail[i+1] = 0;
884 physmem = btoc(physsz);
887 moea64_pteg_count = PTEGCOUNT;
889 moea64_pteg_count = 0x1000;
891 while (moea64_pteg_count < physmem)
892 moea64_pteg_count <<= 1;
894 moea64_pteg_count >>= 1;
895 #endif /* PTEGCOUNT */
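/*
 * Worked example (illustrative): with physmem == 0x180000 pages the
 * doubling loop above stops at 0x200000 and the final shift leaves
 * moea64_pteg_count == 0x100000 PTE groups.
 */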
899 moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
906 moea64_pteg_mask = moea64_pteg_count - 1;
909 * Initialize SLB table lock and page locks
911 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
912 for (i = 0; i < PV_LOCK_COUNT; i++)
913 mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
916 * Initialize the bootstrap pvo pool.
918 TUNABLE_INT_FETCH("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size);
919 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
920 moea64_bpvo_pool_size*sizeof(struct pvo_entry), PAGE_SIZE);
921 moea64_bpvo_pool_index = 0;
923 /* Place at address usable through the direct map */
925 moea64_bpvo_pool = (struct pvo_entry *)
926 PHYS_TO_DMAP((uintptr_t)moea64_bpvo_pool);
929 * Make sure kernel vsid is allocated as well as VSID 0.
931 #ifndef __powerpc64__
932 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
933 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
934 moea64_vsid_bitmap[0] |= 1;
938 * Initialize the kernel pmap (which is statically allocated).
941 for (i = 0; i < 64; i++) {
942 pcpup->pc_aim.slb[i].slbv = 0;
943 pcpup->pc_aim.slb[i].slbe = 0;
946 for (i = 0; i < 16; i++)
947 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
950 kernel_pmap->pmap_phys = kernel_pmap;
951 CPU_FILL(&kernel_pmap->pm_active);
952 RB_INIT(&kernel_pmap->pmap_pvo);
954 PMAP_LOCK_INIT(kernel_pmap);
957 * Now map in all the other buffers we allocated earlier
960 moea64_setup_direct_map(mmup, kernelstart, kernelend);
964 moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
975 * Set up the Open Firmware pmap and add its mappings if not in real
979 chosen = OF_finddevice("/chosen");
980 if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) {
981 mmu = OF_instance_to_package(mmui);
983 (sz = OF_getproplen(mmu, "translations")) == -1)
985 if (sz > 6144 /* tmpstksz - 2 KB headroom */)
986 panic("moea64_bootstrap: too many ofw translations");
989 moea64_add_ofw_mappings(mmup, mmu, sz);
993 * Calculate the last available physical address.
996 for (i = 0; phys_avail[i + 2] != 0; i += 2)
997 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));
1002 MMU_CPU_BOOTSTRAP(mmup,0);
1003 mtmsr(mfmsr() | PSL_DR | PSL_IR);
1004 pmap_bootstrapped++;
1007 * Set the start and end of kva.
1009 virtual_avail = VM_MIN_KERNEL_ADDRESS;
1010 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
1013 * Map the entire KVA range into the SLB. We must not fault there.
1015 #ifdef __powerpc64__
1016 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
1017 moea64_bootstrap_slb_prefault(va, 0);
1021 * Remap any early IO mappings (console framebuffer, etc.)
1023 bs_remap_earlyboot();
1026 * Figure out how far we can extend virtual_end into segment 16
1027 * without running into existing mappings. Segment 16 is guaranteed
1028 * to contain neither RAM nor devices (at least on Apple hardware),
1029 * but will generally contain some OFW mappings we should not
1033 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */
1034 PMAP_LOCK(kernel_pmap);
1035 while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
1036 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
1037 virtual_end += PAGE_SIZE;
1038 PMAP_UNLOCK(kernel_pmap);
1042 * Allocate a kernel stack with a guard page for thread0 and map it
1043 * into the kernel page map.
1045 pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
1046 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1047 virtual_avail = va + kstack_pages * PAGE_SIZE;
1048 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
1049 thread0.td_kstack = va;
1050 thread0.td_kstack_pages = kstack_pages;
1051 for (i = 0; i < kstack_pages; i++) {
1052 moea64_kenter(mmup, va, pa);
1058 * Allocate virtual address space for the message buffer.
1060 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
1061 msgbufp = (struct msgbuf *)virtual_avail;
1063 virtual_avail += round_page(msgbufsize);
1064 while (va < virtual_avail) {
1065 moea64_kenter(mmup, va, pa);
1071 * Allocate virtual address space for the dynamic percpu area.
1073 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
1074 dpcpu = (void *)virtual_avail;
1076 virtual_avail += DPCPU_SIZE;
1077 while (va < virtual_avail) {
1078 moea64_kenter(mmup, va, pa);
1082 dpcpu_init(dpcpu, curcpu);
1084 crashdumpmap = (caddr_t)virtual_avail;
1085 virtual_avail += MAXDUMPPGS * PAGE_SIZE;
1088 * Allocate some things for page zeroing. We put this directly
1089 * in the page table and use MOEA64_PTE_REPLACE to keep the PVO
1090 * book-keeping and other parts of the VM system
1091 * from even knowing that this hack exists.
1094 if (!hw_direct_map) {
1095 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
1097 for (i = 0; i < 2; i++) {
1098 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
1099 virtual_end -= PAGE_SIZE;
1101 moea64_kenter(mmup, moea64_scratchpage_va[i], 0);
1103 PMAP_LOCK(kernel_pmap);
1104 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
1105 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
1106 PMAP_UNLOCK(kernel_pmap);
1110 numa_mem_regions(&numa_pregions, &numapregions_sz);
1114 moea64_pmap_init_qpages(void)
1124 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
1125 if (pc->pc_qmap_addr == 0)
1126 panic("pmap_init_qpages: unable to allocate KVA");
1127 PMAP_LOCK(kernel_pmap);
1128 pc->pc_aim.qmap_pvo =
1129 moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
1130 PMAP_UNLOCK(kernel_pmap);
1131 mtx_init(&pc->pc_aim.qmap_lock, "qmap lock", NULL, MTX_DEF);
1135 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);
1138 * Activate a user pmap. This mostly involves setting some non-CPU
1142 moea64_activate(mmu_t mmu, struct thread *td)
1146 pm = &td->td_proc->p_vmspace->vm_pmap;
1147 CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
1149 #ifdef __powerpc64__
1150 PCPU_SET(aim.userslb, pm->pm_slb);
1151 __asm __volatile("slbmte %0, %1; isync" ::
1152 "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
1154 PCPU_SET(curpmap, pm->pmap_phys);
1155 mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
1160 moea64_deactivate(mmu_t mmu, struct thread *td)
1164 __asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));
1166 pm = &td->td_proc->p_vmspace->vm_pmap;
1167 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
1168 #ifdef __powerpc64__
1169 PCPU_SET(aim.userslb, NULL);
1171 PCPU_SET(curpmap, NULL);
1176 moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1178 struct pvo_entry key, *pvo;
1182 key.pvo_vaddr = sva;
1184 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1185 pvo != NULL && PVO_VADDR(pvo) < eva;
1186 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
1187 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1188 panic("moea64_unwire: pvo %p is missing PVO_WIRED",
1190 pvo->pvo_vaddr &= ~PVO_WIRED;
1191 refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */);
1192 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1193 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1196 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
1198 refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
1199 if (refchg & LPTE_CHG)
1201 if (refchg & LPTE_REF)
1202 vm_page_aflag_set(m, PGA_REFERENCED);
1204 pm->pm_stats.wired_count--;
1210 * This goes through and sets the physical address of our
1211 * special scratch PTE to the PA we want to zero or copy. Because
1212 * of locking issues (this can get called in pvo_enter() by
1213 * the UMA allocator), we can't use most other utility functions here
1217 void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa)
1219 struct pvo_entry *pvo;
1221 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1222 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1224 pvo = moea64_scratchpage_pvo[which];
1225 PMAP_LOCK(pvo->pvo_pmap);
1227 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1228 MOEA64_PTE_REPLACE(mmup, pvo, MOEA64_PTE_INVALIDATE);
1229 PMAP_UNLOCK(pvo->pvo_pmap);
1234 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1239 dst = VM_PAGE_TO_PHYS(mdst);
1240 src = VM_PAGE_TO_PHYS(msrc);
1242 if (hw_direct_map) {
1243 bcopy((void *)PHYS_TO_DMAP(src), (void *)PHYS_TO_DMAP(dst),
1246 mtx_lock(&moea64_scratchpage_mtx);
1248 moea64_set_scratchpage_pa(mmu, 0, src);
1249 moea64_set_scratchpage_pa(mmu, 1, dst);
1251 bcopy((void *)moea64_scratchpage_va[0],
1252 (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1254 mtx_unlock(&moea64_scratchpage_mtx);
1259 moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1260 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1263 vm_offset_t a_pg_offset, b_pg_offset;
1266 while (xfersize > 0) {
1267 a_pg_offset = a_offset & PAGE_MASK;
1268 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1269 a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
1270 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
1272 b_pg_offset = b_offset & PAGE_MASK;
1273 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1274 b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
1275 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
1277 bcopy(a_cp, b_cp, cnt);
1285 moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1286 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1289 vm_offset_t a_pg_offset, b_pg_offset;
1292 mtx_lock(&moea64_scratchpage_mtx);
1293 while (xfersize > 0) {
1294 a_pg_offset = a_offset & PAGE_MASK;
1295 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1296 moea64_set_scratchpage_pa(mmu, 0,
1297 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
1298 a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
1299 b_pg_offset = b_offset & PAGE_MASK;
1300 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1301 moea64_set_scratchpage_pa(mmu, 1,
1302 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
1303 b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
1304 bcopy(a_cp, b_cp, cnt);
1309 mtx_unlock(&moea64_scratchpage_mtx);
1313 moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1314 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1317 if (hw_direct_map) {
1318 moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
1321 moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
1327 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1329 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1331 if (size + off > PAGE_SIZE)
1332 panic("moea64_zero_page: size + off > PAGE_SIZE");
1334 if (hw_direct_map) {
1335 bzero((caddr_t)(uintptr_t)PHYS_TO_DMAP(pa) + off, size);
1337 mtx_lock(&moea64_scratchpage_mtx);
1338 moea64_set_scratchpage_pa(mmu, 0, pa);
1339 bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1340 mtx_unlock(&moea64_scratchpage_mtx);
1345 * Zero a page of physical memory by temporarily mapping it
1348 moea64_zero_page(mmu_t mmu, vm_page_t m)
1350 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1351 vm_offset_t va, off;
1353 if (!hw_direct_map) {
1354 mtx_lock(&moea64_scratchpage_mtx);
1356 moea64_set_scratchpage_pa(mmu, 0, pa);
1357 va = moea64_scratchpage_va[0];
1359 va = PHYS_TO_DMAP(pa);
1362 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1363 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
1366 mtx_unlock(&moea64_scratchpage_mtx);
1370 moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
1372 struct pvo_entry *pvo;
1373 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1376 return (PHYS_TO_DMAP(pa));
1379 * MOEA64_PTE_REPLACE does some locking, so we can't just grab
1380 * a critical section and access the PCPU data like on i386.
1381 * Instead, pin the thread and grab the PCPU lock to prevent
1382 * a preempting thread from using the same PCPU data.
1386 mtx_assert(PCPU_PTR(aim.qmap_lock), MA_NOTOWNED);
1387 pvo = PCPU_GET(aim.qmap_pvo);
1389 mtx_lock(PCPU_PTR(aim.qmap_lock));
1390 pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
1392 MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
1395 return (PCPU_GET(qmap_addr));
1399 moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
1404 mtx_assert(PCPU_PTR(aim.qmap_lock), MA_OWNED);
1405 KASSERT(PCPU_GET(qmap_addr) == addr,
1406 ("moea64_quick_remove_page: invalid address"));
1407 mtx_unlock(PCPU_PTR(aim.qmap_lock));
1412 * Map the given physical page at the specified virtual address in the
1413 * target pmap with the protection requested. If specified the page
1414 * will be wired down.
1418 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1419 vm_prot_t prot, u_int flags, int8_t psind)
1421 struct pvo_entry *pvo, *oldpvo;
1422 struct pvo_head *pvo_head;
1426 if ((m->oflags & VPO_UNMANAGED) == 0) {
1427 if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
1428 VM_PAGE_OBJECT_BUSY_ASSERT(m);
1430 VM_OBJECT_ASSERT_LOCKED(m->object);
1433 pvo = alloc_pvo_entry(0);
1435 return (KERN_RESOURCE_SHORTAGE);
1436 pvo->pvo_pmap = NULL; /* to be filled in later */
1437 pvo->pvo_pte.prot = prot;
1439 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1440 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo;
1442 if ((flags & PMAP_ENTER_WIRED) != 0)
1443 pvo->pvo_vaddr |= PVO_WIRED;
1445 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
1448 pvo_head = &m->md.mdpg_pvoh;
1449 pvo->pvo_vaddr |= PVO_MANAGED;
1454 if (pvo->pvo_pmap == NULL)
1455 init_pvo_entry(pvo, pmap, va);
1456 if (prot & VM_PROT_WRITE)
1457 if (pmap_bootstrapped &&
1458 (m->oflags & VPO_UNMANAGED) == 0)
1459 vm_page_aflag_set(m, PGA_WRITEABLE);
1461 error = moea64_pvo_enter(mmu, pvo, pvo_head, &oldpvo);
1462 if (error == EEXIST) {
1463 if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
1464 oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
1465 oldpvo->pvo_pte.prot == prot) {
1466 /* Identical mapping already exists */
1469 /* If not in page table, reinsert it */
1470 if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) {
1471 STAT_MOEA64(moea64_pte_overflow--);
1472 MOEA64_PTE_INSERT(mmu, oldpvo);
1475 /* Then just clean up and go home */
1478 free_pvo_entry(pvo);
1481 /* Otherwise, need to kill it first */
1482 KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
1483 "mapping does not match new mapping"));
1484 moea64_pvo_remove_from_pmap(mmu, oldpvo);
1485 moea64_pvo_enter(mmu, pvo, pvo_head, NULL);
1491 /* Free any dead pages */
1492 if (error == EEXIST) {
1493 moea64_pvo_remove_from_page(mmu, oldpvo);
1494 free_pvo_entry(oldpvo);
1499 * Flush the page from the instruction cache if this page is
1500 * mapped executable and cacheable.
1502 if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 &&
1503 (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1504 vm_page_aflag_set(m, PGA_EXECUTABLE);
1505 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1507 return (KERN_SUCCESS);
1511 moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1516 * This is much trickier than on older systems because
1517 * we can't sync the icache on physical addresses directly
1518 * without a direct map. Instead we check a couple of cases
1519 * where the memory is already mapped in and, failing that,
1520 * use the same trick we use for page zeroing to create
1521 * a temporary mapping for this physical address.
1524 if (!pmap_bootstrapped) {
1526 * If PMAP is not bootstrapped, we are likely to be
1529 __syncicache((void *)(uintptr_t)pa, sz);
1530 } else if (pmap == kernel_pmap) {
1531 __syncicache((void *)va, sz);
1532 } else if (hw_direct_map) {
1533 __syncicache((void *)(uintptr_t)PHYS_TO_DMAP(pa), sz);
1535 /* Use the scratch page to set up a temp mapping */
1537 mtx_lock(&moea64_scratchpage_mtx);
1539 moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1540 __syncicache((void *)(moea64_scratchpage_va[1] +
1541 (va & ADDR_POFF)), sz);
1543 mtx_unlock(&moea64_scratchpage_mtx);
1548 * Maps a sequence of resident pages belonging to the same object.
1549 * The sequence begins with the given page m_start. This page is
1550 * mapped at the given virtual address start. Each subsequent page is
1551 * mapped at a virtual address that is offset from start by the same
1552 * amount as the page is offset from m_start within the object. The
1553 * last page in the sequence is the page with the largest offset from
1554 * m_start that can be mapped at a virtual address less than the given
1555 * virtual address end. Not every virtual page between start and end
1556 * is mapped; only those for which a resident page exists with the
1557 * corresponding offset from m_start are mapped.
1560 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1561 vm_page_t m_start, vm_prot_t prot)
1564 vm_pindex_t diff, psize;
1566 VM_OBJECT_ASSERT_LOCKED(m_start->object);
1568 psize = atop(end - start);
1570 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1571 moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
1572 (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
1573 PMAP_ENTER_QUICK_LOCKED, 0);
1574 m = TAILQ_NEXT(m, listq);
1579 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1583 moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1584 PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
1588 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1590 struct pvo_entry *pvo;
1594 pvo = moea64_pvo_find_va(pm, va);
1598 pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1605 * Atomically extract and hold the physical page with the given
1606 * pmap and virtual address pair if that mapping permits the given
1610 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1612 struct pvo_entry *pvo;
1617 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1618 if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
1619 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
1620 if (!vm_page_wire_mapped(m))
1627 static mmu_t installed_mmu;
1630 moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
1631 uint8_t *flags, int wait)
1633 struct pvo_entry *pvo;
1639 * This entire routine is a horrible hack to avoid bothering kmem
1640 * for new KVA addresses. Because this can get called from inside
1641 * kmem allocation routines, calling kmem for a new address here
1642 * can lead to multiply locking non-recursive mutexes.
1645 *flags = UMA_SLAB_PRIV;
1646 needed_lock = !PMAP_LOCKED(kernel_pmap);
1648 m = vm_page_alloc_domain(NULL, 0, domain,
1649 malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1653 va = VM_PAGE_TO_PHYS(m);
1655 pvo = alloc_pvo_entry(1 /* bootstrap */);
1657 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
1658 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;
1661 PMAP_LOCK(kernel_pmap);
1663 init_pvo_entry(pvo, kernel_pmap, va);
1664 pvo->pvo_vaddr |= PVO_WIRED;
1666 moea64_pvo_enter(installed_mmu, pvo, NULL, NULL);
1669 PMAP_UNLOCK(kernel_pmap);
1671 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1672 bzero((void *)va, PAGE_SIZE);
1677 extern int elf32_nxstack;
1680 moea64_init(mmu_t mmu)
1683 CTR0(KTR_PMAP, "moea64_init");
1685 moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1686 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1687 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1689 if (!hw_direct_map) {
1690 installed_mmu = mmu;
1691 uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc);
1694 #ifdef COMPAT_FREEBSD32
1698 moea64_initialized = TRUE;
1702 moea64_is_referenced(mmu_t mmu, vm_page_t m)
1705 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1706 ("moea64_is_referenced: page %p is not managed", m));
1708 return (moea64_query_bit(mmu, m, LPTE_REF));
1712 moea64_is_modified(mmu_t mmu, vm_page_t m)
1715 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1716 ("moea64_is_modified: page %p is not managed", m));
1719 * If the page is not busied then this check is racy.
1721 if (!pmap_page_is_write_mapped(m))
1724 return (moea64_query_bit(mmu, m, LPTE_CHG));
1728 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1730 struct pvo_entry *pvo;
1731 boolean_t rv = TRUE;
1734 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1742 moea64_clear_modify(mmu_t mmu, vm_page_t m)
1745 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1746 ("moea64_clear_modify: page %p is not managed", m));
1747 vm_page_assert_busied(m);
1749 if (!pmap_page_is_write_mapped(m))
1751 moea64_clear_bit(mmu, m, LPTE_CHG);
1755 * Clear the write and modified bits in each of the given page's mappings.
1758 moea64_remove_write(mmu_t mmu, vm_page_t m)
1760 struct pvo_entry *pvo;
1761 int64_t refchg, ret;
1764 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1765 ("moea64_remove_write: page %p is not managed", m));
1766 vm_page_assert_busied(m);
1768 if (!pmap_page_is_write_mapped(m))
1774 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1775 pmap = pvo->pvo_pmap;
1777 if (!(pvo->pvo_vaddr & PVO_DEAD) &&
1778 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1779 pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
1780 ret = MOEA64_PTE_REPLACE(mmu, pvo,
1781 MOEA64_PTE_PROT_UPDATE);
1785 if (pvo->pvo_pmap == kernel_pmap)
1790 if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
1792 vm_page_aflag_clear(m, PGA_WRITEABLE);
1797 * moea64_ts_referenced:
1799 * Return a count of reference bits for a page, clearing those bits.
1800 * It is not necessary for every reference bit to be cleared, but it
1801 * is necessary that 0 only be returned when there are truly no
1802 * reference bits set.
1804 * XXX: The exact number of bits to check and clear is a matter that
1805 * should be tested and standardized at some point in the future for
1806 * optimal aging of shared pages.
1809 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1812 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1813 ("moea64_ts_referenced: page %p is not managed", m));
1814 return (moea64_clear_bit(mmu, m, LPTE_REF));
1818 * Modify the WIMG settings of all mappings for a page.
1821 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1823 struct pvo_entry *pvo;
1828 if ((m->oflags & VPO_UNMANAGED) != 0) {
1829 m->md.mdpg_cache_attrs = ma;
1833 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1836 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1837 pmap = pvo->pvo_pmap;
1839 if (!(pvo->pvo_vaddr & PVO_DEAD)) {
1840 pvo->pvo_pte.pa &= ~LPTE_WIMG;
1841 pvo->pvo_pte.pa |= lo;
1842 refchg = MOEA64_PTE_REPLACE(mmu, pvo,
1843 MOEA64_PTE_INVALIDATE);
1845 refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
1847 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1848 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1850 atomic_readandclear_32(&m->md.mdpg_attrs);
1851 if (refchg & LPTE_CHG)
1853 if (refchg & LPTE_REF)
1854 vm_page_aflag_set(m, PGA_REFERENCED);
1856 if (pvo->pvo_pmap == kernel_pmap)
1861 m->md.mdpg_cache_attrs = ma;
1866 * Map a wired page into kernel virtual address space.
1869 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1872 struct pvo_entry *pvo, *oldpvo;
1875 pvo = alloc_pvo_entry(0);
1878 } while (pvo == NULL);
1879 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1880 pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
1881 pvo->pvo_vaddr |= PVO_WIRED;
1883 PMAP_LOCK(kernel_pmap);
1884 oldpvo = moea64_pvo_find_va(kernel_pmap, va);
1886 moea64_pvo_remove_from_pmap(mmu, oldpvo);
1887 init_pvo_entry(pvo, kernel_pmap, va);
1888 error = moea64_pvo_enter(mmu, pvo, NULL, NULL);
1889 PMAP_UNLOCK(kernel_pmap);
1891 /* Free any dead pages */
1892 if (oldpvo != NULL) {
1893 moea64_pvo_remove_from_page(mmu, oldpvo);
1894 free_pvo_entry(oldpvo);
1898 panic("moea64_kenter: failed to enter va %#zx pa %#jx: %d", va,
1899 (uintmax_t)pa, error);
1903 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1906 moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1910 * Extract the physical page address associated with the given kernel virtual
1914 moea64_kextract(mmu_t mmu, vm_offset_t va)
1916 struct pvo_entry *pvo;
1920 * Shortcut the direct-mapped case when applicable. We never put
1921 * anything but 1:1 (or 62-bit aliased) mappings below
1922 * VM_MIN_KERNEL_ADDRESS.
1924 if (va < VM_MIN_KERNEL_ADDRESS)
1925 return (va & ~DMAP_BASE_ADDRESS);
1927 PMAP_LOCK(kernel_pmap);
1928 pvo = moea64_pvo_find_va(kernel_pmap, va);
1929 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1931 pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1932 PMAP_UNLOCK(kernel_pmap);
1937 * Remove a wired page from kernel virtual address space.
1940 moea64_kremove(mmu_t mmu, vm_offset_t va)
1942 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1946 * Provide a kernel pointer corresponding to a given userland pointer.
1947 * The returned pointer is valid until the next time this function is
1948 * called in this thread. This is used internally in copyin/copyout.
1951 moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
1952 void **kaddr, size_t ulen, size_t *klen)
1955 #ifdef __powerpc64__
1960 *kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
1961 l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
1969 #ifdef __powerpc64__
1970 /* Try lockless look-up first */
1971 slb = user_va_to_slb_entry(pm, (vm_offset_t)uaddr);
1974 /* If it isn't there, we need to pre-fault the VSID */
1976 slbv = va_to_vsid(pm, (vm_offset_t)uaddr) << SLBV_VSID_SHIFT;
1982 /* Mark segment no-execute */
1985 slbv = va_to_vsid(pm, (vm_offset_t)uaddr);
1987 /* Mark segment no-execute */
1991 /* If we have already set this VSID, we can just return */
1992 if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
1995 __asm __volatile("isync");
1996 curthread->td_pcb->pcb_cpu.aim.usr_segm =
1997 (uintptr_t)uaddr >> ADDR_SR_SHFT;
1998 curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
1999 #ifdef __powerpc64__
2000 __asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
2001 "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
2003 __asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv));
2010 * Figure out where a given kernel pointer (usually in a fault) points
2011 * to from the VM's perspective, potentially remapping into userland's
2015 moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
2016 vm_offset_t *decoded_addr)
2018 vm_offset_t user_sr;
2020 if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
2021 user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
2022 addr &= ADDR_PIDX | ADDR_POFF;
2023 addr |= user_sr << ADDR_SR_SHFT;
2024 *decoded_addr = addr;
2027 *decoded_addr = addr;
2035 * Map a range of physical addresses into kernel virtual address space.
2037 * The value passed in *virt is a suggested virtual address for the mapping.
2038 * Architectures which can support a direct-mapped physical to virtual region
2039 * can return the appropriate address within that region, leaving '*virt'
2040 * unchanged. Other architectures should map the pages starting at '*virt' and
2041 * update '*virt' with the first usable address after the mapped region.
2044 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2045 vm_paddr_t pa_end, int prot)
2047 vm_offset_t sva, va;
2049 if (hw_direct_map) {
2051 * Check if every page in the region is covered by the direct
2052 * map. The direct map covers all of physical memory. Use
2053 * moea64_calc_wimg() as a shortcut: it returns plain LPTE_M only
2054 * for pages in physical memory, and hence covered by the direct map.
2056 for (va = pa_start; va < pa_end; va += PAGE_SIZE)
2057 if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
2060 return (PHYS_TO_DMAP(pa_start));
2064 /* XXX respect prot argument */
2065 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
2066 moea64_kenter(mmu, va, pa_start);
2073 * Returns true if the pmap's pv is one of the first
2074 * 16 pvs linked to from this page. This count may
2075 * be changed upwards or downwards in the future; it
2076 * is only necessary that true be returned for a small
2077 * subset of pmaps for proper page aging.
2080 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2083 struct pvo_entry *pvo;
2086 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2087 ("moea64_page_exists_quick: page %p is not managed", m));
2091 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2092 if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
2104 moea64_page_init(mmu_t mmu __unused, vm_page_t m)
2107 m->md.mdpg_attrs = 0;
2108 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
2109 LIST_INIT(&m->md.mdpg_pvoh);
2113 * Return the number of managed mappings to the given physical page
2117 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
2119 struct pvo_entry *pvo;
2123 if ((m->oflags & VPO_UNMANAGED) != 0)
2126 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
2127 if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
2133 static uintptr_t moea64_vsidcontext;
2136 moea64_get_unique_vsid(void) {
2143 __asm __volatile("mftb %0" : "=r"(entropy));
2145 mtx_lock(&moea64_slb_mutex);
2146 for (i = 0; i < NVSIDS; i += VSID_NBPW) {
2150 * Create a new value by multiplying by a prime and adding in
2151 * entropy from the timebase register. This is to make the
2152 * VSID more random so that the PT hash function collides
2153 * less often. (Note that the prime causes gcc to do shifts
2154 * instead of a multiply.)
2156 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
2157 hash = moea64_vsidcontext & (NVSIDS - 1);
2158 if (hash == 0) /* 0 is special, avoid it */
2161 mask = 1 << (hash & (VSID_NBPW - 1));
2162 hash = (moea64_vsidcontext & VSID_HASHMASK);
2163 if (moea64_vsid_bitmap[n] & mask) { /* collision? */
2164 /* anything free in this bucket? */
2165 if (moea64_vsid_bitmap[n] == 0xffffffff) {
2166 entropy = (moea64_vsidcontext >> 20);
2169 i = ffs(~moea64_vsid_bitmap[n]) - 1;
2171 hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
2174 if (hash == VSID_VRMA) /* also special, avoid this too */
2176 KASSERT(!(moea64_vsid_bitmap[n] & mask),
2177 ("Allocating in-use VSID %#zx\n", hash));
2178 moea64_vsid_bitmap[n] |= mask;
2179 mtx_unlock(&moea64_slb_mutex);
2183 mtx_unlock(&moea64_slb_mutex);
2184 panic("%s: out of segments",__func__);
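/*
 * Sketch of the loop above: each pass folds entropy into
 * moea64_vsidcontext via the prime multiply, hashes the result into the
 * VSID bitmap (one bit per VSID, VSID_NBPW bits per word), and either
 * claims a free bit in the selected word or, when that word is already
 * full (0xffffffff), derives new entropy and tries again.
 */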
2187 #ifdef __powerpc64__
2189 moea64_pinit(mmu_t mmu, pmap_t pmap)
2192 RB_INIT(&pmap->pmap_pvo);
2194 pmap->pm_slb_tree_root = slb_alloc_tree();
2195 pmap->pm_slb = slb_alloc_user_cache();
2196 pmap->pm_slb_len = 0;
2200 moea64_pinit(mmu_t mmu, pmap_t pmap)
2205 RB_INIT(&pmap->pmap_pvo);
2207 if (pmap_bootstrapped)
2208 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
2211 pmap->pmap_phys = pmap;
2214 * Allocate some segment registers for this pmap.
2216 hash = moea64_get_unique_vsid();
2218 for (i = 0; i < 16; i++)
2219 pmap->pm_sr[i] = VSID_MAKE(i, hash);
2221 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
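/*
 * Worked example (hash value purely illustrative): with hash == 0x12345,
 * VSID_MAKE() above assigns pm_sr[0..15] the consecutive VSIDs 0x123450
 * through 0x12345f, one per segment register.
 */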
2226 * Initialize the pmap associated with process 0.
2229 moea64_pinit0(mmu_t mmu, pmap_t pm)
2233 moea64_pinit(mmu, pm);
2234 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
2238 * Set the physical protection on the specified range of this map as requested.
2241 moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
2247 PMAP_LOCK_ASSERT(pm, MA_OWNED);
2250 * Change the protection of the page.
2252 oldprot = pvo->pvo_pte.prot;
2253 pvo->pvo_pte.prot = prot;
2254 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2257 * If the PVO is in the page table, update mapping
2259 refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
2261 refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
2263 if (pm != kernel_pmap && pg != NULL &&
2264 (pg->a.flags & PGA_EXECUTABLE) == 0 &&
2265 (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
2266 if ((pg->oflags & VPO_UNMANAGED) == 0)
2267 vm_page_aflag_set(pg, PGA_EXECUTABLE);
2268 moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
2269 pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
2273 * Update vm about the REF/CHG bits if the page is managed and we have
2274 * removed write access.
2276 if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
2277 (oldprot & VM_PROT_WRITE)) {
2278 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2279 if (refchg & LPTE_CHG)
2281 if (refchg & LPTE_REF)
2282 vm_page_aflag_set(pg, PGA_REFERENCED);
2287 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2290 struct pvo_entry *pvo, *tpvo, key;
2292 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
2295 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
2296 ("moea64_protect: non current pmap"));
2298 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2299 moea64_remove(mmu, pm, sva, eva);
2304 key.pvo_vaddr = sva;
2305 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2306 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2307 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2308 moea64_pvo_protect(mmu, pm, pvo, prot);
2314 * Map a list of wired pages into kernel virtual address space. This is
2315 * intended for temporary mappings which do not need page modification or
2316 * references recorded. Existing mappings in the region are overwritten.
2319 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
2321 while (count-- > 0) {
2322 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2329 * Remove page mappings from kernel virtual address space. Intended for
2330 * temporary mappings entered by moea64_qenter.
2333 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
2335 while (count-- > 0) {
2336 moea64_kremove(mmu, va);
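/*
 * Hypothetical use of the pair above, from a caller that wants a
 * temporary kernel window onto a set of vm_pages (names here are
 * illustrative, not from this file):
 *
 *	vm_offset_t win = kva_alloc(npages * PAGE_SIZE);
 *	moea64_qenter(mmu, win, pages, npages);
 *	... access the pages through win ...
 *	moea64_qremove(mmu, win, npages);
 *	kva_free(win, npages * PAGE_SIZE);
 *
 * No modified/referenced state is recorded for such mappings.
 */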
2342 moea64_release_vsid(uint64_t vsid)
2346 mtx_lock(&moea64_slb_mutex);
2347 idx = vsid & (NVSIDS-1);
2348 mask = 1 << (idx % VSID_NBPW);
2350 KASSERT(moea64_vsid_bitmap[idx] & mask,
2351 ("Freeing unallocated VSID %#jx", vsid));
2352 moea64_vsid_bitmap[idx] &= ~mask;
2353 mtx_unlock(&moea64_slb_mutex);
2358 moea64_release(mmu_t mmu, pmap_t pmap)
2362 * Free segment registers' VSIDs
2364 #ifdef __powerpc64__
2365 slb_free_tree(pmap);
2366 slb_free_user_cache(pmap->pm_slb);
2368 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2370 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2375 * Remove all pages mapped by the specified pmap
2378 moea64_remove_pages(mmu_t mmu, pmap_t pm)
2380 struct pvo_entry *pvo, *tpvo;
2381 struct pvo_dlist tofree;
2383 SLIST_INIT(&tofree);
2386 RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2387 if (pvo->pvo_vaddr & PVO_WIRED)
2391 * For locking reasons, remove this from the page table and
2392 * pmap, but save delinking from the vm_page for a second pass.
2395 moea64_pvo_remove_from_pmap(mmu, pvo);
2396 SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink);
2400 while (!SLIST_EMPTY(&tofree)) {
2401 pvo = SLIST_FIRST(&tofree);
2402 SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
2403 moea64_pvo_remove_from_page(mmu, pvo);
2404 free_pvo_entry(pvo);
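/*
 * Note on the two-pass shape above (and in moea64_remove() below): PVOs
 * are first unhooked from the page table and the pmap and collected on a
 * local list, and only afterwards delinked from their vm_pages and
 * freed.  This appears to be done so the pmap lock does not have to be
 * held while the per-page PV locks are taken.
 */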
2409 * Remove the given range of addresses from the specified map.
2412 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2414 struct pvo_entry *pvo, *tpvo, key;
2415 struct pvo_dlist tofree;
2418 * Perform an unsynchronized read. This is, however, safe.
2420 if (pm->pm_stats.resident_count == 0)
2423 key.pvo_vaddr = sva;
2425 SLIST_INIT(&tofree);
2428 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2429 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2430 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2433 * For locking reasons, remove this from the page table and
2434 * pmap, but save delinking from the vm_page for a second pass.
2437 moea64_pvo_remove_from_pmap(mmu, pvo);
2438 SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink);
2442 while (!SLIST_EMPTY(&tofree)) {
2443 pvo = SLIST_FIRST(&tofree);
2444 SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
2445 moea64_pvo_remove_from_page(mmu, pvo);
2446 free_pvo_entry(pvo);
2451 * Remove the physical page from all pmaps in which it resides. The PVO
2452 * removal helpers reflect changes in the PTEs back to the vm_page.
2455 moea64_remove_all(mmu_t mmu, vm_page_t m)
2457 struct pvo_entry *pvo, *next_pvo;
2458 struct pvo_head freequeue;
2462 LIST_INIT(&freequeue);
2465 LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2466 pmap = pvo->pvo_pmap;
2468 wasdead = (pvo->pvo_vaddr & PVO_DEAD);
2470 moea64_pvo_remove_from_pmap(mmu, pvo);
2471 moea64_pvo_remove_from_page_locked(mmu, pvo, m);
2473 LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
2477 KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
2478 KASSERT((m->a.flags & PGA_WRITEABLE) == 0, ("Page still writable"));
2481 /* Clean up UMA allocations */
2482 LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
2483 free_pvo_entry(pvo);
2487 * Allocate a physical page of memory directly from the phys_avail map.
2488 * Can only be called from moea64_bootstrap before avail start and end are calculated.
2492 moea64_bootstrap_alloc(vm_size_t size, vm_size_t align)
2497 size = round_page(size);
2498 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2500 s = roundup2(phys_avail[i], align);
2505 if (s < phys_avail[i] || e > phys_avail[i + 1])
2508 if (s + size > platform_real_maxaddr())
2511 if (s == phys_avail[i]) {
2512 phys_avail[i] += size;
2513 } else if (e == phys_avail[i + 1]) {
2514 phys_avail[i + 1] -= size;
2516 for (j = phys_avail_count * 2; j > i; j -= 2) {
2517 phys_avail[j] = phys_avail[j - 2];
2518 phys_avail[j + 1] = phys_avail[j - 1];
2521 phys_avail[i + 3] = phys_avail[i + 1];
2522 phys_avail[i + 1] = s;
2523 phys_avail[i + 2] = e;
2529 panic("moea64_bootstrap_alloc: could not allocate memory");
2533 moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head,
2534 struct pvo_entry **oldpvop)
2536 struct pvo_entry *old_pvo;
2539 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2541 STAT_MOEA64(moea64_pvo_enter_calls++);
2546 old_pvo = RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2548 if (old_pvo != NULL) {
2549 if (oldpvop != NULL)
2554 if (pvo_head != NULL) {
2555 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2558 if (pvo->pvo_vaddr & PVO_WIRED)
2559 pvo->pvo_pmap->pm_stats.wired_count++;
2560 pvo->pvo_pmap->pm_stats.resident_count++;
2563 * Insert it into the hardware page table
2565 err = MOEA64_PTE_INSERT(mmu, pvo);
2567 panic("moea64_pvo_enter: overflow");
2570 STAT_MOEA64(moea64_pvo_entries++);
2572 if (pvo->pvo_pmap == kernel_pmap)
2575 #ifdef __powerpc64__
2577 * Make sure all our bootstrap mappings are in the SLB as soon
2578 * as virtual memory is switched on.
2580 if (!pmap_bootstrapped)
2581 moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
2582 pvo->pvo_vaddr & PVO_LARGE);
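/*
 * moea64_pvo_enter() expects the caller to hold the pmap lock (asserted
 * above).  The PVO is linked into the pmap's RB tree and, when a
 * pvo_head is supplied, onto the page's PVO list before the translation
 * is handed to MOEA64_PTE_INSERT(); on 64-bit, bootstrap-time entries
 * are additionally prefaulted into the SLB.
 */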
2589 moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
2594 KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
2595 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2596 KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
2599 * If there is an active pte entry, we need to deactivate it
2601 refchg = MOEA64_PTE_UNSET(mmu, pvo);
2604 * If it was evicted from the page table, be pessimistic and dirty the page if it was writable.
2607 if (pvo->pvo_pte.prot & VM_PROT_WRITE)
2614 * Update our statistics.
2616 pvo->pvo_pmap->pm_stats.resident_count--;
2617 if (pvo->pvo_vaddr & PVO_WIRED)
2618 pvo->pvo_pmap->pm_stats.wired_count--;
2621 * Remove this PVO from the pmap list.
2623 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2626 * Mark this for the next sweep
2628 pvo->pvo_vaddr |= PVO_DEAD;
2630 /* Send RC bits to VM */
2631 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
2632 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2633 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2635 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2636 if (refchg & LPTE_CHG)
2638 if (refchg & LPTE_REF)
2639 vm_page_aflag_set(pg, PGA_REFERENCED);
2645 moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo,
2649 KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
2651 /* Use NULL pmaps as a sentinel for races in page deletion */
2652 if (pvo->pvo_pmap == NULL)
2654 pvo->pvo_pmap = NULL;
2657 * Update vm about page writeability/executability if managed
2659 PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
2660 if (pvo->pvo_vaddr & PVO_MANAGED) {
2662 LIST_REMOVE(pvo, pvo_vlink);
2663 if (LIST_EMPTY(vm_page_to_pvoh(m)))
2664 vm_page_aflag_clear(m,
2665 PGA_WRITEABLE | PGA_EXECUTABLE);
2669 STAT_MOEA64(moea64_pvo_entries--);
2670 STAT_MOEA64(moea64_pvo_remove_calls++);
2674 moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
2676 vm_page_t pg = NULL;
2678 if (pvo->pvo_vaddr & PVO_MANAGED)
2679 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2681 PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2682 moea64_pvo_remove_from_page_locked(mmu, pvo, pg);
2683 PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2686 static struct pvo_entry *
2687 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2689 struct pvo_entry key;
2691 PMAP_LOCK_ASSERT(pm, MA_OWNED);
2693 key.pvo_vaddr = va & ~ADDR_POFF;
2694 return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
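/*
 * Per-pmap lookups go through the RB tree keyed on the page-aligned
 * virtual address, so moea64_pvo_find_va() is logarithmic in the number
 * of mappings and requires the pmap lock, as asserted above.
 */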
2698 moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
2700 struct pvo_entry *pvo;
2705 * See if this bit is stored in the page already.
2707 if (m->md.mdpg_attrs & ptebit)
2711 * Examine each PTE. Sync so that any pending REF/CHG bits are
2712 * flushed to the PTEs.
2717 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2721 * See if this pvo has a valid PTE. If so, fetch the
2722 * REF/CHG bits from the valid PTE. If the appropriate
2723 * ptebit is set, return success.
2725 PMAP_LOCK(pvo->pvo_pmap);
2726 if (!(pvo->pvo_vaddr & PVO_DEAD))
2727 ret = MOEA64_PTE_SYNCH(mmu, pvo);
2728 PMAP_UNLOCK(pvo->pvo_pmap);
2731 atomic_set_32(&m->md.mdpg_attrs,
2732 ret & (LPTE_CHG | LPTE_REF));
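/*
 * moea64_query_bit() checks the attribute bits cached on the vm_page
 * first; only when the requested bit is not already recorded does it
 * walk the page's PVO list, sync REF/CHG out of the hardware PTEs, and
 * cache whatever it finds for later queries.  moea64_clear_bit() below
 * does the inverse, clearing the bit in every live PTE as well as in
 * the cached attributes.
 */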
2745 moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2748 struct pvo_entry *pvo;
2752 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2753 * we can reset the right ones).
2758 * For each pvo entry, clear the pte's ptebit.
2762 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2765 PMAP_LOCK(pvo->pvo_pmap);
2766 if (!(pvo->pvo_vaddr & PVO_DEAD))
2767 ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit);
2768 PMAP_UNLOCK(pvo->pvo_pmap);
2770 if (ret > 0 && (ret & ptebit))
2773 atomic_clear_32(&m->md.mdpg_attrs, ptebit);
2780 moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2782 struct pvo_entry *pvo, key;
2786 if (hw_direct_map && mem_valid(pa, size) == 0)
2789 PMAP_LOCK(kernel_pmap);
2790 ppa = pa & ~ADDR_POFF;
2791 key.pvo_vaddr = DMAP_BASE_ADDRESS + ppa;
2792 for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
2793 ppa < pa + size; ppa += PAGE_SIZE,
2794 pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
2795 if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) {
2800 PMAP_UNLOCK(kernel_pmap);
2806 * Map a set of physical memory pages into the kernel virtual
2807 * address space. Return a pointer to where it is mapped. This
2808 * routine is intended to be used for mapping device memory, NOT real memory.
2812 moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
2814 vm_offset_t va, tmpva, ppa, offset;
2816 ppa = trunc_page(pa);
2817 offset = pa & PAGE_MASK;
2818 size = roundup2(offset + size, PAGE_SIZE);
2820 va = kva_alloc(size);
2823 panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2825 for (tmpva = va; size > 0;) {
2826 moea64_kenter_attr(mmu, tmpva, ppa, ma);
2832 return ((void *)(va + offset));
2836 moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2839 return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
2843 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2845 vm_offset_t base, offset;
2847 base = trunc_page(va);
2848 offset = va & PAGE_MASK;
2849 size = roundup2(offset + size, PAGE_SIZE);
2851 kva_free(base, size);
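/*
 * A minimal, hypothetical caller of the mapdev/unmapdev pair above
 * (dev_pa and dev_len stand in for a real device's register window):
 *
 *	void *regs = moea64_mapdev(mmu, dev_pa, dev_len);
 *	... access device registers through regs ...
 *	moea64_unmapdev(mmu, (vm_offset_t)regs, dev_len);
 *
 * The returned pointer already accounts for any sub-page offset of
 * dev_pa.
 */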
2855 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2857 struct pvo_entry *pvo;
2862 if (__predict_false(pm == NULL))
2863 pm = &curthread->td_proc->p_vmspace->vm_pmap;
2867 lim = round_page(va+1);
2868 len = MIN(lim - va, sz);
2869 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
2870 if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
2871 pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
2872 moea64_syncicache(mmu, pm, va, pa, len);
2881 moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
2884 *va = (void *)(uintptr_t)pa;
2887 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
2890 moea64_scan_init(mmu_t mmu)
2892 struct pvo_entry *pvo;
2897 /* Initialize phys. segments for dumpsys(). */
2898 memset(&dump_map, 0, sizeof(dump_map));
2899 mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
2900 for (i = 0; i < pregions_sz; i++) {
2901 dump_map[i].pa_start = pregions[i].mr_start;
2902 dump_map[i].pa_size = pregions[i].mr_size;
2907 /* Virtual segments for minidumps: */
2908 memset(&dump_map, 0, sizeof(dump_map));
2910 /* 1st: kernel .data and .bss. */
2911 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
2912 dump_map[0].pa_size = round_page((uintptr_t)_end) -
2913 dump_map[0].pa_start;
2915 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2916 dump_map[1].pa_start = (vm_paddr_t)(uintptr_t)msgbufp->msg_ptr;
2917 dump_map[1].pa_size = round_page(msgbufp->msg_size);
2919 /* 3rd: kernel VM. */
2920 va = dump_map[1].pa_start + dump_map[1].pa_size;
2921 /* Find start of next chunk (from va). */
2922 while (va < virtual_end) {
2923 /* Don't dump the buffer cache. */
2924 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
2925 va = kmi.buffer_eva;
2928 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2929 if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
2933 if (va < virtual_end) {
2934 dump_map[2].pa_start = va;
2936 /* Find last page in chunk. */
2937 while (va < virtual_end) {
2938 /* Don't run into the buffer cache. */
2939 if (va == kmi.buffer_sva)
2941 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2942 if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD))
2946 dump_map[2].pa_size = va - dump_map[2].pa_start;
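/*
 * For minidumps, dump_map[] above ends up with three entries: the kernel
 * .data/.bss, the message buffer and bootstrap tables, and the first
 * chunk of mapped kernel VM beyond them, with the buffer cache skipped.
 */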
2950 #ifdef __powerpc64__
2953 moea64_scan_pmap(mmu_t mmu)
2955 struct pvo_entry *pvo;
2956 vm_paddr_t pa, pa_end;
2957 vm_offset_t va, pgva, kstart, kend, kstart_lp, kend_lp;
2960 lpsize = moea64_large_page_size;
2961 kstart = trunc_page((vm_offset_t)_etext);
2962 kend = round_page((vm_offset_t)_end);
2963 kstart_lp = kstart & ~moea64_large_page_mask;
2964 kend_lp = (kend + moea64_large_page_mask) & ~moea64_large_page_mask;
2966 CTR4(KTR_PMAP, "moea64_scan_pmap: kstart=0x%016lx, kend=0x%016lx, "
2967 "kstart_lp=0x%016lx, kend_lp=0x%016lx",
2968 kstart, kend, kstart_lp, kend_lp);
2970 PMAP_LOCK(kernel_pmap);
2971 RB_FOREACH(pvo, pvo_tree, &kernel_pmap->pmap_pvo) {
2972 va = pvo->pvo_vaddr;
2977 /* Skip DMAP (except kernel area) */
2978 if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS) {
2979 if (va & PVO_LARGE) {
2980 pgva = va & ~moea64_large_page_mask;
2981 if (pgva < kstart_lp || pgva >= kend_lp)
2984 pgva = trunc_page(va);
2985 if (pgva < kstart || pgva >= kend)
2990 pa = pvo->pvo_pte.pa & LPTE_RPGN;
2992 if (va & PVO_LARGE) {
2993 pa_end = pa + lpsize;
2994 for (; pa < pa_end; pa += PAGE_SIZE) {
2995 if (is_dumpable(pa))
2999 if (is_dumpable(pa))
3003 PMAP_UNLOCK(kernel_pmap);
3005 return (sizeof(struct lpte) * moea64_pteg_count * 8);
3008 static struct dump_context dump_ctx;
3011 moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
3014 dump_ctx.ptex_end = moea64_pteg_count * 8;
3015 dump_ctx.blksz = blkpgs * PAGE_SIZE;
3022 moea64_scan_pmap(mmu_t mmu)
3028 moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
3035 #ifdef __powerpc64__
3037 moea64_map_range(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_size_t npages)
3040 for (; npages > 0; --npages) {
3041 if (moea64_large_page_size != 0 &&
3042 (pa & moea64_large_page_mask) == 0 &&
3043 (va & moea64_large_page_mask) == 0 &&
3044 npages >= (moea64_large_page_size >> PAGE_SHIFT)) {
3045 PMAP_LOCK(kernel_pmap);
3046 moea64_kenter_large(mmu, va, pa, 0, 0);
3047 PMAP_UNLOCK(kernel_pmap);
3048 pa += moea64_large_page_size;
3049 va += moea64_large_page_size;
3050 npages -= (moea64_large_page_size >> PAGE_SHIFT) - 1;
3052 moea64_kenter(mmu, va, pa);
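/*
 * moea64_map_range() above prefers large pages: whenever both the
 * physical and virtual addresses are large-page aligned and enough pages
 * remain, one large mapping is entered; otherwise it falls back to
 * ordinary moea64_kenter() page mappings.
 */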
3060 moea64_page_array_startup(mmu_t mmu, long pages)
3062 long dom_pages[MAXMEMDOM];
3064 vm_offset_t va, vm_page_base;
3065 vm_size_t needed, size;
3070 vm_page_base = 0xd000000000000000ULL;
3072 /* Short-circuit single-domain systems. */
3073 if (vm_ndomains == 1) {
3074 size = round_page(pages * sizeof(struct vm_page));
3075 pa = vm_phys_early_alloc(0, size);
3076 vm_page_base = moea64_map(mmu, &vm_page_base,
3077 pa, pa + size, VM_PROT_READ | VM_PROT_WRITE);
3078 vm_page_array_size = pages;
3079 vm_page_array = (vm_page_t)vm_page_base;
3084 for (i = 0; i < MAXMEMDOM; i++)
3087 /* Now get the number of pages required per domain. */
3088 for (i = 0; i < vm_phys_nsegs; i++) {
3089 domain = vm_phys_segs[i].domain;
3090 KASSERT(domain < MAXMEMDOM,
3091 ("Invalid vm_phys_segs NUMA domain %d!\n", domain));
3092 /* Get size of vm_page_array needed for this segment. */
3093 size = btoc(vm_phys_segs[i].end - vm_phys_segs[i].start);
3094 dom_pages[domain] += size;
3097 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
3098 domain = _vm_phys_domain(phys_avail[i]);
3099 KASSERT(domain < MAXMEMDOM,
3100 ("Invalid phys_avail NUMA domain %d!\n", domain));
3101 size = btoc(phys_avail[i + 1] - phys_avail[i]);
3102 dom_pages[domain] += size;
3106 * Map in chunks that can get us all 16MB pages. There will be some
3107 * overlap between domains, but that's acceptable for now.
3109 vm_page_array_size = 0;
3111 for (i = 0; i < MAXMEMDOM && vm_page_array_size < pages; i++) {
3112 if (dom_pages[i] == 0)
3114 size = ulmin(pages - vm_page_array_size, dom_pages[i]);
3115 size = round_page(size * sizeof(struct vm_page));
3117 size = roundup2(size, moea64_large_page_size);
3118 pa = vm_phys_early_alloc(i, size);
3119 vm_page_array_size += size / sizeof(struct vm_page);
3120 moea64_map_range(mmu, va, pa, size >> PAGE_SHIFT);
3121 /* Scoot up domain 0, to reduce the domain page overlap. */
3123 vm_page_base += size - needed;
3126 vm_page_array = (vm_page_t)vm_page_base;
3127 vm_page_array_size = pages;
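/*
 * In the NUMA case above, the vm_page array is sized per domain from
 * vm_phys_segs[] and phys_avail[], each domain's share is taken with
 * vm_phys_early_alloc() and mapped via moea64_map_range() (rounded up to
 * large pages, so domains may overlap slightly), and vm_page_array is
 * finally pointed at the chosen kernel VA in vm_page_base.
 */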