2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2008-2015 Nathan Whitehorn
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 * Manages physical address maps.
35 * Since the information managed by this module is also stored by the
36 * logical address mapping module, this module may throw away valid virtual
37 * to physical mappings at almost any time. However, invalidations of
38 * mappings must be done as requested.
40 * In order to cope with hardware architectures which make virtual to
41 * physical map invalidates expensive, this module may delay invalidate
42 * reduced protection operations until such time as they are actually
43 * necessary. This module is given full information as to which processors
44 * are currently using which maps, and to when physical maps must be made
48 #include "opt_compat.h"
49 #include "opt_kstack_pages.h"
51 #include <sys/param.h>
52 #include <sys/kernel.h>
54 #include <sys/queue.h>
55 #include <sys/cpuset.h>
56 #include <sys/kerneldump.h>
59 #include <sys/msgbuf.h>
60 #include <sys/malloc.h>
61 #include <sys/mutex.h>
63 #include <sys/rwlock.h>
64 #include <sys/sched.h>
65 #include <sys/sysctl.h>
66 #include <sys/systm.h>
67 #include <sys/vmmeter.h>
72 #include <dev/ofw/openfirm.h>
75 #include <vm/vm_param.h>
76 #include <vm/vm_kern.h>
77 #include <vm/vm_page.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_extern.h>
81 #include <vm/vm_pageout.h>
84 #include <machine/_inttypes.h>
85 #include <machine/cpu.h>
86 #include <machine/platform.h>
87 #include <machine/frame.h>
88 #include <machine/md_var.h>
89 #include <machine/psl.h>
90 #include <machine/bat.h>
91 #include <machine/hid.h>
92 #include <machine/pte.h>
93 #include <machine/sr.h>
94 #include <machine/trap.h>
95 #include <machine/mmuvar.h>
97 #include "mmu_oea64.h"
99 #include "moea64_if.h"
101 void moea64_release_vsid(uint64_t vsid);
102 uintptr_t moea64_get_unique_vsid(void);
104 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR)
105 #define ENABLE_TRANS(msr) mtmsr(msr)
107 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
108 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
109 #define VSID_HASH_MASK 0x0000007fffffffffULL
114 * There are two locks of interest: the page locks and the pmap locks, which
115 * protect their individual PVO lists and are locked in that order. The contents
116 * of all PVO entries are protected by the locks of their respective pmaps.
117 * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
122 #define PV_LOCK_COUNT PA_LOCK_COUNT*3
123 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
125 #define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[pa_index(pa) % PV_LOCK_COUNT]))
126 #define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa))
127 #define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa))
128 #define PV_LOCKASSERT(pa) mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
129 #define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m))
130 #define PV_PAGE_UNLOCK(m) PV_UNLOCK(VM_PAGE_TO_PHYS(m))
131 #define PV_PAGE_LOCKASSERT(m) PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
140 extern unsigned char _etext[];
141 extern unsigned char _end[];
144 * Map of physical memory regions.
146 static struct mem_region *regions;
147 static struct mem_region *pregions;
148 static u_int phys_avail_count;
149 static int regions_sz, pregions_sz;
151 extern void bs_remap_earlyboot(void);
154 * Lock for the SLB tables.
156 struct mtx moea64_slb_mutex;
161 u_int moea64_pteg_count;
162 u_int moea64_pteg_mask;
168 uma_zone_t moea64_pvo_zone; /* zone for pvo entries */
170 static struct pvo_entry *moea64_bpvo_pool;
171 static int moea64_bpvo_pool_index = 0;
172 static int moea64_bpvo_pool_size = 327680;
173 TUNABLE_INT("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size);
174 SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
175 &moea64_bpvo_pool_index, 0, "");
177 #define VSID_NBPW (sizeof(u_int32_t) * 8)
179 #define NVSIDS (NPMAPS * 16)
180 #define VSID_HASHMASK 0xffffffffUL
182 #define NVSIDS NPMAPS
183 #define VSID_HASHMASK 0xfffffUL
185 static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
187 static boolean_t moea64_initialized = FALSE;
192 u_int moea64_pte_valid = 0;
193 u_int moea64_pte_overflow = 0;
194 u_int moea64_pvo_entries = 0;
195 u_int moea64_pvo_enter_calls = 0;
196 u_int moea64_pvo_remove_calls = 0;
197 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
198 &moea64_pte_valid, 0, "");
199 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
200 &moea64_pte_overflow, 0, "");
201 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
202 &moea64_pvo_entries, 0, "");
203 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
204 &moea64_pvo_enter_calls, 0, "");
205 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
206 &moea64_pvo_remove_calls, 0, "");
208 vm_offset_t moea64_scratchpage_va[2];
209 struct pvo_entry *moea64_scratchpage_pvo[2];
210 struct mtx moea64_scratchpage_mtx;
212 uint64_t moea64_large_page_mask = 0;
213 uint64_t moea64_large_page_size = 0;
214 int moea64_large_page_shift = 0;
219 static int moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
220 struct pvo_head *pvo_head);
221 static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
222 static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
223 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
228 static boolean_t moea64_query_bit(mmu_t, vm_page_t, uint64_t);
229 static u_int moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
230 static void moea64_kremove(mmu_t, vm_offset_t);
231 static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
232 vm_paddr_t pa, vm_size_t sz);
233 static void moea64_pmap_init_qpages(void);
236 * Kernel MMU interface
238 void moea64_clear_modify(mmu_t, vm_page_t);
239 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
240 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
241 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
242 int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
243 u_int flags, int8_t psind);
244 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
246 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
247 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
248 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
249 void moea64_init(mmu_t);
250 boolean_t moea64_is_modified(mmu_t, vm_page_t);
251 boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
252 boolean_t moea64_is_referenced(mmu_t, vm_page_t);
253 int moea64_ts_referenced(mmu_t, vm_page_t);
254 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
255 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
256 void moea64_page_init(mmu_t, vm_page_t);
257 int moea64_page_wired_mappings(mmu_t, vm_page_t);
258 void moea64_pinit(mmu_t, pmap_t);
259 void moea64_pinit0(mmu_t, pmap_t);
260 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
261 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
262 void moea64_qremove(mmu_t, vm_offset_t, int);
263 void moea64_release(mmu_t, pmap_t);
264 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
265 void moea64_remove_pages(mmu_t, pmap_t);
266 void moea64_remove_all(mmu_t, vm_page_t);
267 void moea64_remove_write(mmu_t, vm_page_t);
268 void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
269 void moea64_zero_page(mmu_t, vm_page_t);
270 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
271 void moea64_activate(mmu_t, struct thread *);
272 void moea64_deactivate(mmu_t, struct thread *);
273 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
274 void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
275 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
276 vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
277 void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
278 void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
279 void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
280 boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
281 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
282 void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
284 void moea64_scan_init(mmu_t mmu);
285 vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
286 void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
288 static mmu_method_t moea64_methods[] = {
289 MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
290 MMUMETHOD(mmu_copy_page, moea64_copy_page),
291 MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
292 MMUMETHOD(mmu_enter, moea64_enter),
293 MMUMETHOD(mmu_enter_object, moea64_enter_object),
294 MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
295 MMUMETHOD(mmu_extract, moea64_extract),
296 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
297 MMUMETHOD(mmu_init, moea64_init),
298 MMUMETHOD(mmu_is_modified, moea64_is_modified),
299 MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable),
300 MMUMETHOD(mmu_is_referenced, moea64_is_referenced),
301 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
302 MMUMETHOD(mmu_map, moea64_map),
303 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
304 MMUMETHOD(mmu_page_init, moea64_page_init),
305 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
306 MMUMETHOD(mmu_pinit, moea64_pinit),
307 MMUMETHOD(mmu_pinit0, moea64_pinit0),
308 MMUMETHOD(mmu_protect, moea64_protect),
309 MMUMETHOD(mmu_qenter, moea64_qenter),
310 MMUMETHOD(mmu_qremove, moea64_qremove),
311 MMUMETHOD(mmu_release, moea64_release),
312 MMUMETHOD(mmu_remove, moea64_remove),
313 MMUMETHOD(mmu_remove_pages, moea64_remove_pages),
314 MMUMETHOD(mmu_remove_all, moea64_remove_all),
315 MMUMETHOD(mmu_remove_write, moea64_remove_write),
316 MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
317 MMUMETHOD(mmu_unwire, moea64_unwire),
318 MMUMETHOD(mmu_zero_page, moea64_zero_page),
319 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
320 MMUMETHOD(mmu_activate, moea64_activate),
321 MMUMETHOD(mmu_deactivate, moea64_deactivate),
322 MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
323 MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
324 MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),
326 /* Internal interfaces */
327 MMUMETHOD(mmu_mapdev, moea64_mapdev),
328 MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr),
329 MMUMETHOD(mmu_unmapdev, moea64_unmapdev),
330 MMUMETHOD(mmu_kextract, moea64_kextract),
331 MMUMETHOD(mmu_kenter, moea64_kenter),
332 MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr),
333 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
334 MMUMETHOD(mmu_scan_init, moea64_scan_init),
335 MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map),
340 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
342 static struct pvo_head *
343 vm_page_to_pvoh(vm_page_t m)
346 mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
347 return (&m->md.mdpg_pvoh);
350 static struct pvo_entry *
351 alloc_pvo_entry(int bootstrap)
353 struct pvo_entry *pvo;
355 if (!moea64_initialized || bootstrap) {
356 if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
357 panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
358 moea64_bpvo_pool_index, moea64_bpvo_pool_size,
359 moea64_bpvo_pool_size * sizeof(struct pvo_entry));
361 pvo = &moea64_bpvo_pool[
362 atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
363 bzero(pvo, sizeof(*pvo));
364 pvo->pvo_vaddr = PVO_BOOTSTRAP;
366 pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT);
367 bzero(pvo, sizeof(*pvo));
375 init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
381 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
383 pvo->pvo_pmap = pmap;
385 pvo->pvo_vaddr |= va;
386 vsid = va_to_vsid(pmap, va);
387 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
390 shift = (pvo->pvo_vaddr & PVO_LARGE) ? moea64_large_page_shift :
392 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
393 pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
397 free_pvo_entry(struct pvo_entry *pvo)
400 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
401 uma_zfree(moea64_pvo_zone, pvo);
405 moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
408 lpte->pte_hi = (pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
410 lpte->pte_hi |= LPTE_VALID;
412 if (pvo->pvo_vaddr & PVO_LARGE)
413 lpte->pte_hi |= LPTE_BIG;
414 if (pvo->pvo_vaddr & PVO_WIRED)
415 lpte->pte_hi |= LPTE_WIRED;
416 if (pvo->pvo_vaddr & PVO_HID)
417 lpte->pte_hi |= LPTE_HID;
419 lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
420 if (pvo->pvo_pte.prot & VM_PROT_WRITE)
421 lpte->pte_lo |= LPTE_BW;
423 lpte->pte_lo |= LPTE_BR;
425 if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
426 lpte->pte_lo |= LPTE_NOEXEC;
429 static __inline uint64_t
430 moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
435 if (ma != VM_MEMATTR_DEFAULT) {
437 case VM_MEMATTR_UNCACHEABLE:
438 return (LPTE_I | LPTE_G);
439 case VM_MEMATTR_CACHEABLE:
441 case VM_MEMATTR_WRITE_COMBINING:
442 case VM_MEMATTR_WRITE_BACK:
443 case VM_MEMATTR_PREFETCHABLE:
445 case VM_MEMATTR_WRITE_THROUGH:
446 return (LPTE_W | LPTE_M);
451 * Assume the page is cache inhibited and access is guarded unless
452 * it's in our available memory array.
454 pte_lo = LPTE_I | LPTE_G;
455 for (i = 0; i < pregions_sz; i++) {
456 if ((pa >= pregions[i].mr_start) &&
457 (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
458 pte_lo &= ~(LPTE_I | LPTE_G);
468 * Quick sort callout for comparing memory regions.
470 static int om_cmp(const void *a, const void *b);
473 om_cmp(const void *a, const void *b)
475 const struct ofw_map *mapa;
476 const struct ofw_map *mapb;
480 if (mapa->om_pa < mapb->om_pa)
482 else if (mapa->om_pa > mapb->om_pa)
489 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
491 struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
492 pcell_t acells, trans_cells[sz/sizeof(cell_t)];
493 struct pvo_entry *pvo;
499 bzero(translations, sz);
500 OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
502 if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1)
503 panic("moea64_bootstrap: can't get ofw translations");
505 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
506 sz /= sizeof(cell_t);
507 for (i = 0, j = 0; i < sz; j++) {
508 translations[j].om_va = trans_cells[i++];
509 translations[j].om_len = trans_cells[i++];
510 translations[j].om_pa = trans_cells[i++];
512 translations[j].om_pa <<= 32;
513 translations[j].om_pa |= trans_cells[i++];
515 translations[j].om_mode = trans_cells[i++];
517 KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
521 qsort(translations, sz, sizeof (*translations), om_cmp);
523 for (i = 0; i < sz; i++) {
524 pa_base = translations[i].om_pa;
525 #ifndef __powerpc64__
526 if ((translations[i].om_pa >> 32) != 0)
527 panic("OFW translations above 32-bit boundary!");
530 if (pa_base % PAGE_SIZE)
531 panic("OFW translation not page-aligned (phys)!");
532 if (translations[i].om_va % PAGE_SIZE)
533 panic("OFW translation not page-aligned (virt)!");
535 CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
536 pa_base, translations[i].om_va, translations[i].om_len);
538 /* Now enter the pages for this mapping */
541 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
542 /* If this address is direct-mapped, skip remapping */
543 if (hw_direct_map && translations[i].om_va == pa_base &&
544 moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT) == LPTE_M)
547 PMAP_LOCK(kernel_pmap);
548 pvo = moea64_pvo_find_va(kernel_pmap,
549 translations[i].om_va + off);
550 PMAP_UNLOCK(kernel_pmap);
554 moea64_kenter(mmup, translations[i].om_va + off,
563 moea64_probe_large_page(void)
565 uint16_t pvr = mfpvr() >> 16;
571 powerpc_sync(); isync();
572 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
573 powerpc_sync(); isync();
577 if (moea64_large_page_size == 0) {
578 moea64_large_page_size = 0x1000000; /* 16 MB */
579 moea64_large_page_shift = 24;
583 moea64_large_page_mask = moea64_large_page_size - 1;
587 moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
594 cache = PCPU_GET(slb);
595 esid = va >> ADDR_SR_SHFT;
596 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
598 for (i = 0; i < 64; i++) {
599 if (cache[i].slbe == (slbe | i))
604 entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
606 entry.slbv |= SLBV_L;
608 slb_insert_kernel(entry.slbe, entry.slbv);
613 moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
614 vm_offset_t kernelend)
616 struct pvo_entry *pvo;
619 vm_offset_t size, off;
623 if (moea64_large_page_size == 0)
628 PMAP_LOCK(kernel_pmap);
629 for (i = 0; i < pregions_sz; i++) {
630 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
631 pregions[i].mr_size; pa += moea64_large_page_size) {
634 pvo = alloc_pvo_entry(1 /* bootstrap */);
635 pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
636 init_pvo_entry(pvo, kernel_pmap, pa);
639 * Set memory access as guarded if prefetch within
640 * the page could exit the available physmem area.
642 if (pa & moea64_large_page_mask) {
643 pa &= moea64_large_page_mask;
646 if (pa + moea64_large_page_size >
647 pregions[i].mr_start + pregions[i].mr_size)
650 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
652 pvo->pvo_pte.pa = pa | pte_lo;
653 moea64_pvo_enter(mmup, pvo, NULL);
656 PMAP_UNLOCK(kernel_pmap);
658 size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
659 off = (vm_offset_t)(moea64_bpvo_pool);
660 for (pa = off; pa < off + size; pa += PAGE_SIZE)
661 moea64_kenter(mmup, pa, pa);
664 * Map certain important things, like ourselves.
666 * NOTE: We do not map the exception vector space. That code is
667 * used only in real mode, and leaving it unmapped allows us to
668 * catch NULL pointer deferences, instead of making NULL a valid
672 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
674 moea64_kenter(mmup, pa, pa);
679 * Allow user to override unmapped_buf_allowed for testing.
680 * XXXKIB Only direct map implementation was tested.
682 if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
683 &unmapped_buf_allowed))
684 unmapped_buf_allowed = hw_direct_map;
688 moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
691 vm_size_t physsz, hwphyssz;
693 #ifndef __powerpc64__
694 /* We don't have a direct map since there is no BAT */
697 /* Make sure battable is zero, since we have no BAT */
698 for (i = 0; i < 16; i++) {
699 battable[i].batu = 0;
700 battable[i].batl = 0;
703 moea64_probe_large_page();
705 /* Use a direct map if we have large page support */
706 if (moea64_large_page_size > 0)
712 /* Get physical memory regions from firmware */
713 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz);
714 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
716 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
717 panic("moea64_bootstrap: phys_avail too small");
719 phys_avail_count = 0;
722 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
723 for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
724 CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
725 regions[i].mr_start, regions[i].mr_start +
726 regions[i].mr_size, regions[i].mr_size);
728 (physsz + regions[i].mr_size) >= hwphyssz) {
729 if (physsz < hwphyssz) {
730 phys_avail[j] = regions[i].mr_start;
731 phys_avail[j + 1] = regions[i].mr_start +
738 phys_avail[j] = regions[i].mr_start;
739 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
741 physsz += regions[i].mr_size;
744 /* Check for overlap with the kernel and exception vectors */
745 for (j = 0; j < 2*phys_avail_count; j+=2) {
746 if (phys_avail[j] < EXC_LAST)
747 phys_avail[j] += EXC_LAST;
749 if (kernelstart >= phys_avail[j] &&
750 kernelstart < phys_avail[j+1]) {
751 if (kernelend < phys_avail[j+1]) {
752 phys_avail[2*phys_avail_count] =
753 (kernelend & ~PAGE_MASK) + PAGE_SIZE;
754 phys_avail[2*phys_avail_count + 1] =
759 phys_avail[j+1] = kernelstart & ~PAGE_MASK;
762 if (kernelend >= phys_avail[j] &&
763 kernelend < phys_avail[j+1]) {
764 if (kernelstart > phys_avail[j]) {
765 phys_avail[2*phys_avail_count] = phys_avail[j];
766 phys_avail[2*phys_avail_count + 1] =
767 kernelstart & ~PAGE_MASK;
771 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
775 physmem = btoc(physsz);
778 moea64_pteg_count = PTEGCOUNT;
780 moea64_pteg_count = 0x1000;
782 while (moea64_pteg_count < physmem)
783 moea64_pteg_count <<= 1;
785 moea64_pteg_count >>= 1;
786 #endif /* PTEGCOUNT */
790 moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
797 moea64_pteg_mask = moea64_pteg_count - 1;
800 * Initialize SLB table lock and page locks
802 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
803 for (i = 0; i < PV_LOCK_COUNT; i++)
804 mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
807 * Initialise the bootstrap pvo pool.
809 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
810 moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
811 moea64_bpvo_pool_index = 0;
814 * Make sure kernel vsid is allocated as well as VSID 0.
816 #ifndef __powerpc64__
817 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
818 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
819 moea64_vsid_bitmap[0] |= 1;
823 * Initialize the kernel pmap (which is statically allocated).
826 for (i = 0; i < 64; i++) {
827 pcpup->pc_slb[i].slbv = 0;
828 pcpup->pc_slb[i].slbe = 0;
831 for (i = 0; i < 16; i++)
832 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
835 kernel_pmap->pmap_phys = kernel_pmap;
836 CPU_FILL(&kernel_pmap->pm_active);
837 RB_INIT(&kernel_pmap->pmap_pvo);
839 PMAP_LOCK_INIT(kernel_pmap);
842 * Now map in all the other buffers we allocated earlier
845 moea64_setup_direct_map(mmup, kernelstart, kernelend);
849 moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
860 * Set up the Open Firmware pmap and add its mappings if not in real
864 chosen = OF_finddevice("/chosen");
865 if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) {
866 mmu = OF_instance_to_package(mmui);
868 (sz = OF_getproplen(mmu, "translations")) == -1)
870 if (sz > 6144 /* tmpstksz - 2 KB headroom */)
871 panic("moea64_bootstrap: too many ofw translations");
874 moea64_add_ofw_mappings(mmup, mmu, sz);
878 * Calculate the last available physical address.
881 for (i = 0; phys_avail[i + 2] != 0; i += 2)
882 Maxmem = max(Maxmem, powerpc_btop(phys_avail[i + 1]));
885 * Initialize MMU and remap early physical mappings
887 MMU_CPU_BOOTSTRAP(mmup,0);
888 mtmsr(mfmsr() | PSL_DR | PSL_IR);
890 bs_remap_earlyboot();
893 * Set the start and end of kva.
895 virtual_avail = VM_MIN_KERNEL_ADDRESS;
896 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
899 * Map the entire KVA range into the SLB. We must not fault there.
902 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
903 moea64_bootstrap_slb_prefault(va, 0);
907 * Figure out how far we can extend virtual_end into segment 16
908 * without running into existing mappings. Segment 16 is guaranteed
909 * to contain neither RAM nor devices (at least on Apple hardware),
910 * but will generally contain some OFW mappings we should not
914 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */
915 PMAP_LOCK(kernel_pmap);
916 while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
917 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
918 virtual_end += PAGE_SIZE;
919 PMAP_UNLOCK(kernel_pmap);
923 * Allocate a kernel stack with a guard page for thread0 and map it
924 * into the kernel page map.
926 pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
927 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
928 virtual_avail = va + kstack_pages * PAGE_SIZE;
929 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
930 thread0.td_kstack = va;
931 thread0.td_kstack_pages = kstack_pages;
932 for (i = 0; i < kstack_pages; i++) {
933 moea64_kenter(mmup, va, pa);
939 * Allocate virtual address space for the message buffer.
941 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
942 msgbufp = (struct msgbuf *)virtual_avail;
944 virtual_avail += round_page(msgbufsize);
945 while (va < virtual_avail) {
946 moea64_kenter(mmup, va, pa);
952 * Allocate virtual address space for the dynamic percpu area.
954 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
955 dpcpu = (void *)virtual_avail;
957 virtual_avail += DPCPU_SIZE;
958 while (va < virtual_avail) {
959 moea64_kenter(mmup, va, pa);
963 dpcpu_init(dpcpu, curcpu);
966 * Allocate some things for page zeroing. We put this directly
967 * in the page table and use MOEA64_PTE_REPLACE to avoid any
968 * of the PVO book-keeping or other parts of the VM system
969 * from even knowing that this hack exists.
972 if (!hw_direct_map) {
973 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
975 for (i = 0; i < 2; i++) {
976 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
977 virtual_end -= PAGE_SIZE;
979 moea64_kenter(mmup, moea64_scratchpage_va[i], 0);
981 PMAP_LOCK(kernel_pmap);
982 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
983 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
984 PMAP_UNLOCK(kernel_pmap);
990 moea64_pmap_init_qpages(void)
1000 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
1001 if (pc->pc_qmap_addr == 0)
1002 panic("pmap_init_qpages: unable to allocate KVA");
1003 PMAP_LOCK(kernel_pmap);
1004 pc->pc_qmap_pvo = moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
1005 PMAP_UNLOCK(kernel_pmap);
1006 mtx_init(&pc->pc_qmap_lock, "qmap lock", NULL, MTX_DEF);
1010 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);
1013 * Activate a user pmap. This mostly involves setting some non-CPU
1017 moea64_activate(mmu_t mmu, struct thread *td)
1021 pm = &td->td_proc->p_vmspace->vm_pmap;
1022 CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
1024 #ifdef __powerpc64__
1025 PCPU_SET(userslb, pm->pm_slb);
1026 __asm __volatile("slbmte %0, %1; isync" ::
1027 "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
1029 PCPU_SET(curpmap, pm->pmap_phys);
1030 mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
1035 moea64_deactivate(mmu_t mmu, struct thread *td)
1039 __asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));
1041 pm = &td->td_proc->p_vmspace->vm_pmap;
1042 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
1043 #ifdef __powerpc64__
1044 PCPU_SET(userslb, NULL);
1046 PCPU_SET(curpmap, NULL);
1051 moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1053 struct pvo_entry key, *pvo;
1057 key.pvo_vaddr = sva;
1059 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1060 pvo != NULL && PVO_VADDR(pvo) < eva;
1061 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
1062 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1063 panic("moea64_unwire: pvo %p is missing PVO_WIRED",
1065 pvo->pvo_vaddr &= ~PVO_WIRED;
1066 refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */);
1067 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1068 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1071 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
1073 refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
1074 if (refchg & LPTE_CHG)
1076 if (refchg & LPTE_REF)
1077 vm_page_aflag_set(m, PGA_REFERENCED);
1079 pm->pm_stats.wired_count--;
1085 * This goes through and sets the physical address of our
1086 * special scratch PTE to the PA we want to zero or copy. Because
1087 * of locking issues (this can get called in pvo_enter() by
1088 * the UMA allocator), we can't use most other utility functions here
1092 void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) {
1094 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1095 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1097 moea64_scratchpage_pvo[which]->pvo_pte.pa =
1098 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1099 MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
1100 MOEA64_PTE_INVALIDATE);
1105 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1110 dst = VM_PAGE_TO_PHYS(mdst);
1111 src = VM_PAGE_TO_PHYS(msrc);
1113 if (hw_direct_map) {
1114 bcopy((void *)src, (void *)dst, PAGE_SIZE);
1116 mtx_lock(&moea64_scratchpage_mtx);
1118 moea64_set_scratchpage_pa(mmu, 0, src);
1119 moea64_set_scratchpage_pa(mmu, 1, dst);
1121 bcopy((void *)moea64_scratchpage_va[0],
1122 (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1124 mtx_unlock(&moea64_scratchpage_mtx);
1129 moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1130 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1133 vm_offset_t a_pg_offset, b_pg_offset;
1136 while (xfersize > 0) {
1137 a_pg_offset = a_offset & PAGE_MASK;
1138 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1139 a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
1141 b_pg_offset = b_offset & PAGE_MASK;
1142 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1143 b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
1145 bcopy(a_cp, b_cp, cnt);
1153 moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1154 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1157 vm_offset_t a_pg_offset, b_pg_offset;
1160 mtx_lock(&moea64_scratchpage_mtx);
1161 while (xfersize > 0) {
1162 a_pg_offset = a_offset & PAGE_MASK;
1163 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1164 moea64_set_scratchpage_pa(mmu, 0,
1165 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
1166 a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
1167 b_pg_offset = b_offset & PAGE_MASK;
1168 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1169 moea64_set_scratchpage_pa(mmu, 1,
1170 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
1171 b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
1172 bcopy(a_cp, b_cp, cnt);
1177 mtx_unlock(&moea64_scratchpage_mtx);
1181 moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1182 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1185 if (hw_direct_map) {
1186 moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
1189 moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
1195 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1197 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1199 if (size + off > PAGE_SIZE)
1200 panic("moea64_zero_page: size + off > PAGE_SIZE");
1202 if (hw_direct_map) {
1203 bzero((caddr_t)pa + off, size);
1205 mtx_lock(&moea64_scratchpage_mtx);
1206 moea64_set_scratchpage_pa(mmu, 0, pa);
1207 bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1208 mtx_unlock(&moea64_scratchpage_mtx);
1213 * Zero a page of physical memory by temporarily mapping it
1216 moea64_zero_page(mmu_t mmu, vm_page_t m)
1218 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1219 vm_offset_t va, off;
1221 if (!hw_direct_map) {
1222 mtx_lock(&moea64_scratchpage_mtx);
1224 moea64_set_scratchpage_pa(mmu, 0, pa);
1225 va = moea64_scratchpage_va[0];
1230 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1231 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
1234 mtx_unlock(&moea64_scratchpage_mtx);
1238 moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
1240 struct pvo_entry *pvo;
1241 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1247 * MOEA64_PTE_REPLACE does some locking, so we can't just grab
1248 * a critical section and access the PCPU data like on i386.
1249 * Instead, pin the thread and grab the PCPU lock to prevent
1250 * a preempting thread from using the same PCPU data.
1254 mtx_assert(PCPU_PTR(qmap_lock), MA_NOTOWNED);
1255 pvo = PCPU_GET(qmap_pvo);
1257 mtx_lock(PCPU_PTR(qmap_lock));
1258 pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
1260 MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
1263 return (PCPU_GET(qmap_addr));
1267 moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
1272 mtx_assert(PCPU_PTR(qmap_lock), MA_OWNED);
1273 KASSERT(PCPU_GET(qmap_addr) == addr,
1274 ("moea64_quick_remove_page: invalid address"));
1275 mtx_unlock(PCPU_PTR(qmap_lock));
1280 * Map the given physical page at the specified virtual address in the
1281 * target pmap with the protection requested. If specified the page
1282 * will be wired down.
1286 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1287 vm_prot_t prot, u_int flags, int8_t psind)
1289 struct pvo_entry *pvo, *oldpvo;
1290 struct pvo_head *pvo_head;
1294 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1295 VM_OBJECT_ASSERT_LOCKED(m->object);
1297 pvo = alloc_pvo_entry(0);
1298 pvo->pvo_pmap = NULL; /* to be filled in later */
1299 pvo->pvo_pte.prot = prot;
1301 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1302 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo;
1304 if ((flags & PMAP_ENTER_WIRED) != 0)
1305 pvo->pvo_vaddr |= PVO_WIRED;
1307 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
1310 pvo_head = &m->md.mdpg_pvoh;
1311 pvo->pvo_vaddr |= PVO_MANAGED;
1317 if (pvo->pvo_pmap == NULL)
1318 init_pvo_entry(pvo, pmap, va);
1319 if (prot & VM_PROT_WRITE)
1320 if (pmap_bootstrapped &&
1321 (m->oflags & VPO_UNMANAGED) == 0)
1322 vm_page_aflag_set(m, PGA_WRITEABLE);
1324 oldpvo = moea64_pvo_find_va(pmap, va);
1325 if (oldpvo != NULL) {
1326 if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
1327 oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
1328 oldpvo->pvo_pte.prot == prot) {
1329 /* Identical mapping already exists */
1332 /* If not in page table, reinsert it */
1333 if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) {
1334 moea64_pte_overflow--;
1335 MOEA64_PTE_INSERT(mmu, oldpvo);
1338 /* Then just clean up and go home */
1341 free_pvo_entry(pvo);
1345 /* Otherwise, need to kill it first */
1346 KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
1347 "mapping does not match new mapping"));
1348 moea64_pvo_remove_from_pmap(mmu, oldpvo);
1350 error = moea64_pvo_enter(mmu, pvo, pvo_head);
1354 /* Free any dead pages */
1355 if (oldpvo != NULL) {
1356 PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1357 moea64_pvo_remove_from_page(mmu, oldpvo);
1358 PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1359 free_pvo_entry(oldpvo);
1362 if (error != ENOMEM)
1364 if ((flags & PMAP_ENTER_NOSLEEP) != 0)
1365 return (KERN_RESOURCE_SHORTAGE);
1366 VM_OBJECT_ASSERT_UNLOCKED(m->object);
1371 * Flush the page from the instruction cache if this page is
1372 * mapped executable and cacheable.
1374 if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
1375 (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1376 vm_page_aflag_set(m, PGA_EXECUTABLE);
1377 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1379 return (KERN_SUCCESS);
1383 moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1388 * This is much trickier than on older systems because
1389 * we can't sync the icache on physical addresses directly
1390 * without a direct map. Instead we check a couple of cases
1391 * where the memory is already mapped in and, failing that,
1392 * use the same trick we use for page zeroing to create
1393 * a temporary mapping for this physical address.
1396 if (!pmap_bootstrapped) {
1398 * If PMAP is not bootstrapped, we are likely to be
1401 __syncicache((void *)pa, sz);
1402 } else if (pmap == kernel_pmap) {
1403 __syncicache((void *)va, sz);
1404 } else if (hw_direct_map) {
1405 __syncicache((void *)pa, sz);
1407 /* Use the scratch page to set up a temp mapping */
1409 mtx_lock(&moea64_scratchpage_mtx);
1411 moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1412 __syncicache((void *)(moea64_scratchpage_va[1] +
1413 (va & ADDR_POFF)), sz);
1415 mtx_unlock(&moea64_scratchpage_mtx);
1420 * Maps a sequence of resident pages belonging to the same object.
1421 * The sequence begins with the given page m_start. This page is
1422 * mapped at the given virtual address start. Each subsequent page is
1423 * mapped at a virtual address that is offset from start by the same
1424 * amount as the page is offset from m_start within the object. The
1425 * last page in the sequence is the page with the largest offset from
1426 * m_start that can be mapped at a virtual address less than the given
1427 * virtual address end. Not every virtual page between start and end
1428 * is mapped; only those for which a resident page exists with the
1429 * corresponding offset from m_start are mapped.
1432 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1433 vm_page_t m_start, vm_prot_t prot)
1436 vm_pindex_t diff, psize;
1438 VM_OBJECT_ASSERT_LOCKED(m_start->object);
1440 psize = atop(end - start);
1442 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1443 moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
1444 (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
1445 m = TAILQ_NEXT(m, listq);
1450 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1454 moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1455 PMAP_ENTER_NOSLEEP, 0);
1459 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1461 struct pvo_entry *pvo;
1465 pvo = moea64_pvo_find_va(pm, va);
1469 pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1476 * Atomically extract and hold the physical page with the given
1477 * pmap and virtual address pair if that mapping permits the given
1481 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1483 struct pvo_entry *pvo;
1491 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1492 if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
1493 if (vm_page_pa_tryrelock(pmap,
1494 pvo->pvo_pte.pa & LPTE_RPGN, &pa))
1496 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
1504 static mmu_t installed_mmu;
1507 moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
1510 struct pvo_entry *pvo;
1516 * This entire routine is a horrible hack to avoid bothering kmem
1517 * for new KVA addresses. Because this can get called from inside
1518 * kmem allocation routines, calling kmem for a new address here
1519 * can lead to multiply locking non-recursive mutexes.
1522 *flags = UMA_SLAB_PRIV;
1523 needed_lock = !PMAP_LOCKED(kernel_pmap);
1525 m = vm_page_alloc(NULL, 0,
1526 malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1530 va = VM_PAGE_TO_PHYS(m);
1532 pvo = alloc_pvo_entry(1 /* bootstrap */);
1534 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
1535 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;
1538 PMAP_LOCK(kernel_pmap);
1540 init_pvo_entry(pvo, kernel_pmap, va);
1541 pvo->pvo_vaddr |= PVO_WIRED;
1543 moea64_pvo_enter(installed_mmu, pvo, NULL);
1546 PMAP_UNLOCK(kernel_pmap);
1548 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1549 bzero((void *)va, PAGE_SIZE);
1554 extern int elf32_nxstack;
1557 moea64_init(mmu_t mmu)
1560 CTR0(KTR_PMAP, "moea64_init");
1562 moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1563 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1564 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1566 if (!hw_direct_map) {
1567 installed_mmu = mmu;
1568 uma_zone_set_allocf(moea64_pvo_zone,moea64_uma_page_alloc);
1571 #ifdef COMPAT_FREEBSD32
1575 moea64_initialized = TRUE;
1579 moea64_is_referenced(mmu_t mmu, vm_page_t m)
1582 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1583 ("moea64_is_referenced: page %p is not managed", m));
1585 return (moea64_query_bit(mmu, m, LPTE_REF));
1589 moea64_is_modified(mmu_t mmu, vm_page_t m)
1592 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1593 ("moea64_is_modified: page %p is not managed", m));
1596 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1597 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
1598 * is clear, no PTEs can have LPTE_CHG set.
1600 VM_OBJECT_ASSERT_LOCKED(m->object);
1601 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1603 return (moea64_query_bit(mmu, m, LPTE_CHG));
1607 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1609 struct pvo_entry *pvo;
1610 boolean_t rv = TRUE;
1613 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1621 moea64_clear_modify(mmu_t mmu, vm_page_t m)
1624 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1625 ("moea64_clear_modify: page %p is not managed", m));
1626 VM_OBJECT_ASSERT_WLOCKED(m->object);
1627 KASSERT(!vm_page_xbusied(m),
1628 ("moea64_clear_modify: page %p is exclusive busied", m));
1631 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1632 * set. If the object containing the page is locked and the page is
1633 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
1635 if ((m->aflags & PGA_WRITEABLE) == 0)
1637 moea64_clear_bit(mmu, m, LPTE_CHG);
1641 * Clear the write and modified bits in each of the given page's mappings.
1644 moea64_remove_write(mmu_t mmu, vm_page_t m)
1646 struct pvo_entry *pvo;
1647 int64_t refchg, ret;
1650 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1651 ("moea64_remove_write: page %p is not managed", m));
1654 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1655 * set by another thread while the object is locked. Thus,
1656 * if PGA_WRITEABLE is clear, no page table entries need updating.
1658 VM_OBJECT_ASSERT_WLOCKED(m->object);
1659 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1664 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1665 pmap = pvo->pvo_pmap;
1667 if (!(pvo->pvo_vaddr & PVO_DEAD) &&
1668 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1669 pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
1670 ret = MOEA64_PTE_REPLACE(mmu, pvo,
1671 MOEA64_PTE_PROT_UPDATE);
1675 if (pvo->pvo_pmap == kernel_pmap)
1680 if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
1682 vm_page_aflag_clear(m, PGA_WRITEABLE);
1687 * moea64_ts_referenced:
1689 * Return a count of reference bits for a page, clearing those bits.
1690 * It is not necessary for every reference bit to be cleared, but it
1691 * is necessary that 0 only be returned when there are truly no
1692 * reference bits set.
1694 * XXX: The exact number of bits to check and clear is a matter that
1695 * should be tested and standardized at some point in the future for
1696 * optimal aging of shared pages.
1699 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1702 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1703 ("moea64_ts_referenced: page %p is not managed", m));
1704 return (moea64_clear_bit(mmu, m, LPTE_REF));
1708 * Modify the WIMG settings of all mappings for a page.
1711 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1713 struct pvo_entry *pvo;
1718 if ((m->oflags & VPO_UNMANAGED) != 0) {
1719 m->md.mdpg_cache_attrs = ma;
1723 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1726 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1727 pmap = pvo->pvo_pmap;
1729 if (!(pvo->pvo_vaddr & PVO_DEAD)) {
1730 pvo->pvo_pte.pa &= ~LPTE_WIMG;
1731 pvo->pvo_pte.pa |= lo;
1732 refchg = MOEA64_PTE_REPLACE(mmu, pvo,
1733 MOEA64_PTE_INVALIDATE);
1735 refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
1737 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1738 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1740 atomic_readandclear_32(&m->md.mdpg_attrs);
1741 if (refchg & LPTE_CHG)
1743 if (refchg & LPTE_REF)
1744 vm_page_aflag_set(m, PGA_REFERENCED);
1746 if (pvo->pvo_pmap == kernel_pmap)
1751 m->md.mdpg_cache_attrs = ma;
1756 * Map a wired page into kernel virtual address space.
1759 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1762 struct pvo_entry *pvo, *oldpvo;
1764 pvo = alloc_pvo_entry(0);
1765 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1766 pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
1767 pvo->pvo_vaddr |= PVO_WIRED;
1769 PMAP_LOCK(kernel_pmap);
1770 oldpvo = moea64_pvo_find_va(kernel_pmap, va);
1772 moea64_pvo_remove_from_pmap(mmu, oldpvo);
1773 init_pvo_entry(pvo, kernel_pmap, va);
1774 error = moea64_pvo_enter(mmu, pvo, NULL);
1775 PMAP_UNLOCK(kernel_pmap);
1777 /* Free any dead pages */
1778 if (oldpvo != NULL) {
1779 PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1780 moea64_pvo_remove_from_page(mmu, oldpvo);
1781 PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1782 free_pvo_entry(oldpvo);
1785 if (error != 0 && error != ENOENT)
1786 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1791 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1794 moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1798 * Extract the physical page address associated with the given kernel virtual
1802 moea64_kextract(mmu_t mmu, vm_offset_t va)
1804 struct pvo_entry *pvo;
1808 * Shortcut the direct-mapped case when applicable. We never put
1809 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1811 if (va < VM_MIN_KERNEL_ADDRESS)
1814 PMAP_LOCK(kernel_pmap);
1815 pvo = moea64_pvo_find_va(kernel_pmap, va);
1816 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1818 pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1819 PMAP_UNLOCK(kernel_pmap);
1824 * Remove a wired page from kernel virtual address space.
1827 moea64_kremove(mmu_t mmu, vm_offset_t va)
1829 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1833 * Map a range of physical addresses into kernel virtual address space.
1835 * The value passed in *virt is a suggested virtual address for the mapping.
1836 * Architectures which can support a direct-mapped physical to virtual region
1837 * can return the appropriate address within that region, leaving '*virt'
1838 * unchanged. Other architectures should map the pages starting at '*virt' and
1839 * update '*virt' with the first usable address after the mapped region.
1842 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1843 vm_paddr_t pa_end, int prot)
1845 vm_offset_t sva, va;
1847 if (hw_direct_map) {
1849 * Check if every page in the region is covered by the direct
1850 * map. The direct map covers all of physical memory. Use
1851 * moea64_calc_wimg() as a shortcut to see if the page is in
1852 * physical memory as a way to see if the direct map covers it.
1854 for (va = pa_start; va < pa_end; va += PAGE_SIZE)
1855 if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
1862 /* XXX respect prot argument */
1863 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1864 moea64_kenter(mmu, va, pa_start);
1871 * Returns true if the pmap's pv is one of the first
1872 * 16 pvs linked to from this page. This count may
1873 * be changed upwards or downwards in the future; it
1874 * is only necessary that true be returned for a small
1875 * subset of pmaps for proper page aging.
1878 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1881 struct pvo_entry *pvo;
1884 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1885 ("moea64_page_exists_quick: page %p is not managed", m));
1889 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1890 if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
1902 moea64_page_init(mmu_t mmu __unused, vm_page_t m)
1905 m->md.mdpg_attrs = 0;
1906 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
1907 LIST_INIT(&m->md.mdpg_pvoh);
1911 * Return the number of managed mappings to the given physical page
1915 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1917 struct pvo_entry *pvo;
1921 if ((m->oflags & VPO_UNMANAGED) != 0)
1924 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1925 if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
1931 static uintptr_t moea64_vsidcontext;
1934 moea64_get_unique_vsid(void) {
1941 __asm __volatile("mftb %0" : "=r"(entropy));
1943 mtx_lock(&moea64_slb_mutex);
1944 for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1948 * Create a new value by mutiplying by a prime and adding in
1949 * entropy from the timebase register. This is to make the
1950 * VSID more random so that the PT hash function collides
1951 * less often. (Note that the prime casues gcc to do shifts
1952 * instead of a multiply.)
1954 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1955 hash = moea64_vsidcontext & (NVSIDS - 1);
1956 if (hash == 0) /* 0 is special, avoid it */
1959 mask = 1 << (hash & (VSID_NBPW - 1));
1960 hash = (moea64_vsidcontext & VSID_HASHMASK);
1961 if (moea64_vsid_bitmap[n] & mask) { /* collision? */
1962 /* anything free in this bucket? */
1963 if (moea64_vsid_bitmap[n] == 0xffffffff) {
1964 entropy = (moea64_vsidcontext >> 20);
1967 i = ffs(~moea64_vsid_bitmap[n]) - 1;
1969 hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
1972 if (hash == VSID_VRMA) /* also special, avoid this too */
1974 KASSERT(!(moea64_vsid_bitmap[n] & mask),
1975 ("Allocating in-use VSID %#zx\n", hash));
1976 moea64_vsid_bitmap[n] |= mask;
1977 mtx_unlock(&moea64_slb_mutex);
1981 mtx_unlock(&moea64_slb_mutex);
1982 panic("%s: out of segments",__func__);
1985 #ifdef __powerpc64__
1987 moea64_pinit(mmu_t mmu, pmap_t pmap)
1990 RB_INIT(&pmap->pmap_pvo);
1992 pmap->pm_slb_tree_root = slb_alloc_tree();
1993 pmap->pm_slb = slb_alloc_user_cache();
1994 pmap->pm_slb_len = 0;
1998 moea64_pinit(mmu_t mmu, pmap_t pmap)
2003 RB_INIT(&pmap->pmap_pvo);
2005 if (pmap_bootstrapped)
2006 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
2009 pmap->pmap_phys = pmap;
2012 * Allocate some segment registers for this pmap.
2014 hash = moea64_get_unique_vsid();
2016 for (i = 0; i < 16; i++)
2017 pmap->pm_sr[i] = VSID_MAKE(i, hash);
2019 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
2024 * Initialize the pmap associated with process 0.
2027 moea64_pinit0(mmu_t mmu, pmap_t pm)
2031 moea64_pinit(mmu, pm);
2032 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
2036 * Set the physical protection on the specified range of this map as requested.
2039 moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
2045 PMAP_LOCK_ASSERT(pm, MA_OWNED);
2048 * Change the protection of the page.
2050 oldprot = pvo->pvo_pte.prot;
2051 pvo->pvo_pte.prot = prot;
2052 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2055 * If the PVO is in the page table, update mapping
2057 refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
2059 refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
2061 if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
2062 (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
2063 if ((pg->oflags & VPO_UNMANAGED) == 0)
2064 vm_page_aflag_set(pg, PGA_EXECUTABLE);
2065 moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
2066 pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
2070 * Update vm about the REF/CHG bits if the page is managed and we have
2071 * removed write access.
2073 if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
2074 (oldprot & VM_PROT_WRITE)) {
2075 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2076 if (refchg & LPTE_CHG)
2078 if (refchg & LPTE_REF)
2079 vm_page_aflag_set(pg, PGA_REFERENCED);
2084 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2087 struct pvo_entry *pvo, *tpvo, key;
2089 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
2092 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
2093 ("moea64_protect: non current pmap"));
2095 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2096 moea64_remove(mmu, pm, sva, eva);
2101 key.pvo_vaddr = sva;
2102 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2103 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2104 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2105 moea64_pvo_protect(mmu, pm, pvo, prot);
2111 * Map a list of wired pages into kernel virtual address space. This is
2112 * intended for temporary mappings which do not need page modification or
2113 * references recorded. Existing mappings in the region are overwritten.
2116 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
2118 while (count-- > 0) {
2119 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2126 * Remove page mappings from kernel virtual address space. Intended for
2127 * temporary mappings entered by moea64_qenter.
2130 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
2132 while (count-- > 0) {
2133 moea64_kremove(mmu, va);
2139 moea64_release_vsid(uint64_t vsid)
2143 mtx_lock(&moea64_slb_mutex);
2144 idx = vsid & (NVSIDS-1);
2145 mask = 1 << (idx % VSID_NBPW);
2147 KASSERT(moea64_vsid_bitmap[idx] & mask,
2148 ("Freeing unallocated VSID %#jx", vsid));
2149 moea64_vsid_bitmap[idx] &= ~mask;
2150 mtx_unlock(&moea64_slb_mutex);
2155 moea64_release(mmu_t mmu, pmap_t pmap)
2159 * Free segment registers' VSIDs
2161 #ifdef __powerpc64__
2162 slb_free_tree(pmap);
2163 slb_free_user_cache(pmap->pm_slb);
2165 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2167 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2172 * Remove all pages mapped by the specified pmap
2175 moea64_remove_pages(mmu_t mmu, pmap_t pm)
2177 struct pvo_entry *pvo, *tpvo;
2178 struct pvo_tree tofree;
2183 RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2184 if (pvo->pvo_vaddr & PVO_WIRED)
2188 * For locking reasons, remove this from the page table and
2189 * pmap, but save delinking from the vm_page for a second
2192 moea64_pvo_remove_from_pmap(mmu, pvo);
2193 RB_INSERT(pvo_tree, &tofree, pvo);
2197 RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
2198 PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2199 moea64_pvo_remove_from_page(mmu, pvo);
2200 PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2201 RB_REMOVE(pvo_tree, &tofree, pvo);
2202 free_pvo_entry(pvo);
2207 * Remove the given range of addresses from the specified map.
2210 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2212 struct pvo_entry *pvo, *tpvo, key;
2213 struct pvo_tree tofree;
2216 * Perform an unsynchronized read. This is, however, safe.
2218 if (pm->pm_stats.resident_count == 0)
2221 key.pvo_vaddr = sva;
2226 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2227 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2228 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2231 * For locking reasons, remove this from the page table and
2232 * pmap, but save delinking from the vm_page for a second
2235 moea64_pvo_remove_from_pmap(mmu, pvo);
2236 RB_INSERT(pvo_tree, &tofree, pvo);
2240 RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
2241 PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2242 moea64_pvo_remove_from_page(mmu, pvo);
2243 PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2244 RB_REMOVE(pvo_tree, &tofree, pvo);
2245 free_pvo_entry(pvo);
2250 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2251 * will reflect changes in pte's back to the vm_page.
2254 moea64_remove_all(mmu_t mmu, vm_page_t m)
2256 struct pvo_entry *pvo, *next_pvo;
2257 struct pvo_head freequeue;
2261 LIST_INIT(&freequeue);
2264 LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2265 pmap = pvo->pvo_pmap;
2267 wasdead = (pvo->pvo_vaddr & PVO_DEAD);
2269 moea64_pvo_remove_from_pmap(mmu, pvo);
2270 moea64_pvo_remove_from_page(mmu, pvo);
2272 LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
2276 KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
2277 KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
2280 /* Clean up UMA allocations */
2281 LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
2282 free_pvo_entry(pvo);
/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t s, e;
	int i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = roundup2(phys_avail[i], align);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s + size > platform_real_maxaddr())
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}
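
/*
 * Illustrative example of the phys_avail[] surgery above (hypothetical
 * addresses, 4KB pages): carving size = 0x2000 at s = 0x5000 out of the
 * range pair { 0x3000, 0xa000 } matches neither edge case, so the pairs
 * after slot i are shifted right one slot and the range is split around
 * the allocated block [s, e):
 *
 *	before:	phys_avail = { ..., 0x3000, 0xa000, ... }
 *	after:	phys_avail = { ..., 0x3000, 0x5000, 0x7000, 0xa000, ... }
 */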
static int
moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head)
{
	int first = 0, err;	/* 'first' must default to 0 for unmanaged mappings */

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	KASSERT(moea64_pvo_find_va(pvo->pvo_pmap, PVO_VADDR(pvo)) == NULL,
	    ("Existing mapping for VA %#jx", (uintmax_t)PVO_VADDR(pvo)));

	moea64_pvo_enter_calls++;

	/*
	 * Add to pmap list
	 */
	RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (pvo_head != NULL) {
		if (LIST_FIRST(pvo_head) == NULL)
			first = 1;
		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	}

	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;

	/*
	 * Insert it into the hardware page table
	 */
	err = MOEA64_PTE_INSERT(mmu, pvo);
	if (err != 0)
		panic("moea64_pvo_enter: overflow");

	moea64_pvo_entries++;

	if (pvo->pvo_pmap == kernel_pmap)
		isync();

#ifdef __powerpc64__
	/*
	 * Make sure all our bootstrap mappings are in the SLB as soon
	 * as virtual memory is switched on.
	 */
	if (!pmap_bootstrapped)
		moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
		    pvo->pvo_vaddr & PVO_LARGE);
#endif

	return (first ? ENOENT : 0);
}
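
/*
 * A caller-side note on the return convention above: ENOENT is not an
 * error here.  It signals that this PVO is the first entry on the page's
 * PV list, which a caller may use to decide whether page-level state
 * still needs to be established.  Sketch (illustrative only):
 *
 *	error = moea64_pvo_enter(mmu, pvo, pvo_head);
 *	if (error == ENOENT)
 *		... this was the page's first mapping ...
 */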
static void
moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
{
	struct vm_page *pg;
	int32_t refchg;

	KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));

	/*
	 * If there is an active pte entry, we need to deactivate it
	 */
	refchg = MOEA64_PTE_UNSET(mmu, pvo);
	if (refchg < 0) {
		/*
		 * If it was evicted from the page table, be pessimistic and
		 * dirty the page if it was writable.
		 */
		if (pvo->pvo_pte.prot & VM_PROT_WRITE)
			refchg = LPTE_CHG;
		else
			refchg = 0;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Remove this PVO from the pmap list.
	 */
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Mark this for the next sweep
	 */
	pvo->pvo_vaddr |= PVO_DEAD;

	/* Send RC bits to VM */
	if ((pvo->pvo_vaddr & PVO_MANAGED) &&
	    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
		if (pg != NULL) {
			refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
			if (refchg & LPTE_CHG)
				vm_page_dirty(pg);
			if (refchg & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
		}
	}
}
static void
moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
{
	struct vm_page *pg;

	KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));

	/* Use NULL pmaps as a sentinel for races in page deletion */
	if (pvo->pvo_pmap == NULL)
		return;
	pvo->pvo_pmap = NULL;

	/*
	 * Update vm about page writeability/executability if managed
	 */
	PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);

	if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) {
		LIST_REMOVE(pvo, pvo_vlink);
		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
			vm_page_aflag_clear(pg,
			    PGA_WRITEABLE | PGA_EXECUTABLE);
	}

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;
}
static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry key;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	key.pvo_vaddr = va & ~ADDR_POFF;
	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}
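
/*
 * The stack-allocated key above works because the pvo_tree comparator
 * orders entries by PVO_VADDR() alone, so only pvo_vaddr needs to be
 * initialized for RB_FIND()/RB_NFIND().  Illustrative use, under the
 * pmap lock as asserted above:
 *
 *	PMAP_LOCK(pm);
 *	pvo = moea64_pvo_find_va(pm, va);
 *	if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
 *		... inspect pvo->pvo_pte ...
 *	PMAP_UNLOCK(pm);
 */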
static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
{
	struct pvo_entry *pvo;
	int64_t ret;
	boolean_t rv;

	/*
	 * See if this bit is stored in the page already.
	 */
	if (m->md.mdpg_attrs & ptebit)
		return (TRUE);

	/*
	 * Examine each PTE. Sync so that any pending REF/CHG bits are
	 * flushed to the PTEs.
	 */
	rv = FALSE;
	powerpc_sync();
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		ret = 0;

		/*
		 * See if this pvo has a valid PTE. if so, fetch the
		 * REF/CHG bits from the valid PTE. If the appropriate
		 * ptebit is set, return success.
		 */
		PMAP_LOCK(pvo->pvo_pmap);
		if (!(pvo->pvo_vaddr & PVO_DEAD))
			ret = MOEA64_PTE_SYNCH(mmu, pvo);
		PMAP_UNLOCK(pvo->pvo_pmap);

		if (ret > 0) {
			atomic_set_32(&m->md.mdpg_attrs,
			    ret & (LPTE_CHG | LPTE_REF));
			if (ret & ptebit) {
				rv = TRUE;
				break;
			}
		}
	}
	PV_PAGE_UNLOCK(m);

	return (rv);
}
static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	int64_t ret;

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pte's ptebit.
	 */
	count = 0;
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		ret = 0;

		PMAP_LOCK(pvo->pvo_pmap);
		if (!(pvo->pvo_vaddr & PVO_DEAD))
			ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit);
		PMAP_UNLOCK(pvo->pvo_pmap);

		if (ret > 0 && (ret & ptebit))
			count++;
	}
	atomic_clear_32(&m->md.mdpg_attrs, ptebit);
	PV_PAGE_UNLOCK(m);

	return (count);
}
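
/*
 * moea64_query_bit() and moea64_clear_bit() back the MI referenced/
 * modified interface; for example (illustrative of how they are meant
 * to be combined, not new entry points):
 *
 *	moea64_query_bit(mmu, m, LPTE_CHG);	(pmap_is_modified)
 *	moea64_clear_bit(mmu, m, LPTE_REF);	(pmap_ts_referenced)
 */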
boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	struct pvo_entry *pvo, key;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
	    ppa < pa + size; ppa += PAGE_SIZE,
	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
		if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kva_alloc(size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}
void *
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}
void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	kva_free(base, size);
}
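
/*
 * Typical (illustrative) use of the mapdev/unmapdev pair above for a
 * device register window; the address and size are hypothetical:
 *
 *	void *regs = moea64_mapdev_attr(mmu, 0xf1000000, 0x1000,
 *	    VM_MEMATTR_UNCACHEABLE);
 *	... access device registers through regs ...
 *	moea64_unmapdev(mmu, (vm_offset_t)regs, 0x1000);
 *
 * The returned pointer carries the sub-page offset of 'pa'; the matching
 * trunc_page()/roundup2() arithmetic in moea64_unmapdev() recovers the
 * page-aligned base and full size before the KVA is freed.
 */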
void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/*
		 * Round up from va + 1 so that a page-aligned va still
		 * yields a non-empty chunk; round_page(va) would give
		 * len = 0 and the loop would never terminate.
		 */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
			pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}
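
/*
 * The loop above advances at most one page per iteration so that each
 * flushed chunk has a single physical translation.  For example
 * (illustrative, 4KB pages), va = 0x10ff0 with sz = 0x30 is split into
 * [0x10ff0, 0x11000) and [0x11000, 0x11020), each looked up and synced
 * separately.
 */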
void
moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{

	*va = (void *)(uintptr_t)pa;
}
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];

void
moea64_scan_init(mmu_t mmu)
{
	struct pvo_entry *pvo;
	vm_offset_t va;
	int i;

	if (!do_minidump) {
		/* Initialize phys. segments for dumpsys(). */
		memset(&dump_map, 0, sizeof(dump_map));
		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
		for (i = 0; i < pregions_sz; i++) {
			dump_map[i].pa_start = pregions[i].mr_start;
			dump_map[i].pa_size = pregions[i].mr_size;
		}
		return;
	}

	/* Virtual segments for minidumps: */
	memset(&dump_map, 0, sizeof(dump_map));

	/* 1st: kernel .data and .bss. */
	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
	dump_map[0].pa_size = round_page((uintptr_t)_end) -
	    dump_map[0].pa_start;

	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
	dump_map[1].pa_size = round_page(msgbufp->msg_size);

	/* 3rd: kernel VM. */
	va = dump_map[1].pa_start + dump_map[1].pa_size;
	/* Find start of next chunk (from va). */
	while (va < virtual_end) {
		/* Don't dump the buffer cache. */
		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
			va = kmi.buffer_eva;
			continue;
		}
		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
			break;
		va += PAGE_SIZE;
	}
	if (va < virtual_end) {
		dump_map[2].pa_start = va;
		va += PAGE_SIZE;
		/* Find last page in chunk. */
		while (va < virtual_end) {
			/* Don't run into the buffer cache. */
			if (va == kmi.buffer_sva)
				break;
			pvo = moea64_pvo_find_va(kernel_pmap,
			    va & ~ADDR_POFF);
			/*
			 * Unlike the search loop above, stop at the first
			 * unmapped (or dead) page: the chunk ends there.
			 */
			if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD))
				break;
			va += PAGE_SIZE;
		}
		dump_map[2].pa_size = va - dump_map[2].pa_start;
	}
}