2 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the NetBSD
19 * Foundation, Inc. and its contributors.
20 * 4. Neither the name of The NetBSD Foundation nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38 * Copyright (C) 1995, 1996 TooLs GmbH.
39 * All rights reserved.
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. All advertising materials mentioning features or use of this software
50 * must display the following acknowledgement:
51 * This product includes software developed by TooLs GmbH.
52 * 4. The name of TooLs GmbH may not be used to endorse or promote products
53 * derived from this software without specific prior written permission.
55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
69 * Copyright (C) 2001 Benno Rice.
70 * All rights reserved.
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions
75 * 1. Redistributions of source code must retain the above copyright
76 * notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce the above copyright
78 * notice, this list of conditions and the following disclaimer in the
79 * documentation and/or other materials provided with the distribution.
81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
93 #include <sys/cdefs.h>
94 __FBSDID("$FreeBSD$");
97 * Manages physical address maps.
99 * In addition to hardware address maps, this module is called upon to
100 * provide software-use-only maps which may or may not be stored in the
101 * same form as hardware maps. These pseudo-maps are used to store
102 * intermediate results from copy operations to and from address spaces.
104 * Since the information managed by this module is also stored by the
105 * logical address mapping module, this module may throw away valid virtual
106 * to physical mappings at almost any time. However, invalidations of
107 * mappings must be done as requested.
109 * In order to cope with hardware architectures which make virtual to
110 * physical map invalidates expensive, this module may delay invalidate or
111 * reduced protection operations until such time as they are actually
112 * necessary. This module is given full information as to which processors
113 * are currently using which maps, and to when physical maps must be made
117 #include "opt_kstack_pages.h"
119 #include <sys/param.h>
120 #include <sys/kernel.h>
121 #include <sys/queue.h>
122 #include <sys/cpuset.h>
124 #include <sys/lock.h>
125 #include <sys/msgbuf.h>
126 #include <sys/mutex.h>
127 #include <sys/proc.h>
128 #include <sys/sched.h>
129 #include <sys/sysctl.h>
130 #include <sys/systm.h>
131 #include <sys/vmmeter.h>
135 #include <dev/ofw/openfirm.h>
138 #include <vm/vm_param.h>
139 #include <vm/vm_kern.h>
140 #include <vm/vm_page.h>
141 #include <vm/vm_map.h>
142 #include <vm/vm_object.h>
143 #include <vm/vm_extern.h>
144 #include <vm/vm_pageout.h>
145 #include <vm/vm_pager.h>
148 #include <machine/_inttypes.h>
149 #include <machine/cpu.h>
150 #include <machine/platform.h>
151 #include <machine/frame.h>
152 #include <machine/md_var.h>
153 #include <machine/psl.h>
154 #include <machine/bat.h>
155 #include <machine/hid.h>
156 #include <machine/pte.h>
157 #include <machine/sr.h>
158 #include <machine/trap.h>
159 #include <machine/mmuvar.h>
161 #include "mmu_oea64.h"
163 #include "moea64_if.h"
165 void moea64_release_vsid(uint64_t vsid);
166 uintptr_t moea64_get_unique_vsid(void);
168 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR)
169 #define ENABLE_TRANS(msr) mtmsr(msr)
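/*
 * Editor's illustration (a sketch, not part of the original source): the
 * DISABLE_TRANS()/ENABLE_TRANS() pair is intended to bracket short accesses
 * made with data relocation turned off, for example:
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	*(volatile uint32_t *)pa = val;		(pa and val are hypothetical)
 *	ENABLE_TRANS(msr);
 *
 * DISABLE_TRANS() saves the current MSR and clears PSL_DR (data relocation);
 * ENABLE_TRANS() restores the saved MSR value.
 */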
171 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
172 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
173 #define VSID_HASH_MASK 0x0000007fffffffffULL
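/*
 * Editor's illustration (a sketch, not part of the original source):
 * VSID_MAKE() packs a 20-bit hash above a 4-bit segment index and
 * VSID_TO_HASH() recovers it, so for any sr in [0, 15]:
 *
 *	uint64_t vsid = VSID_MAKE(sr, hash);
 *	KASSERT(VSID_TO_HASH(vsid) == (hash & 0xfffff),
 *	    ("VSID hash round-trip failed"));
 *
 * moea64_pinit() below builds a pmap's 16 segment VSIDs exactly this way
 * from a single value returned by moea64_get_unique_vsid().
 */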
175 #define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
176 #define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex);
177 #define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)
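/*
 * Editor's note (an assumption drawn from how these wrappers are used, not a
 * verbatim excerpt): walks of a PTEG's PVO overflow list are bracketed by
 * the table mutex, e.g.
 *
 *	LOCK_TABLE();
 *	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
 *		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
 *			break;
 *	}
 *	UNLOCK_TABLE();
 *
 * while ASSERT_TABLE_LOCK() guards helpers whose callers must already hold
 * the mutex.
 */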
188 * Map of physical memory regions.
190 static struct mem_region *regions;
191 static struct mem_region *pregions;
192 static u_int phys_avail_count;
193 static int regions_sz, pregions_sz;
195 extern void bs_remap_earlyboot(void);
198 * Lock for the pteg and pvo tables.
200 struct mtx moea64_table_mutex;
201 struct mtx moea64_slb_mutex;
206 u_int moea64_pteg_count;
207 u_int moea64_pteg_mask;
212 struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */
213 struct pvo_head moea64_pvo_kunmanaged = /* list of unmanaged pages */
214 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);
216 uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
217 uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */
219 #define BPVO_POOL_SIZE 327680
220 static struct pvo_entry *moea64_bpvo_pool;
221 static int moea64_bpvo_pool_index = 0;
223 #define VSID_NBPW (sizeof(u_int32_t) * 8)
225 #define NVSIDS (NPMAPS * 16)
226 #define VSID_HASHMASK 0xffffffffUL
228 #define NVSIDS NPMAPS
229 #define VSID_HASHMASK 0xfffffUL
231 static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
233 static boolean_t moea64_initialized = FALSE;
238 u_int moea64_pte_valid = 0;
239 u_int moea64_pte_overflow = 0;
240 u_int moea64_pvo_entries = 0;
241 u_int moea64_pvo_enter_calls = 0;
242 u_int moea64_pvo_remove_calls = 0;
243 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
244 &moea64_pte_valid, 0, "");
245 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
246 &moea64_pte_overflow, 0, "");
247 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
248 &moea64_pvo_entries, 0, "");
249 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
250 &moea64_pvo_enter_calls, 0, "");
251 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
252 &moea64_pvo_remove_calls, 0, "");
254 vm_offset_t moea64_scratchpage_va[2];
255 struct pvo_entry *moea64_scratchpage_pvo[2];
256 uintptr_t moea64_scratchpage_pte[2];
257 struct mtx moea64_scratchpage_mtx;
259 uint64_t moea64_large_page_mask = 0;
260 int moea64_large_page_size = 0;
261 int moea64_large_page_shift = 0;
266 static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
267 vm_offset_t, vm_offset_t, uint64_t, int);
268 static void moea64_pvo_remove(mmu_t, struct pvo_entry *);
269 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
274 static void moea64_enter_locked(mmu_t, pmap_t, vm_offset_t,
275 vm_page_t, vm_prot_t, boolean_t);
276 static boolean_t moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
277 static u_int moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
278 static void moea64_kremove(mmu_t, vm_offset_t);
279 static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
280 vm_offset_t pa, vm_size_t sz);
283 * Kernel MMU interface
285 void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
286 void moea64_clear_modify(mmu_t, vm_page_t);
287 void moea64_clear_reference(mmu_t, vm_page_t);
288 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
289 void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
290 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
292 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
293 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
294 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
295 void moea64_init(mmu_t);
296 boolean_t moea64_is_modified(mmu_t, vm_page_t);
297 boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
298 boolean_t moea64_is_referenced(mmu_t, vm_page_t);
299 boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
300 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
301 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
302 int moea64_page_wired_mappings(mmu_t, vm_page_t);
303 void moea64_pinit(mmu_t, pmap_t);
304 void moea64_pinit0(mmu_t, pmap_t);
305 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
306 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
307 void moea64_qremove(mmu_t, vm_offset_t, int);
308 void moea64_release(mmu_t, pmap_t);
309 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
310 void moea64_remove_all(mmu_t, vm_page_t);
311 void moea64_remove_write(mmu_t, vm_page_t);
312 void moea64_zero_page(mmu_t, vm_page_t);
313 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
314 void moea64_zero_page_idle(mmu_t, vm_page_t);
315 void moea64_activate(mmu_t, struct thread *);
316 void moea64_deactivate(mmu_t, struct thread *);
317 void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
318 void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
319 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
320 vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
321 void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
322 void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
323 void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
324 boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
325 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
327 static mmu_method_t moea64_methods[] = {
328 MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
329 MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
330 MMUMETHOD(mmu_clear_reference, moea64_clear_reference),
331 MMUMETHOD(mmu_copy_page, moea64_copy_page),
332 MMUMETHOD(mmu_enter, moea64_enter),
333 MMUMETHOD(mmu_enter_object, moea64_enter_object),
334 MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
335 MMUMETHOD(mmu_extract, moea64_extract),
336 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
337 MMUMETHOD(mmu_init, moea64_init),
338 MMUMETHOD(mmu_is_modified, moea64_is_modified),
339 MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable),
340 MMUMETHOD(mmu_is_referenced, moea64_is_referenced),
341 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
342 MMUMETHOD(mmu_map, moea64_map),
343 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
344 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
345 MMUMETHOD(mmu_pinit, moea64_pinit),
346 MMUMETHOD(mmu_pinit0, moea64_pinit0),
347 MMUMETHOD(mmu_protect, moea64_protect),
348 MMUMETHOD(mmu_qenter, moea64_qenter),
349 MMUMETHOD(mmu_qremove, moea64_qremove),
350 MMUMETHOD(mmu_release, moea64_release),
351 MMUMETHOD(mmu_remove, moea64_remove),
352 MMUMETHOD(mmu_remove_all, moea64_remove_all),
353 MMUMETHOD(mmu_remove_write, moea64_remove_write),
354 MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
355 MMUMETHOD(mmu_zero_page, moea64_zero_page),
356 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
357 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
358 MMUMETHOD(mmu_activate, moea64_activate),
359 MMUMETHOD(mmu_deactivate, moea64_deactivate),
360 MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
362 /* Internal interfaces */
363 MMUMETHOD(mmu_mapdev, moea64_mapdev),
364 MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr),
365 MMUMETHOD(mmu_unmapdev, moea64_unmapdev),
366 MMUMETHOD(mmu_kextract, moea64_kextract),
367 MMUMETHOD(mmu_kenter, moea64_kenter),
368 MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr),
369 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
374 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
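/*
 * Editor's note (sketch of the dispatch path, not from the original file):
 * MMU_DEF() registers this method table with the kobj-based MMU framework,
 * so a machine-independent pmap call is routed through the installed mmu_t
 * object into the handlers above, roughly:
 *
 *	pmap_enter(...)				(MI pmap interface)
 *	    -> MMU_ENTER(mmu_obj, ...)		(generated kobj stub)
 *	        -> moea64_enter(mmup, ...)	(MMUMETHOD(mmu_enter, ...))
 */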
376 static __inline u_int
377 va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
382 shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
383 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
385 return (hash & moea64_pteg_mask);
388 static __inline struct pvo_head *
389 vm_page_to_pvoh(vm_page_t m)
392 return (&m->md.mdpg_pvoh);
396 moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
399 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
400 m->md.mdpg_attrs &= ~ptebit;
403 static __inline u_int64_t
404 moea64_attr_fetch(vm_page_t m)
407 return (m->md.mdpg_attrs);
411 moea64_attr_save(vm_page_t m, u_int64_t ptebit)
414 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
415 m->md.mdpg_attrs |= ptebit;
419 moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
420 uint64_t pte_lo, int flags)
426 * Construct a PTE. Default to IMB initially. Valid bit only gets
427 * set when the real pte is set in memory.
429 * Note: Don't set the valid bit for correct operation of tlb update.
431 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
432 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);
434 if (flags & PVO_LARGE)
435 pt->pte_hi |= LPTE_BIG;
440 static __inline uint64_t
441 moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
446 if (ma != VM_MEMATTR_DEFAULT) {
448 case VM_MEMATTR_UNCACHEABLE:
449 return (LPTE_I | LPTE_G);
450 case VM_MEMATTR_WRITE_COMBINING:
451 case VM_MEMATTR_WRITE_BACK:
452 case VM_MEMATTR_PREFETCHABLE:
454 case VM_MEMATTR_WRITE_THROUGH:
455 return (LPTE_W | LPTE_M);
460 * Assume the page is cache inhibited and access is guarded unless
461 * it's in our available memory array.
463 pte_lo = LPTE_I | LPTE_G;
464 for (i = 0; i < pregions_sz; i++) {
465 if ((pa >= pregions[i].mr_start) &&
466 (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
467 pte_lo &= ~(LPTE_I | LPTE_G);
477 * Quick sort callout for comparing memory regions.
479 static int om_cmp(const void *a, const void *b);
482 om_cmp(const void *a, const void *b)
484 const struct ofw_map *mapa;
485 const struct ofw_map *mapb;
489 if (mapa->om_pa_hi < mapb->om_pa_hi)
491 else if (mapa->om_pa_hi > mapb->om_pa_hi)
493 else if (mapa->om_pa_lo < mapb->om_pa_lo)
495 else if (mapa->om_pa_lo > mapb->om_pa_lo)
502 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
504 struct ofw_map translations[sz/sizeof(struct ofw_map)];
510 bzero(translations, sz);
511 if (OF_getprop(mmu, "translations", translations, sz) == -1)
512 panic("moea64_bootstrap: can't get ofw translations");
514 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
515 sz /= sizeof(*translations);
516 qsort(translations, sz, sizeof (*translations), om_cmp);
518 for (i = 0; i < sz; i++) {
519 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
520 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
521 translations[i].om_len);
523 if (translations[i].om_pa_lo % PAGE_SIZE)
524 panic("OFW translation not page-aligned!");
526 pa_base = translations[i].om_pa_lo;
529 pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
531 if (translations[i].om_pa_hi)
532 panic("OFW translations above 32-bit boundary!");
535 /* Now enter the pages for this mapping */
538 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
539 if (moea64_pvo_find_va(kernel_pmap,
540 translations[i].om_va + off) != NULL)
543 moea64_kenter(mmup, translations[i].om_va + off,
552 moea64_probe_large_page(void)
554 uint16_t pvr = mfpvr() >> 16;
560 powerpc_sync(); isync();
561 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
562 powerpc_sync(); isync();
566 moea64_large_page_size = 0x1000000; /* 16 MB */
567 moea64_large_page_shift = 24;
570 moea64_large_page_size = 0;
573 moea64_large_page_mask = moea64_large_page_size - 1;
577 moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
584 cache = PCPU_GET(slb);
585 esid = va >> ADDR_SR_SHFT;
586 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
588 for (i = 0; i < 64; i++) {
589 if (cache[i].slbe == (slbe | i))
594 entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
596 entry.slbv |= SLBV_L;
598 slb_insert_kernel(entry.slbe, entry.slbv);
603 moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
604 vm_offset_t kernelend)
608 vm_offset_t size, off;
612 if (moea64_large_page_size == 0)
617 PMAP_LOCK(kernel_pmap);
618 for (i = 0; i < pregions_sz; i++) {
619 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
620 pregions[i].mr_size; pa += moea64_large_page_size) {
624 * Set memory access as guarded if prefetch within
625 * the page could exit the available physmem area.
627 if (pa & moea64_large_page_mask) {
628 pa &= moea64_large_page_mask;
631 if (pa + moea64_large_page_size >
632 pregions[i].mr_start + pregions[i].mr_size)
635 moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
636 &moea64_pvo_kunmanaged, pa, pa,
637 pte_lo, PVO_WIRED | PVO_LARGE);
640 PMAP_UNLOCK(kernel_pmap);
642 size = sizeof(struct pvo_head) * moea64_pteg_count;
643 off = (vm_offset_t)(moea64_pvo_table);
644 for (pa = off; pa < off + size; pa += PAGE_SIZE)
645 moea64_kenter(mmup, pa, pa);
646 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
647 off = (vm_offset_t)(moea64_bpvo_pool);
648 for (pa = off; pa < off + size; pa += PAGE_SIZE)
649 moea64_kenter(mmup, pa, pa);
652 * Map certain important things, like ourselves.
654 * NOTE: We do not map the exception vector space. That code is
655 * used only in real mode, and leaving it unmapped allows us to
656 * catch NULL pointer dereferences, instead of making NULL a valid
660 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
662 moea64_kenter(mmup, pa, pa);
668 moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
671 vm_size_t physsz, hwphyssz;
673 #ifndef __powerpc64__
674 /* We don't have a direct map since there is no BAT */
677 /* Make sure battable is zero, since we have no BAT */
678 for (i = 0; i < 16; i++) {
679 battable[i].batu = 0;
680 battable[i].batl = 0;
683 moea64_probe_large_page();
685 /* Use a direct map if we have large page support */
686 if (moea64_large_page_size > 0)
692 /* Get physical memory regions from firmware */
693 mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
694 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
696 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
697 panic("moea64_bootstrap: phys_avail too small");
699 phys_avail_count = 0;
702 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
703 for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
704 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
705 regions[i].mr_start + regions[i].mr_size,
708 (physsz + regions[i].mr_size) >= hwphyssz) {
709 if (physsz < hwphyssz) {
710 phys_avail[j] = regions[i].mr_start;
711 phys_avail[j + 1] = regions[i].mr_start +
718 phys_avail[j] = regions[i].mr_start;
719 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
721 physsz += regions[i].mr_size;
724 /* Check for overlap with the kernel and exception vectors */
725 for (j = 0; j < 2*phys_avail_count; j+=2) {
726 if (phys_avail[j] < EXC_LAST)
727 phys_avail[j] += EXC_LAST;
729 if (kernelstart >= phys_avail[j] &&
730 kernelstart < phys_avail[j+1]) {
731 if (kernelend < phys_avail[j+1]) {
732 phys_avail[2*phys_avail_count] =
733 (kernelend & ~PAGE_MASK) + PAGE_SIZE;
734 phys_avail[2*phys_avail_count + 1] =
739 phys_avail[j+1] = kernelstart & ~PAGE_MASK;
742 if (kernelend >= phys_avail[j] &&
743 kernelend < phys_avail[j+1]) {
744 if (kernelstart > phys_avail[j]) {
745 phys_avail[2*phys_avail_count] = phys_avail[j];
746 phys_avail[2*phys_avail_count + 1] =
747 kernelstart & ~PAGE_MASK;
751 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
755 physmem = btoc(physsz);
758 moea64_pteg_count = PTEGCOUNT;
760 moea64_pteg_count = 0x1000;
762 while (moea64_pteg_count < physmem)
763 moea64_pteg_count <<= 1;
765 moea64_pteg_count >>= 1;
766 #endif /* PTEGCOUNT */
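/*
 * Worked example (editor's illustration): with 2 GB of RAM, physmem is
 * 0x80000 4 KB pages.  Starting from 0x1000, the sizing loop above doubles
 * the count until it reaches 0x80000 (no longer less than physmem), and the
 * final shift halves it, leaving moea64_pteg_count = 0x40000, i.e. one PTE
 * group for every two physical pages.
 */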
770 moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
779 moea64_pteg_mask = moea64_pteg_count - 1;
782 * Allocate pv/overflow lists.
784 size = sizeof(struct pvo_head) * moea64_pteg_count;
786 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
788 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);
791 for (i = 0; i < moea64_pteg_count; i++)
792 LIST_INIT(&moea64_pvo_table[i]);
796 * Initialize the lock that synchronizes access to the pteg and pvo
799 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
801 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
804 * Initialise the unmanaged pvo pool.
806 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
807 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
808 moea64_bpvo_pool_index = 0;
811 * Make sure kernel vsid is allocated as well as VSID 0.
813 #ifndef __powerpc64__
814 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
815 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
816 moea64_vsid_bitmap[0] |= 1;
820 * Initialize the kernel pmap (which is statically allocated).
823 for (i = 0; i < 64; i++) {
824 pcpup->pc_slb[i].slbv = 0;
825 pcpup->pc_slb[i].slbe = 0;
828 for (i = 0; i < 16; i++)
829 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
832 kernel_pmap->pmap_phys = kernel_pmap;
833 CPU_FILL(&kernel_pmap->pm_active);
835 PMAP_LOCK_INIT(kernel_pmap);
838 * Now map in all the other buffers we allocated earlier
841 moea64_setup_direct_map(mmup, kernelstart, kernelend);
845 moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
856 * Set up the Open Firmware pmap and add its mappings if not in real
860 chosen = OF_finddevice("/chosen");
861 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
862 mmu = OF_instance_to_package(mmui);
863 if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1)
865 if (sz > 6144 /* tmpstksz - 2 KB headroom */)
866 panic("moea64_bootstrap: too many ofw translations");
869 moea64_add_ofw_mappings(mmup, mmu, sz);
873 * Calculate the last available physical address.
875 for (i = 0; phys_avail[i + 2] != 0; i += 2)
877 Maxmem = powerpc_btop(phys_avail[i + 1]);
880 * Initialize MMU and remap early physical mappings
882 MMU_CPU_BOOTSTRAP(mmup,0);
883 mtmsr(mfmsr() | PSL_DR | PSL_IR);
885 bs_remap_earlyboot();
888 * Set the start and end of kva.
890 virtual_avail = VM_MIN_KERNEL_ADDRESS;
891 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
894 * Map the entire KVA range into the SLB. We must not fault there.
897 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
898 moea64_bootstrap_slb_prefault(va, 0);
902 * Figure out how far we can extend virtual_end into segment 16
903 * without running into existing mappings. Segment 16 is guaranteed
904 * to contain neither RAM nor devices (at least on Apple hardware),
905 * but will generally contain some OFW mappings we should not
909 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */
910 PMAP_LOCK(kernel_pmap);
911 while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
912 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
913 virtual_end += PAGE_SIZE;
914 PMAP_UNLOCK(kernel_pmap);
918 * Allocate a kernel stack with a guard page for thread0 and map it
919 * into the kernel page map.
921 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
922 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
923 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
924 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
925 thread0.td_kstack = va;
926 thread0.td_kstack_pages = KSTACK_PAGES;
927 for (i = 0; i < KSTACK_PAGES; i++) {
928 moea64_kenter(mmup, va, pa);
934 * Allocate virtual address space for the message buffer.
936 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
937 msgbufp = (struct msgbuf *)virtual_avail;
939 virtual_avail += round_page(msgbufsize);
940 while (va < virtual_avail) {
941 moea64_kenter(mmup, va, pa);
947 * Allocate virtual address space for the dynamic percpu area.
949 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
950 dpcpu = (void *)virtual_avail;
952 virtual_avail += DPCPU_SIZE;
953 while (va < virtual_avail) {
954 moea64_kenter(mmup, va, pa);
958 dpcpu_init(dpcpu, 0);
961 * Allocate some things for page zeroing. We put this directly
962 * in the page table, marked with LPTE_LOCKED, to avoid any
963 * of the PVO book-keeping or other parts of the VM system
964 * from even knowing that this hack exists.
967 if (!hw_direct_map) {
968 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
970 for (i = 0; i < 2; i++) {
971 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
972 virtual_end -= PAGE_SIZE;
974 moea64_kenter(mmup, moea64_scratchpage_va[i], 0);
976 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
977 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
979 moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
980 mmup, moea64_scratchpage_pvo[i]);
981 moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
983 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
984 &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
985 moea64_scratchpage_pvo[i]->pvo_vpn);
992 * Activate a user pmap. The pmap must be activated before its address
993 * space can be accessed in any way.
996 moea64_activate(mmu_t mmu, struct thread *td)
1000 pm = &td->td_proc->p_vmspace->vm_pmap;
1001 CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
1003 #ifdef __powerpc64__
1004 PCPU_SET(userslb, pm->pm_slb);
1006 PCPU_SET(curpmap, pm->pmap_phys);
1011 moea64_deactivate(mmu_t mmu, struct thread *td)
1015 pm = &td->td_proc->p_vmspace->vm_pmap;
1016 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
1017 #ifdef __powerpc64__
1018 PCPU_SET(userslb, NULL);
1020 PCPU_SET(curpmap, NULL);
1025 moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
1027 struct pvo_entry *pvo;
1033 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
1037 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1040 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1041 pm->pm_stats.wired_count++;
1042 pvo->pvo_vaddr |= PVO_WIRED;
1043 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
1045 if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1046 pm->pm_stats.wired_count--;
1047 pvo->pvo_vaddr &= ~PVO_WIRED;
1048 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
1052 /* Update wiring flag in page table. */
1053 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1057 * If we are wiring the page, and it wasn't in the
1058 * page table before, add it.
1060 vsid = PVO_VSID(pvo);
1061 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
1062 pvo->pvo_vaddr & PVO_LARGE);
1064 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
1067 PVO_PTEGIDX_CLR(pvo);
1068 PVO_PTEGIDX_SET(pvo, i);
1078 * This goes through and sets the physical address of our
1079 * special scratch PTE to the PA we want to zero or copy. Because
1080 * of locking issues (this can get called in pvo_enter() by
1081 * the UMA allocator), we can't use most other utility functions here
1085 void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {
1087 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1088 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1090 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
1091 ~(LPTE_WIMG | LPTE_RPGN);
1092 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
1093 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1094 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
1095 &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
1096 moea64_scratchpage_pvo[which]->pvo_vpn);
1101 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1106 dst = VM_PAGE_TO_PHYS(mdst);
1107 src = VM_PAGE_TO_PHYS(msrc);
1109 if (hw_direct_map) {
1110 kcopy((void *)src, (void *)dst, PAGE_SIZE);
1112 mtx_lock(&moea64_scratchpage_mtx);
1114 moea64_set_scratchpage_pa(mmu, 0, src);
1115 moea64_set_scratchpage_pa(mmu, 1, dst);
1117 kcopy((void *)moea64_scratchpage_va[0],
1118 (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1120 mtx_unlock(&moea64_scratchpage_mtx);
1125 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1127 vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1129 if (size + off > PAGE_SIZE)
1130 panic("moea64_zero_page: size + off > PAGE_SIZE");
1132 if (hw_direct_map) {
1133 bzero((caddr_t)pa + off, size);
1135 mtx_lock(&moea64_scratchpage_mtx);
1136 moea64_set_scratchpage_pa(mmu, 0, pa);
1137 bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1138 mtx_unlock(&moea64_scratchpage_mtx);
1143 * Zero a page of physical memory by temporarily mapping it
1146 moea64_zero_page(mmu_t mmu, vm_page_t m)
1148 vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1149 vm_offset_t va, off;
1151 if (!hw_direct_map) {
1152 mtx_lock(&moea64_scratchpage_mtx);
1154 moea64_set_scratchpage_pa(mmu, 0, pa);
1155 va = moea64_scratchpage_va[0];
1160 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1161 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
1164 mtx_unlock(&moea64_scratchpage_mtx);
1168 moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
1171 moea64_zero_page(mmu, m);
1175 * Map the given physical page at the specified virtual address in the
1176 * target pmap with the protection requested. If specified the page
1177 * will be wired down.
1180 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1181 vm_prot_t prot, boolean_t wired)
1184 vm_page_lock_queues();
1186 moea64_enter_locked(mmu, pmap, va, m, prot, wired);
1187 vm_page_unlock_queues();
1192 * Map the given physical page at the specified virtual address in the
1193 * target pmap with the protection requested. If specified the page
1194 * will be wired down.
1196 * The page queues and pmap must be locked.
1200 moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1201 vm_prot_t prot, boolean_t wired)
1203 struct pvo_head *pvo_head;
1210 if (!moea64_initialized) {
1211 pvo_head = &moea64_pvo_kunmanaged;
1213 zone = moea64_upvo_zone;
1216 pvo_head = vm_page_to_pvoh(m);
1218 zone = moea64_mpvo_zone;
1219 pvo_flags = PVO_MANAGED;
1222 if (pmap_bootstrapped)
1223 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1224 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1225 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
1226 VM_OBJECT_LOCKED(m->object),
1227 ("moea64_enter_locked: page %p is not busy", m));
1229 /* XXX change the pvo head for fake pages */
1230 if ((m->oflags & VPO_UNMANAGED) != 0) {
1231 pvo_flags &= ~PVO_MANAGED;
1232 pvo_head = &moea64_pvo_kunmanaged;
1233 zone = moea64_upvo_zone;
1236 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1238 if (prot & VM_PROT_WRITE) {
1240 if (pmap_bootstrapped &&
1241 (m->oflags & VPO_UNMANAGED) == 0)
1242 vm_page_aflag_set(m, PGA_WRITEABLE);
1246 if ((prot & VM_PROT_EXECUTE) == 0)
1247 pte_lo |= LPTE_NOEXEC;
1250 pvo_flags |= PVO_WIRED;
1252 error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
1253 VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
1256 * Flush the page from the instruction cache if this page is
1257 * mapped executable and cacheable.
1259 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0)
1260 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1264 moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
1269 * This is much trickier than on older systems because
1270 * we can't sync the icache on physical addresses directly
1271 * without a direct map. Instead we check a couple of cases
1272 * where the memory is already mapped in and, failing that,
1273 * use the same trick we use for page zeroing to create
1274 * a temporary mapping for this physical address.
1277 if (!pmap_bootstrapped) {
1279 * If PMAP is not bootstrapped, we are likely to be
1282 __syncicache((void *)pa, sz);
1283 } else if (pmap == kernel_pmap) {
1284 __syncicache((void *)va, sz);
1285 } else if (hw_direct_map) {
1286 __syncicache((void *)pa, sz);
1288 /* Use the scratch page to set up a temp mapping */
1290 mtx_lock(&moea64_scratchpage_mtx);
1292 moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1293 __syncicache((void *)(moea64_scratchpage_va[1] +
1294 (va & ADDR_POFF)), sz);
1296 mtx_unlock(&moea64_scratchpage_mtx);
1301 * Maps a sequence of resident pages belonging to the same object.
1302 * The sequence begins with the given page m_start. This page is
1303 * mapped at the given virtual address start. Each subsequent page is
1304 * mapped at a virtual address that is offset from start by the same
1305 * amount as the page is offset from m_start within the object. The
1306 * last page in the sequence is the page with the largest offset from
1307 * m_start that can be mapped at a virtual address less than the given
1308 * virtual address end. Not every virtual page between start and end
1309 * is mapped; only those for which a resident page exists with the
1310 * corresponding offset from m_start are mapped.
1313 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1314 vm_page_t m_start, vm_prot_t prot)
1317 vm_pindex_t diff, psize;
1319 psize = atop(end - start);
1321 vm_page_lock_queues();
1323 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1324 moea64_enter_locked(mmu, pm, start + ptoa(diff), m, prot &
1325 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1326 m = TAILQ_NEXT(m, listq);
1328 vm_page_unlock_queues();
1333 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1337 vm_page_lock_queues();
1339 moea64_enter_locked(mmu, pm, va, m,
1340 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1341 vm_page_unlock_queues();
1346 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1348 struct pvo_entry *pvo;
1352 pvo = moea64_pvo_find_va(pm, va);
1356 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
1357 (va - PVO_VADDR(pvo));
1363 * Atomically extract and hold the physical page with the given
1364 * pmap and virtual address pair if that mapping permits the given
1368 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1370 struct pvo_entry *pvo;
1378 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1379 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
1380 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
1381 (prot & VM_PROT_WRITE) == 0)) {
1382 if (vm_page_pa_tryrelock(pmap,
1383 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
1385 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1393 static mmu_t installed_mmu;
1396 moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1399 * This entire routine is a horrible hack to avoid bothering kmem
1400 * for new KVA addresses. Because this can get called from inside
1401 * kmem allocation routines, calling kmem for a new address here
1402 * can lead to multiply locking non-recursive mutexes.
1404 static vm_pindex_t color;
1408 int pflags, needed_lock;
1410 *flags = UMA_SLAB_PRIV;
1411 needed_lock = !PMAP_LOCKED(kernel_pmap);
1414 PMAP_LOCK(kernel_pmap);
1416 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
1417 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
1419 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
1421 pflags |= VM_ALLOC_ZERO;
1424 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
1426 if (wait & M_NOWAIT)
1433 va = VM_PAGE_TO_PHYS(m);
1435 moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
1436 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
1437 PVO_WIRED | PVO_BOOTSTRAP);
1440 PMAP_UNLOCK(kernel_pmap);
1442 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1443 bzero((void *)va, PAGE_SIZE);
1449 moea64_init(mmu_t mmu)
1452 CTR0(KTR_PMAP, "moea64_init");
1454 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1455 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1456 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1457 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1458 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1459 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1461 if (!hw_direct_map) {
1462 installed_mmu = mmu;
1463 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
1464 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
1467 moea64_initialized = TRUE;
1471 moea64_is_referenced(mmu_t mmu, vm_page_t m)
1474 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1475 ("moea64_is_referenced: page %p is not managed", m));
1476 return (moea64_query_bit(mmu, m, LPTE_REF));
1480 moea64_is_modified(mmu_t mmu, vm_page_t m)
1483 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1484 ("moea64_is_modified: page %p is not managed", m));
1487 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
1488 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
1489 * is clear, no PTEs can have LPTE_CHG set.
1491 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1492 if ((m->oflags & VPO_BUSY) == 0 &&
1493 (m->aflags & PGA_WRITEABLE) == 0)
1495 return (moea64_query_bit(mmu, m, LPTE_CHG));
1499 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1501 struct pvo_entry *pvo;
1505 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1506 rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1512 moea64_clear_reference(mmu_t mmu, vm_page_t m)
1515 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1516 ("moea64_clear_reference: page %p is not managed", m));
1517 moea64_clear_bit(mmu, m, LPTE_REF);
1521 moea64_clear_modify(mmu_t mmu, vm_page_t m)
1524 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1525 ("moea64_clear_modify: page %p is not managed", m));
1526 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1527 KASSERT((m->oflags & VPO_BUSY) == 0,
1528 ("moea64_clear_modify: page %p is busy", m));
1531 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1532 * set. If the object containing the page is locked and the page is
1533 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
1535 if ((m->aflags & PGA_WRITEABLE) == 0)
1537 moea64_clear_bit(mmu, m, LPTE_CHG);
1541 * Clear the write and modified bits in each of the given page's mappings.
1544 moea64_remove_write(mmu_t mmu, vm_page_t m)
1546 struct pvo_entry *pvo;
1551 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1552 ("moea64_remove_write: page %p is not managed", m));
1555 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
1556 * another thread while the object is locked. Thus, if PGA_WRITEABLE
1557 * is clear, no page table entries need updating.
1559 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1560 if ((m->oflags & VPO_BUSY) == 0 &&
1561 (m->aflags & PGA_WRITEABLE) == 0)
1563 vm_page_lock_queues();
1564 lo = moea64_attr_fetch(m);
1566 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1567 pmap = pvo->pvo_pmap;
1570 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1571 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1572 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1573 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1575 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
1576 lo |= pvo->pvo_pte.lpte.pte_lo;
1577 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1578 MOEA64_PTE_CHANGE(mmu, pt,
1579 &pvo->pvo_pte.lpte, pvo->pvo_vpn);
1580 if (pvo->pvo_pmap == kernel_pmap)
1587 if ((lo & LPTE_CHG) != 0) {
1588 moea64_attr_clear(m, LPTE_CHG);
1591 vm_page_aflag_clear(m, PGA_WRITEABLE);
1592 vm_page_unlock_queues();
1596 * moea64_ts_referenced:
1598 * Return a count of reference bits for a page, clearing those bits.
1599 * It is not necessary for every reference bit to be cleared, but it
1600 * is necessary that 0 only be returned when there are truly no
1601 * reference bits set.
1603 * XXX: The exact number of bits to check and clear is a matter that
1604 * should be tested and standardized at some point in the future for
1605 * optimal aging of shared pages.
1608 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1611 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1612 ("moea64_ts_referenced: page %p is not managed", m));
1613 return (moea64_clear_bit(mmu, m, LPTE_REF));
1617 * Modify the WIMG settings of all mappings for a page.
1620 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1622 struct pvo_entry *pvo;
1623 struct pvo_head *pvo_head;
1628 if ((m->oflags & VPO_UNMANAGED) != 0) {
1629 m->md.mdpg_cache_attrs = ma;
1633 vm_page_lock_queues();
1634 pvo_head = vm_page_to_pvoh(m);
1635 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1636 LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
1637 pmap = pvo->pvo_pmap;
1640 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1641 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
1642 pvo->pvo_pte.lpte.pte_lo |= lo;
1644 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1646 if (pvo->pvo_pmap == kernel_pmap)
1652 m->md.mdpg_cache_attrs = ma;
1653 vm_page_unlock_queues();
1657 * Map a wired page into kernel virtual address space.
1660 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1665 pte_lo = moea64_calc_wimg(pa, ma);
1667 PMAP_LOCK(kernel_pmap);
1668 error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
1669 &moea64_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
1671 if (error != 0 && error != ENOENT)
1672 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1676 * Flush the memory from the instruction cache.
1678 if ((pte_lo & (LPTE_I | LPTE_G)) == 0)
1679 __syncicache((void *)va, PAGE_SIZE);
1680 PMAP_UNLOCK(kernel_pmap);
1684 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1687 moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1691 * Extract the physical page address associated with the given kernel virtual
1695 moea64_kextract(mmu_t mmu, vm_offset_t va)
1697 struct pvo_entry *pvo;
1701 * Shortcut the direct-mapped case when applicable. We never put
1702 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1704 if (va < VM_MIN_KERNEL_ADDRESS)
1707 PMAP_LOCK(kernel_pmap);
1708 pvo = moea64_pvo_find_va(kernel_pmap, va);
1709 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1711 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1712 PMAP_UNLOCK(kernel_pmap);
1717 * Remove a wired page from kernel virtual address space.
1720 moea64_kremove(mmu_t mmu, vm_offset_t va)
1722 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1726 * Map a range of physical addresses into kernel virtual address space.
1728 * The value passed in *virt is a suggested virtual address for the mapping.
1729 * Architectures which can support a direct-mapped physical to virtual region
1730 * can return the appropriate address within that region, leaving '*virt'
1731 * unchanged. We cannot and therefore do not; *virt is updated with the
1732 * first usable address after the mapped region.
1735 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1736 vm_offset_t pa_end, int prot)
1738 vm_offset_t sva, va;
1742 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1743 moea64_kenter(mmu, va, pa_start);
1750 * Returns true if the pmap's pv is one of the first
1751 * 16 pvs linked to from this page. This count may
1752 * be changed upwards or downwards in the future; it
1753 * is only necessary that true be returned for a small
1754 * subset of pmaps for proper page aging.
1757 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1760 struct pvo_entry *pvo;
1763 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1764 ("moea64_page_exists_quick: page %p is not managed", m));
1767 vm_page_lock_queues();
1768 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1769 if (pvo->pvo_pmap == pmap) {
1776 vm_page_unlock_queues();
1781 * Return the number of managed mappings to the given physical page
1785 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1787 struct pvo_entry *pvo;
1791 if ((m->oflags & VPO_UNMANAGED) != 0)
1793 vm_page_lock_queues();
1794 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1795 if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1797 vm_page_unlock_queues();
1801 static uintptr_t moea64_vsidcontext;
1804 moea64_get_unique_vsid(void) {
1811 __asm __volatile("mftb %0" : "=r"(entropy));
1813 mtx_lock(&moea64_slb_mutex);
1814 for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1818 * Create a new value by multiplying by a prime and adding in
1819 * entropy from the timebase register. This is to make the
1820 * VSID more random so that the PT hash function collides
1821 * less often. (Note that the prime causes gcc to do shifts
1822 * instead of a multiply.)
1824 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1825 hash = moea64_vsidcontext & (NVSIDS - 1);
1826 if (hash == 0) /* 0 is special, avoid it */
1829 mask = 1 << (hash & (VSID_NBPW - 1));
1830 hash = (moea64_vsidcontext & VSID_HASHMASK);
1831 if (moea64_vsid_bitmap[n] & mask) { /* collision? */
1832 /* anything free in this bucket? */
1833 if (moea64_vsid_bitmap[n] == 0xffffffff) {
1834 entropy = (moea64_vsidcontext >> 20);
1837 i = ffs(~moea64_vsid_bitmap[n]) - 1;
1839 hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
1842 KASSERT(!(moea64_vsid_bitmap[n] & mask),
1843 ("Allocating in-use VSID %#zx\n", hash));
1844 moea64_vsid_bitmap[n] |= mask;
1845 mtx_unlock(&moea64_slb_mutex);
1849 mtx_unlock(&moea64_slb_mutex);
1850 panic("%s: out of segments",__func__);
1853 #ifdef __powerpc64__
1855 moea64_pinit(mmu_t mmu, pmap_t pmap)
1857 PMAP_LOCK_INIT(pmap);
1859 pmap->pm_slb_tree_root = slb_alloc_tree();
1860 pmap->pm_slb = slb_alloc_user_cache();
1861 pmap->pm_slb_len = 0;
1865 moea64_pinit(mmu_t mmu, pmap_t pmap)
1870 PMAP_LOCK_INIT(pmap);
1872 if (pmap_bootstrapped)
1873 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
1876 pmap->pmap_phys = pmap;
1879 * Allocate some segment registers for this pmap.
1881 hash = moea64_get_unique_vsid();
1883 for (i = 0; i < 16; i++)
1884 pmap->pm_sr[i] = VSID_MAKE(i, hash);
1886 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
1891 * Initialize the pmap associated with process 0.
1894 moea64_pinit0(mmu_t mmu, pmap_t pm)
1896 moea64_pinit(mmu, pm);
1897 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1901 * Set the physical protection on the specified range of this map as requested.
1904 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1907 struct pvo_entry *pvo;
1910 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1914 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1915 ("moea64_protect: non current pmap"));
1917 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1918 moea64_remove(mmu, pm, sva, eva);
1922 vm_page_lock_queues();
1924 for (; sva < eva; sva += PAGE_SIZE) {
1925 pvo = moea64_pvo_find_va(pm, sva);
1930 * Grab the PTE pointer before we diddle with the cached PTE
1934 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1937 * Change the protection of the page.
1939 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1940 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1941 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1942 if ((prot & VM_PROT_EXECUTE) == 0)
1943 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1946 * If the PVO is in the page table, update that pte as well.
1949 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1951 if ((pvo->pvo_pte.lpte.pte_lo &
1952 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1953 moea64_syncicache(mmu, pm, sva,
1954 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
1960 vm_page_unlock_queues();
1965 * Map a list of wired pages into kernel virtual address space. This is
1966 * intended for temporary mappings which do not need page modification or
1967 * references recorded. Existing mappings in the region are overwritten.
1970 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
1972 while (count-- > 0) {
1973 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1980 * Remove page mappings from kernel virtual address space. Intended for
1981 * temporary mappings entered by moea64_qenter.
1984 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
1986 while (count-- > 0) {
1987 moea64_kremove(mmu, va);
1993 moea64_release_vsid(uint64_t vsid)
1997 mtx_lock(&moea64_slb_mutex);
1998 idx = vsid & (NVSIDS-1);
1999 mask = 1 << (idx % VSID_NBPW);
2001 KASSERT(moea64_vsid_bitmap[idx] & mask,
2002 ("Freeing unallocated VSID %#jx", vsid));
2003 moea64_vsid_bitmap[idx] &= ~mask;
2004 mtx_unlock(&moea64_slb_mutex);
2009 moea64_release(mmu_t mmu, pmap_t pmap)
2013 * Free segment registers' VSIDs
2015 #ifdef __powerpc64__
2016 slb_free_tree(pmap);
2017 slb_free_user_cache(pmap->pm_slb);
2019 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2021 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2024 PMAP_LOCK_DESTROY(pmap);
2028 * Remove the given range of addresses from the specified map.
2031 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2033 struct pvo_entry *pvo;
2035 vm_page_lock_queues();
2037 for (; sva < eva; sva += PAGE_SIZE) {
2038 pvo = moea64_pvo_find_va(pm, sva);
2040 moea64_pvo_remove(mmu, pvo);
2042 vm_page_unlock_queues();
2047 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2048 * will reflect changes in pte's back to the vm_page.
2051 moea64_remove_all(mmu_t mmu, vm_page_t m)
2053 struct pvo_head *pvo_head;
2054 struct pvo_entry *pvo, *next_pvo;
2057 vm_page_lock_queues();
2058 pvo_head = vm_page_to_pvoh(m);
2059 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
2060 next_pvo = LIST_NEXT(pvo, pvo_vlink);
2062 pmap = pvo->pvo_pmap;
2064 moea64_pvo_remove(mmu, pvo);
2067 if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m)) {
2068 moea64_attr_clear(m, LPTE_CHG);
2071 vm_page_aflag_clear(m, PGA_WRITEABLE);
2072 vm_page_unlock_queues();
2076 * Allocate a physical page of memory directly from the phys_avail map.
2077 * Can only be called from moea64_bootstrap before avail start and end are
2081 moea64_bootstrap_alloc(vm_size_t size, u_int align)
2086 size = round_page(size);
2087 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2089 s = (phys_avail[i] + align - 1) & ~(align - 1);
2094 if (s < phys_avail[i] || e > phys_avail[i + 1])
2097 if (s + size > platform_real_maxaddr())
2100 if (s == phys_avail[i]) {
2101 phys_avail[i] += size;
2102 } else if (e == phys_avail[i + 1]) {
2103 phys_avail[i + 1] -= size;
2105 for (j = phys_avail_count * 2; j > i; j -= 2) {
2106 phys_avail[j] = phys_avail[j - 2];
2107 phys_avail[j + 1] = phys_avail[j - 1];
2110 phys_avail[i + 3] = phys_avail[i + 1];
2111 phys_avail[i + 1] = s;
2112 phys_avail[i + 2] = e;
2118 panic("moea64_bootstrap_alloc: could not allocate memory");
2122 moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
2123 struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
2124 uint64_t pte_lo, int flags)
2126 struct pvo_entry *pvo;
2134 * One nasty thing that can happen here is that the UMA calls to
2135 * allocate new PVOs need to map more memory, which calls pvo_enter(),
2136 * which calls UMA...
2138 * We break the loop by detecting recursion and allocating out of
2139 * the bootstrap pool.
2143 bootstrap = (flags & PVO_BOOTSTRAP);
2145 if (!moea64_initialized)
2149 * Compute the PTE Group index.
2152 vsid = va_to_vsid(pm, va);
2153 ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);
2156 * Remove any existing mapping for this page. Reuse the pvo entry if
2157 * there is a mapping.
2161 moea64_pvo_enter_calls++;
2163 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2164 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2165 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2166 (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
2167 == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
2168 if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
2169 /* Re-insert if spilled */
2170 i = MOEA64_PTE_INSERT(mmu, ptegidx,
2171 &pvo->pvo_pte.lpte);
2173 PVO_PTEGIDX_SET(pvo, i);
2174 moea64_pte_overflow--;
2179 moea64_pvo_remove(mmu, pvo);
2185 * If we aren't overwriting a mapping, try to allocate.
2188 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
2189 panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
2190 moea64_bpvo_pool_index, BPVO_POOL_SIZE,
2191 BPVO_POOL_SIZE * sizeof(struct pvo_entry));
2193 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2194 moea64_bpvo_pool_index++;
2198 * Note: drop the table lock around the UMA allocation in
2199 * case the UMA allocator needs to manipulate the page
2200 * table. The mapping we are working with is already
2201 * protected by the PMAP lock.
2204 pvo = uma_zalloc(zone, M_NOWAIT);
2213 moea64_pvo_entries++;
2214 pvo->pvo_vaddr = va;
2215 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
2218 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2219 pvo->pvo_vaddr &= ~ADDR_POFF;
2221 if (flags & PVO_WIRED)
2222 pvo->pvo_vaddr |= PVO_WIRED;
2223 if (pvo_head != &moea64_pvo_kunmanaged)
2224 pvo->pvo_vaddr |= PVO_MANAGED;
2226 pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2227 if (flags & PVO_LARGE)
2228 pvo->pvo_vaddr |= PVO_LARGE;
2230 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2231 (uint64_t)(pa) | pte_lo, flags);
2234 * Remember if the list was empty and therefore will be the first
2237 if (LIST_FIRST(pvo_head) == NULL)
2239 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2241 if (pvo->pvo_vaddr & PVO_WIRED) {
2242 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2243 pm->pm_stats.wired_count++;
2245 pm->pm_stats.resident_count++;
2248 * We hope this succeeds but it isn't required.
2250 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
2252 PVO_PTEGIDX_SET(pvo, i);
2254 panic("moea64_pvo_enter: overflow");
2255 moea64_pte_overflow++;
2258 if (pm == kernel_pmap)
2263 #ifdef __powerpc64__
2265 * Make sure all our bootstrap mappings are in the SLB as soon
2266 * as virtual memory is switched on.
2268 if (!pmap_bootstrapped)
2269 moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
2272 return (first ? ENOENT : 0);
2276 moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
2281 * If there is an active pte entry, we need to deactivate it (and
2282 * save the ref & chg bits).
2285 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2287 MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2288 PVO_PTEGIDX_CLR(pvo);
2290 moea64_pte_overflow--;
2294 * Update our statistics.
2296 pvo->pvo_pmap->pm_stats.resident_count--;
2297 if (pvo->pvo_vaddr & PVO_WIRED)
2298 pvo->pvo_pmap->pm_stats.wired_count--;
2301 * Save the REF/CHG bits into their cache if the page is managed.
2303 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
2306 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
2308 moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
2309 (LPTE_REF | LPTE_CHG));
2314 * Remove this PVO from the PV list.
2316 LIST_REMOVE(pvo, pvo_vlink);
2319 * Remove this from the overflow list and return it to the pool
2320 * if we aren't going to reuse it.
2322 LIST_REMOVE(pvo, pvo_olink);
2324 moea64_pvo_entries--;
2325 moea64_pvo_remove_calls++;
2329 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2330 uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
2331 moea64_upvo_zone, pvo);
2334 static struct pvo_entry *
2335 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2337 struct pvo_entry *pvo;
2340 #ifdef __powerpc64__
2343 if (pm == kernel_pmap) {
2344 slbv = kernel_va_to_slbv(va);
2347 slb = user_va_to_slb_entry(pm, va);
2348 /* The page is not mapped if the segment isn't */
2354 vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
2356 va &= ~moea64_large_page_mask;
2359 ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L);
2362 vsid = va_to_vsid(pm, va);
2363 ptegidx = va_to_pteg(vsid, va, 0);
2367 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2368 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
2377 moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2379 struct pvo_entry *pvo;
2382 if (moea64_attr_fetch(m) & ptebit)
2385 vm_page_lock_queues();
2387 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2390 * See if we saved the bit off. If so, cache it and return
2393 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2394 moea64_attr_save(m, ptebit);
2395 vm_page_unlock_queues();
2401 * No luck, now go through the hard part of looking at the PTEs
2402 * themselves. Sync so that any pending REF/CHG bits are flushed to
2406 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2409 * See if this pvo has a valid PTE. if so, fetch the
2410 * REF/CHG bits from the valid PTE. If the appropriate
2411 * ptebit is set, cache it and return success.
2414 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2416 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2417 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2420 moea64_attr_save(m, ptebit);
2421 vm_page_unlock_queues();
2428 vm_page_unlock_queues();
2433 moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2436 struct pvo_entry *pvo;
2439 vm_page_lock_queues();
2442 * Clear the cached value.
2444 moea64_attr_clear(m, ptebit);
2447 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2448 * we can reset the right ones). Note that since the pvo entries and
2449 * list heads are accessed via BAT0 and are never placed in the page
2450 * table, we don't have to worry about further accesses setting the
2456 * For each pvo entry, clear the pvo's ptebit. If this pvo has a
2457 * valid pte clear the ptebit from the valid pte.
2460 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2463 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2465 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2466 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2468 MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
2469 pvo->pvo_vpn, ptebit);
2472 pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
2476 vm_page_unlock_queues();
2481 moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2483 struct pvo_entry *pvo;
2487 PMAP_LOCK(kernel_pmap);
2488 for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
2489 pvo = moea64_pvo_find_va(kernel_pmap, ppa);
2491 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
2496 PMAP_UNLOCK(kernel_pmap);
2502 * Map a set of physical memory pages into the kernel virtual
2503 * address space. Return a pointer to where it is mapped. This
2504 * routine is intended to be used for mapping device memory,
2508 moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
2510 vm_offset_t va, tmpva, ppa, offset;
2512 ppa = trunc_page(pa);
2513 offset = pa & PAGE_MASK;
2514 size = roundup(offset + size, PAGE_SIZE);
2516 va = kmem_alloc_nofault(kernel_map, size);
2519 panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2521 for (tmpva = va; size > 0;) {
2522 moea64_kenter_attr(mmu, tmpva, ppa, ma);
2528 return ((void *)(va + offset));
2532 moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2535 return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
2539 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2541 vm_offset_t base, offset;
2543 base = trunc_page(va);
2544 offset = va & PAGE_MASK;
2545 size = roundup(offset + size, PAGE_SIZE);
2547 kmem_free(kernel_map, base, size);
2551 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2553 struct pvo_entry *pvo;
2560 lim = round_page(va);
2561 len = MIN(lim - va, sz);
2562 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
2563 if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
2564 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
2566 moea64_syncicache(mmu, pm, va, pa, len);