2 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
30 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
31 * Copyright (C) 1995, 1996 TooLs GmbH.
32 * All rights reserved.
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. All advertising materials mentioning features or use of this software
43 * must display the following acknowledgement:
44 * This product includes software developed by TooLs GmbH.
45 * 4. The name of TooLs GmbH may not be used to endorse or promote products
46 * derived from this software without specific prior written permission.
48 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
49 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
50 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
51 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
53 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
54 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
55 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
56 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
57 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
62 * Copyright (C) 2001 Benno Rice.
63 * All rights reserved.
65 * Redistribution and use in source and binary forms, with or without
66 * modification, are permitted provided that the following conditions
68 * 1. Redistributions of source code must retain the above copyright
69 * notice, this list of conditions and the following disclaimer.
70 * 2. Redistributions in binary form must reproduce the above copyright
71 * notice, this list of conditions and the following disclaimer in the
72 * documentation and/or other materials provided with the distribution.
74 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
75 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
76 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
77 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
78 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
79 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
80 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
81 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
82 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
83 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
86 #include <sys/cdefs.h>
87 __FBSDID("$FreeBSD$");
90 * Manages physical address maps.
92 * Since the information managed by this module is also stored by the
93 * logical address mapping module, this module may throw away valid virtual
94 * to physical mappings at almost any time. However, invalidations of
95 * mappings must be done as requested.
97 * In order to cope with hardware architectures which make virtual to
98 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary. This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
105 #include "opt_compat.h"
106 #include "opt_kstack_pages.h"
108 #include <sys/param.h>
109 #include <sys/kernel.h>
110 #include <sys/queue.h>
111 #include <sys/cpuset.h>
113 #include <sys/lock.h>
114 #include <sys/msgbuf.h>
115 #include <sys/malloc.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/rwlock.h>
119 #include <sys/sched.h>
120 #include <sys/sysctl.h>
121 #include <sys/systm.h>
122 #include <sys/vmmeter.h>
126 #include <dev/ofw/openfirm.h>
129 #include <vm/vm_param.h>
130 #include <vm/vm_kern.h>
131 #include <vm/vm_page.h>
132 #include <vm/vm_map.h>
133 #include <vm/vm_object.h>
134 #include <vm/vm_extern.h>
135 #include <vm/vm_pageout.h>
138 #include <machine/_inttypes.h>
139 #include <machine/cpu.h>
140 #include <machine/platform.h>
141 #include <machine/frame.h>
142 #include <machine/md_var.h>
143 #include <machine/psl.h>
144 #include <machine/bat.h>
145 #include <machine/hid.h>
146 #include <machine/pte.h>
147 #include <machine/sr.h>
148 #include <machine/trap.h>
149 #include <machine/mmuvar.h>
151 #include "mmu_oea64.h"
153 #include "moea64_if.h"
155 void moea64_release_vsid(uint64_t vsid);
156 uintptr_t moea64_get_unique_vsid(void);
158 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR)
159 #define ENABLE_TRANS(msr) mtmsr(msr)
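/*
 * A minimal usage sketch for the two macros above (illustrative only, not
 * quoted from any particular call site): the caller supplies the scratch
 * MSR variable that DISABLE_TRANS() fills in and later hands back to
 * ENABLE_TRANS() once the real-mode access is done.
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	(access physical memory with data translation disabled)
 *	ENABLE_TRANS(msr);
 */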
161 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
162 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
163 #define VSID_HASH_MASK 0x0000007fffffffffULL
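/*
 * A worked illustration of the two macros above (a sketch, assuming the
 * segment register number sr is below 16): VSID_MAKE() packs the 20-bit
 * hash into bits 4..23 of the VSID and the segment register number into
 * the low bits, so VSID_TO_HASH() recovers the hash exactly:
 *
 *	VSID_TO_HASH(VSID_MAKE(sr, hash)) == (hash & 0xfffff)
 */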
167 * -- Read lock: if no modifications are being made to either the PVO lists
168 * or page table or if any modifications being made result in internal
169 * changes (e.g. wiring, protection) such that the existence of the PVOs
170 * is unchanged and they remain associated with the same pmap (in which
171 * case the changes should be protected by the pmap lock)
172 * -- Write lock: required if PTEs/PVOs are being inserted or removed.
175 #define LOCK_TABLE_RD() rw_rlock(&moea64_table_lock)
176 #define UNLOCK_TABLE_RD() rw_runlock(&moea64_table_lock)
177 #define LOCK_TABLE_WR() rw_wlock(&moea64_table_lock)
178 #define UNLOCK_TABLE_WR() rw_wunlock(&moea64_table_lock)
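/*
 * A minimal sketch of how these macros pair with the rule described in the
 * comment above (illustrative only): read-only walks take the shared lock,
 * while moea64_pvo_enter()/moea64_pvo_remove() run with the exclusive lock
 * held (both assert RA_WLOCKED further down in this file).
 *
 *	LOCK_TABLE_RD();
 *	(inspect PVOs / sync REF and CHG bits)
 *	UNLOCK_TABLE_RD();
 *
 *	LOCK_TABLE_WR();
 *	(insert or remove PVOs and their PTEs)
 *	UNLOCK_TABLE_WR();
 */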
187 extern unsigned char _etext[];
188 extern unsigned char _end[];
190 extern int dumpsys_minidump;
193 * Map of physical memory regions.
195 static struct mem_region *regions;
196 static struct mem_region *pregions;
197 static u_int phys_avail_count;
198 static int regions_sz, pregions_sz;
200 extern void bs_remap_earlyboot(void);
203 * Lock for the pteg and pvo tables.
205 struct rwlock moea64_table_lock;
206 struct mtx moea64_slb_mutex;
211 u_int moea64_pteg_count;
212 u_int moea64_pteg_mask;
217 struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */
219 uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
220 uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */
222 #define BPVO_POOL_SIZE 327680
223 static struct pvo_entry *moea64_bpvo_pool;
224 static int moea64_bpvo_pool_index = 0;
226 #define VSID_NBPW (sizeof(u_int32_t) * 8)
228 #define NVSIDS (NPMAPS * 16)
229 #define VSID_HASHMASK 0xffffffffUL
231 #define NVSIDS NPMAPS
232 #define VSID_HASHMASK 0xfffffUL
234 static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
236 static boolean_t moea64_initialized = FALSE;
241 u_int moea64_pte_valid = 0;
242 u_int moea64_pte_overflow = 0;
243 u_int moea64_pvo_entries = 0;
244 u_int moea64_pvo_enter_calls = 0;
245 u_int moea64_pvo_remove_calls = 0;
246 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
247 &moea64_pte_valid, 0, "");
248 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
249 &moea64_pte_overflow, 0, "");
250 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
251 &moea64_pvo_entries, 0, "");
252 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
253 &moea64_pvo_enter_calls, 0, "");
254 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
255 &moea64_pvo_remove_calls, 0, "");
257 vm_offset_t moea64_scratchpage_va[2];
258 struct pvo_entry *moea64_scratchpage_pvo[2];
259 uintptr_t moea64_scratchpage_pte[2];
260 struct mtx moea64_scratchpage_mtx;
262 uint64_t moea64_large_page_mask = 0;
263 uint64_t moea64_large_page_size = 0;
264 int moea64_large_page_shift = 0;
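/*
 * For illustration: on CPUs where moea64_probe_large_page() below detects
 * large-page support, it sets these to a 16 MB page, i.e. a size of
 * 0x1000000, a shift of 24 and a mask of 0xffffff.
 */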
269 static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
270 vm_offset_t, vm_offset_t, uint64_t, int, int8_t);
271 static void moea64_pvo_remove(mmu_t, struct pvo_entry *);
272 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
277 static boolean_t moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
278 static u_int moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
279 static void moea64_kremove(mmu_t, vm_offset_t);
280 static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
281 vm_offset_t pa, vm_size_t sz);
284 * Kernel MMU interface
286 void moea64_clear_modify(mmu_t, vm_page_t);
287 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
288 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
289 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
290 int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
291 u_int flags, int8_t psind);
292 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
294 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
295 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
296 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
297 void moea64_init(mmu_t);
298 boolean_t moea64_is_modified(mmu_t, vm_page_t);
299 boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
300 boolean_t moea64_is_referenced(mmu_t, vm_page_t);
301 int moea64_ts_referenced(mmu_t, vm_page_t);
302 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
303 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
304 int moea64_page_wired_mappings(mmu_t, vm_page_t);
305 void moea64_pinit(mmu_t, pmap_t);
306 void moea64_pinit0(mmu_t, pmap_t);
307 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
308 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
309 void moea64_qremove(mmu_t, vm_offset_t, int);
310 void moea64_release(mmu_t, pmap_t);
311 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
312 void moea64_remove_pages(mmu_t, pmap_t);
313 void moea64_remove_all(mmu_t, vm_page_t);
314 void moea64_remove_write(mmu_t, vm_page_t);
315 void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
316 void moea64_zero_page(mmu_t, vm_page_t);
317 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
318 void moea64_zero_page_idle(mmu_t, vm_page_t);
319 void moea64_activate(mmu_t, struct thread *);
320 void moea64_deactivate(mmu_t, struct thread *);
321 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
322 void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
323 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
324 vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
325 void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
326 void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
327 void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
328 boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
329 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
330 vm_offset_t moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
332 struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev);
334 static mmu_method_t moea64_methods[] = {
335 MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
336 MMUMETHOD(mmu_copy_page, moea64_copy_page),
337 MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
338 MMUMETHOD(mmu_enter, moea64_enter),
339 MMUMETHOD(mmu_enter_object, moea64_enter_object),
340 MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
341 MMUMETHOD(mmu_extract, moea64_extract),
342 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
343 MMUMETHOD(mmu_init, moea64_init),
344 MMUMETHOD(mmu_is_modified, moea64_is_modified),
345 MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable),
346 MMUMETHOD(mmu_is_referenced, moea64_is_referenced),
347 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
348 MMUMETHOD(mmu_map, moea64_map),
349 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
350 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
351 MMUMETHOD(mmu_pinit, moea64_pinit),
352 MMUMETHOD(mmu_pinit0, moea64_pinit0),
353 MMUMETHOD(mmu_protect, moea64_protect),
354 MMUMETHOD(mmu_qenter, moea64_qenter),
355 MMUMETHOD(mmu_qremove, moea64_qremove),
356 MMUMETHOD(mmu_release, moea64_release),
357 MMUMETHOD(mmu_remove, moea64_remove),
358 MMUMETHOD(mmu_remove_pages, moea64_remove_pages),
359 MMUMETHOD(mmu_remove_all, moea64_remove_all),
360 MMUMETHOD(mmu_remove_write, moea64_remove_write),
361 MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
362 MMUMETHOD(mmu_unwire, moea64_unwire),
363 MMUMETHOD(mmu_zero_page, moea64_zero_page),
364 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
365 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
366 MMUMETHOD(mmu_activate, moea64_activate),
367 MMUMETHOD(mmu_deactivate, moea64_deactivate),
368 MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
370 /* Internal interfaces */
371 MMUMETHOD(mmu_mapdev, moea64_mapdev),
372 MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr),
373 MMUMETHOD(mmu_unmapdev, moea64_unmapdev),
374 MMUMETHOD(mmu_kextract, moea64_kextract),
375 MMUMETHOD(mmu_kenter, moea64_kenter),
376 MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr),
377 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
378 MMUMETHOD(mmu_scan_md, moea64_scan_md),
379 MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map),
384 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
386 static __inline u_int
387 va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
    shift);
return (hash & moea64_pteg_mask);
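/*
 * The value returned above is the PowerPC primary PTEG hash: the VSID
 * XORed with the virtual page index, truncated to the table size. For
 * example, with a hypothetical moea64_pteg_mask of 0xfff (4096 PTE
 * groups), every mapping of a given (vsid, va) pair always falls into
 * the same one of those 4096 groups.
 */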
398 static __inline struct pvo_head *
399 vm_page_to_pvoh(vm_page_t m)
402 return (&m->md.mdpg_pvoh);
406 moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
407 uint64_t pte_lo, int flags)
411 * Construct a PTE. Default to IMB initially. Valid bit only gets
412 * set when the real pte is set in memory.
414 * Note: Don't set the valid bit for correct operation of tlb update.
416 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
417 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);
419 if (flags & PVO_LARGE)
420 pt->pte_hi |= LPTE_BIG;
425 static __inline uint64_t
426 moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
if (ma != VM_MEMATTR_DEFAULT) {
switch (ma) {
case VM_MEMATTR_UNCACHEABLE:
434 return (LPTE_I | LPTE_G);
435 case VM_MEMATTR_WRITE_COMBINING:
436 case VM_MEMATTR_WRITE_BACK:
437 case VM_MEMATTR_PREFETCHABLE:
439 case VM_MEMATTR_WRITE_THROUGH:
440 return (LPTE_W | LPTE_M);
445 * Assume the page is cache inhibited and access is guarded unless
446 * it's in our available memory array.
448 pte_lo = LPTE_I | LPTE_G;
449 for (i = 0; i < pregions_sz; i++) {
450 if ((pa >= pregions[i].mr_start) &&
451 (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
452 pte_lo &= ~(LPTE_I | LPTE_G);
462 * Quick sort callout for comparing memory regions.
464 static int om_cmp(const void *a, const void *b);
467 om_cmp(const void *a, const void *b)
469 const struct ofw_map *mapa;
470 const struct ofw_map *mapb;
474 if (mapa->om_pa < mapb->om_pa)
476 else if (mapa->om_pa > mapb->om_pa)
483 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
485 struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
486 pcell_t acells, trans_cells[sz/sizeof(cell_t)];
492 bzero(translations, sz);
493 OF_getprop(OF_finddevice("/"), "#address-cells", &acells,
495 if (OF_getprop(mmu, "translations", trans_cells, sz) == -1)
496 panic("moea64_bootstrap: can't get ofw translations");
498 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
499 sz /= sizeof(cell_t);
500 for (i = 0, j = 0; i < sz; j++) {
501 translations[j].om_va = trans_cells[i++];
502 translations[j].om_len = trans_cells[i++];
503 translations[j].om_pa = trans_cells[i++];
505 translations[j].om_pa <<= 32;
506 translations[j].om_pa |= trans_cells[i++];
508 translations[j].om_mode = trans_cells[i++];
510 KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
514 qsort(translations, sz, sizeof (*translations), om_cmp);
516 for (i = 0; i < sz; i++) {
517 pa_base = translations[i].om_pa;
518 #ifndef __powerpc64__
519 if ((translations[i].om_pa >> 32) != 0)
520 panic("OFW translations above 32-bit boundary!");
523 if (pa_base % PAGE_SIZE)
524 panic("OFW translation not page-aligned (phys)!");
525 if (translations[i].om_va % PAGE_SIZE)
526 panic("OFW translation not page-aligned (virt)!");
528 CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
529 pa_base, translations[i].om_va, translations[i].om_len);
531 /* Now enter the pages for this mapping */
534 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
535 if (moea64_pvo_find_va(kernel_pmap,
536 translations[i].om_va + off) != NULL)
539 moea64_kenter(mmup, translations[i].om_va + off,
548 moea64_probe_large_page(void)
550 uint16_t pvr = mfpvr() >> 16;
556 powerpc_sync(); isync();
557 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
558 powerpc_sync(); isync();
562 moea64_large_page_size = 0x1000000; /* 16 MB */
563 moea64_large_page_shift = 24;
566 moea64_large_page_mask = moea64_large_page_size - 1;
570 moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
577 cache = PCPU_GET(slb);
578 esid = va >> ADDR_SR_SHFT;
579 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
581 for (i = 0; i < 64; i++) {
582 if (cache[i].slbe == (slbe | i))
587 entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
589 entry.slbv |= SLBV_L;
591 slb_insert_kernel(entry.slbe, entry.slbv);
596 moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
597 vm_offset_t kernelend)
601 vm_offset_t size, off;
605 if (moea64_large_page_size == 0)
611 PMAP_LOCK(kernel_pmap);
612 for (i = 0; i < pregions_sz; i++) {
613 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
614 pregions[i].mr_size; pa += moea64_large_page_size) {
 * Set memory access as guarded if a prefetch within
 * the page could run past the end of the available physmem area.
621 if (pa & moea64_large_page_mask) {
622 pa &= moea64_large_page_mask;
625 if (pa + moea64_large_page_size >
626 pregions[i].mr_start + pregions[i].mr_size)
629 moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
630 NULL, pa, pa, pte_lo,
631 PVO_WIRED | PVO_LARGE, 0);
634 PMAP_UNLOCK(kernel_pmap);
637 size = sizeof(struct pvo_head) * moea64_pteg_count;
638 off = (vm_offset_t)(moea64_pvo_table);
639 for (pa = off; pa < off + size; pa += PAGE_SIZE)
640 moea64_kenter(mmup, pa, pa);
641 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
642 off = (vm_offset_t)(moea64_bpvo_pool);
643 for (pa = off; pa < off + size; pa += PAGE_SIZE)
644 moea64_kenter(mmup, pa, pa);
647 * Map certain important things, like ourselves.
649 * NOTE: We do not map the exception vector space. That code is
650 * used only in real mode, and leaving it unmapped allows us to
 * catch NULL pointer dereferences, instead of making NULL a valid
655 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
657 moea64_kenter(mmup, pa, pa);
662 * Allow user to override unmapped_buf_allowed for testing.
663 * XXXKIB Only direct map implementation was tested.
665 if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
666 &unmapped_buf_allowed))
667 unmapped_buf_allowed = hw_direct_map;
671 moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
674 vm_size_t physsz, hwphyssz;
676 #ifndef __powerpc64__
677 /* We don't have a direct map since there is no BAT */
680 /* Make sure battable is zero, since we have no BAT */
681 for (i = 0; i < 16; i++) {
682 battable[i].batu = 0;
683 battable[i].batl = 0;
686 moea64_probe_large_page();
688 /* Use a direct map if we have large page support */
689 if (moea64_large_page_size > 0)
695 /* Get physical memory regions from firmware */
mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
697 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
699 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
700 panic("moea64_bootstrap: phys_avail too small");
702 phys_avail_count = 0;
705 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
706 for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
707 CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
708 regions[i].mr_start, regions[i].mr_start +
709 regions[i].mr_size, regions[i].mr_size);
711 (physsz + regions[i].mr_size) >= hwphyssz) {
712 if (physsz < hwphyssz) {
713 phys_avail[j] = regions[i].mr_start;
714 phys_avail[j + 1] = regions[i].mr_start +
721 phys_avail[j] = regions[i].mr_start;
722 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
724 physsz += regions[i].mr_size;
727 /* Check for overlap with the kernel and exception vectors */
728 for (j = 0; j < 2*phys_avail_count; j+=2) {
729 if (phys_avail[j] < EXC_LAST)
730 phys_avail[j] += EXC_LAST;
732 if (kernelstart >= phys_avail[j] &&
733 kernelstart < phys_avail[j+1]) {
734 if (kernelend < phys_avail[j+1]) {
735 phys_avail[2*phys_avail_count] =
736 (kernelend & ~PAGE_MASK) + PAGE_SIZE;
737 phys_avail[2*phys_avail_count + 1] =
742 phys_avail[j+1] = kernelstart & ~PAGE_MASK;
745 if (kernelend >= phys_avail[j] &&
746 kernelend < phys_avail[j+1]) {
747 if (kernelstart > phys_avail[j]) {
748 phys_avail[2*phys_avail_count] = phys_avail[j];
749 phys_avail[2*phys_avail_count + 1] =
750 kernelstart & ~PAGE_MASK;
754 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
758 physmem = btoc(physsz);
761 moea64_pteg_count = PTEGCOUNT;
763 moea64_pteg_count = 0x1000;
765 while (moea64_pteg_count < physmem)
766 moea64_pteg_count <<= 1;
768 moea64_pteg_count >>= 1;
769 #endif /* PTEGCOUNT */
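/*
 * Unless PTEGCOUNT overrides it, the sizing loop above selects the largest
 * power of two strictly below physmem (with a 0x1000 starting floor). As a
 * worked example, roughly 1 GB of RAM (physmem == 0x40000 4 KB pages)
 * doubles the count up to 0x40000 and the final shift back yields 0x20000
 * PTE groups.
 */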
773 moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
782 moea64_pteg_mask = moea64_pteg_count - 1;
785 * Allocate pv/overflow lists.
787 size = sizeof(struct pvo_head) * moea64_pteg_count;
789 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
791 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);
794 for (i = 0; i < moea64_pteg_count; i++)
795 LIST_INIT(&moea64_pvo_table[i]);
799 * Initialize the lock that synchronizes access to the pteg and pvo
802 rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
803 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
806 * Initialise the unmanaged pvo pool.
808 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
809 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
810 moea64_bpvo_pool_index = 0;
813 * Make sure kernel vsid is allocated as well as VSID 0.
815 #ifndef __powerpc64__
816 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
817 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
818 moea64_vsid_bitmap[0] |= 1;
822 * Initialize the kernel pmap (which is statically allocated).
825 for (i = 0; i < 64; i++) {
826 pcpup->pc_slb[i].slbv = 0;
827 pcpup->pc_slb[i].slbe = 0;
830 for (i = 0; i < 16; i++)
831 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
834 kernel_pmap->pmap_phys = kernel_pmap;
835 CPU_FILL(&kernel_pmap->pm_active);
836 RB_INIT(&kernel_pmap->pmap_pvo);
838 PMAP_LOCK_INIT(kernel_pmap);
841 * Now map in all the other buffers we allocated earlier
844 moea64_setup_direct_map(mmup, kernelstart, kernelend);
848 moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
859 * Set up the Open Firmware pmap and add its mappings if not in real
863 chosen = OF_finddevice("/chosen");
864 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
865 mmu = OF_instance_to_package(mmui);
866 if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1)
868 if (sz > 6144 /* tmpstksz - 2 KB headroom */)
869 panic("moea64_bootstrap: too many ofw translations");
872 moea64_add_ofw_mappings(mmup, mmu, sz);
876 * Calculate the last available physical address.
878 for (i = 0; phys_avail[i + 2] != 0; i += 2)
880 Maxmem = powerpc_btop(phys_avail[i + 1]);
883 * Initialize MMU and remap early physical mappings
885 MMU_CPU_BOOTSTRAP(mmup,0);
886 mtmsr(mfmsr() | PSL_DR | PSL_IR);
888 bs_remap_earlyboot();
891 * Set the start and end of kva.
893 virtual_avail = VM_MIN_KERNEL_ADDRESS;
894 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
897 * Map the entire KVA range into the SLB. We must not fault there.
900 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
901 moea64_bootstrap_slb_prefault(va, 0);
905 * Figure out how far we can extend virtual_end into segment 16
906 * without running into existing mappings. Segment 16 is guaranteed
907 * to contain neither RAM nor devices (at least on Apple hardware),
908 * but will generally contain some OFW mappings we should not
912 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */
913 PMAP_LOCK(kernel_pmap);
914 while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
915 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
916 virtual_end += PAGE_SIZE;
917 PMAP_UNLOCK(kernel_pmap);
921 * Allocate a kernel stack with a guard page for thread0 and map it
922 * into the kernel page map.
924 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
925 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
926 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
927 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
928 thread0.td_kstack = va;
929 thread0.td_kstack_pages = KSTACK_PAGES;
930 for (i = 0; i < KSTACK_PAGES; i++) {
931 moea64_kenter(mmup, va, pa);
937 * Allocate virtual address space for the message buffer.
939 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
940 msgbufp = (struct msgbuf *)virtual_avail;
942 virtual_avail += round_page(msgbufsize);
943 while (va < virtual_avail) {
944 moea64_kenter(mmup, va, pa);
950 * Allocate virtual address space for the dynamic percpu area.
952 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
953 dpcpu = (void *)virtual_avail;
955 virtual_avail += DPCPU_SIZE;
956 while (va < virtual_avail) {
957 moea64_kenter(mmup, va, pa);
961 dpcpu_init(dpcpu, 0);
964 * Allocate some things for page zeroing. We put this directly
965 * in the page table, marked with LPTE_LOCKED, to avoid any
966 * of the PVO book-keeping or other parts of the VM system
967 * from even knowing that this hack exists.
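 *
 * A sketch of how these scratch slots are used further down in this file
 * (see moea64_set_scratchpage_pa() and moea64_zero_page()): the locked PTE
 * is retargeted at the physical page of interest, and the data is then
 * touched through moea64_scratchpage_va[] while moea64_scratchpage_mtx is
 * held:
 *
 *	mtx_lock(&moea64_scratchpage_mtx);
 *	moea64_set_scratchpage_pa(mmu, 0, pa);
 *	bzero((void *)moea64_scratchpage_va[0], PAGE_SIZE);
 *	mtx_unlock(&moea64_scratchpage_mtx);
 */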
970 if (!hw_direct_map) {
971 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
973 for (i = 0; i < 2; i++) {
974 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
975 virtual_end -= PAGE_SIZE;
977 moea64_kenter(mmup, moea64_scratchpage_va[i], 0);
979 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
980 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
982 moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
983 mmup, moea64_scratchpage_pvo[i]);
984 moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
986 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
987 &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
988 moea64_scratchpage_pvo[i]->pvo_vpn);
995 * Activate a user pmap. The pmap must be activated before its address
996 * space can be accessed in any way.
999 moea64_activate(mmu_t mmu, struct thread *td)
1003 pm = &td->td_proc->p_vmspace->vm_pmap;
1004 CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
1006 #ifdef __powerpc64__
1007 PCPU_SET(userslb, pm->pm_slb);
1009 PCPU_SET(curpmap, pm->pmap_phys);
1014 moea64_deactivate(mmu_t mmu, struct thread *td)
1018 pm = &td->td_proc->p_vmspace->vm_pmap;
1019 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
1020 #ifdef __powerpc64__
1021 PCPU_SET(userslb, NULL);
1023 PCPU_SET(curpmap, NULL);
1028 moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1030 struct pvo_entry key, *pvo;
1035 key.pvo_vaddr = sva;
1036 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1037 pvo != NULL && PVO_VADDR(pvo) < eva;
1038 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
1039 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1040 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1041 panic("moea64_unwire: pvo %p is missing PVO_WIRED",
1043 pvo->pvo_vaddr &= ~PVO_WIRED;
1044 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
1045 panic("moea64_unwire: pte %p is missing LPTE_WIRED",
1046 &pvo->pvo_pte.lpte);
1047 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
1050 * The PTE's wired attribute is not a hardware
1051 * feature, so there is no need to invalidate any TLB
1054 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1057 pm->pm_stats.wired_count--;
1064 * This goes through and sets the physical address of our
1065 * special scratch PTE to the PA we want to zero or copy. Because
1066 * of locking issues (this can get called in pvo_enter() by
1067 * the UMA allocator), we can't use most other utility functions here
1071 void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {
1073 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1074 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1076 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
1077 ~(LPTE_WIMG | LPTE_RPGN);
1078 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
1079 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1080 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
1081 &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
1082 moea64_scratchpage_pvo[which]->pvo_vpn);
1087 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1092 dst = VM_PAGE_TO_PHYS(mdst);
1093 src = VM_PAGE_TO_PHYS(msrc);
1095 if (hw_direct_map) {
1096 bcopy((void *)src, (void *)dst, PAGE_SIZE);
1098 mtx_lock(&moea64_scratchpage_mtx);
1100 moea64_set_scratchpage_pa(mmu, 0, src);
1101 moea64_set_scratchpage_pa(mmu, 1, dst);
1103 bcopy((void *)moea64_scratchpage_va[0],
1104 (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1106 mtx_unlock(&moea64_scratchpage_mtx);
1111 moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1112 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1115 vm_offset_t a_pg_offset, b_pg_offset;
1118 while (xfersize > 0) {
1119 a_pg_offset = a_offset & PAGE_MASK;
1120 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1121 a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
1123 b_pg_offset = b_offset & PAGE_MASK;
1124 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1125 b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
1127 bcopy(a_cp, b_cp, cnt);
1135 moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1136 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1139 vm_offset_t a_pg_offset, b_pg_offset;
1142 mtx_lock(&moea64_scratchpage_mtx);
1143 while (xfersize > 0) {
1144 a_pg_offset = a_offset & PAGE_MASK;
1145 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1146 moea64_set_scratchpage_pa(mmu, 0,
1147 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
1148 a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
1149 b_pg_offset = b_offset & PAGE_MASK;
1150 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1151 moea64_set_scratchpage_pa(mmu, 1,
1152 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
1153 b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
1154 bcopy(a_cp, b_cp, cnt);
1159 mtx_unlock(&moea64_scratchpage_mtx);
1163 moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1164 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1167 if (hw_direct_map) {
1168 moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
1171 moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
1177 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1179 vm_offset_t pa = VM_PAGE_TO_PHYS(m);
if (size + off > PAGE_SIZE)
panic("moea64_zero_page_area: size + off > PAGE_SIZE");
1184 if (hw_direct_map) {
1185 bzero((caddr_t)pa + off, size);
1187 mtx_lock(&moea64_scratchpage_mtx);
1188 moea64_set_scratchpage_pa(mmu, 0, pa);
1189 bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1190 mtx_unlock(&moea64_scratchpage_mtx);
1195 * Zero a page of physical memory by temporarily mapping it
1198 moea64_zero_page(mmu_t mmu, vm_page_t m)
1200 vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1201 vm_offset_t va, off;
1203 if (!hw_direct_map) {
1204 mtx_lock(&moea64_scratchpage_mtx);
1206 moea64_set_scratchpage_pa(mmu, 0, pa);
1207 va = moea64_scratchpage_va[0];
1212 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1213 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
1216 mtx_unlock(&moea64_scratchpage_mtx);
1220 moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
1223 moea64_zero_page(mmu, m);
1227 * Map the given physical page at the specified virtual address in the
1228 * target pmap with the protection requested. If specified the page
1229 * will be wired down.
1233 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1234 vm_prot_t prot, u_int flags, int8_t psind)
1236 struct pvo_head *pvo_head;
1243 if (!moea64_initialized) {
1246 zone = moea64_upvo_zone;
1249 pvo_head = vm_page_to_pvoh(m);
1251 zone = moea64_mpvo_zone;
1252 pvo_flags = PVO_MANAGED;
1255 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1256 VM_OBJECT_ASSERT_LOCKED(m->object);
1258 /* XXX change the pvo head for fake pages */
1259 if ((m->oflags & VPO_UNMANAGED) != 0) {
1260 pvo_flags &= ~PVO_MANAGED;
1262 zone = moea64_upvo_zone;
1265 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1267 if (prot & VM_PROT_WRITE) {
1269 if (pmap_bootstrapped &&
1270 (m->oflags & VPO_UNMANAGED) == 0)
1271 vm_page_aflag_set(m, PGA_WRITEABLE);
1275 if ((prot & VM_PROT_EXECUTE) == 0)
1276 pte_lo |= LPTE_NOEXEC;
1278 if ((flags & PMAP_ENTER_WIRED) != 0)
1279 pvo_flags |= PVO_WIRED;
1284 error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
1285 VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags, psind);
1288 if (error != ENOMEM)
1290 if ((flags & PMAP_ENTER_NOSLEEP) != 0)
1291 return (KERN_RESOURCE_SHORTAGE);
1292 VM_OBJECT_ASSERT_UNLOCKED(m->object);
1297 * Flush the page from the instruction cache if this page is
1298 * mapped executable and cacheable.
1300 if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
1301 (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1302 vm_page_aflag_set(m, PGA_EXECUTABLE);
1303 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1305 return (KERN_SUCCESS);
1309 moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
1314 * This is much trickier than on older systems because
1315 * we can't sync the icache on physical addresses directly
1316 * without a direct map. Instead we check a couple of cases
1317 * where the memory is already mapped in and, failing that,
1318 * use the same trick we use for page zeroing to create
1319 * a temporary mapping for this physical address.
1322 if (!pmap_bootstrapped) {
1324 * If PMAP is not bootstrapped, we are likely to be
1327 __syncicache((void *)pa, sz);
1328 } else if (pmap == kernel_pmap) {
1329 __syncicache((void *)va, sz);
1330 } else if (hw_direct_map) {
1331 __syncicache((void *)pa, sz);
1333 /* Use the scratch page to set up a temp mapping */
1335 mtx_lock(&moea64_scratchpage_mtx);
1337 moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1338 __syncicache((void *)(moea64_scratchpage_va[1] +
1339 (va & ADDR_POFF)), sz);
1341 mtx_unlock(&moea64_scratchpage_mtx);
1346 * Maps a sequence of resident pages belonging to the same object.
1347 * The sequence begins with the given page m_start. This page is
1348 * mapped at the given virtual address start. Each subsequent page is
1349 * mapped at a virtual address that is offset from start by the same
1350 * amount as the page is offset from m_start within the object. The
1351 * last page in the sequence is the page with the largest offset from
1352 * m_start that can be mapped at a virtual address less than the given
1353 * virtual address end. Not every virtual page between start and end
1354 * is mapped; only those for which a resident page exists with the
1355 * corresponding offset from m_start are mapped.
1358 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1359 vm_page_t m_start, vm_prot_t prot)
1362 vm_pindex_t diff, psize;
1364 VM_OBJECT_ASSERT_LOCKED(m_start->object);
1366 psize = atop(end - start);
1368 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1369 moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
1370 (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
1371 m = TAILQ_NEXT(m, listq);
1376 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1380 moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1381 PMAP_ENTER_NOSLEEP, 0);
1385 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1387 struct pvo_entry *pvo;
1391 pvo = moea64_pvo_find_va(pm, va);
1395 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
1396 (va - PVO_VADDR(pvo));
1402 * Atomically extract and hold the physical page with the given
1403 * pmap and virtual address pair if that mapping permits the given
1407 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1409 struct pvo_entry *pvo;
1417 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1418 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
1419 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
1420 (prot & VM_PROT_WRITE) == 0)) {
1421 if (vm_page_pa_tryrelock(pmap,
1422 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
1424 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1432 static mmu_t installed_mmu;
1435 moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1438 * This entire routine is a horrible hack to avoid bothering kmem
1439 * for new KVA addresses. Because this can get called from inside
1440 * kmem allocation routines, calling kmem for a new address here
1441 * can lead to multiply locking non-recursive mutexes.
1446 int pflags, needed_lock;
1448 *flags = UMA_SLAB_PRIV;
1449 needed_lock = !PMAP_LOCKED(kernel_pmap);
1450 pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
1453 m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
1455 if (wait & M_NOWAIT)
1462 va = VM_PAGE_TO_PHYS(m);
1466 PMAP_LOCK(kernel_pmap);
1468 moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
1469 NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP,
1473 PMAP_UNLOCK(kernel_pmap);
1476 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1477 bzero((void *)va, PAGE_SIZE);
1482 extern int elf32_nxstack;
1485 moea64_init(mmu_t mmu)
1488 CTR0(KTR_PMAP, "moea64_init");
1490 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1491 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1492 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1493 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1494 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1495 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1497 if (!hw_direct_map) {
1498 installed_mmu = mmu;
1499 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
1500 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
1503 #ifdef COMPAT_FREEBSD32
1507 moea64_initialized = TRUE;
1511 moea64_is_referenced(mmu_t mmu, vm_page_t m)
1514 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1515 ("moea64_is_referenced: page %p is not managed", m));
1516 return (moea64_query_bit(mmu, m, PTE_REF));
1520 moea64_is_modified(mmu_t mmu, vm_page_t m)
1523 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1524 ("moea64_is_modified: page %p is not managed", m));
1527 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1528 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
1529 * is clear, no PTEs can have LPTE_CHG set.
1531 VM_OBJECT_ASSERT_LOCKED(m->object);
1532 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1534 return (moea64_query_bit(mmu, m, LPTE_CHG));
1538 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1540 struct pvo_entry *pvo;
1544 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1545 rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1551 moea64_clear_modify(mmu_t mmu, vm_page_t m)
1554 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1555 ("moea64_clear_modify: page %p is not managed", m));
1556 VM_OBJECT_ASSERT_WLOCKED(m->object);
1557 KASSERT(!vm_page_xbusied(m),
1558 ("moea64_clear_modify: page %p is exclusive busied", m));
1561 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1562 * set. If the object containing the page is locked and the page is
1563 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
1565 if ((m->aflags & PGA_WRITEABLE) == 0)
1567 moea64_clear_bit(mmu, m, LPTE_CHG);
1571 * Clear the write and modified bits in each of the given page's mappings.
1574 moea64_remove_write(mmu_t mmu, vm_page_t m)
1576 struct pvo_entry *pvo;
1581 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1582 ("moea64_remove_write: page %p is not managed", m));
1585 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1586 * set by another thread while the object is locked. Thus,
1587 * if PGA_WRITEABLE is clear, no page table entries need updating.
1589 VM_OBJECT_ASSERT_WLOCKED(m->object);
1590 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1594 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1595 pmap = pvo->pvo_pmap;
1597 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1598 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1599 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1600 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1602 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
1603 lo |= pvo->pvo_pte.lpte.pte_lo;
1604 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1605 MOEA64_PTE_CHANGE(mmu, pt,
1606 &pvo->pvo_pte.lpte, pvo->pvo_vpn);
1607 if (pvo->pvo_pmap == kernel_pmap)
if ((lo & LPTE_CHG) != 0)
vm_page_dirty(m);
1616 vm_page_aflag_clear(m, PGA_WRITEABLE);
1620 * moea64_ts_referenced:
1622 * Return a count of reference bits for a page, clearing those bits.
1623 * It is not necessary for every reference bit to be cleared, but it
1624 * is necessary that 0 only be returned when there are truly no
1625 * reference bits set.
1627 * XXX: The exact number of bits to check and clear is a matter that
1628 * should be tested and standardized at some point in the future for
1629 * optimal aging of shared pages.
1632 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1635 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1636 ("moea64_ts_referenced: page %p is not managed", m));
1637 return (moea64_clear_bit(mmu, m, LPTE_REF));
1641 * Modify the WIMG settings of all mappings for a page.
1644 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1646 struct pvo_entry *pvo;
1647 struct pvo_head *pvo_head;
1652 if ((m->oflags & VPO_UNMANAGED) != 0) {
1653 m->md.mdpg_cache_attrs = ma;
1657 pvo_head = vm_page_to_pvoh(m);
1658 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1660 LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
1661 pmap = pvo->pvo_pmap;
1663 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1664 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
1665 pvo->pvo_pte.lpte.pte_lo |= lo;
1667 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1669 if (pvo->pvo_pmap == kernel_pmap)
1675 m->md.mdpg_cache_attrs = ma;
1679 * Map a wired page into kernel virtual address space.
1682 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1687 pte_lo = moea64_calc_wimg(pa, ma);
1690 PMAP_LOCK(kernel_pmap);
1691 error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
1692 NULL, va, pa, pte_lo, PVO_WIRED, 0);
1693 PMAP_UNLOCK(kernel_pmap);
1696 if (error != 0 && error != ENOENT)
1697 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1702 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1705 moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1709 * Extract the physical page address associated with the given kernel virtual
1713 moea64_kextract(mmu_t mmu, vm_offset_t va)
1715 struct pvo_entry *pvo;
1719 * Shortcut the direct-mapped case when applicable. We never put
1720 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1722 if (va < VM_MIN_KERNEL_ADDRESS)
1725 PMAP_LOCK(kernel_pmap);
1726 pvo = moea64_pvo_find_va(kernel_pmap, va);
1727 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1729 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1730 PMAP_UNLOCK(kernel_pmap);
1735 * Remove a wired page from kernel virtual address space.
1738 moea64_kremove(mmu_t mmu, vm_offset_t va)
1740 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1744 * Map a range of physical addresses into kernel virtual address space.
1746 * The value passed in *virt is a suggested virtual address for the mapping.
1747 * Architectures which can support a direct-mapped physical to virtual region
1748 * can return the appropriate address within that region, leaving '*virt'
1749 * unchanged. We cannot and therefore do not; *virt is updated with the
1750 * first usable address after the mapped region.
1753 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1754 vm_paddr_t pa_end, int prot)
1756 vm_offset_t sva, va;
1760 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1761 moea64_kenter(mmu, va, pa_start);
1768 * Returns true if the pmap's pv is one of the first
1769 * 16 pvs linked to from this page. This count may
1770 * be changed upwards or downwards in the future; it
1771 * is only necessary that true be returned for a small
1772 * subset of pmaps for proper page aging.
1775 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1778 struct pvo_entry *pvo;
1781 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1782 ("moea64_page_exists_quick: page %p is not managed", m));
1786 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1787 if (pvo->pvo_pmap == pmap) {
1799 * Return the number of managed mappings to the given physical page
1803 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1805 struct pvo_entry *pvo;
1809 if ((m->oflags & VPO_UNMANAGED) != 0)
1812 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1813 if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1819 static uintptr_t moea64_vsidcontext;
1822 moea64_get_unique_vsid(void) {
1829 __asm __volatile("mftb %0" : "=r"(entropy));
1831 mtx_lock(&moea64_slb_mutex);
1832 for (i = 0; i < NVSIDS; i += VSID_NBPW) {
 * Create a new value by multiplying by a prime and adding in
 * entropy from the timebase register. This is to make the
 * VSID more random so that the PT hash function collides
 * less often. (Note that the prime causes gcc to do shifts
 * instead of a multiply.)
1842 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1843 hash = moea64_vsidcontext & (NVSIDS - 1);
1844 if (hash == 0) /* 0 is special, avoid it */
1847 mask = 1 << (hash & (VSID_NBPW - 1));
1848 hash = (moea64_vsidcontext & VSID_HASHMASK);
1849 if (moea64_vsid_bitmap[n] & mask) { /* collision? */
1850 /* anything free in this bucket? */
1851 if (moea64_vsid_bitmap[n] == 0xffffffff) {
1852 entropy = (moea64_vsidcontext >> 20);
1855 i = ffs(~moea64_vsid_bitmap[n]) - 1;
1857 hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
1860 KASSERT(!(moea64_vsid_bitmap[n] & mask),
1861 ("Allocating in-use VSID %#zx\n", hash));
1862 moea64_vsid_bitmap[n] |= mask;
1863 mtx_unlock(&moea64_slb_mutex);
1867 mtx_unlock(&moea64_slb_mutex);
1868 panic("%s: out of segments",__func__);
1871 #ifdef __powerpc64__
1873 moea64_pinit(mmu_t mmu, pmap_t pmap)
1876 RB_INIT(&pmap->pmap_pvo);
1878 pmap->pm_slb_tree_root = slb_alloc_tree();
1879 pmap->pm_slb = slb_alloc_user_cache();
1880 pmap->pm_slb_len = 0;
1884 moea64_pinit(mmu_t mmu, pmap_t pmap)
1889 RB_INIT(&pmap->pmap_pvo);
1891 if (pmap_bootstrapped)
1892 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
1895 pmap->pmap_phys = pmap;
1898 * Allocate some segment registers for this pmap.
1900 hash = moea64_get_unique_vsid();
1902 for (i = 0; i < 16; i++)
1903 pmap->pm_sr[i] = VSID_MAKE(i, hash);
1905 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
1910 * Initialize the pmap associated with process 0.
1913 moea64_pinit0(mmu_t mmu, pmap_t pm)
1917 moea64_pinit(mmu, pm);
1918 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1922 * Set the physical protection on the specified range of this map as requested.
1925 moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
1931 PMAP_LOCK_ASSERT(pm, MA_OWNED);
1934 * Grab the PTE pointer before we diddle with the cached PTE
1937 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1940 * Change the protection of the page.
1942 oldlo = pvo->pvo_pte.lpte.pte_lo;
1943 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1944 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1945 if ((prot & VM_PROT_EXECUTE) == 0)
1946 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1947 if (prot & VM_PROT_WRITE)
1948 pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
1950 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1952 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1955 * If the PVO is in the page table, update that pte as well.
1958 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1960 if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
1961 (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1962 if ((pg->oflags & VPO_UNMANAGED) == 0)
1963 vm_page_aflag_set(pg, PGA_EXECUTABLE);
1964 moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
1965 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
1969 * Update vm about the REF/CHG bits if the page is managed and we have
1970 * removed write access.
1972 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
1973 (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) {
if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
vm_page_dirty(pg);
1977 if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
1978 vm_page_aflag_set(pg, PGA_REFERENCED);
1984 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1987 struct pvo_entry *pvo, *tpvo, key;
1989 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
1992 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1993 ("moea64_protect: non current pmap"));
1995 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1996 moea64_remove(mmu, pm, sva, eva);
2002 key.pvo_vaddr = sva;
2003 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2004 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2005 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2006 moea64_pvo_protect(mmu, pm, pvo, prot);
2013 * Map a list of wired pages into kernel virtual address space. This is
2014 * intended for temporary mappings which do not need page modification or
2015 * references recorded. Existing mappings in the region are overwritten.
2018 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
2020 while (count-- > 0) {
2021 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2028 * Remove page mappings from kernel virtual address space. Intended for
2029 * temporary mappings entered by moea64_qenter.
2032 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
2034 while (count-- > 0) {
2035 moea64_kremove(mmu, va);
2041 moea64_release_vsid(uint64_t vsid)
2045 mtx_lock(&moea64_slb_mutex);
2046 idx = vsid & (NVSIDS-1);
mask = 1 << (idx % VSID_NBPW);
idx /= VSID_NBPW;
KASSERT(moea64_vsid_bitmap[idx] & mask,
2050 ("Freeing unallocated VSID %#jx", vsid));
2051 moea64_vsid_bitmap[idx] &= ~mask;
2052 mtx_unlock(&moea64_slb_mutex);
2057 moea64_release(mmu_t mmu, pmap_t pmap)
2061 * Free segment registers' VSIDs
2063 #ifdef __powerpc64__
2064 slb_free_tree(pmap);
2065 slb_free_user_cache(pmap->pm_slb);
2067 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2069 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2074 * Remove all pages mapped by the specified pmap
2077 moea64_remove_pages(mmu_t mmu, pmap_t pm)
2079 struct pvo_entry *pvo, *tpvo;
2083 RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2084 if (!(pvo->pvo_vaddr & PVO_WIRED))
2085 moea64_pvo_remove(mmu, pvo);
2092 * Remove the given range of addresses from the specified map.
2095 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2097 struct pvo_entry *pvo, *tpvo, key;
2100 * Perform an unsynchronized read. This is, however, safe.
2102 if (pm->pm_stats.resident_count == 0)
2107 key.pvo_vaddr = sva;
2108 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2109 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2110 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2111 moea64_pvo_remove(mmu, pvo);
2118 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2119 * will reflect changes in pte's back to the vm_page.
2122 moea64_remove_all(mmu_t mmu, vm_page_t m)
2124 struct pvo_entry *pvo, *next_pvo;
2128 LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2129 pmap = pvo->pvo_pmap;
2131 moea64_pvo_remove(mmu, pvo);
2135 if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
2137 vm_page_aflag_clear(m, PGA_WRITEABLE);
2138 vm_page_aflag_clear(m, PGA_EXECUTABLE);
2142 * Allocate a physical page of memory directly from the phys_avail map.
2143 * Can only be called from moea64_bootstrap before avail start and end are
2147 moea64_bootstrap_alloc(vm_size_t size, u_int align)
2152 size = round_page(size);
2153 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2155 s = (phys_avail[i] + align - 1) & ~(align - 1);
2160 if (s < phys_avail[i] || e > phys_avail[i + 1])
2163 if (s + size > platform_real_maxaddr())
2166 if (s == phys_avail[i]) {
2167 phys_avail[i] += size;
2168 } else if (e == phys_avail[i + 1]) {
2169 phys_avail[i + 1] -= size;
2171 for (j = phys_avail_count * 2; j > i; j -= 2) {
2172 phys_avail[j] = phys_avail[j - 2];
2173 phys_avail[j + 1] = phys_avail[j - 1];
2176 phys_avail[i + 3] = phys_avail[i + 1];
2177 phys_avail[i + 1] = s;
2178 phys_avail[i + 2] = e;
2184 panic("moea64_bootstrap_alloc: could not allocate memory");
2188 moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
2189 struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
2190 uint64_t pte_lo, int flags, int8_t psind __unused)
2192 struct pvo_entry *pvo;
2201 * One nasty thing that can happen here is that the UMA calls to
2202 * allocate new PVOs need to map more memory, which calls pvo_enter(),
2203 * which calls UMA...
2205 * We break the loop by detecting recursion and allocating out of
2206 * the bootstrap pool.
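 *
 * Concretely (see moea64_uma_page_alloc() above), the recursion being
 * broken is:
 *
 *	moea64_pvo_enter() -> uma_zalloc() -> moea64_uma_page_alloc()
 *	    -> moea64_pvo_enter() -> ...
 *
 * Entries flagged PVO_BOOTSTRAP, and anything entered before
 * moea64_initialized is set, are carved out of moea64_bpvo_pool rather
 * than going back to UMA.
 */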
2210 bootstrap = (flags & PVO_BOOTSTRAP);
2212 if (!moea64_initialized)
2215 PMAP_LOCK_ASSERT(pm, MA_OWNED);
2216 rw_assert(&moea64_table_lock, RA_WLOCKED);
2219 * Compute the PTE Group index.
2222 vsid = va_to_vsid(pm, va);
2223 ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);
2226 * Remove any existing mapping for this page. Reuse the pvo entry if
2227 * there is a mapping.
2229 moea64_pvo_enter_calls++;
2231 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2232 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2233 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2234 (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
2235 == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
2237 * The physical page and protection are not
2238 * changing. Instead, this may be a request
2239 * to change the mapping's wired attribute.
2242 if ((flags & PVO_WIRED) != 0 &&
2243 (pvo->pvo_vaddr & PVO_WIRED) == 0) {
2244 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2245 pvo->pvo_vaddr |= PVO_WIRED;
2246 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2247 pm->pm_stats.wired_count++;
2248 } else if ((flags & PVO_WIRED) == 0 &&
2249 (pvo->pvo_vaddr & PVO_WIRED) != 0) {
2250 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2251 pvo->pvo_vaddr &= ~PVO_WIRED;
2252 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
2253 pm->pm_stats.wired_count--;
2255 if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
2257 ("moea64_pvo_enter: valid pt"));
2258 /* Re-insert if spilled */
2259 i = MOEA64_PTE_INSERT(mmu, ptegidx,
2260 &pvo->pvo_pte.lpte);
2262 PVO_PTEGIDX_SET(pvo, i);
2263 moea64_pte_overflow--;
2264 } else if (pt != -1) {
2266 * The PTE's wired attribute is not a
2267 * hardware feature, so there is no
2268 * need to invalidate any TLB entries.
2270 MOEA64_PTE_CHANGE(mmu, pt,
2271 &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2275 moea64_pvo_remove(mmu, pvo);
2281 * If we aren't overwriting a mapping, try to allocate.
2284 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
2285 panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
2286 moea64_bpvo_pool_index, BPVO_POOL_SIZE,
2287 BPVO_POOL_SIZE * sizeof(struct pvo_entry));
2289 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2290 moea64_bpvo_pool_index++;
2293 pvo = uma_zalloc(zone, M_NOWAIT);
2299 moea64_pvo_entries++;
2300 pvo->pvo_vaddr = va;
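/*
 * Record the virtual page number; the PTE_UNSET/PTE_CHANGE methods
 * use it later to issue a targeted TLB invalidation for this mapping.
 */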
2301 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
2304 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2305 pvo->pvo_vaddr &= ~ADDR_POFF;
2307 if (flags & PVO_WIRED)
2308 pvo->pvo_vaddr |= PVO_WIRED;
2309 if (pvo_head != NULL)
2310 pvo->pvo_vaddr |= PVO_MANAGED;
2312 pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2313 if (flags & PVO_LARGE)
2314 pvo->pvo_vaddr |= PVO_LARGE;
2316 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2317 (uint64_t)(pa) | pte_lo, flags);
2322 RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
2325 * Remember if the list was empty; this mapping will then be the first one for the page.
2328 if (pvo_head != NULL) {
2329 if (LIST_FIRST(pvo_head) == NULL)
2331 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2334 if (pvo->pvo_vaddr & PVO_WIRED) {
2335 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2336 pm->pm_stats.wired_count++;
2338 pm->pm_stats.resident_count++;
2341 * We hope this succeeds but it isn't required.
2343 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
2345 PVO_PTEGIDX_SET(pvo, i);
2347 panic("moea64_pvo_enter: overflow");
2348 moea64_pte_overflow++;
2351 if (pm == kernel_pmap)
2354 #ifdef __powerpc64__
2356 * Make sure all our bootstrap mappings are in the SLB as soon
2357 * as virtual memory is switched on.
2359 if (!pmap_bootstrapped)
2360 moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
2363 return (first ? ENOENT : 0);
2367 moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
2372 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2373 rw_assert(&moea64_table_lock, RA_WLOCKED);
2376 * If there is an active pte entry, we need to deactivate it (and
2377 * save the ref & chg bits).
2379 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
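/*
 * pt is the page table slot holding a valid PTE for this mapping,
 * or -1 if the mapping currently lives only in the overflow list.
 */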
2381 MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2382 PVO_PTEGIDX_CLR(pvo);
2384 moea64_pte_overflow--;
2388 * Update our statistics.
2390 pvo->pvo_pmap->pm_stats.resident_count--;
2391 if (pvo->pvo_vaddr & PVO_WIRED)
2392 pvo->pvo_pmap->pm_stats.wired_count--;
2395 * Remove this PVO from the pmap list.
2397 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2400 * Remove this from the overflow list and return it to the pool
2401 * if we aren't going to reuse it.
2403 LIST_REMOVE(pvo, pvo_olink);
2406 * Update vm about the REF/CHG bits if the page is managed.
2408 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
2410 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
2411 LIST_REMOVE(pvo, pvo_vlink);
2412 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
2413 if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
2415 if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
2416 vm_page_aflag_set(pg, PGA_REFERENCED);
2417 if (LIST_EMPTY(vm_page_to_pvoh(pg)))
2418 vm_page_aflag_clear(pg, PGA_WRITEABLE);
2420 if (LIST_EMPTY(vm_page_to_pvoh(pg)))
2421 vm_page_aflag_clear(pg, PGA_EXECUTABLE);
2424 moea64_pvo_entries--;
2425 moea64_pvo_remove_calls++;
2427 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2428 uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
2429 moea64_upvo_zone, pvo);
2432 static struct pvo_entry *
2433 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2435 struct pvo_entry key;
2437 key.pvo_vaddr = va & ~ADDR_POFF;
2438 return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
2442 moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2444 struct pvo_entry *pvo;
2448 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2450 * See if we saved the bit off. If so, return success.
2452 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2459 * No luck; now go through the hard part of looking at the PTEs
2460 * themselves. Sync so that any pending REF/CHG bits are flushed to the PTEs.
2464 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2467 * See if this pvo has a valid PTE. If so, fetch the
2468 * REF/CHG bits from the valid PTE. If the appropriate
2469 * ptebit is set, return success.
2471 PMAP_LOCK(pvo->pvo_pmap);
2472 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2474 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
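/*
 * The synch above copied the hardware-maintained REF/CHG bits from
 * the live PTE into the cached copy, so test the requested bit there.
 */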
2475 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2476 PMAP_UNLOCK(pvo->pvo_pmap);
2481 PMAP_UNLOCK(pvo->pvo_pmap);
2489 moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2492 struct pvo_entry *pvo;
2496 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2497 * we can reset the right ones). Note that since the pvo entries and
2498 * list heads are accessed via BAT0 and are never placed in the page
2499 * table, we don't have to worry about further accesses setting the REF/CHG bits.
2505 * For each pvo entry, clear the pvo's ptebit. If this pvo has a
2506 * valid pte, clear the ptebit from the valid pte.
2510 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2511 PMAP_LOCK(pvo->pvo_pmap);
2512 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2514 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2515 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2517 MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
2518 pvo->pvo_vpn, ptebit);
2521 pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
2522 PMAP_UNLOCK(pvo->pvo_pmap);
2530 moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2532 struct pvo_entry *pvo, key;
2536 PMAP_LOCK(kernel_pmap);
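/*
 * Walk the kernel pmap a page at a time and check that every page in
 * [pa, pa + size) is mapped at the virtual address equal to its
 * physical address, i.e. that the range is direct (1:1) mapped.
 */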
2537 key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
2538 for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
2539 ppa < pa + size; ppa += PAGE_SIZE,
2540 pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
2542 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
2547 PMAP_UNLOCK(kernel_pmap);
2553 * Map a set of physical memory pages into the kernel virtual
2554 * address space. Return a pointer to where it is mapped. This
2555 * routine is intended to be used for mapping device memory, not regular RAM.
2559 moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
2561 vm_offset_t va, tmpva, ppa, offset;
2563 ppa = trunc_page(pa);
2564 offset = pa & PAGE_MASK;
2565 size = roundup2(offset + size, PAGE_SIZE);
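/*
 * Illustrative example (hypothetical values): pa = 0x80001234 and
 * size = 0x100 give ppa = 0x80001000, offset = 0x234 and a rounded
 * size of one page; the caller gets back va + 0x234.
 */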
2567 va = kva_alloc(size);
2570 panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2572 for (tmpva = va; size > 0;) {
2573 moea64_kenter_attr(mmu, tmpva, ppa, ma);
2579 return ((void *)(va + offset));
2583 moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2586 return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
2590 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2592 vm_offset_t base, offset;
2594 base = trunc_page(va);
2595 offset = va & PAGE_MASK;
2596 size = roundup2(offset + size, PAGE_SIZE);
2598 kva_free(base, size);
2602 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2604 struct pvo_entry *pvo;
2611 lim = round_page(va + 1);	/* end of the page containing va, even if va is page-aligned */
2612 len = MIN(lim - va, sz);
2613 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
2614 if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
2615 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
2617 moea64_syncicache(mmu, pm, va, pa, len);
2626 moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2629 if (md->md_vaddr == ~0UL)
2630 return (md->md_paddr + ofs);
2632 return (md->md_vaddr + ofs);
2636 moea64_scan_md(mmu_t mmu, struct pmap_md *prev)
2638 static struct pmap_md md;
2639 struct pvo_entry *pvo;
2642 if (dumpsys_minidump) {
2643 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */
2645 /* 1st: kernel .data and .bss. */
2647 md.md_vaddr = trunc_page((uintptr_t)_etext);
2648 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
2651 switch (prev->md_index) {
2653 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2655 md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
2656 md.md_size = round_page(msgbufp->msg_size);
2659 /* 3rd: kernel VM. */
2660 va = prev->md_vaddr + prev->md_size;
2661 /* Find start of next chunk (from va). */
2662 while (va < virtual_end) {
2663 /* Don't dump the buffer cache. */
2664 if (va >= kmi.buffer_sva &&
2665 va < kmi.buffer_eva) {
2666 va = kmi.buffer_eva;
2669 pvo = moea64_pvo_find_va(kernel_pmap,
2672 (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
2676 if (va < virtual_end) {
2679 /* Find last page in chunk. */
2680 while (va < virtual_end) {
2681 /* Don't run into the buffer cache. */
2682 if (va == kmi.buffer_sva)
2684 pvo = moea64_pvo_find_va(kernel_pmap,
2687 !(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
2691 md.md_size = va - md.md_vaddr;
2699 } else { /* full dump: walk the physical memory regions. */
2701 /* first physical chunk. */
2702 md.md_paddr = pregions[0].mr_start;
2703 md.md_size = pregions[0].mr_size;
2706 } else if (md.md_index < pregions_sz) {
2707 md.md_paddr = pregions[md.md_index].mr_start;
2708 md.md_size = pregions[md.md_index].mr_size;
2712 /* There's no next physical chunk. */