2 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the NetBSD
19 * Foundation, Inc. and its contributors.
20 * 4. Neither the name of The NetBSD Foundation nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38 * Copyright (C) 1995, 1996 TooLs GmbH.
39 * All rights reserved.
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. All advertising materials mentioning features or use of this software
50 * must display the following acknowledgement:
51 * This product includes software developed by TooLs GmbH.
52 * 4. The name of TooLs GmbH may not be used to endorse or promote products
53 * derived from this software without specific prior written permission.
55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
69 * Copyright (C) 2001 Benno Rice.
70 * All rights reserved.
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions
75 * 1. Redistributions of source code must retain the above copyright
76 * notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce the above copyright
78 * notice, this list of conditions and the following disclaimer in the
79 * documentation and/or other materials provided with the distribution.
81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
93 #include <sys/cdefs.h>
94 __FBSDID("$FreeBSD$");
97 * Manages physical address maps.
99 * In addition to hardware address maps, this module is called upon to
100 * provide software-use-only maps which may or may not be stored in the
101 * same form as hardware maps. These pseudo-maps are used to store
102 * intermediate results from copy operations to and from address spaces.
104 * Since the information managed by this module is also stored by the
105 * logical address mapping module, this module may throw away valid virtual
106 * to physical mappings at almost any time. However, invalidations of
107 * mappings must be done as requested.
109 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
117 #include "opt_kstack_pages.h"
119 #include <sys/param.h>
120 #include <sys/kernel.h>
122 #include <sys/lock.h>
123 #include <sys/msgbuf.h>
124 #include <sys/mutex.h>
125 #include <sys/proc.h>
126 #include <sys/sysctl.h>
127 #include <sys/systm.h>
128 #include <sys/vmmeter.h>
132 #include <dev/ofw/openfirm.h>
135 #include <vm/vm_param.h>
136 #include <vm/vm_kern.h>
137 #include <vm/vm_page.h>
138 #include <vm/vm_map.h>
139 #include <vm/vm_object.h>
140 #include <vm/vm_extern.h>
141 #include <vm/vm_pageout.h>
142 #include <vm/vm_pager.h>
145 #include <machine/cpu.h>
146 #include <machine/platform.h>
147 #include <machine/frame.h>
148 #include <machine/md_var.h>
149 #include <machine/psl.h>
150 #include <machine/bat.h>
151 #include <machine/pte.h>
152 #include <machine/sr.h>
153 #include <machine/trap.h>
154 #include <machine/mmuvar.h>
160 #define TODO panic("%s: not implemented", __func__);
static __inline u_int32_t
cntlzw(volatile u_int32_t a) {
	u_int32_t b;
	__asm ("cntlzw %0, %1" : "=r"(b) : "r"(a));
	return (b);
}
169 static __inline uint64_t
170 va_to_vsid(pmap_t pm, vm_offset_t va)
172 return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
175 #define PTESYNC() __asm __volatile("ptesync");
176 #define TLBSYNC() __asm __volatile("tlbsync; ptesync");
177 #define SYNC() __asm __volatile("sync");
178 #define EIEIO() __asm __volatile("eieio");
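
/*
 * Typical ordering of the barriers above when a PTE is rewritten (a sketch
 * of how moea64_pte_set() below uses them; illustrative only):
 *
 *	pt->pte_lo = pvo_pt->pte_lo;
 *	EIEIO();			order the two halves of the PTE
 *	pt->pte_hi = pvo_pt->pte_hi;
 *	PTESYNC();			make the completed PTE visible to the MMU
 */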
181 * The tlbie instruction must be executed in 64-bit mode
182 * so we have to twiddle MSR[SF] around every invocation.
183 * Just to add to the fun, exceptions must be off as well
184 * so that we can't trap in 64-bit mode. What a pain.
186 struct mtx tlbie_mutex;
static __inline void
TLBIE(pmap_t pmap, vm_offset_t va) {
191 register_t vpn_hi, vpn_lo;
195 vpn = (uint64_t)(va & ADDR_PIDX);
197 vpn |= (va_to_vsid(pmap,va) << 28);
198 vpn &= ~(0xffffULL << 48);
200 vpn_hi = (uint32_t)(vpn >> 32);
201 vpn_lo = (uint32_t)vpn;
203 mtx_lock_spin(&tlbie_mutex);
219 : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
221 mtx_unlock_spin(&tlbie_mutex);
224 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
225 #define ENABLE_TRANS(msr) mtmsr(msr); isync()
227 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
228 #define VSID_TO_SR(vsid) ((vsid) & 0xf)
229 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
230 #define VSID_HASH_MASK 0x0000007fffffffffULL
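
/*
 * Example (illustrative only; not used elsewhere in this file): how a
 * segment register value is composed and decomposed with the macros above,
 * assuming sr is a segment number (0-15) and hash is the 20-bit VSID hash.
 */
static __inline uint64_t
moea64_vsid_example(u_int sr, u_int hash)
{
	uint64_t vsid;

	vsid = VSID_MAKE(sr, hash);
	KASSERT(VSID_TO_SR(vsid) == sr, ("segment number round-trip"));
	KASSERT(VSID_TO_HASH(vsid) == (hash & 0xfffff), ("hash round-trip"));
	return (vsid);
}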
232 #define PVO_PTEGIDX_MASK 0x007UL /* which PTEG slot */
233 #define PVO_PTEGIDX_VALID 0x008UL /* slot is valid */
234 #define PVO_WIRED 0x010UL /* PVO entry is wired */
235 #define PVO_MANAGED 0x020UL /* PVO entry is managed */
236 #define PVO_BOOTSTRAP 0x080UL /* PVO entry allocated during
238 #define PVO_FAKE 0x100UL /* fictitious phys page */
239 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF)
240 #define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE)
241 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
242 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
243 #define PVO_PTEGIDX_CLR(pvo) \
244 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
245 #define PVO_PTEGIDX_SET(pvo, i) \
246 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
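
/*
 * Sketch of how the flags above are used (illustrative only): when a PVO's
 * PTE lands in slot i (0-7) of its PTEG, the slot is cached in the low bits
 * of pvo_vaddr so it can be found again without searching the group:
 *
 *	PVO_PTEGIDX_CLR(pvo);			forget any previous slot
 *	PVO_PTEGIDX_SET(pvo, i);		record slot i and mark it valid
 *	if (PVO_PTEGIDX_ISSET(pvo))
 *		i = PVO_PTEGIDX_GET(pvo);	recovers the same slot
 */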
248 #define MOEA_PVO_CHECK(pvo)
250 #define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
251 #define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex);
252 #define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)
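
/*
 * Sketch of the locking discipline (illustrative only): walks of the PTEG
 * and PVO tables are bracketed by the macros above, e.g.
 *
 *	LOCK_TABLE();
 *	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink)
 *		...;
 *	UNLOCK_TABLE();
 */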
257 vm_offset_t om_pa_hi;
258 vm_offset_t om_pa_lo;
263 * Map of physical memory regions.
265 static struct mem_region *regions;
266 static struct mem_region *pregions;
267 extern u_int phys_avail_count;
268 extern int regions_sz, pregions_sz;
269 extern int ofw_real_mode;
271 extern struct pmap ofw_pmap;
273 extern void bs_remap_earlyboot(void);
277 * Lock for the pteg and pvo tables.
279 struct mtx moea64_table_mutex;
284 static struct lpteg *moea64_pteg_table;
285 u_int moea64_pteg_count;
286 u_int moea64_pteg_mask;
291 struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */
292 /* lists of unmanaged pages */
293 struct pvo_head moea64_pvo_kunmanaged =
294 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);
295 struct pvo_head moea64_pvo_unmanaged =
296 LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged);
298 uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
299 uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */
301 #define BPVO_POOL_SIZE 327680
302 static struct pvo_entry *moea64_bpvo_pool;
303 static int moea64_bpvo_pool_index = 0;
305 #define VSID_NBPW (sizeof(u_int32_t) * 8)
306 static u_int moea64_vsid_bitmap[NPMAPS / VSID_NBPW];
308 static boolean_t moea64_initialized = FALSE;
313 u_int moea64_pte_valid = 0;
314 u_int moea64_pte_overflow = 0;
315 u_int moea64_pvo_entries = 0;
316 u_int moea64_pvo_enter_calls = 0;
317 u_int moea64_pvo_remove_calls = 0;
318 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
319 &moea64_pte_valid, 0, "");
320 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
321 &moea64_pte_overflow, 0, "");
322 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
323 &moea64_pvo_entries, 0, "");
324 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
325 &moea64_pvo_enter_calls, 0, "");
326 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
327 &moea64_pvo_remove_calls, 0, "");
329 vm_offset_t moea64_scratchpage_va[2];
330 struct lpte *moea64_scratchpage_pte[2];
331 struct mtx moea64_scratchpage_mtx;
334 * Allocate physical memory for use in moea64_bootstrap.
336 static vm_offset_t moea64_bootstrap_alloc(vm_size_t, u_int);
341 static int moea64_pte_insert(u_int, struct lpte *);
346 static int moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
347 vm_offset_t, vm_offset_t, uint64_t, int);
348 static void moea64_pvo_remove(struct pvo_entry *, int);
349 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *);
350 static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *, int);
355 static void moea64_bridge_bootstrap(mmu_t mmup,
356 vm_offset_t kernelstart, vm_offset_t kernelend);
357 static void moea64_bridge_cpu_bootstrap(mmu_t, int ap);
358 static void moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
359 vm_prot_t, boolean_t);
360 static boolean_t moea64_query_bit(vm_page_t, u_int64_t);
361 static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
362 static void moea64_kremove(mmu_t, vm_offset_t);
363 static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
364 vm_offset_t pa, vm_size_t sz);
365 static void tlbia(void);
368 * Kernel MMU interface
370 void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
371 void moea64_clear_modify(mmu_t, vm_page_t);
372 void moea64_clear_reference(mmu_t, vm_page_t);
373 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
374 void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
375 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
377 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
378 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
379 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
380 void moea64_init(mmu_t);
381 boolean_t moea64_is_modified(mmu_t, vm_page_t);
382 boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
383 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
384 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
385 int moea64_page_wired_mappings(mmu_t, vm_page_t);
386 void moea64_pinit(mmu_t, pmap_t);
387 void moea64_pinit0(mmu_t, pmap_t);
388 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
389 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
390 void moea64_qremove(mmu_t, vm_offset_t, int);
391 void moea64_release(mmu_t, pmap_t);
392 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
393 void moea64_remove_all(mmu_t, vm_page_t);
394 void moea64_remove_write(mmu_t, vm_page_t);
395 void moea64_zero_page(mmu_t, vm_page_t);
396 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
397 void moea64_zero_page_idle(mmu_t, vm_page_t);
398 void moea64_activate(mmu_t, struct thread *);
399 void moea64_deactivate(mmu_t, struct thread *);
400 void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
401 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
402 vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
403 void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
404 boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
405 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
407 static mmu_method_t moea64_bridge_methods[] = {
408 MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
409 MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
410 MMUMETHOD(mmu_clear_reference, moea64_clear_reference),
411 MMUMETHOD(mmu_copy_page, moea64_copy_page),
412 MMUMETHOD(mmu_enter, moea64_enter),
413 MMUMETHOD(mmu_enter_object, moea64_enter_object),
414 MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
415 MMUMETHOD(mmu_extract, moea64_extract),
416 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
417 MMUMETHOD(mmu_init, moea64_init),
418 MMUMETHOD(mmu_is_modified, moea64_is_modified),
419 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
420 MMUMETHOD(mmu_map, moea64_map),
421 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
422 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
423 MMUMETHOD(mmu_pinit, moea64_pinit),
424 MMUMETHOD(mmu_pinit0, moea64_pinit0),
425 MMUMETHOD(mmu_protect, moea64_protect),
426 MMUMETHOD(mmu_qenter, moea64_qenter),
427 MMUMETHOD(mmu_qremove, moea64_qremove),
428 MMUMETHOD(mmu_release, moea64_release),
429 MMUMETHOD(mmu_remove, moea64_remove),
430 MMUMETHOD(mmu_remove_all, moea64_remove_all),
431 MMUMETHOD(mmu_remove_write, moea64_remove_write),
432 MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
433 MMUMETHOD(mmu_zero_page, moea64_zero_page),
434 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
435 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
436 MMUMETHOD(mmu_activate, moea64_activate),
437 MMUMETHOD(mmu_deactivate, moea64_deactivate),
439 /* Internal interfaces */
440 MMUMETHOD(mmu_bootstrap, moea64_bridge_bootstrap),
441 MMUMETHOD(mmu_cpu_bootstrap, moea64_bridge_cpu_bootstrap),
442 MMUMETHOD(mmu_mapdev, moea64_mapdev),
443 MMUMETHOD(mmu_unmapdev, moea64_unmapdev),
444 MMUMETHOD(mmu_kextract, moea64_kextract),
445 MMUMETHOD(mmu_kenter, moea64_kenter),
446 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
451 static mmu_def_t oea64_bridge_mmu = {
453 moea64_bridge_methods,
456 MMU_DEF(oea64_bridge_mmu);
458 static __inline u_int
459 va_to_pteg(uint64_t vsid, vm_offset_t addr)
463 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
465 return (hash & moea64_pteg_mask);
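
	/*
	 * Note (illustrative): the secondary PTEG index for an address is
	 * always the primary index XORed with moea64_pteg_mask:
	 *
	 *	ptegidx = va_to_pteg(vsid, va);		primary group
	 *	ptegidx ^= moea64_pteg_mask;		secondary group (LPTE_HID)
	 *
	 * which is how moea64_pte_insert() and moea64_pvo_pte_index() locate
	 * entries that fell back to the secondary hash.
	 */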
468 static __inline struct pvo_head *
469 pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
473 pg = PHYS_TO_VM_PAGE(pa);
479 return (&moea64_pvo_unmanaged);
481 return (&pg->md.mdpg_pvoh);
484 static __inline struct pvo_head *
485 vm_page_to_pvoh(vm_page_t m)
488 return (&m->md.mdpg_pvoh);
492 moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
495 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
496 m->md.mdpg_attrs &= ~ptebit;
499 static __inline u_int64_t
500 moea64_attr_fetch(vm_page_t m)
503 return (m->md.mdpg_attrs);
507 moea64_attr_save(vm_page_t m, u_int64_t ptebit)
510 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
511 m->md.mdpg_attrs |= ptebit;
515 moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
521 * Construct a PTE. Default to IMB initially. Valid bit only gets
522 * set when the real pte is set in memory.
524 * Note: Don't set the valid bit for correct operation of tlb update.
526 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
527 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);
533 moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt)
538 pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
542 moea64_pte_clear(struct lpte *pt, pmap_t pmap, vm_offset_t va, u_int64_t ptebit)
547 * As shown in Section 7.6.3.2.3
549 pt->pte_lo &= ~ptebit;
554 moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt)
558 pvo_pt->pte_hi |= LPTE_VALID;
561 * Update the PTE as defined in section 7.6.3.1.
562 * Note that the REF/CHG bits are from pvo_pt and thus should have
563 * been saved so this routine can restore them (if desired).
565 pt->pte_lo = pvo_pt->pte_lo;
567 pt->pte_hi = pvo_pt->pte_hi;
573 moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va)
576 pvo_pt->pte_hi &= ~LPTE_VALID;
	 * Force the ref & chg bits back into the PTEs.
584 * Invalidate the pte.
586 pt->pte_hi &= ~LPTE_VALID;
	 * Save the ref & chg bits.
592 moea64_pte_synch(pt, pvo_pt);
597 moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va)
603 moea64_pte_unset(pt, pvo_pt, pmap, va);
604 moea64_pte_set(pt, pvo_pt);
605 if (pmap == kernel_pmap)
609 static __inline uint64_t
610 moea64_calc_wimg(vm_offset_t pa)
616 * Assume the page is cache inhibited and access is guarded unless
617 * it's in our available memory array.
619 pte_lo = LPTE_I | LPTE_G;
620 for (i = 0; i < pregions_sz; i++) {
621 if ((pa >= pregions[i].mr_start) &&
622 (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
623 pte_lo &= ~(LPTE_I | LPTE_G);
633 * Quick sort callout for comparing memory regions.
635 static int mr_cmp(const void *a, const void *b);
636 static int om_cmp(const void *a, const void *b);
639 mr_cmp(const void *a, const void *b)
641 const struct mem_region *regiona;
642 const struct mem_region *regionb;
646 if (regiona->mr_start < regionb->mr_start)
648 else if (regiona->mr_start > regionb->mr_start)
655 om_cmp(const void *a, const void *b)
657 const struct ofw_map *mapa;
658 const struct ofw_map *mapb;
662 if (mapa->om_pa_hi < mapb->om_pa_hi)
664 else if (mapa->om_pa_hi > mapb->om_pa_hi)
666 else if (mapa->om_pa_lo < mapb->om_pa_lo)
668 else if (mapa->om_pa_lo > mapb->om_pa_lo)
675 moea64_bridge_cpu_bootstrap(mmu_t mmup, int ap)
680 * Initialize segment registers and MMU
683 mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync();
684 for (i = 0; i < 16; i++) {
685 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
687 __asm __volatile ("ptesync; mtsdr1 %0; isync"
688 :: "r"((u_int)moea64_pteg_table
689 | (32 - cntlzw(moea64_pteg_mask >> 11))));
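
	/*
	 * Worked example (assuming the common case): with 2^16 PTEGs,
	 * moea64_pteg_mask = 0xffff, moea64_pteg_mask >> 11 = 0x1f,
	 * cntlzw(0x1f) = 27 and 32 - 27 = 5; the low bits of SDR1 thus
	 * encode log2(PTEG count) - 11, as the hardware expects.
	 */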
694 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
696 struct ofw_map translations[sz/sizeof(struct ofw_map)];
702 bzero(translations, sz);
703 if (OF_getprop(mmu, "translations", translations, sz) == -1)
704 panic("moea64_bootstrap: can't get ofw translations");
706 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
707 sz /= sizeof(*translations);
708 qsort(translations, sz, sizeof (*translations), om_cmp);
710 for (i = 0, ofw_mappings = 0; i < sz; i++) {
711 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
712 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
713 translations[i].om_len);
715 if (translations[i].om_pa_lo % PAGE_SIZE)
716 panic("OFW translation not page-aligned!");
718 if (translations[i].om_pa_hi)
719 panic("OFW translations above 32-bit boundary!");
721 pa_base = translations[i].om_pa_lo;
723 /* Now enter the pages for this mapping */
726 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
727 moea64_kenter(mmup, translations[i].om_va + off,
737 moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
744 vm_size_t size, physsz, hwphyssz;
745 vm_offset_t pa, va, off;
749 /* We don't have a direct map since there is no BAT */
752 /* Make sure battable is zero, since we have no BAT */
753 for (i = 0; i < 16; i++) {
754 battable[i].batu = 0;
755 battable[i].batl = 0;
758 /* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
760 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
762 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
763 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
764 panic("moea64_bootstrap: phys_avail too small");
765 qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
766 phys_avail_count = 0;
769 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
770 for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
771 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
772 regions[i].mr_start + regions[i].mr_size,
775 (physsz + regions[i].mr_size) >= hwphyssz) {
776 if (physsz < hwphyssz) {
777 phys_avail[j] = regions[i].mr_start;
778 phys_avail[j + 1] = regions[i].mr_start +
785 phys_avail[j] = regions[i].mr_start;
786 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
788 physsz += regions[i].mr_size;
790 physmem = btoc(physsz);
793 * Allocate PTEG table.
796 moea64_pteg_count = PTEGCOUNT;
798 moea64_pteg_count = 0x1000;
800 while (moea64_pteg_count < physmem)
801 moea64_pteg_count <<= 1;
802 #endif /* PTEGCOUNT */
804 size = moea64_pteg_count * sizeof(struct lpteg);
805 CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
806 moea64_pteg_count, size);
	 * We now need to allocate memory, and that memory has to be covered
	 * by a page table: the very page table we are about to allocate.
	 * Since there are no BATs to provide a mapping, drop to data real
	 * mode for a moment while we touch it.  We do this a couple of
	 * times below.
815 moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
817 bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
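
	/*
	 * A minimal sketch of the real-mode bracket described above
	 * (illustrative; assumes a local register_t msr is in scope):
	 *
	 *	DISABLE_TRANS(msr);
	 *	... touch the not-yet-mapped PTEG table ...
	 *	ENABLE_TRANS(msr);
	 */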
820 moea64_pteg_mask = moea64_pteg_count - 1;
822 CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);
825 * Allocate pv/overflow lists.
827 size = sizeof(struct pvo_head) * moea64_pteg_count;
829 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
831 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);
834 for (i = 0; i < moea64_pteg_count; i++)
835 LIST_INIT(&moea64_pvo_table[i]);
	 * Initialize the lock that synchronizes access to the pteg and pvo tables.
842 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
846 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
848 mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);
851 * Initialise the unmanaged pvo pool.
853 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
854 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
855 moea64_bpvo_pool_index = 0;
858 * Make sure kernel vsid is allocated as well as VSID 0.
860 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
861 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
862 moea64_vsid_bitmap[0] |= 1;
865 * Initialize the kernel pmap (which is statically allocated).
867 for (i = 0; i < 16; i++)
868 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
870 kernel_pmap->pmap_phys = kernel_pmap;
871 kernel_pmap->pm_active = ~0;
873 PMAP_LOCK_INIT(kernel_pmap);
876 * Now map in all the other buffers we allocated earlier
880 size = moea64_pteg_count * sizeof(struct lpteg);
881 off = (vm_offset_t)(moea64_pteg_table);
882 for (pa = off; pa < off + size; pa += PAGE_SIZE)
883 moea64_kenter(mmup, pa, pa);
884 size = sizeof(struct pvo_head) * moea64_pteg_count;
885 off = (vm_offset_t)(moea64_pvo_table);
886 for (pa = off; pa < off + size; pa += PAGE_SIZE)
887 moea64_kenter(mmup, pa, pa);
888 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
889 off = (vm_offset_t)(moea64_bpvo_pool);
890 for (pa = off; pa < off + size; pa += PAGE_SIZE)
891 moea64_kenter(mmup, pa, pa);
894 * Map certain important things, like ourselves.
896 * NOTE: We do not map the exception vector space. That code is
897 * used only in real mode, and leaving it unmapped allows us to
	 * catch NULL pointer dereferences, instead of making NULL a valid address.
902 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; pa += PAGE_SIZE)
903 moea64_kenter(mmup, pa, pa);
906 if (!ofw_real_mode) {
908 * Set up the Open Firmware pmap and add its mappings.
911 moea64_pinit(mmup, &ofw_pmap);
912 for (i = 0; i < 16; i++)
913 ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i];
915 if ((chosen = OF_finddevice("/chosen")) == -1)
916 panic("moea64_bootstrap: can't find /chosen");
917 OF_getprop(chosen, "mmu", &mmui, 4);
918 if ((mmu = OF_instance_to_package(mmui)) == -1)
919 panic("moea64_bootstrap: can't get mmu package");
920 if ((sz = OF_getproplen(mmu, "translations")) == -1)
921 panic("moea64_bootstrap: can't get ofw translation count");
922 if (sz > 6144 /* tmpstksz - 2 KB headroom */)
923 panic("moea64_bootstrap: too many ofw translations");
925 moea64_add_ofw_mappings(mmup, mmu, sz);
933 * Calculate the last available physical address.
935 for (i = 0; phys_avail[i + 2] != 0; i += 2)
937 Maxmem = powerpc_btop(phys_avail[i + 1]);
940 * Initialize MMU and remap early physical mappings
942 moea64_bridge_cpu_bootstrap(mmup,0);
943 mtmsr(mfmsr() | PSL_DR | PSL_IR); isync();
945 bs_remap_earlyboot();
948 * Set the start and end of kva.
950 virtual_avail = VM_MIN_KERNEL_ADDRESS;
951 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
954 * Figure out how far we can extend virtual_end into segment 16
955 * without running into existing mappings. Segment 16 is guaranteed
956 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not step on.
961 PMAP_LOCK(kernel_pmap);
962 while (moea64_pvo_find_va(kernel_pmap, virtual_end+1, NULL) == NULL)
963 virtual_end += PAGE_SIZE;
964 PMAP_UNLOCK(kernel_pmap);
967 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to keep the PVO
	 * book-keeping and the rest of the VM system from even knowing
	 * that this hack exists.
973 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF);
974 for (i = 0; i < 2; i++) {
979 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
980 virtual_end -= PAGE_SIZE;
984 vsid = va_to_vsid(kernel_pmap, moea64_scratchpage_va[i]);
985 moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i],
987 pt.pte_hi |= LPTE_LOCKED;
989 ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i]);
990 pteidx = moea64_pte_insert(ptegidx, &pt);
991 if (pt.pte_hi & LPTE_HID)
992 ptegidx ^= moea64_pteg_mask;
994 moea64_scratchpage_pte[i] =
995 &moea64_pteg_table[ptegidx].pt[pteidx];
1001 * Allocate a kernel stack with a guard page for thread0 and map it
1002 * into the kernel page map.
1004 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
1005 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1006 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
1008 thread0.td_kstack = va;
1009 thread0.td_kstack_pages = KSTACK_PAGES;
1010 for (i = 0; i < KSTACK_PAGES; i++) {
1011 moea64_kenter(mmup, va, pa);
1017 * Allocate virtual address space for the message buffer.
1019 pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
1020 msgbufp = (struct msgbuf *)virtual_avail;
1022 virtual_avail += round_page(MSGBUF_SIZE);
1023 while (va < virtual_avail) {
1024 moea64_kenter(mmup, va, pa);
1030 * Allocate virtual address space for the dynamic percpu area.
1032 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
1033 dpcpu = (void *)virtual_avail;
1034 virtual_avail += DPCPU_SIZE;
1035 while (va < virtual_avail) {
1036 moea64_kenter(mmup, va, pa);
1040 dpcpu_init(dpcpu, 0);
 * Activate a user pmap.  The pmap must be activated before its address
1045 * space can be accessed in any way.
1048 moea64_activate(mmu_t mmu, struct thread *td)
1053 * Load all the data we need up front to encourage the compiler to
1054 * not issue any loads while we have interrupts disabled below.
1056 pm = &td->td_proc->p_vmspace->vm_pmap;
1057 pmr = pm->pmap_phys;
1059 pm->pm_active |= PCPU_GET(cpumask);
1060 PCPU_SET(curpmap, pmr);
1064 moea64_deactivate(mmu_t mmu, struct thread *td)
1068 pm = &td->td_proc->p_vmspace->vm_pmap;
1069 pm->pm_active &= ~(PCPU_GET(cpumask));
1070 PCPU_SET(curpmap, NULL);
1074 moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
1076 struct pvo_entry *pvo;
1079 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1083 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1084 pm->pm_stats.wired_count++;
1085 pvo->pvo_vaddr |= PVO_WIRED;
1087 if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1088 pm->pm_stats.wired_count--;
1089 pvo->pvo_vaddr &= ~PVO_WIRED;
1096 * This goes through and sets the physical address of our
1097 * special scratch PTE to the PA we want to zero or copy. Because
1098 * of locking issues (this can get called in pvo_enter() by
1099 * the UMA allocator), we can't use most other utility functions here
1103 void moea64_set_scratchpage_pa(int which, vm_offset_t pa) {
1105 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1107 moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID;
1108 TLBIE(kernel_pmap, moea64_scratchpage_va[which]);
1110 moea64_scratchpage_pte[which]->pte_lo &=
1111 ~(LPTE_WIMG | LPTE_RPGN);
1112 moea64_scratchpage_pte[which]->pte_lo |=
1113 moea64_calc_wimg(pa) | (uint64_t)pa;
1116 moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
1121 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1126 dst = VM_PAGE_TO_PHYS(mdst);
1127 src = VM_PAGE_TO_PHYS(msrc);
1129 mtx_lock(&moea64_scratchpage_mtx);
1131 moea64_set_scratchpage_pa(0,src);
1132 moea64_set_scratchpage_pa(1,dst);
1134 kcopy((void *)moea64_scratchpage_va[0],
1135 (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1137 mtx_unlock(&moea64_scratchpage_mtx);
1141 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1143 vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1145 if (!moea64_initialized)
		panic("moea64_zero_page_area: can't zero pa %#x", pa);
1147 if (size + off > PAGE_SIZE)
		panic("moea64_zero_page_area: size + off > PAGE_SIZE");
1150 mtx_lock(&moea64_scratchpage_mtx);
1152 moea64_set_scratchpage_pa(0,pa);
1153 bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1154 mtx_unlock(&moea64_scratchpage_mtx);
 * Zero a page of physical memory by temporarily mapping it into the
 * kernel address space.
1161 moea64_zero_page(mmu_t mmu, vm_page_t m)
1163 vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1166 if (!moea64_initialized)
1167 panic("moea64_zero_page: can't zero pa %#x", pa);
1169 mtx_lock(&moea64_scratchpage_mtx);
1171 moea64_set_scratchpage_pa(0,pa);
1172 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1173 __asm __volatile("dcbz 0,%0" ::
1174 "r"(moea64_scratchpage_va[0] + off));
1175 mtx_unlock(&moea64_scratchpage_mtx);
1179 moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
1182 moea64_zero_page(mmu, m);
1186 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified, the page
1188 * will be wired down.
1191 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1192 vm_prot_t prot, boolean_t wired)
1195 vm_page_lock_queues();
1197 moea64_enter_locked(pmap, va, m, prot, wired);
1198 vm_page_unlock_queues();
1203 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified, the page
1205 * will be wired down.
1207 * The page queues and pmap must be locked.
1211 moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1214 struct pvo_head *pvo_head;
1221 if (!moea64_initialized) {
1222 pvo_head = &moea64_pvo_kunmanaged;
1224 zone = moea64_upvo_zone;
1227 pvo_head = vm_page_to_pvoh(m);
1229 zone = moea64_mpvo_zone;
1230 pvo_flags = PVO_MANAGED;
1233 if (pmap_bootstrapped)
1234 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1235 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1237 /* XXX change the pvo head for fake pages */
1238 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
1239 pvo_flags &= ~PVO_MANAGED;
1240 pvo_head = &moea64_pvo_kunmanaged;
1241 zone = moea64_upvo_zone;
1244 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m));
1246 if (prot & VM_PROT_WRITE) {
1248 if (pmap_bootstrapped)
1249 vm_page_flag_set(m, PG_WRITEABLE);
1253 if (prot & VM_PROT_EXECUTE)
1254 pvo_flags |= VM_PROT_EXECUTE;
1257 pvo_flags |= PVO_WIRED;
1259 if ((m->flags & PG_FICTITIOUS) != 0)
1260 pvo_flags |= PVO_FAKE;
1262 error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
1266 * Flush the page from the instruction cache if this page is
1267 * mapped executable and cacheable.
1269 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1270 moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1275 moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz)
1279 * This is much trickier than on older systems because
1280 * we can't sync the icache on physical addresses directly
1281 * without a direct map. Instead we check a couple of cases
1282 * where the memory is already mapped in and, failing that,
1283 * use the same trick we use for page zeroing to create
1284 * a temporary mapping for this physical address.
1287 if (!pmap_bootstrapped) {
		 * If PMAP is not bootstrapped, we are likely to be in real mode.
1292 __syncicache((void *)pa, sz);
1293 } else if (pmap == kernel_pmap) {
1294 __syncicache((void *)va, sz);
1296 /* Use the scratch page to set up a temp mapping */
1298 mtx_lock(&moea64_scratchpage_mtx);
1300 moea64_set_scratchpage_pa(1,pa);
1301 __syncicache((void *)moea64_scratchpage_va[1], sz);
1303 mtx_unlock(&moea64_scratchpage_mtx);
1308 * Maps a sequence of resident pages belonging to the same object.
1309 * The sequence begins with the given page m_start. This page is
1310 * mapped at the given virtual address start. Each subsequent page is
1311 * mapped at a virtual address that is offset from start by the same
1312 * amount as the page is offset from m_start within the object. The
1313 * last page in the sequence is the page with the largest offset from
1314 * m_start that can be mapped at a virtual address less than the given
1315 * virtual address end. Not every virtual page between start and end
1316 * is mapped; only those for which a resident page exists with the
1317 * corresponding offset from m_start are mapped.
1320 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1321 vm_page_t m_start, vm_prot_t prot)
1324 vm_pindex_t diff, psize;
1326 psize = atop(end - start);
1329 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1330 moea64_enter_locked(pm, start + ptoa(diff), m, prot &
1331 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1332 m = TAILQ_NEXT(m, listq);
1338 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1342 moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1349 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1351 struct pvo_entry *pvo;
1355 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1359 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF);
1365 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given protection.
1370 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1372 struct pvo_entry *pvo;
1376 vm_page_lock_queues();
1378 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
1379 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
1380 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
1381 (prot & VM_PROT_WRITE) == 0)) {
1382 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1385 vm_page_unlock_queues();
1391 moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1394 * This entire routine is a horrible hack to avoid bothering kmem
1395 * for new KVA addresses. Because this can get called from inside
1396 * kmem allocation routines, calling kmem for a new address here
	 * can lead to recursively acquiring non-recursive mutexes.
1399 static vm_pindex_t color;
1403 int pflags, needed_lock;
1405 *flags = UMA_SLAB_PRIV;
1406 needed_lock = !PMAP_LOCKED(kernel_pmap);
1409 PMAP_LOCK(kernel_pmap);
1411 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
1412 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
1414 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
1416 pflags |= VM_ALLOC_ZERO;
1419 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
1421 if (wait & M_NOWAIT)
1428 va = VM_PAGE_TO_PHYS(m);
1430 moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
1431 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
1432 PVO_WIRED | PVO_BOOTSTRAP);
1435 PMAP_UNLOCK(kernel_pmap);
1437 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1438 bzero((void *)va, PAGE_SIZE);
1444 moea64_init(mmu_t mmu)
1447 CTR0(KTR_PMAP, "moea64_init");
1449 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1450 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1451 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1452 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1453 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1454 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1456 if (!hw_direct_map) {
1457 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
1458 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
1461 moea64_initialized = TRUE;
1465 moea64_is_modified(mmu_t mmu, vm_page_t m)
1468 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1471 return (moea64_query_bit(m, LPTE_CHG));
1475 moea64_clear_reference(mmu_t mmu, vm_page_t m)
1478 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1480 moea64_clear_bit(m, LPTE_REF, NULL);
1484 moea64_clear_modify(mmu_t mmu, vm_page_t m)
1487 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1489 moea64_clear_bit(m, LPTE_CHG, NULL);
1493 * Clear the write and modified bits in each of the given page's mappings.
1496 moea64_remove_write(mmu_t mmu, vm_page_t m)
1498 struct pvo_entry *pvo;
1503 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1504 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1505 (m->flags & PG_WRITEABLE) == 0)
1507 lo = moea64_attr_fetch(m);
1509 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1510 pmap = pvo->pvo_pmap;
1513 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1514 pt = moea64_pvo_to_pte(pvo, -1);
1515 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1516 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1518 moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
1519 lo |= pvo->pvo_pte.lpte.pte_lo;
1520 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1521 moea64_pte_change(pt, &pvo->pvo_pte.lpte,
1522 pvo->pvo_pmap, PVO_VADDR(pvo));
1528 if ((lo & LPTE_CHG) != 0) {
1529 moea64_attr_clear(m, LPTE_CHG);
1532 vm_page_flag_clear(m, PG_WRITEABLE);
1536 * moea64_ts_referenced:
1538 * Return a count of reference bits for a page, clearing those bits.
1539 * It is not necessary for every reference bit to be cleared, but it
1540 * is necessary that 0 only be returned when there are truly no
1541 * reference bits set.
1543 * XXX: The exact number of bits to check and clear is a matter that
1544 * should be tested and standardized at some point in the future for
1545 * optimal aging of shared pages.
1548 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1552 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1555 count = moea64_clear_bit(m, LPTE_REF, NULL);
1561 * Map a wired page into kernel virtual address space.
1564 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1570 if (!pmap_bootstrapped) {
1571 if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end)
			panic("Trying to enter an address in KVA -- %#x!\n", va);
1576 pte_lo = moea64_calc_wimg(pa);
1578 PMAP_LOCK(kernel_pmap);
1579 error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
1580 &moea64_pvo_kunmanaged, va, pa, pte_lo,
1581 PVO_WIRED | VM_PROT_EXECUTE);
1583 if (error != 0 && error != ENOENT)
1584 panic("moea64_kenter: failed to enter va %#x pa %#x: %d", va,
1588 * Flush the memory from the instruction cache.
1590 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) {
1591 __syncicache((void *)va, PAGE_SIZE);
1593 PMAP_UNLOCK(kernel_pmap);
 * Extract the physical page address associated with the given kernel
 * virtual address.
1601 moea64_kextract(mmu_t mmu, vm_offset_t va)
1603 struct pvo_entry *pvo;
1607 * Shortcut the direct-mapped case when applicable. We never put
1608 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1610 if (va < VM_MIN_KERNEL_ADDRESS)
1613 PMAP_LOCK(kernel_pmap);
1614 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1615 KASSERT(pvo != NULL, ("moea64_kextract: no addr found"));
1616 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF);
1617 PMAP_UNLOCK(kernel_pmap);
1622 * Remove a wired page from kernel virtual address space.
1625 moea64_kremove(mmu_t mmu, vm_offset_t va)
1627 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1631 * Map a range of physical addresses into kernel virtual address space.
1633 * The value passed in *virt is a suggested virtual address for the mapping.
1634 * Architectures which can support a direct-mapped physical to virtual region
1635 * can return the appropriate address within that region, leaving '*virt'
1636 * unchanged. We cannot and therefore do not; *virt is updated with the
1637 * first usable address after the mapped region.
1640 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1641 vm_offset_t pa_end, int prot)
1643 vm_offset_t sva, va;
1647 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1648 moea64_kenter(mmu, va, pa_start);
1655 * Returns true if the pmap's pv is one of the first
1656 * 16 pvs linked to from this page. This count may
1657 * be changed upwards or downwards in the future; it
1658 * is only necessary that true be returned for a small
1659 * subset of pmaps for proper page aging.
1662 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1665 struct pvo_entry *pvo;
1667 if (!moea64_initialized || (m->flags & PG_FICTITIOUS))
1670 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1673 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1674 if (pvo->pvo_pmap == pmap)
 * Return the number of managed mappings to the given physical page
 * that are wired.
1688 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1690 struct pvo_entry *pvo;
1694 if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0)
1696 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1697 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1698 if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1703 static u_int moea64_vsidcontext;
1706 moea64_pinit(mmu_t mmu, pmap_t pmap)
1711 PMAP_LOCK_INIT(pmap);
1714 __asm __volatile("mftb %0" : "=r"(entropy));
1716 if (pmap_bootstrapped)
1717 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, (vm_offset_t)pmap);
1719 pmap->pmap_phys = pmap;
1722 * Allocate some segment registers for this pmap.
1724 for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
1734 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1735 hash = moea64_vsidcontext & (NPMAPS - 1);
1736 if (hash == 0) /* 0 is special, avoid it */
1739 mask = 1 << (hash & (VSID_NBPW - 1));
1740 hash = (moea64_vsidcontext & 0xfffff);
1741 if (moea64_vsid_bitmap[n] & mask) { /* collision? */
1742 /* anything free in this bucket? */
1743 if (moea64_vsid_bitmap[n] == 0xffffffff) {
1744 entropy = (moea64_vsidcontext >> 20);
			i = ffs(~moea64_vsid_bitmap[n]) - 1;
1749 hash &= 0xfffff & ~(VSID_NBPW - 1);
1752 moea64_vsid_bitmap[n] |= mask;
1753 for (i = 0; i < 16; i++) {
1754 pmap->pm_sr[i] = VSID_MAKE(i, hash);
1759 panic("moea64_pinit: out of segments");
1763 * Initialize the pmap associated with process 0.
1766 moea64_pinit0(mmu_t mmu, pmap_t pm)
1768 moea64_pinit(mmu, pm);
1769 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1773 * Set the physical protection on the specified range of this map as requested.
1776 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1779 struct pvo_entry *pvo;
1783 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1787 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1788 ("moea64_protect: non current pmap"));
1790 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1791 moea64_remove(mmu, pm, sva, eva);
1795 vm_page_lock_queues();
1797 for (; sva < eva; sva += PAGE_SIZE) {
1798 pvo = moea64_pvo_find_va(pm, sva, &pteidx);
		 * Grab the PTE pointer before we diddle with the cached PTE copy.
1807 pt = moea64_pvo_to_pte(pvo, pteidx);
1810 * Change the protection of the page.
1812 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1813 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1814 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1815 if ((prot & VM_PROT_EXECUTE) == 0)
1816 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1819 * If the PVO is in the page table, update that pte as well.
1822 moea64_pte_change(pt, &pvo->pvo_pte.lpte,
1823 pvo->pvo_pmap, PVO_VADDR(pvo));
1824 if ((pvo->pvo_pte.lpte.pte_lo &
1825 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1826 moea64_syncicache(pm, sva,
1827 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
1833 vm_page_unlock_queues();
1838 * Map a list of wired pages into kernel virtual address space. This is
1839 * intended for temporary mappings which do not need page modification or
1840 * references recorded. Existing mappings in the region are overwritten.
1843 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
1845 while (count-- > 0) {
1846 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1853 * Remove page mappings from kernel virtual address space. Intended for
1854 * temporary mappings entered by moea64_qenter.
1857 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
1859 while (count-- > 0) {
1860 moea64_kremove(mmu, va);
1866 moea64_release(mmu_t mmu, pmap_t pmap)
1871 * Free segment register's VSID
1873 if (pmap->pm_sr[0] == 0)
1874 panic("moea64_release");
1876 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
1877 mask = 1 << (idx % VSID_NBPW);
1879 moea64_vsid_bitmap[idx] &= ~mask;
1880 PMAP_LOCK_DESTROY(pmap);
1884 * Remove the given range of addresses from the specified map.
1887 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1889 struct pvo_entry *pvo;
1892 vm_page_lock_queues();
1894 for (; sva < eva; sva += PAGE_SIZE) {
1895 pvo = moea64_pvo_find_va(pm, sva, &pteidx);
1897 moea64_pvo_remove(pvo, pteidx);
1900 vm_page_unlock_queues();
1905 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
1906 * will reflect changes in pte's back to the vm_page.
1909 moea64_remove_all(mmu_t mmu, vm_page_t m)
1911 struct pvo_head *pvo_head;
1912 struct pvo_entry *pvo, *next_pvo;
1915 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1917 pvo_head = vm_page_to_pvoh(m);
1918 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1919 next_pvo = LIST_NEXT(pvo, pvo_vlink);
1921 MOEA_PVO_CHECK(pvo); /* sanity check */
1922 pmap = pvo->pvo_pmap;
1924 moea64_pvo_remove(pvo, -1);
1927 if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
1928 moea64_attr_clear(m, LPTE_CHG);
1931 vm_page_flag_clear(m, PG_WRITEABLE);
1935 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end
 * are calculated.
1940 moea64_bootstrap_alloc(vm_size_t size, u_int align)
1945 size = round_page(size);
1946 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1948 s = (phys_avail[i] + align - 1) & ~(align - 1);
1953 if (s < phys_avail[i] || e > phys_avail[i + 1])
1956 if (s == phys_avail[i]) {
1957 phys_avail[i] += size;
1958 } else if (e == phys_avail[i + 1]) {
1959 phys_avail[i + 1] -= size;
1961 for (j = phys_avail_count * 2; j > i; j -= 2) {
1962 phys_avail[j] = phys_avail[j - 2];
1963 phys_avail[j + 1] = phys_avail[j - 1];
1966 phys_avail[i + 3] = phys_avail[i + 1];
1967 phys_avail[i + 1] = s;
1968 phys_avail[i + 2] = e;
1974 panic("moea64_bootstrap_alloc: could not allocate memory");
1981 register_t msr, scratch;
1983 for (i = 0; i < 0xFF000; i += 0x00001000) {
1997 : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
2002 moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
2003 vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
2005 struct pvo_entry *pvo;
2013 * One nasty thing that can happen here is that the UMA calls to
2014 * allocate new PVOs need to map more memory, which calls pvo_enter(),
2015 * which calls UMA...
2017 * We break the loop by detecting recursion and allocating out of
2018 * the bootstrap pool.
2021 moea64_pvo_enter_calls++;
2023 bootstrap = (flags & PVO_BOOTSTRAP);
2025 if (!moea64_initialized)
2029 * Compute the PTE Group index.
2032 vsid = va_to_vsid(pm, va);
2033 ptegidx = va_to_pteg(vsid, va);
2036 * Remove any existing mapping for this page. Reuse the pvo entry if
2037 * there is a mapping.
2041 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2042 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2043 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2044 (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
2045 (pte_lo & LPTE_PP)) {
2049 moea64_pvo_remove(pvo, -1);
2055 * If we aren't overwriting a mapping, try to allocate.
2058 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
2059 panic("moea64_enter: bpvo pool exhausted, %d, %d, %d",
2060 moea64_bpvo_pool_index, BPVO_POOL_SIZE,
2061 BPVO_POOL_SIZE * sizeof(struct pvo_entry));
2063 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2064 moea64_bpvo_pool_index++;
2068 * Note: drop the table lock around the UMA allocation in
2069 * case the UMA allocator needs to manipulate the page
2070 * table. The mapping we are working with is already
2071 * protected by the PMAP lock.
2074 pvo = uma_zalloc(zone, M_NOWAIT);
2083 moea64_pvo_entries++;
2084 pvo->pvo_vaddr = va;
2086 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2087 pvo->pvo_vaddr &= ~ADDR_POFF;
2089 if (!(flags & VM_PROT_EXECUTE))
2090 pte_lo |= LPTE_NOEXEC;
2091 if (flags & PVO_WIRED)
2092 pvo->pvo_vaddr |= PVO_WIRED;
2093 if (pvo_head != &moea64_pvo_kunmanaged)
2094 pvo->pvo_vaddr |= PVO_MANAGED;
2096 pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2097 if (flags & PVO_FAKE)
2098 pvo->pvo_vaddr |= PVO_FAKE;
2100 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2101 (uint64_t)(pa) | pte_lo);
	 * Remember if the list was empty and therefore will be the first item.
2107 if (LIST_FIRST(pvo_head) == NULL)
2109 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2111 if (pvo->pvo_vaddr & PVO_WIRED)
2112 pm->pm_stats.wired_count++;
2113 pm->pm_stats.resident_count++;
2116 * We hope this succeeds but it isn't required.
2118 i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
2120 PVO_PTEGIDX_SET(pvo, i);
2122 panic("moea64_pvo_enter: overflow");
2123 moea64_pte_overflow++;
2126 if (pm == kernel_pmap)
2131 return (first ? ENOENT : 0);
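
	/*
	 * Note (illustrative): ENOENT here reports that this was the first
	 * entry on the PVO list, not a failure; callers such as
	 * moea64_kenter() explicitly allow it:
	 *
	 *	error = moea64_pvo_enter(...);
	 *	if (error != 0 && error != ENOENT)
	 *		panic(...);
	 */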
2135 moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
2140 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
2144 pt = moea64_pvo_to_pte(pvo, pteidx);
2146 moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_pmap,
2148 PVO_PTEGIDX_CLR(pvo);
2150 moea64_pte_overflow--;
2154 * Update our statistics.
2156 pvo->pvo_pmap->pm_stats.resident_count--;
2157 if (pvo->pvo_vaddr & PVO_WIRED)
2158 pvo->pvo_pmap->pm_stats.wired_count--;
2161 * Save the REF/CHG bits into their cache if the page is managed.
2163 if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
2166 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
2168 moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
2169 (LPTE_REF | LPTE_CHG));
2174 * Remove this PVO from the PV list.
2176 LIST_REMOVE(pvo, pvo_vlink);
2179 * Remove this from the overflow list and return it to the pool
2180 * if we aren't going to reuse it.
2182 LIST_REMOVE(pvo, pvo_olink);
2185 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2186 uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
2187 moea64_upvo_zone, pvo);
2189 moea64_pvo_entries--;
2190 moea64_pvo_remove_calls++;
2194 moea64_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
2198 * We can find the actual pte entry without searching by grabbing
2199 * the PTEG index from 3 unused bits in pvo_vaddr and by
2200 * noticing the HID bit.
2202 if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
2203 ptegidx ^= moea64_pteg_mask;
2205 return ((ptegidx << 3) | PVO_PTEGIDX_GET(pvo));
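
	/*
	 * Illustrative sketch: the value returned above packs the PTEG number
	 * with the slot inside it, and moea64_pvo_to_pte() unpacks it the
	 * same way:
	 *
	 *	pteidx = (ptegidx << 3) | slot;		encode: 8 slots per PTEG
	 *	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
	 */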
2208 static struct pvo_entry *
2209 moea64_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
2211 struct pvo_entry *pvo;
2216 vsid = va_to_vsid(pm, va);
2217 ptegidx = va_to_pteg(vsid, va);
2220 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2221 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2223 *pteidx_p = moea64_pvo_pte_index(pvo, ptegidx);
2232 static struct lpte *
2233 moea64_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
2238 * If we haven't been supplied the ptegidx, calculate it.
2244 vsid = va_to_vsid(pvo->pvo_pmap, PVO_VADDR(pvo));
2245 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo));
2246 pteidx = moea64_pvo_pte_index(pvo, ptegidx);
2249 pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
2251 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
2252 !PVO_PTEGIDX_ISSET(pvo)) {
2253 panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
2254 "valid pte index", pvo);
2257 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
2258 PVO_PTEGIDX_ISSET(pvo)) {
2259 panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
2260 "pvo but no valid pte", pvo);
2263 if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
2265 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
2266 panic("moea64_pvo_to_pte: pvo %p has valid pte in "
2267 "moea64_pteg_table %p but invalid in pvo", pvo, pt);
2270 if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
2271 ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
2272 panic("moea64_pvo_to_pte: pvo %p pte does not match "
2273 "pte %p in moea64_pteg_table difference is %#x",
2275 (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
2278 ASSERT_TABLE_LOCK();
2282 if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
2283 panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
2284 "moea64_pteg_table but valid in pvo", pvo, pt);
2291 moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt)
2296 ASSERT_TABLE_LOCK();
2299 * First try primary hash.
2301 for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2302 if ((pt->pte_hi & LPTE_VALID) == 0 &&
2303 (pt->pte_hi & LPTE_LOCKED) == 0) {
2304 pvo_pt->pte_hi &= ~LPTE_HID;
2305 moea64_pte_set(pt, pvo_pt);
2311 * Now try secondary hash.
2313 ptegidx ^= moea64_pteg_mask;
2315 for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2316 if ((pt->pte_hi & LPTE_VALID) == 0 &&
2317 (pt->pte_hi & LPTE_LOCKED) == 0) {
2318 pvo_pt->pte_hi |= LPTE_HID;
2319 moea64_pte_set(pt, pvo_pt);
2324 panic("moea64_pte_insert: overflow");
2329 moea64_query_bit(vm_page_t m, u_int64_t ptebit)
2331 struct pvo_entry *pvo;
2334 if (moea64_attr_fetch(m) & ptebit)
2337 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2339 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2340 MOEA_PVO_CHECK(pvo); /* sanity check */
		 * See if we saved the bit off.  If so, cache it and return success.
2346 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2347 moea64_attr_save(m, ptebit);
2348 MOEA_PVO_CHECK(pvo); /* sanity check */
2354 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
2359 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2360 MOEA_PVO_CHECK(pvo); /* sanity check */
		 * See if this pvo has a valid PTE.  If so, fetch the
2364 * REF/CHG bits from the valid PTE. If the appropriate
2365 * ptebit is set, cache it and return success.
2368 pt = moea64_pvo_to_pte(pvo, -1);
2370 moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
2371 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2374 moea64_attr_save(m, ptebit);
2375 MOEA_PVO_CHECK(pvo); /* sanity check */
2386 moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
2389 struct pvo_entry *pvo;
2393 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2396 * Clear the cached value.
2398 rv = moea64_attr_fetch(m);
2399 moea64_attr_clear(m, ptebit);
2402 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
2411 * For each pvo entry, clear the pvo's ptebit. If this pvo has a
	 * valid pte, clear the ptebit from the valid pte.
2415 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2416 MOEA_PVO_CHECK(pvo); /* sanity check */
2419 pt = moea64_pvo_to_pte(pvo, -1);
2421 moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
2422 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2424 moea64_pte_clear(pt, pvo->pvo_pmap, PVO_VADDR(pvo), ptebit);
2427 rv |= pvo->pvo_pte.lpte.pte_lo;
2428 pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
2429 MOEA_PVO_CHECK(pvo); /* sanity check */
2433 if (origbit != NULL) {
2441 moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2443 struct pvo_entry *pvo;
2447 PMAP_LOCK(kernel_pmap);
2448 for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
2449 pvo = moea64_pvo_find_va(kernel_pmap, ppa, NULL);
2451 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
2456 PMAP_UNLOCK(kernel_pmap);
2462 * Map a set of physical memory pages into the kernel virtual
2463 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory, NOT real memory.
2468 moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2470 vm_offset_t va, tmpva, ppa, offset;
2472 ppa = trunc_page(pa);
2473 offset = pa & PAGE_MASK;
2474 size = roundup(offset + size, PAGE_SIZE);
2476 va = kmem_alloc_nofault(kernel_map, size);
2479 panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2481 for (tmpva = va; size > 0;) {
2482 moea64_kenter(mmu, tmpva, ppa);
2488 return ((void *)(va + offset));
2492 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2494 vm_offset_t base, offset;
2496 base = trunc_page(va);
2497 offset = va & PAGE_MASK;
2498 size = roundup(offset + size, PAGE_SIZE);
2500 kmem_free(kernel_map, base, size);
2504 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2506 struct pvo_entry *pvo;
2513 lim = round_page(va);
2514 len = MIN(lim - va, sz);
2515 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
2519 moea64_syncicache(pm, va, pa, len);