2 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the NetBSD
19 * Foundation, Inc. and its contributors.
20 * 4. Neither the name of The NetBSD Foundation nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38 * Copyright (C) 1995, 1996 TooLs GmbH.
39 * All rights reserved.
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. All advertising materials mentioning features or use of this software
50 * must display the following acknowledgement:
51 * This product includes software developed by TooLs GmbH.
52 * 4. The name of TooLs GmbH may not be used to endorse or promote products
53 * derived from this software without specific prior written permission.
55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
69 * Copyright (C) 2001 Benno Rice.
70 * All rights reserved.
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions
75 * 1. Redistributions of source code must retain the above copyright
76 * notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce the above copyright
78 * notice, this list of conditions and the following disclaimer in the
79 * documentation and/or other materials provided with the distribution.
81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
93 #include <sys/cdefs.h>
94 __FBSDID("$FreeBSD$");
97 * Manages physical address maps.
99 * In addition to hardware address maps, this module is called upon to
100 * provide software-use-only maps which may or may not be stored in the
101 * same form as hardware maps. These pseudo-maps are used to store
102 * intermediate results from copy operations to and from address spaces.
104 * Since the information managed by this module is also stored by the
105 * logical address mapping module, this module may throw away valid virtual
106 * to physical mappings at almost any time. However, invalidations of
107 * mappings must be done as requested.
109 * In order to cope with hardware architectures which make virtual to
110 * physical map invalidates expensive, this module may delay invalidate
111 * or reduced protection operations until such time as they are actually
112 * necessary. This module is given full information as to which processors
113 * are currently using which maps, and to when physical maps must be made
117 #include "opt_kstack_pages.h"
119 #include <sys/param.h>
120 #include <sys/kernel.h>
122 #include <sys/lock.h>
123 #include <sys/msgbuf.h>
124 #include <sys/mutex.h>
125 #include <sys/proc.h>
126 #include <sys/sysctl.h>
127 #include <sys/systm.h>
128 #include <sys/vmmeter.h>
132 #include <dev/ofw/openfirm.h>
135 #include <vm/vm_param.h>
136 #include <vm/vm_kern.h>
137 #include <vm/vm_page.h>
138 #include <vm/vm_map.h>
139 #include <vm/vm_object.h>
140 #include <vm/vm_extern.h>
141 #include <vm/vm_pageout.h>
142 #include <vm/vm_pager.h>
145 #include <machine/cpu.h>
146 #include <machine/platform.h>
147 #include <machine/frame.h>
148 #include <machine/md_var.h>
149 #include <machine/psl.h>
150 #include <machine/bat.h>
151 #include <machine/pte.h>
152 #include <machine/sr.h>
153 #include <machine/trap.h>
154 #include <machine/mmuvar.h>
160 #define TODO panic("%s: not implemented", __func__);
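/*
 * Count the leading zeroes in a 32-bit word (the PowerPC cntlzw
 * instruction); used below when the page table size is encoded into
 * SDR1 at CPU bootstrap time.
 */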
162 static __inline u_int32_t
163 cntlzw(volatile u_int32_t a) {
165 __asm ("cntlzw %0, %1" : "=r"(b) : "r"(a));
169 static __inline uint64_t
170 va_to_vsid(pmap_t pm, vm_offset_t va)
172 return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
175 #define TLBSYNC() __asm __volatile("tlbsync; ptesync");
176 #define SYNC() __asm __volatile("sync");
177 #define EIEIO() __asm __volatile("eieio");
180 * The tlbie instruction must be executed in 64-bit mode
181 * so we have to twiddle MSR[SF] around every invocation.
182 * Just to add to the fun, exceptions must be off as well
183 * so that we can't trap in 64-bit mode. What a pain.
187 TLBIE(pmap_t pmap, vm_offset_t va) {
192 register_t vpn_hi, vpn_lo;
196 * CPU documentation says that tlbie takes the VPN, not the
197 * VA. I think the code below does this correctly. We will see.
200 vpn = (uint64_t)(va & ADDR_PIDX);
202 vpn |= (va_to_vsid(pmap,va) << 28);
207 vpn_hi = (uint32_t)(vpn >> 32);
208 vpn_lo = (uint32_t)vpn;
225 : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32));
228 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
229 #define ENABLE_TRANS(msr) mtmsr(msr); isync()
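/*
 * Typical bootstrap-time use of the two macros above (an illustrative
 * sketch, not a verbatim excerpt from this file; the buffer name is
 * made up): memory that has no mapping yet is touched with data
 * translation disabled.
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	bzero(unmapped_buffer, buffer_size);
 *	ENABLE_TRANS(msr);
 */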
231 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
232 #define VSID_TO_SR(vsid) ((vsid) & 0xf)
233 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
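/*
 * Worked example of the VSID layout above (illustrative only): for
 * segment register index 2 and hash 0xabcde, VSID_MAKE(2, 0xabcde)
 * yields 0xabcde2, from which VSID_TO_SR() recovers 2 and
 * VSID_TO_HASH() recovers 0xabcde.
 */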
235 #define PVO_PTEGIDX_MASK 0x007 /* which PTEG slot */
236 #define PVO_PTEGIDX_VALID 0x008 /* slot is valid */
237 #define PVO_WIRED 0x010 /* PVO entry is wired */
238 #define PVO_MANAGED 0x020 /* PVO entry is managed */
239 #define PVO_BOOTSTRAP 0x080 /* PVO entry allocated during
241 #define PVO_FAKE 0x100 /* fictitious phys page */
242 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF)
243 #define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE)
244 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
245 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
246 #define PVO_PTEGIDX_CLR(pvo) \
247 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
248 #define PVO_PTEGIDX_SET(pvo, i) \
249 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
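/*
 * Illustrative round trip through the PVO_PTEGIDX_* macros above
 * (assuming the slot bits start out clear, which is why the code pairs
 * PVO_PTEGIDX_CLR() with PVO_PTEGIDX_SET()): after PVO_PTEGIDX_SET(pvo, 5),
 * PVO_PTEGIDX_ISSET(pvo) is true and PVO_PTEGIDX_GET(pvo) returns 5;
 * PVO_PTEGIDX_CLR(pvo) clears both the slot index and the valid bit.
 */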
251 #define MOEA_PVO_CHECK(pvo)
253 #define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
254 #define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex)
255 #define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)
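/*
 * Sketch of the lock discipline for the macros above (illustrative,
 * not a verbatim excerpt): walks of the PVO overflow lists and of the
 * PTEG table are bracketed with the table lock, e.g.
 *
 *	LOCK_TABLE();
 *	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
 *		... examine or update the entry ...
 *	}
 *	UNLOCK_TABLE();
 *
 * Low-level helpers such as moea64_pte_insert() instead use
 * ASSERT_TABLE_LOCK() and rely on their callers to hold the mutex.
 */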
260 vm_offset_t om_pa_hi;
261 vm_offset_t om_pa_lo;
266 * Map of physical memory regions.
268 static struct mem_region *regions;
269 static struct mem_region *pregions;
270 extern u_int phys_avail_count;
271 extern int regions_sz, pregions_sz;
272 extern int ofw_real_mode;
273 static struct ofw_map translations[96];
275 extern struct pmap ofw_pmap;
277 extern void bs_remap_earlyboot(void);
281 * Lock for the pteg and pvo tables.
283 struct mtx moea64_table_mutex;
288 static struct lpteg *moea64_pteg_table;
289 u_int moea64_pteg_count;
290 u_int moea64_pteg_mask;
295 struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */
296 /* lists of unmanaged pages */
297 struct pvo_head moea64_pvo_kunmanaged =
298 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);
299 struct pvo_head moea64_pvo_unmanaged =
300 LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged);
302 uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
303 uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */
305 vm_offset_t pvo_allocator_start;
306 vm_offset_t pvo_allocator_end;
308 #define BPVO_POOL_SIZE 327680
309 static struct pvo_entry *moea64_bpvo_pool;
310 static int moea64_bpvo_pool_index = 0;
312 #define VSID_NBPW (sizeof(u_int32_t) * 8)
313 static u_int moea64_vsid_bitmap[NPMAPS / VSID_NBPW];
315 static boolean_t moea64_initialized = FALSE;
320 u_int moea64_pte_valid = 0;
321 u_int moea64_pte_overflow = 0;
322 u_int moea64_pvo_entries = 0;
323 u_int moea64_pvo_enter_calls = 0;
324 u_int moea64_pvo_remove_calls = 0;
325 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
326 &moea64_pte_valid, 0, "");
327 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
328 &moea64_pte_overflow, 0, "");
329 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
330 &moea64_pvo_entries, 0, "");
331 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
332 &moea64_pvo_enter_calls, 0, "");
333 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
334 &moea64_pvo_remove_calls, 0, "");
336 vm_offset_t moea64_scratchpage_va[2];
337 struct pvo_entry *moea64_scratchpage_pvo[2];
338 struct lpte *moea64_scratchpage_pte[2];
339 struct mtx moea64_scratchpage_mtx;
342 * Allocate physical memory for use in moea64_bootstrap.
344 static vm_offset_t moea64_bootstrap_alloc(vm_size_t, u_int);
349 static int moea64_pte_insert(u_int, struct lpte *);
354 static int moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
355 vm_offset_t, vm_offset_t, uint64_t, int, int);
356 static void moea64_pvo_remove(struct pvo_entry *, int);
357 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *);
358 static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *, int);
363 static void moea64_bridge_bootstrap(mmu_t mmup,
364 vm_offset_t kernelstart, vm_offset_t kernelend);
365 static void moea64_bridge_cpu_bootstrap(mmu_t, int ap);
366 static void moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
367 vm_prot_t, boolean_t);
368 static boolean_t moea64_query_bit(vm_page_t, u_int64_t);
369 static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
370 static void moea64_kremove(mmu_t, vm_offset_t);
371 static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
373 static void tlbia(void);
376 * Kernel MMU interface
378 void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
379 void moea64_clear_modify(mmu_t, vm_page_t);
380 void moea64_clear_reference(mmu_t, vm_page_t);
381 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
382 void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
383 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
385 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
386 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
387 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
388 void moea64_init(mmu_t);
389 boolean_t moea64_is_modified(mmu_t, vm_page_t);
390 boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
391 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
392 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
393 int moea64_page_wired_mappings(mmu_t, vm_page_t);
394 void moea64_pinit(mmu_t, pmap_t);
395 void moea64_pinit0(mmu_t, pmap_t);
396 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
397 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
398 void moea64_qremove(mmu_t, vm_offset_t, int);
399 void moea64_release(mmu_t, pmap_t);
400 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
401 void moea64_remove_all(mmu_t, vm_page_t);
402 void moea64_remove_write(mmu_t, vm_page_t);
403 void moea64_zero_page(mmu_t, vm_page_t);
404 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
405 void moea64_zero_page_idle(mmu_t, vm_page_t);
406 void moea64_activate(mmu_t, struct thread *);
407 void moea64_deactivate(mmu_t, struct thread *);
408 void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
409 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
410 vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
411 void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
412 boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
413 boolean_t moea64_page_executable(mmu_t, vm_page_t);
415 static mmu_method_t moea64_bridge_methods[] = {
416 MMUMETHOD(mmu_change_wiring, moea64_change_wiring),
417 MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
418 MMUMETHOD(mmu_clear_reference, moea64_clear_reference),
419 MMUMETHOD(mmu_copy_page, moea64_copy_page),
420 MMUMETHOD(mmu_enter, moea64_enter),
421 MMUMETHOD(mmu_enter_object, moea64_enter_object),
422 MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
423 MMUMETHOD(mmu_extract, moea64_extract),
424 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
425 MMUMETHOD(mmu_init, moea64_init),
426 MMUMETHOD(mmu_is_modified, moea64_is_modified),
427 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
428 MMUMETHOD(mmu_map, moea64_map),
429 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
430 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
431 MMUMETHOD(mmu_pinit, moea64_pinit),
432 MMUMETHOD(mmu_pinit0, moea64_pinit0),
433 MMUMETHOD(mmu_protect, moea64_protect),
434 MMUMETHOD(mmu_qenter, moea64_qenter),
435 MMUMETHOD(mmu_qremove, moea64_qremove),
436 MMUMETHOD(mmu_release, moea64_release),
437 MMUMETHOD(mmu_remove, moea64_remove),
438 MMUMETHOD(mmu_remove_all, moea64_remove_all),
439 MMUMETHOD(mmu_remove_write, moea64_remove_write),
440 MMUMETHOD(mmu_zero_page, moea64_zero_page),
441 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
442 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
443 MMUMETHOD(mmu_activate, moea64_activate),
444 MMUMETHOD(mmu_deactivate, moea64_deactivate),
446 /* Internal interfaces */
447 MMUMETHOD(mmu_bootstrap, moea64_bridge_bootstrap),
448 MMUMETHOD(mmu_cpu_bootstrap, moea64_bridge_cpu_bootstrap),
449 MMUMETHOD(mmu_mapdev, moea64_mapdev),
450 MMUMETHOD(mmu_unmapdev, moea64_unmapdev),
451 MMUMETHOD(mmu_kextract, moea64_kextract),
452 MMUMETHOD(mmu_kenter, moea64_kenter),
453 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
454 MMUMETHOD(mmu_page_executable, moea64_page_executable),
459 static mmu_def_t oea64_bridge_mmu = {
461 moea64_bridge_methods,
464 MMU_DEF(oea64_bridge_mmu);
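/*
 * Compute the (primary) PTEG index for a virtual address: the VSID of
 * the address's segment is hashed against the page index within the
 * segment, and the result is folded into the table size with
 * moea64_pteg_mask.
 */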
466 static __inline u_int
467 va_to_pteg(uint64_t vsid, vm_offset_t addr)
471 hash = vsid ^ (((uint64_t)addr & ADDR_PIDX) >>
473 return (hash & moea64_pteg_mask);
476 static __inline struct pvo_head *
477 pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
481 pg = PHYS_TO_VM_PAGE(pa);
487 return (&moea64_pvo_unmanaged);
489 return (&pg->md.mdpg_pvoh);
492 static __inline struct pvo_head *
493 vm_page_to_pvoh(vm_page_t m)
496 return (&m->md.mdpg_pvoh);
500 moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
503 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
504 m->md.mdpg_attrs &= ~ptebit;
507 static __inline u_int64_t
508 moea64_attr_fetch(vm_page_t m)
511 return (m->md.mdpg_attrs);
515 moea64_attr_save(vm_page_t m, u_int64_t ptebit)
518 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
519 m->md.mdpg_attrs |= ptebit;
523 moea64_pte_compare(const struct lpte *pt, const struct lpte *pvo_pt)
525 if (pt->pte_hi == pvo_pt->pte_hi)
532 moea64_pte_match(struct lpte *pt, uint64_t vsid, vm_offset_t va, int which)
534 return (pt->pte_hi & ~LPTE_VALID) ==
535 ((vsid << LPTE_VSID_SHIFT) |
536 ((uint64_t)(va >> ADDR_API_SHFT64) & LPTE_API) | which);
540 moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
546 * Construct a PTE. Default to IMB initially. Valid bit only gets
547 * set when the real pte is set in memory.
549 * Note: Don't set the valid bit for correct operation of tlb update.
551 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
552 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);
558 moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt)
563 pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
567 moea64_pte_clear(struct lpte *pt, pmap_t pmap, vm_offset_t va, u_int64_t ptebit)
572 * As shown in Section 7.6.3.2.3
574 pt->pte_lo &= ~ptebit;
579 moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt)
583 pvo_pt->pte_hi |= LPTE_VALID;
586 * Update the PTE as defined in section 7.6.3.1.
587 * Note that the REF/CHG bits are from pvo_pt and thus should have
588 * been saved so this routine can restore them (if desired).
590 pt->pte_lo = pvo_pt->pte_lo;
592 pt->pte_hi = pvo_pt->pte_hi;
598 moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va)
601 pvo_pt->pte_hi &= ~LPTE_VALID;
604 * Force the ref & chg bits back into the PTEs.
609 * Invalidate the pte.
611 pt->pte_hi &= ~LPTE_VALID;
616 * Save the ref & chg bits.
618 moea64_pte_synch(pt, pvo_pt);
623 moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va)
629 moea64_pte_unset(pt, pvo_pt, pmap, va);
630 moea64_pte_set(pt, pvo_pt);
633 static __inline uint64_t
634 moea64_calc_wimg(vm_offset_t pa)
640 * Assume the page is cache inhibited and access is guarded unless
641 * it's in our available memory array.
643 pte_lo = LPTE_I | LPTE_G;
644 for (i = 0; i < pregions_sz; i++) {
645 if ((pa >= pregions[i].mr_start) &&
646 (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
647 pte_lo &= ~(LPTE_I | LPTE_G);
657 * Quick sort callout for comparing memory regions.
659 static int mr_cmp(const void *a, const void *b);
660 static int om_cmp(const void *a, const void *b);
663 mr_cmp(const void *a, const void *b)
665 const struct mem_region *regiona;
666 const struct mem_region *regionb;
670 if (regiona->mr_start < regionb->mr_start)
672 else if (regiona->mr_start > regionb->mr_start)
679 om_cmp(const void *a, const void *b)
681 const struct ofw_map *mapa;
682 const struct ofw_map *mapb;
686 if (mapa->om_pa_hi < mapb->om_pa_hi)
688 else if (mapa->om_pa_hi > mapb->om_pa_hi)
690 else if (mapa->om_pa_lo < mapb->om_pa_lo)
692 else if (mapa->om_pa_lo > mapb->om_pa_lo)
699 moea64_bridge_cpu_bootstrap(mmu_t mmup, int ap)
704 * Initialize segment registers and MMU
707 mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync();
708 for (i = 0; i < 16; i++) {
709 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
711 __asm __volatile ("sync; mtsdr1 %0; isync"
712 :: "r"((u_int)moea64_pteg_table
713 | (32 - cntlzw(moea64_pteg_mask >> 11))));
718 moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
726 vm_size_t size, physsz, hwphyssz;
727 vm_offset_t pa, va, off;
731 /* We don't have a direct map since there is no BAT */
734 /* Make sure battable is zero, since we have no BAT */
735 for (i = 0; i < 16; i++) {
736 battable[i].batu = 0;
737 battable[i].batl = 0;
740 /* Get physical memory regions from firmware */
741 mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
742 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
744 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
745 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
746 panic("moea64_bootstrap: phys_avail too small");
747 qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
748 phys_avail_count = 0;
751 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
752 for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
753 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
754 regions[i].mr_start + regions[i].mr_size,
757 (physsz + regions[i].mr_size) >= hwphyssz) {
758 if (physsz < hwphyssz) {
759 phys_avail[j] = regions[i].mr_start;
760 phys_avail[j + 1] = regions[i].mr_start +
767 phys_avail[j] = regions[i].mr_start;
768 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
770 physsz += regions[i].mr_size;
772 physmem = btoc(physsz);
775 * Allocate PTEG table.
778 moea64_pteg_count = PTEGCOUNT;
780 moea64_pteg_count = 0x1000;
782 while (moea64_pteg_count < physmem)
783 moea64_pteg_count <<= 1;
784 #endif /* PTEGCOUNT */
786 size = moea64_pteg_count * sizeof(struct lpteg);
787 CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
788 moea64_pteg_count, size);
791 * We now need to allocate memory, but that memory would normally have
792 * to be mapped by the very page table we are about to allocate, and we
793 * have no BATs to fall back on. So drop to data real mode for a minute
794 * as a measure of last resort. We do this a couple of times.
797 moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
799 bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
802 moea64_pteg_mask = moea64_pteg_count - 1;
804 CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);
807 * Allocate pv/overflow lists.
809 size = sizeof(struct pvo_head) * moea64_pteg_count;
811 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
813 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);
816 for (i = 0; i < moea64_pteg_count; i++)
817 LIST_INIT(&moea64_pvo_table[i]);
821 * Initialize the lock that synchronizes access to the pteg and pvo
824 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
828 * Initialize the unmanaged pvo pool.
830 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
831 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
832 moea64_bpvo_pool_index = 0;
835 * Make sure kernel vsid is allocated as well as VSID 0.
837 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
838 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
839 moea64_vsid_bitmap[0] |= 1;
842 * Initialize the kernel pmap (which is statically allocated).
844 for (i = 0; i < 16; i++)
845 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
847 kernel_pmap->pmap_phys = kernel_pmap;
848 kernel_pmap->pm_active = ~0;
850 PMAP_LOCK_INIT(kernel_pmap);
853 * Now map in all the other buffers we allocated earlier
857 size = moea64_pteg_count * sizeof(struct lpteg);
858 off = (vm_offset_t)(moea64_pteg_table);
859 for (pa = off; pa < off + size; pa += PAGE_SIZE)
860 moea64_kenter(mmup, pa, pa);
861 size = sizeof(struct pvo_head) * moea64_pteg_count;
862 off = (vm_offset_t)(moea64_pvo_table);
863 for (pa = off; pa < off + size; pa += PAGE_SIZE)
864 moea64_kenter(mmup, pa, pa);
865 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
866 off = (vm_offset_t)(moea64_bpvo_pool);
867 for (pa = off; pa < off + size; pa += PAGE_SIZE)
868 moea64_kenter(mmup, pa, pa);
872 * Map certain important things, like ourselves and the exception
877 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; pa += PAGE_SIZE)
878 moea64_kenter(mmup, pa, pa);
879 for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE)
880 moea64_kenter(mmup, pa, pa);
883 if (!ofw_real_mode) {
885 * Set up the Open Firmware pmap and add its mappings.
888 moea64_pinit(mmup, &ofw_pmap);
889 ofw_pmap.pm_sr[KERNEL_SR] = kernel_pmap->pm_sr[KERNEL_SR];
890 ofw_pmap.pm_sr[KERNEL2_SR] = kernel_pmap->pm_sr[KERNEL2_SR];
892 if ((chosen = OF_finddevice("/chosen")) == -1)
893 panic("moea64_bootstrap: can't find /chosen");
894 OF_getprop(chosen, "mmu", &mmui, 4);
895 if ((mmu = OF_instance_to_package(mmui)) == -1)
896 panic("moea64_bootstrap: can't get mmu package");
897 if ((sz = OF_getproplen(mmu, "translations")) == -1)
898 panic("moea64_bootstrap: can't get ofw translation count");
899 if (sz > sizeof(translations))
900 panic("moea64_bootstrap: too many ofw translations (%d)",
901 sz/sizeof(*translations));
903 bzero(translations, sz);
904 if (OF_getprop(mmu, "translations", translations, sz) == -1)
905 panic("moea64_bootstrap: can't get ofw translations");
907 CTR0(KTR_PMAP, "moea64_bootstrap: translations");
908 sz /= sizeof(*translations);
909 qsort(translations, sz, sizeof (*translations), om_cmp);
911 for (i = 0, ofw_mappings = 0; i < sz; i++) {
912 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
913 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
914 translations[i].om_len);
916 if (translations[i].om_pa_lo % PAGE_SIZE)
917 panic("OFW translation not page-aligned!");
919 if (translations[i].om_pa_hi)
920 panic("OFW translations above 32-bit boundary!");
922 /* Now enter the pages for this mapping */
925 * Lock the ofw pmap. pmap_kenter(), which we use for the
926 * pages the kernel also needs, does its own locking.
928 PMAP_LOCK(&ofw_pmap);
930 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
933 /* Map low memory mappings into the kernel pmap, too.
934 * These are typically mappings made by the loader,
935 * so we need them if we want to keep executing. */
937 if (translations[i].om_va + off < SEGMENT_LENGTH)
938 moea64_kenter(mmup, translations[i].om_va + off,
939 translations[i].om_va + off);
941 m.phys_addr = translations[i].om_pa_lo + off;
942 moea64_enter_locked(&ofw_pmap,
943 translations[i].om_va + off, &m, VM_PROT_ALL, 1);
948 PMAP_UNLOCK(&ofw_pmap);
957 * Calculate the last available physical address.
959 for (i = 0; phys_avail[i + 2] != 0; i += 2)
961 Maxmem = powerpc_btop(phys_avail[i + 1]);
964 * Initialize MMU and remap early physical mappings
966 moea64_bridge_cpu_bootstrap(mmup,0);
967 mtmsr(mfmsr() | PSL_DR | PSL_IR); isync();
969 bs_remap_earlyboot();
972 * Set the start and end of kva.
974 virtual_avail = VM_MIN_KERNEL_ADDRESS;
975 virtual_end = VM_MAX_KERNEL_ADDRESS;
978 * Allocate some stupid buffer regions.
981 pvo_allocator_start = virtual_avail;
982 virtual_avail += SEGMENT_LENGTH/4;
983 pvo_allocator_end = virtual_avail;
986 * Allocate some things for page zeroing
989 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF);
990 for (i = 0; i < 2; i++) {
991 moea64_scratchpage_va[i] = virtual_avail;
992 virtual_avail += PAGE_SIZE;
994 moea64_kenter(mmup,moea64_scratchpage_va[i],kernelstart);
997 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(kernel_pmap,
998 moea64_scratchpage_va[i],&j);
999 moea64_scratchpage_pte[i] = moea64_pvo_to_pte(
1000 moea64_scratchpage_pvo[i],j);
1005 * Allocate a kernel stack with a guard page for thread0 and map it
1006 * into the kernel page map.
1008 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
1009 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1010 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
1011 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
1012 thread0.td_kstack = va;
1013 thread0.td_kstack_pages = KSTACK_PAGES;
1014 for (i = 0; i < KSTACK_PAGES; i++) {
1015 moea64_kenter(mmup, va, pa);
1021 * Allocate virtual address space for the message buffer.
1023 pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
1024 msgbufp = (struct msgbuf *)virtual_avail;
1026 virtual_avail += round_page(MSGBUF_SIZE);
1027 while (va < virtual_avail) {
1028 moea64_kenter(mmup, va, pa);
1034 * Allocate virtual address space for the dynamic percpu area.
1036 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
1037 dpcpu = (void *)virtual_avail;
1039 virtual_avail += DPCPU_SIZE;
1040 while (va < virtual_avail) {
1041 moea64_kenter(mmup, va, pa);
1045 dpcpu_init(dpcpu, 0);
1049 * Activate a user pmap. The pmap must be activated before its address
1050 * space can be accessed in any way.
1053 moea64_activate(mmu_t mmu, struct thread *td)
1058 * Load all the data we need up front to encourage the compiler to
1059 * not issue any loads while we have interrupts disabled below.
1061 pm = &td->td_proc->p_vmspace->vm_pmap;
1062 pmr = pm->pmap_phys;
1064 pm->pm_active |= PCPU_GET(cpumask);
1065 PCPU_SET(curpmap, pmr);
1069 moea64_deactivate(mmu_t mmu, struct thread *td)
1073 pm = &td->td_proc->p_vmspace->vm_pmap;
1074 pm->pm_active &= ~(PCPU_GET(cpumask));
1075 PCPU_SET(curpmap, NULL);
1079 moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
1081 struct pvo_entry *pvo;
1084 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1088 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1089 pm->pm_stats.wired_count++;
1090 pvo->pvo_vaddr |= PVO_WIRED;
1092 if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1093 pm->pm_stats.wired_count--;
1094 pvo->pvo_vaddr &= ~PVO_WIRED;
1101 * Zero a page of physical memory by temporarily mapping it into the tlb.
1104 moea64_zero_page(mmu_t mmu, vm_page_t m)
1106 moea64_zero_page_area(mmu,m,0,PAGE_SIZE);
1110 * This goes through and sets the physical address of our
1111 * special scratch PTE to the PA we want to zero or copy. Because
1112 * of locking issues (this can get called in pvo_enter() by
1113 * the UMA allocator), we can't use most other utility functions here
1117 void moea64_set_scratchpage_pa(int which, vm_offset_t pa) {
1118 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
1119 (~LPTE_WIMG & ~LPTE_RPGN);
1120 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
1121 moea64_calc_wimg(pa) | (uint64_t)pa;
1123 moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID;
1124 TLBIE(kernel_pmap, moea64_scratchpage_va[which]);
1126 moea64_scratchpage_pte[which]->pte_lo =
1127 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo;
1130 moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
1131 TLBIE(kernel_pmap, moea64_scratchpage_va[which]);
1135 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1140 dst = VM_PAGE_TO_PHYS(mdst);
1141 src = VM_PAGE_TO_PHYS(msrc);
1143 mtx_lock(&moea64_scratchpage_mtx);
1145 moea64_set_scratchpage_pa(0,src);
1146 moea64_set_scratchpage_pa(1,dst);
1148 kcopy((void *)moea64_scratchpage_va[0],
1149 (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1151 __syncicache((void *)moea64_scratchpage_va[1],PAGE_SIZE);
1153 mtx_unlock(&moea64_scratchpage_mtx);
1157 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1159 vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1161 if (!moea64_initialized)
1162 panic("moea64_zero_page_area: can't zero pa %#x", pa);
1163 if (size + off > PAGE_SIZE)
1164 panic("moea64_zero_page_area: size + off > PAGE_SIZE");
1166 mtx_lock(&moea64_scratchpage_mtx);
1168 moea64_set_scratchpage_pa(0,pa);
1169 bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1170 __syncicache((void *)moea64_scratchpage_va[0],PAGE_SIZE);
1172 mtx_unlock(&moea64_scratchpage_mtx);
1176 moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
1179 moea64_zero_page(mmu, m);
1183 * Map the given physical page at the specified virtual address in the
1184 * target pmap with the protection requested. If specified the page
1185 * will be wired down.
1188 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1189 vm_prot_t prot, boolean_t wired)
1192 vm_page_lock_queues();
1194 moea64_enter_locked(pmap, va, m, prot, wired);
1195 vm_page_unlock_queues();
1200 * Map the given physical page at the specified virtual address in the
1201 * target pmap with the protection requested. If specified the page
1202 * will be wired down.
1204 * The page queues and pmap must be locked.
1208 moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1211 struct pvo_head *pvo_head;
1218 if (!moea64_initialized) {
1219 pvo_head = &moea64_pvo_kunmanaged;
1221 zone = moea64_upvo_zone;
1224 pvo_head = vm_page_to_pvoh(m);
1226 zone = moea64_mpvo_zone;
1227 pvo_flags = PVO_MANAGED;
1230 if (pmap_bootstrapped)
1231 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1232 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1234 /* XXX change the pvo head for fake pages */
1235 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
1236 pvo_flags &= ~PVO_MANAGED;
1237 pvo_head = &moea64_pvo_kunmanaged;
1238 zone = moea64_upvo_zone;
1241 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m));
1243 if (prot & VM_PROT_WRITE) {
1245 if (pmap_bootstrapped)
1246 vm_page_flag_set(m, PG_WRITEABLE);
1250 if (prot & VM_PROT_EXECUTE)
1251 pvo_flags |= VM_PROT_EXECUTE;
1254 pvo_flags |= PVO_WIRED;
1256 if ((m->flags & PG_FICTITIOUS) != 0)
1257 pvo_flags |= PVO_FAKE;
1259 error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
1260 pte_lo, pvo_flags, 0);
1262 if (pmap == kernel_pmap)
1266 * Flush the page from the instruction cache if this page is
1267 * mapped executable and cacheable.
1269 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1270 moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m));
1275 moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa)
1278 * This is much trickier than on older systems because
1279 * we can't sync the icache on physical addresses directly
1280 * without a direct map. Instead we check a couple of cases
1281 * where the memory is already mapped in and, failing that,
1282 * use the same trick we use for page zeroing to create
1283 * a temporary mapping for this physical address.
1286 if (!pmap_bootstrapped) {
1288 * If PMAP is not bootstrapped, we are likely to be
1291 __syncicache((void *)pa,PAGE_SIZE);
1292 } else if (pmap == kernel_pmap) {
1293 __syncicache((void *)va,PAGE_SIZE);
1295 /* Use the scratch page to set up a temp mapping */
1297 mtx_lock(&moea64_scratchpage_mtx);
1299 moea64_set_scratchpage_pa(1,pa);
1300 __syncicache((void *)moea64_scratchpage_va[1],PAGE_SIZE);
1302 mtx_unlock(&moea64_scratchpage_mtx);
1307 * Maps a sequence of resident pages belonging to the same object.
1308 * The sequence begins with the given page m_start. This page is
1309 * mapped at the given virtual address start. Each subsequent page is
1310 * mapped at a virtual address that is offset from start by the same
1311 * amount as the page is offset from m_start within the object. The
1312 * last page in the sequence is the page with the largest offset from
1313 * m_start that can be mapped at a virtual address less than the given
1314 * virtual address end. Not every virtual page between start and end
1315 * is mapped; only those for which a resident page exists with the
1316 * corresponding offset from m_start are mapped.
1319 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1320 vm_page_t m_start, vm_prot_t prot)
1323 vm_pindex_t diff, psize;
1325 psize = atop(end - start);
1328 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1329 moea64_enter_locked(pm, start + ptoa(diff), m, prot &
1330 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1331 m = TAILQ_NEXT(m, listq);
1337 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1341 moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1348 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1350 struct pvo_entry *pvo;
1354 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1358 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF);
1364 * Atomically extract and hold the physical page with the given
1365 * pmap and virtual address pair if that mapping permits the given
1369 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1371 struct pvo_entry *pvo;
1375 vm_page_lock_queues();
1377 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
1378 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
1379 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
1380 (prot & VM_PROT_WRITE) == 0)) {
1381 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1384 vm_page_unlock_queues();
1390 moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1393 * This entire routine is a horrible hack to avoid bothering kmem
1394 * for new KVA addresses. Because this can get called from inside
1395 * kmem allocation routines, calling kmem for a new address here
1396 * can lead to multiply locking non-recursive mutexes.
1398 static vm_pindex_t color;
1402 int pflags, needed_lock;
1404 *flags = UMA_SLAB_PRIV;
1405 needed_lock = !PMAP_LOCKED(kernel_pmap);
1408 PMAP_LOCK(kernel_pmap);
1410 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
1411 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
1413 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
1415 pflags |= VM_ALLOC_ZERO;
1418 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
1420 if (wait & M_NOWAIT)
1427 va = pvo_allocator_start;
1428 pvo_allocator_start += PAGE_SIZE;
1430 if (pvo_allocator_start >= pvo_allocator_end)
1431 panic("Ran out of PVO allocator buffer space!");
1433 /* Now call pvo_enter in recursive mode */
1434 moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
1435 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
1436 PVO_WIRED | PVO_BOOTSTRAP, 1);
1438 TLBIE(kernel_pmap, va);
1441 PMAP_UNLOCK(kernel_pmap);
1443 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1444 bzero((void *)va, PAGE_SIZE);
1450 moea64_init(mmu_t mmu)
1453 CTR0(KTR_PMAP, "moea64_init");
1455 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1456 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1457 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1458 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1459 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1460 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1462 if (!hw_direct_map) {
1463 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
1464 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
1467 moea64_initialized = TRUE;
1471 moea64_is_modified(mmu_t mmu, vm_page_t m)
1474 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1477 return (moea64_query_bit(m, LPTE_CHG));
1481 moea64_clear_reference(mmu_t mmu, vm_page_t m)
1484 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1486 moea64_clear_bit(m, LPTE_REF, NULL);
1490 moea64_clear_modify(mmu_t mmu, vm_page_t m)
1493 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1495 moea64_clear_bit(m, LPTE_CHG, NULL);
1499 * Clear the write and modified bits in each of the given page's mappings.
1502 moea64_remove_write(mmu_t mmu, vm_page_t m)
1504 struct pvo_entry *pvo;
1509 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1510 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1511 (m->flags & PG_WRITEABLE) == 0)
1513 lo = moea64_attr_fetch(m);
1515 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1516 pmap = pvo->pvo_pmap;
1518 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1520 pt = moea64_pvo_to_pte(pvo, -1);
1521 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1522 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1524 moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
1525 lo |= pvo->pvo_pte.lpte.pte_lo;
1526 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1527 moea64_pte_change(pt, &pvo->pvo_pte.lpte,
1528 pvo->pvo_pmap, pvo->pvo_vaddr);
1534 if ((lo & LPTE_CHG) != 0) {
1535 moea64_attr_clear(m, LPTE_CHG);
1538 vm_page_flag_clear(m, PG_WRITEABLE);
1542 * moea64_ts_referenced:
1544 * Return a count of reference bits for a page, clearing those bits.
1545 * It is not necessary for every reference bit to be cleared, but it
1546 * is necessary that 0 only be returned when there are truly no
1547 * reference bits set.
1549 * XXX: The exact number of bits to check and clear is a matter that
1550 * should be tested and standardized at some point in the future for
1551 * optimal aging of shared pages.
1554 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1558 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1561 count = moea64_clear_bit(m, LPTE_REF, NULL);
1567 * Map a wired page into kernel virtual address space.
1570 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1575 if (!pmap_bootstrapped) {
1576 if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)
1577 panic("Trying to enter an address in KVA -- %#x!\n", va);
1580 pte_lo = moea64_calc_wimg(pa);
1582 PMAP_LOCK(kernel_pmap);
1583 error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
1584 &moea64_pvo_kunmanaged, va, pa, pte_lo,
1585 PVO_WIRED | VM_PROT_EXECUTE, 0);
1587 TLBIE(kernel_pmap, va);
1589 if (error != 0 && error != ENOENT)
1590 panic("moea64_kenter: failed to enter va %#x pa %#x: %d", va,
1594 * Flush the memory from the instruction cache.
1596 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) {
1597 __syncicache((void *)va, PAGE_SIZE);
1599 PMAP_UNLOCK(kernel_pmap);
1603 * Extract the physical page address associated with the given kernel virtual
1607 moea64_kextract(mmu_t mmu, vm_offset_t va)
1609 struct pvo_entry *pvo;
1612 PMAP_LOCK(kernel_pmap);
1613 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1614 KASSERT(pvo != NULL, ("moea64_kextract: no addr found"));
1615 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF);
1616 PMAP_UNLOCK(kernel_pmap);
1621 * Remove a wired page from kernel virtual address space.
1624 moea64_kremove(mmu_t mmu, vm_offset_t va)
1626 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1630 * Map a range of physical addresses into kernel virtual address space.
1632 * The value passed in *virt is a suggested virtual address for the mapping.
1633 * Architectures which can support a direct-mapped physical to virtual region
1634 * can return the appropriate address within that region, leaving '*virt'
1635 * unchanged. We cannot and therefore do not; *virt is updated with the
1636 * first usable address after the mapped region.
1639 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1640 vm_offset_t pa_end, int prot)
1642 vm_offset_t sva, va;
1646 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1647 moea64_kenter(mmu, va, pa_start);
1654 * Returns true if the pmap's pv is one of the first
1655 * 16 pvs linked to from this page. This count may
1656 * be changed upwards or downwards in the future; it
1657 * is only necessary that true be returned for a small
1658 * subset of pmaps for proper page aging.
1661 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1664 struct pvo_entry *pvo;
1666 if (!moea64_initialized || (m->flags & PG_FICTITIOUS))
1670 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1671 if (pvo->pvo_pmap == pmap)
1681 * Return the number of managed mappings to the given physical page
1685 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1687 struct pvo_entry *pvo;
1691 if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0)
1693 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1694 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1695 if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1700 static u_int moea64_vsidcontext;
1703 moea64_pinit(mmu_t mmu, pmap_t pmap)
1708 PMAP_LOCK_INIT(pmap);
1711 __asm __volatile("mftb %0" : "=r"(entropy));
1713 if (pmap_bootstrapped)
1714 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, (vm_offset_t)pmap);
1716 pmap->pmap_phys = pmap;
1719 * Allocate some segment registers for this pmap.
1721 for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1725 * Create a new value by multiplying by a prime and adding in
1726 * entropy from the timebase register. This is to make the
1727 * VSID more random so that the PT hash function collides
1728 * less often. (Note that the prime causes gcc to do shifts
1729 * instead of a multiply.)
1731 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1732 hash = moea64_vsidcontext & (NPMAPS - 1);
1733 if (hash == 0) /* 0 is special, avoid it */
1736 mask = 1 << (hash & (VSID_NBPW - 1));
1737 hash = (moea64_vsidcontext & 0xfffff);
1738 if (moea64_vsid_bitmap[n] & mask) { /* collision? */
1739 /* anything free in this bucket? */
1740 if (moea64_vsid_bitmap[n] == 0xffffffff) {
1741 entropy = (moea64_vsidcontext >> 20);
1744 i = ffs(~moea64_vsid_bitmap[n]) - 1;
1746 hash &= 0xfffff & ~(VSID_NBPW - 1);
1749 moea64_vsid_bitmap[n] |= mask;
1750 for (i = 0; i < 16; i++) {
1751 pmap->pm_sr[i] = VSID_MAKE(i, hash);
1756 panic("moea64_pinit: out of segments");
1760 * Initialize the pmap associated with process 0.
1763 moea64_pinit0(mmu_t mmu, pmap_t pm)
1765 moea64_pinit(mmu, pm);
1766 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1770 * Set the physical protection on the specified range of this map as requested.
1773 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1776 struct pvo_entry *pvo;
1780 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1784 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1785 ("moea64_protect: non current pmap"));
1787 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1788 moea64_remove(mmu, pm, sva, eva);
1792 vm_page_lock_queues();
1794 for (; sva < eva; sva += PAGE_SIZE) {
1795 pvo = moea64_pvo_find_va(pm, sva, &pteidx);
1800 * Grab the PTE pointer before we diddle with the cached PTE
1804 pt = moea64_pvo_to_pte(pvo, pteidx);
1807 * Change the protection of the page.
1809 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1810 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1811 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1812 if ((prot & VM_PROT_EXECUTE) == 0)
1813 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1816 * If the PVO is in the page table, update that pte as well.
1819 moea64_pte_change(pt, &pvo->pvo_pte.lpte,
1820 pvo->pvo_pmap, pvo->pvo_vaddr);
1821 if ((pvo->pvo_pte.lpte.pte_lo &
1822 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1823 moea64_syncicache(pm, sva,
1824 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1829 vm_page_unlock_queues();
1834 * Map a list of wired pages into kernel virtual address space. This is
1835 * intended for temporary mappings which do not need page modification or
1836 * references recorded. Existing mappings in the region are overwritten.
1839 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
1841 while (count-- > 0) {
1842 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1849 * Remove page mappings from kernel virtual address space. Intended for
1850 * temporary mappings entered by moea64_qenter.
1853 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
1855 while (count-- > 0) {
1856 moea64_kremove(mmu, va);
1862 moea64_release(mmu_t mmu, pmap_t pmap)
1867 * Free segment register's VSID
1869 if (pmap->pm_sr[0] == 0)
1870 panic("moea64_release");
1872 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
1873 mask = 1 << (idx % VSID_NBPW);
1875 moea64_vsid_bitmap[idx] &= ~mask;
1876 PMAP_LOCK_DESTROY(pmap);
1880 * Remove the given range of addresses from the specified map.
1883 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1885 struct pvo_entry *pvo;
1888 vm_page_lock_queues();
1890 for (; sva < eva; sva += PAGE_SIZE) {
1891 pvo = moea64_pvo_find_va(pm, sva, &pteidx);
1893 moea64_pvo_remove(pvo, pteidx);
1896 vm_page_unlock_queues();
1901 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
1902 * will reflect changes in pte's back to the vm_page.
1905 moea64_remove_all(mmu_t mmu, vm_page_t m)
1907 struct pvo_head *pvo_head;
1908 struct pvo_entry *pvo, *next_pvo;
1911 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1913 pvo_head = vm_page_to_pvoh(m);
1914 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1915 next_pvo = LIST_NEXT(pvo, pvo_vlink);
1917 MOEA_PVO_CHECK(pvo); /* sanity check */
1918 pmap = pvo->pvo_pmap;
1920 moea64_pvo_remove(pvo, -1);
1923 vm_page_flag_clear(m, PG_WRITEABLE);
1927 * Allocate a physical page of memory directly from the phys_avail map.
1928 * Can only be called from moea64_bootstrap before avail start and end are
1932 moea64_bootstrap_alloc(vm_size_t size, u_int align)
1937 size = round_page(size);
1938 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1940 s = (phys_avail[i] + align - 1) & ~(align - 1);
1945 if (s < phys_avail[i] || e > phys_avail[i + 1])
1948 if (s == phys_avail[i]) {
1949 phys_avail[i] += size;
1950 } else if (e == phys_avail[i + 1]) {
1951 phys_avail[i + 1] -= size;
1953 for (j = phys_avail_count * 2; j > i; j -= 2) {
1954 phys_avail[j] = phys_avail[j - 2];
1955 phys_avail[j + 1] = phys_avail[j - 1];
1958 phys_avail[i + 3] = phys_avail[i + 1];
1959 phys_avail[i + 1] = s;
1960 phys_avail[i + 2] = e;
1966 panic("moea64_bootstrap_alloc: could not allocate memory");
1974 for (i = 0; i < 0xFF000; i += 0x00001000)
1979 moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
1980 vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags, int recurse)
1982 struct pvo_entry *pvo;
1990 * One nasty thing that can happen here is that the UMA calls to
1991 * allocate new PVOs need to map more memory, which calls pvo_enter(),
1992 * which calls UMA...
1994 * We break the loop by detecting recursion and allocating out of
1995 * the bootstrap pool.
1998 moea64_pvo_enter_calls++;
2000 bootstrap = (flags & PVO_BOOTSTRAP);
2002 if (!moea64_initialized)
2006 * Compute the PTE Group index.
2009 vsid = va_to_vsid(pm, va);
2010 ptegidx = va_to_pteg(vsid, va);
2013 * Remove any existing mapping for this page. Reuse the pvo entry if
2014 * there is a mapping.
2019 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2020 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2021 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2022 (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
2023 (pte_lo & LPTE_PP)) {
2028 moea64_pvo_remove(pvo, -1);
2034 * If we aren't overwriting a mapping, try to allocate.
2037 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
2038 panic("moea64_enter: bpvo pool exhausted, %d, %d, %d",
2039 moea64_bpvo_pool_index, BPVO_POOL_SIZE,
2040 BPVO_POOL_SIZE * sizeof(struct pvo_entry));
2042 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2043 moea64_bpvo_pool_index++;
2046 pvo = uma_zalloc(zone, M_NOWAIT);
2055 moea64_pvo_entries++;
2056 pvo->pvo_vaddr = va;
2058 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2059 pvo->pvo_vaddr &= ~ADDR_POFF;
2061 if (!(flags & VM_PROT_EXECUTE))
2062 pte_lo |= LPTE_NOEXEC;
2063 if (flags & PVO_WIRED)
2064 pvo->pvo_vaddr |= PVO_WIRED;
2065 if (pvo_head != &moea64_pvo_kunmanaged)
2066 pvo->pvo_vaddr |= PVO_MANAGED;
2068 pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2069 if (flags & PVO_FAKE)
2070 pvo->pvo_vaddr |= PVO_FAKE;
2072 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2073 (uint64_t)(pa) | pte_lo);
2076 * Remember if the list was empty and therefore will be the first
2079 if (LIST_FIRST(pvo_head) == NULL)
2081 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2083 if (pvo->pvo_vaddr & PVO_WIRED)
2084 pm->pm_stats.wired_count++;
2085 pm->pm_stats.resident_count++;
2088 * We hope this succeeds but it isn't required.
2090 i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
2092 PVO_PTEGIDX_SET(pvo, i);
2094 panic("moea64_pvo_enter: overflow");
2095 moea64_pte_overflow++;
2101 return (first ? ENOENT : 0);
2105 moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
2110 * If there is an active pte entry, we need to deactivate it (and
2111 * save the ref & chg bits).
2114 pt = moea64_pvo_to_pte(pvo, pteidx);
2116 moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_pmap,
2118 PVO_PTEGIDX_CLR(pvo);
2120 moea64_pte_overflow--;
2125 * Update our statistics.
2127 pvo->pvo_pmap->pm_stats.resident_count--;
2128 if (pvo->pvo_vaddr & PVO_WIRED)
2129 pvo->pvo_pmap->pm_stats.wired_count--;
2132 * Save the REF/CHG bits into their cache if the page is managed.
2134 if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
2137 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
2139 moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
2140 (LPTE_REF | LPTE_CHG));
2145 * Remove this PVO from the PV list.
2147 LIST_REMOVE(pvo, pvo_vlink);
2150 * Remove this from the overflow list and return it to the pool
2151 * if we aren't going to reuse it.
2153 LIST_REMOVE(pvo, pvo_olink);
2154 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2155 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea64_mpvo_zone :
2156 moea64_upvo_zone, pvo);
2157 moea64_pvo_entries--;
2158 moea64_pvo_remove_calls++;
2162 moea64_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
2167 * We can find the actual pte entry without searching by grabbing
2168 * the PTEG slot index saved in the low bits of pvo_vaddr (see the
2169 * PVO_PTEGIDX_* macros) and by noticing the HID bit.
2171 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
2172 if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
2173 pteidx ^= moea64_pteg_mask * 8;
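/*
 * Worked example (illustrative, assuming moea64_pteg_mask == 0x3ff):
 * for ptegidx 0x123 and saved slot 5, the primary index is
 * 0x123 * 8 + 5 = 0x91d. If LPTE_HID is set, XORing with 0x3ff * 8
 * gives 0x16e5, i.e. slot 5 of the secondary group 0x2dc
 * (= 0x123 ^ 0x3ff): the group flips to the secondary hash while the
 * slot within the group is preserved.
 */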
2178 static struct pvo_entry *
2179 moea64_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
2181 struct pvo_entry *pvo;
2186 vsid = va_to_vsid(pm, va);
2187 ptegidx = va_to_pteg(vsid, va);
2190 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2191 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2193 *pteidx_p = moea64_pvo_pte_index(pvo, ptegidx);
2202 static struct lpte *
2203 moea64_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
2208 * If we haven't been supplied the pteidx, calculate it.
2214 vsid = va_to_vsid(pvo->pvo_pmap, pvo->pvo_vaddr);
2215 ptegidx = va_to_pteg(vsid, pvo->pvo_vaddr);
2216 pteidx = moea64_pvo_pte_index(pvo, ptegidx);
2219 pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
2221 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
2222 !PVO_PTEGIDX_ISSET(pvo)) {
2223 panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
2224 "valid pte index", pvo);
2227 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
2228 PVO_PTEGIDX_ISSET(pvo)) {
2229 panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
2230 "pvo but no valid pte", pvo);
2233 if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
2235 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
2236 panic("moea64_pvo_to_pte: pvo %p has valid pte in "
2237 "moea64_pteg_table %p but invalid in pvo", pvo, pt);
2240 if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
2241 ~(LPTE_CHG|LPTE_REF)) != 0) {
2242 panic("moea64_pvo_to_pte: pvo %p pte does not match "
2243 "pte %p in moea64_pteg_table difference is %#x",
2245 (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
2248 ASSERT_TABLE_LOCK();
2252 if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
2253 panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
2254 "moea64_pteg_table but valid in pvo", pvo, pt);
2261 moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt)
2266 ASSERT_TABLE_LOCK();
2269 * First try primary hash.
2271 for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2272 if ((pt->pte_hi & LPTE_VALID) == 0) {
2273 pvo_pt->pte_hi &= ~LPTE_HID;
2274 moea64_pte_set(pt, pvo_pt);
2280 * Now try secondary hash.
2282 ptegidx ^= moea64_pteg_mask;
2284 for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2285 if ((pt->pte_hi & LPTE_VALID) == 0) {
2286 pvo_pt->pte_hi |= LPTE_HID;
2287 moea64_pte_set(pt, pvo_pt);
2292 panic("moea64_pte_insert: overflow");
2297 moea64_query_bit(vm_page_t m, u_int64_t ptebit)
2299 struct pvo_entry *pvo;
2303 if (moea64_attr_fetch(m) & ptebit)
2307 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2308 MOEA_PVO_CHECK(pvo); /* sanity check */
2311 * See if we saved the bit off. If so, cache it and return
2314 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2315 moea64_attr_save(m, ptebit);
2316 MOEA_PVO_CHECK(pvo); /* sanity check */
2322 * No luck, now go through the hard part of looking at the PTEs
2323 * themselves. Sync so that any pending REF/CHG bits are flushed to
2327 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2328 MOEA_PVO_CHECK(pvo); /* sanity check */
2331 * See if this pvo has a valid PTE. if so, fetch the
2332 * REF/CHG bits from the valid PTE. If the appropriate
2333 * ptebit is set, cache it and return success.
2336 pt = moea64_pvo_to_pte(pvo, -1);
2338 moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
2339 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2342 moea64_attr_save(m, ptebit);
2343 MOEA_PVO_CHECK(pvo); /* sanity check */
2354 moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
2357 struct pvo_entry *pvo;
2362 * Clear the cached value.
2364 rv = moea64_attr_fetch(m);
2365 moea64_attr_clear(m, ptebit);
2368 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2369 * we can reset the right ones). note that since the pvo entries and
2370 * list heads are accessed via BAT0 and are never placed in the page
2371 * table, we don't have to worry about further accesses setting the
2377 * For each pvo entry, clear the pvo's ptebit. If this pvo has a
2378 * valid pte clear the ptebit from the valid pte.
2381 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2382 MOEA_PVO_CHECK(pvo); /* sanity check */
2385 pt = moea64_pvo_to_pte(pvo, -1);
2387 moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
2388 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2390 moea64_pte_clear(pt, pvo->pvo_pmap, PVO_VADDR(pvo), ptebit);
2394 rv |= pvo->pvo_pte.lpte.pte_lo;
2395 pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
2396 MOEA_PVO_CHECK(pvo); /* sanity check */
2399 if (origbit != NULL) {
2407 moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2413 moea64_page_executable(mmu_t mmu, vm_page_t pg)
2415 return (!moea64_query_bit(pg, LPTE_NOEXEC));
2419 * Map a set of physical memory pages into the kernel virtual
2420 * address space. Return a pointer to where it is mapped. This
2421 * routine is intended to be used for mapping device memory,
2425 moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2427 vm_offset_t va, tmpva, ppa, offset;
2429 ppa = trunc_page(pa);
2430 offset = pa & PAGE_MASK;
2431 size = roundup(offset + size, PAGE_SIZE);
2433 va = kmem_alloc_nofault(kernel_map, size);
2436 panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2438 for (tmpva = va; size > 0;) {
2439 moea64_kenter(mmu, tmpva, ppa);
2445 return ((void *)(va + offset));
2449 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2451 vm_offset_t base, offset;
2453 base = trunc_page(va);
2454 offset = va & PAGE_MASK;
2455 size = roundup(offset + size, PAGE_SIZE);
2457 kmem_free(kernel_map, base, size);