/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"
#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL
/*
 * The tlbie instruction must be executed in 64-bit mode, so we have to
 * twiddle MSR[SF] around every invocation. Just to add to the fun,
 * exceptions must be off as well so that we cannot take a trap while
 * in 64-bit mode. What a pain.
 */
struct mtx	tlbie_mutex;
static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr, scratch;
#endif

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	mtx_lock_spin(&tlbie_mutex);
#ifdef __powerpc64__
	/* Already in 64-bit mode: no MSR dance is needed. */
	__asm __volatile("ptesync; tlbie %0; eieio; tlbsync; ptesync"
	    :: "r"(vpn) : "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	/* Set MSR[SF], rebuild the VPN from its halves, tlbie, restore. */
	__asm __volatile("mfmsr %0; mr %1, %0; insrdi %1,%5,1,0;"
	    "mtmsrd %1; isync; ptesync;"
	    "sld %1,%2,%4; or %1,%1,%3; tlbie %1;"
	    "mtmsrd %0; isync; eieio; tlbsync; ptesync"
	    : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
#endif
	mtx_unlock_spin(&tlbie_mutex);
}
#define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define	ENABLE_TRANS(msr)	mtmsr(msr)
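
/*
 * Illustrative sketch of how the two macros above are meant to be paired
 * (moea64_bootstrap_native() below uses exactly this pattern around its
 * bzero() of the PTEG table; the variable name here is arbitrary):
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	bzero((void *)physaddr, size);
 *	ENABLE_TRANS(msr);
 *
 * Note that only PSL_DR (data relocation) is cleared; instruction fetch
 * stays translated.
 */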
/*
 * PTEG data.
 */
static struct lpteg	*moea64_pteg_table;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, u_int, struct lpte *);
static uintptr_t moea64_pvo_to_pte_native(mmu_t, const struct pvo_entry *);
static void	moea64_pte_synch_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt);
static void	moea64_pte_clear_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn, uint64_t ptebit);
static void	moea64_pte_change_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);
static void	moea64_pte_unset_native(mmu_t mmu, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);
static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_change,	moea64_pte_change_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),
	MMUMETHOD(moea64_pvo_to_pte,	moea64_pvo_to_pte_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}
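
/*
 * A worked example of the hash above, with made-up numbers: assuming 4KB
 * pages (ADDR_PIDX_SHFT == 12), the 32-bit AIM page-index mask
 * (ADDR_PIDX == 0x0ffff000) and a hypothetical moea64_pteg_mask of 0xfff,
 * vsid = 0x123456 and addr = 0xdeadb000 give
 *
 *	(0xdeadb000 & ADDR_PIDX) >> 12 = 0xeadb
 *	hash = 0x123456 ^ 0xeadb = 0x12de8d
 *	PTEG index = 0x12de8d & 0xfff = 0xe8d
 *
 * i.e. the VSID and the page index within the segment are folded together,
 * then truncated to the size of the PTEG table.
 */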
static void
moea64_pte_synch_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
}
static void
moea64_pte_clear_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn, uint64_t ptebit)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(vpn);
}
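
/*
 * Quietly make a PTE valid. The order of the two stores below matters:
 * the low word (RPN, protection, WIMG) is written first, an eieio orders
 * it before the high word, and only the high-word store sets LPTE_VALID.
 * A processor walking the table concurrently therefore never sees a valid
 * but half-formed entry.
 */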
static void
moea64_pte_set_native(struct lpte *pt, struct lpte *pvo_pt)
{

	pvo_pt->pte_hi |= LPTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	PTESYNC();

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid++;
}
static void
moea64_pte_unset_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	pvo_pt->pte_hi &= ~LPTE_VALID;

	/* Finish all pending operations */
	isync();

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the PTE.
	 */
	pt->pte_hi &= ~LPTE_VALID;
	TLBIE(vpn);

	/*
	 * Save the ref & chg bits.
	 */
	moea64_pte_synch_native(mmu, pt_cookie, pvo_pt);

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid--;
}
static void
moea64_pte_change_native(mmu_t mmu, uintptr_t pt, struct lpte *pvo_pt,
    uint64_t vpn)
{

	/*
	 * Invalidate the PTE, then re-set it with the new contents.
	 */
	moea64_pte_unset_native(mmu, pt, pvo_pt, vpn);
	moea64_pte_set_native((struct lpte *)pt, pvo_pt);
}
static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */
	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */
#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < 64; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */
	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}
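
/*
 * The SDR1 value built above packs the (size-aligned) physical address of
 * the PTEG table into the upper bits and the table size, encoded as
 * HTABSIZE = log2(#PTEGs) - 11, into the low bits. For example, with a
 * hypothetical 4MB table (32768 PTEGs, so moea64_pteg_mask == 0x7fff),
 * flsl(0x7fff >> 11) = flsl(0xf) = 4. The numbers are only an
 * illustration; the actual size is chosen elsewhere at bootstrap time.
 */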
static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */
	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);

	/*
	 * We now need to allocate memory, but any access to it would have
	 * to be mapped by the very page table we are about to allocate,
	 * and we have no BATs to cover it. So drop to data real mode for
	 * a minute, as a measure of last resort, while we touch it. We do
	 * this a couple of times.
	 */
	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	/*
	 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
	 */
	mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}
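
/*
 * The early/mid/late bootstrap calls above are the machine-independent
 * pieces shared with the other 64-bit OEA implementations (see
 * mmu_oea64.h); only the PTEG table allocation and the TLBIE lock are
 * specific to running natively, without a hypervisor.
 */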
static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	TLBSYNC();

	for (i = 0; i < 0xFF000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		/* As in TLBIE(): set MSR[SF], tlbiel, restore the MSR. */
		__asm __volatile("mfmsr %0; mr %1, %0; insrdi %1,%3,1,0;"
		    "mtmsrd %1; isync; tlbiel %2; mtmsrd %0; isync"
		    : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}
static uintptr_t
moea64_pvo_to_pte_native(mmu_t mmu, const struct pvo_entry *pvo)
{
	struct lpte *pt;
	int pteidx, ptegidx;
	uint64_t vsid;

	/* If the PTEG index is not set, then there is no page table entry */
	if (!PVO_PTEGIDX_ISSET(pvo))
		return (-1);

	/*
	 * Calculate the ptegidx
	 */
	vsid = PVO_VSID(pvo);
	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
	    pvo->pvo_vaddr & PVO_LARGE);

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo);
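
	/*
	 * Layout note: pteidx names one of the 8 (struct lpte) slots in
	 * the selected PTEG, so (pteidx >> 3) recovers the PTEG index and
	 * (pteidx & 7) the slot, as used below to index moea64_pteg_table.
	 */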

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table; difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		return ((uintptr_t)pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (-1);
}
static int
moea64_pte_spillable_ident(u_int ptegidx)
{
	struct	lpte *pt;
	int	i, j, k;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8];
		if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED))
			continue;

		/* This is a candidate, so remember it */
		k = (i + j) % 8;

		/* Try to get a page that has not been used lately */
		if (!(pt->pte_lo & LPTE_REF))
			return (k);
	}

	return (k);
}
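
/*
 * Insert a PTE into the hardware page table: first look for a free slot
 * in the primary PTEG, then in the secondary PTEG (with LPTE_HID set in
 * the new entry), and, failing both, evict a spillable victim chosen by
 * moea64_pte_spillable_ident() above. Returns the slot index that was
 * used within the PTEG.
 */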
static int
moea64_pte_insert_native(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
{
	struct	lpte *pt;
	struct	pvo_entry *pvo;
	u_int	pteg_bktidx;
	int	i;

	/*
	 * First try primary hash.
	 */
	pteg_bktidx = ptegidx;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	pteg_bktidx ^= moea64_pteg_mask;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
	pteg_bktidx = ptegidx;
	i = moea64_pte_spillable_ident(pteg_bktidx);
	if (i < 0) {
		pteg_bktidx ^= moea64_pteg_mask;
		i = moea64_pte_spillable_ident(pteg_bktidx);
	}

	if (i < 0) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("moea64_pte_insert: overflow");
		return (-1);
	}

	if (pteg_bktidx == ptegidx)
		pvo_pt->pte_hi &= ~LPTE_HID;
	else
		pvo_pt->pte_hi |= LPTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea64_pteg_table[pteg_bktidx].pt[i];

	if (pt->pte_hi & LPTE_HID)
		pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */
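
	/*
	 * The overflow list searched below is keyed by the victim's
	 * primary PTEG index, which is why pteg_bktidx was just converted
	 * back from the secondary index when the victim carries LPTE_HID.
	 */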
	LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) {
		if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) {
			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
			    ("Invalid PVO for valid PTE!"));
			moea64_pte_unset_native(mmu, (uintptr_t)pt,
			    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
			PVO_PTEGIDX_CLR(pvo);
			moea64_pte_overflow++;
			break;
		}
	}

	KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi,
	    ("Unable to find PVO for spilled PTE"));

	/*
	 * Set the new PTE.
	 */
	moea64_pte_set_native(pt, pvo_pt);

	return (i);
}