/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND 4-Clause-BSD
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "moea64_if.h"
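
/*
 * PowerPC synchronization primitives: ptesync orders page table stores
 * and waits for outstanding TLB invalidations; tlbsync waits for tlbie
 * broadcasts to complete on all processors; eieio orders accesses to
 * cache-inhibited storage; sync is a full memory barrier.
 */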
#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL
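
/*
 * Only one tlbie invalidation sequence may be in flight system-wide at a
 * time, so a global spinlock (below) serializes all of them.
 */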
static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
	isync(); /* Flush instruction queue once lock acquired */

#ifdef __powerpc64__
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	tlbie_lock = 0;
}
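
/*
 * Drop to real (untranslated) data accesses around stores to the page
 * table itself: clearing MSR[DR] makes data accesses use physical
 * addresses until translation is re-enabled.
 */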
#define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define	ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static volatile struct lpte *moea64_pteg_table;
static struct rwlock	moea64_eviction_lock;
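
/*
 * The eviction lock is held for reading by all routine PTE updates and
 * for writing only when a filled PTEG must have an entry evicted, so the
 * common paths can run concurrently.
 */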

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_synch_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
static int64_t	moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
static int64_t	moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_replace,	moea64_pte_replace_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);
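
/*
 * Read the referenced (REF) and changed (CHG) bits out of the hardware
 * PTE for the given PVO; returns -1 if the PTE has been evicted.
 */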
static int64_t
moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	PTESYNC();
	ptelo = be64toh(pt->pte_lo);

	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_REF | LPTE_CHG));
}
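
/*
 * Clear the given status bit (LPTE_REF or LPTE_CHG) in the hardware PTE.
 * The reference bit can be cleared in place with a single byte store; the
 * change bit requires the PTE to be invalidated and reinserted.
 */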
static int64_t
moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	if (ptebit == LPTE_REF) {
		/* See "Resetting the Reference Bit" in arch manual */
		PTESYNC();
		/* 2-step here safe: precision is not guaranteed */
		ptelo = be64toh(pt->pte_lo);

		/* One-byte store to avoid touching the C bit */
		((volatile uint8_t *)(&pt->pte_lo))[6] =
#if BYTE_ORDER == BIG_ENDIAN
		    ((uint8_t *)(&properpt.pte_lo))[6];
#else
		    ((uint8_t *)(&properpt.pte_lo))[1];
#endif
		rw_runlock(&moea64_eviction_lock);

		critical_enter();
		TLBIE(pvo->pvo_vpn);
		critical_exit();
	} else {
		rw_runlock(&moea64_eviction_lock);
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo & (LPTE_REF | LPTE_CHG));
}
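
/*
 * Remove the PTE from the page table, invalidating its TLB entry and
 * returning the accumulated REF/CHG bits.
 */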
static int64_t
moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		moea64_pte_overflow--;
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = htobe64((be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	/* Keep statistics */
	moea64_pte_valid--;

	return (ptelo & (LPTE_CHG | LPTE_REF));
}
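
/*
 * Update a PTE in place when only software-interpreted bits change
 * (flags == 0); otherwise fall back to a full unset/insert cycle.
 */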
static int64_t
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = htobe64(properpt.pte_hi);
		ptelo = be64toh(pt->pte_lo);
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need reinsertion and deletion */
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo);
}
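
/*
 * Per-CPU MMU setup: run with address translation disabled, install the
 * kernel SLB entries (64-bit) or segment registers (32-bit), then point
 * SDR1 at the page table and flush the TLB.
 */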
static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
	#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
	#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

	#ifdef __powerpc64__
		__asm __volatile ("slbia");
		__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
		    "r"(0));

		for (i = 0; i < n_slbs; i++) {
			if (!(slb[i].slbe & SLBE_VALID))
				continue;

			__asm __volatile ("slbmte %0, %1" ::
			    "r"(slb[i].slbv), "r"(slb[i].slbe));
		}
	#else
		for (i = 0; i < 16; i++)
			mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	#endif

	/*
	 * Install page table
	 */

	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}
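
/*
 * Allocate and zero the PTEG table, then hand control to the common
 * moea64 bootstrap stages.
 */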
static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory. This memory, to be allocated,
	 * has to reside in a page table. The page table we are about to
	 * allocate. We don't have BAT. So drop to data real mode for a minute
	 * as a measure of last resort. We do this a couple times.
	 */

	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}
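
/*
 * Local TLB flush: step tlbiel through all congruence classes. Processors
 * that do not implement the IS field (970, POWER4/5) must start at 0.
 */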
static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	i = 0xc00; /* IS = 11 */
	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBMPOWER5:
	case IBMPOWER5PLUS:
		i = 0; /* IS not supported */
		break;
	}

	TLBSYNC();

	for (; i < 0x200000; i += 0x00001000) {
		#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
		#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
		#endif
	}

	EIEIO();
	TLBSYNC();
}
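
/*
 * Try to lock a PTE by setting LPTE_LOCKED in the low 32-bit word of
 * pte_hi with a lwarx/stwcx. sequence. Fails (returns 0) if any bit in
 * bitmask is already set; the previous pte_hi is returned through oldhi.
 */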
static __inline int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
	uint32_t oldhihalf;

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */

	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

	return (ret);
}
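
/*
 * Claim a slot in the given PTEG for a new PTE, evicting a valid,
 * unwired entry if every slot whose mask bits are clear is taken. The
 * outgoing entry's virtual address is reconstructed from its PTE high
 * word and slot number so its TLB entry can be invalidated.
 */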
static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 */
		if (oldptehi & LPTE_BIG)
			va = oldptehi >> moea64_large_page_shift;
		else
			va = oldptehi >> ADDR_PIDX_SHFT;
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    VSID_HASH_MASK;
		else
			va = ((k >> 3) ^ va) & VSID_HASH_MASK;
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va);
		moea64_pte_valid--;
		moea64_pte_overflow++;
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = htobe64(pvo_pt->pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(pvo_pt->pte_hi);
	PTESYNC();

	/* Keep statistics */
	moea64_pte_valid++;

	return (k);
}
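
/*
 * Insert a PTE: try for a free slot in the primary PTEG, then the
 * secondary PTEG, and only then take the eviction (write) lock and
 * sacrifice a resident entry.
 */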
static int
moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
{
	struct lpte insertpt;
	uintptr_t slot;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */

	/* Lock out all insertions for a bit */
	if (!rw_try_upgrade(&moea64_eviction_lock)) {
		rw_runlock(&moea64_eviction_lock);
		rw_wlock(&moea64_eviction_lock);
	}

	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* Try other hash table. Now we're getting desperate... */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* No freeable slots in either PTEG? We're hosed. */
	rw_wunlock(&moea64_eviction_lock);
	panic("moea64_pte_insert: overflow");
	return (-1);
}