/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-4-Clause
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

/* POWER9 only permits a 64k partition table size. */
#define	PART_SIZE	0x10000
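
/*
 * Overview: the hashed page table is a physically contiguous array of PTE
 * groups (PTEGs), each holding eight 16-byte PTEs.  A mapping may reside in
 * one of two PTEGs, selected either by the primary hash of its VSID and page
 * index or by the secondary hash (the one's complement of the primary).  The
 * routines below probe and update those slots directly, byte-swapping where
 * needed because the hardware table is always big-endian.
 */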

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
	isync(); /* Flush instruction queue once lock acquired */

#ifdef __powerpc64__
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();

	/*
	 * Temporarily enter 64-bit mode (set MSR[SF]) and reassemble the
	 * full 64-bit VPN from its two 32-bit halves before the tlbie.
	 */
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	tlbie_lock = 0;
}
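
/*
 * Briefly drop data relocation so that bootstrap code can address the page
 * table by its physical address; instruction relocation stays enabled, since
 * only PSL_DR is cleared.
 */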
#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static volatile struct pate *moea64_part_table;
static volatile struct lpte *moea64_pteg_table;
static struct rwlock	moea64_eviction_lock;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_synch_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
static int64_t	moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
static int64_t	moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_replace,	moea64_pte_replace_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);
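
/*
 * Only the bootstrap hooks and the low-level PTE operations are overridden
 * here; all higher-level pmap methods are inherited from the common oea64
 * MMU class named in MMU_DEF_INHERIT() above.
 */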

static int64_t
moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	PTESYNC();
	ptelo = be64toh(pt->pte_lo);

	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	if (ptebit == LPTE_REF) {
		/* See "Resetting the Reference Bit" in arch manual */
		PTESYNC();
		/* 2-step here safe: precision is not guaranteed */
		ptelo = be64toh(pt->pte_lo);

		/* One-byte store to avoid touching the C bit */
		((volatile uint8_t *)(&pt->pte_lo))[6] =
#if BYTE_ORDER == BIG_ENDIAN
		    ((uint8_t *)(&properpt.pte_lo))[6];
#else
		    ((uint8_t *)(&properpt.pte_lo))[1];
#endif
		rw_runlock(&moea64_eviction_lock);

		critical_enter();
		TLBIE(pvo->pvo_vpn);
		critical_exit();
	} else {
		rw_runlock(&moea64_eviction_lock);
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo & (LPTE_REF | LPTE_CHG));
}
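
/*
 * Note on the two paths above: the architecture permits the R bit to be
 * reset in place with a narrow store, but safely clearing any other bit
 * (notably C) requires the full unset/re-insert sequence taken by the
 * else case.
 */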

static int64_t
moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		moea64_pte_overflow--;
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = htobe64((be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	/* Keep statistics */
	moea64_pte_valid--;

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = htobe64(properpt.pte_hi);
		ptelo = be64toh(pt->pte_lo);
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need reinsertion and deletion */
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < n_slbs; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		mtspr(SPR_PTCR,
		    ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) |
		    flsl((PART_SIZE >> 12) - 1));
	} else {
		__asm __volatile ("ptesync; mtsdr1 %0; isync"
		    :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
			| (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	}
	tlbia();
}
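
/*
 * A worked example of the SDR1 encoding above, assuming 2^15 PTEGs:
 * moea64_pteg_mask is then 0x7fff, so flsl(0x7fff >> 11) = flsl(0xf) = 4.
 * HTABSIZE = 4 selects a 2^(18 + 4) = 4MB table, which matches 2^15 PTEGs
 * of 128 bytes each.  The PTCR encoding is analogous:
 * flsl((PART_SIZE >> 12) - 1) = 4 selects the 64KB (2^(12 + 4) byte)
 * partition table.
 */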

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory. This memory, to be allocated,
	 * has to reside in the page table we are about to allocate. We
	 * don't have BAT registers, so drop to data real mode for a minute
	 * as a measure of last resort. We do this a couple of times.
	 */

	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		moea64_part_table =
		    (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
		if (hw_direct_map)
			moea64_part_table = (struct pate *)PHYS_TO_DMAP(
			    (vm_offset_t)moea64_part_table);
	}
	/*
	 * PTEG table must be aligned on a 256k boundary, but can be placed
	 * anywhere with that alignment.
	 */
	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size, 256*1024);
	if (hw_direct_map)
		moea64_pteg_table = (struct lpte *)PHYS_TO_DMAP(
		    (vm_offset_t)moea64_pteg_table);

	DISABLE_TRANS(msr);
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
		moea64_part_table[0].pagetab =
		    ((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS) |
		    (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11));
	}
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	i = 0xc00; /* IS = 11 */
	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBMPOWER5:
	case IBMPOWER5PLUS:
		i = 0; /* IS not supported */
		break;
	}

	TLBSYNC();

	for (; i < 0x200000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		/* Temporarily set MSR[SF] so tlbiel sees the full operand. */
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}

static __inline int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
	uint32_t oldhihalf;

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */

	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

	return (ret);
}
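
/*
 * Note that the reservation above covers only the low 32 bits of pte_hi:
 * in the big-endian PTE layout those bytes hold LPTE_VALID and LPTE_LOCKED,
 * so a 32-bit lwarx/stwcx. pair is sufficient to seize the entry, and the
 * high half can be read non-atomically afterwards.
 */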

static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 */
		if (oldptehi & LPTE_BIG)
			va = oldptehi >> moea64_large_page_shift;
		else
			va = oldptehi >> ADDR_PIDX_SHFT;
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    VSID_HASH_MASK;
		else
			va = ((k >> 3) ^ va) & VSID_HASH_MASK;
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va);
		moea64_pte_valid--;
		moea64_pte_overflow++;
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = htobe64(pvo_pt->pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(pvo_pt->pte_hi);
	PTESYNC();

	/* Keep statistics */
	moea64_pte_valid++;

	return (k);
}
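
/*
 * Slot index arithmetic: a slot encodes (PTEG index * 8) + entry, since
 * each PTEG holds eight PTEs.  XORing a slot base with
 * (moea64_pteg_mask << 3) therefore complements every PTEG index bit,
 * turning a primary-hash slot base into its secondary-hash counterpart
 * (the secondary hash being the one's complement of the primary) and
 * vice versa.
 */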

static int
moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
{
	struct lpte insertpt;
	uintptr_t slot;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */

	/* Lock out all insertions for a bit */
	if (!rw_try_upgrade(&moea64_eviction_lock)) {
		rw_runlock(&moea64_eviction_lock);
		rw_wlock(&moea64_eviction_lock);
	}

	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* Try other hash table. Now we're getting desperate... */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* No freeable slots in either PTEG? We're hosed. */
	rw_wunlock(&moea64_eviction_lock);
	panic("moea64_pte_insert: overflow");
	return (-1);
}