/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND 4-Clause-BSD
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "moea64_if.h"
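
/*
 * Barriers used when updating the hashed page table shared with the MMU:
 * ptesync orders prior page table stores and completes outstanding tlbie
 * operations, tlbsync waits for tlbie broadcasts to finish on other
 * processors, and eieio orders the paired stores to pte_lo/pte_hi below.
 */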
#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

/* POWER9 only permits a 64k partition table size. */
#define	PART_SIZE	0x10000
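/*
 * The PTCR size field encodes log2(table size / 4 KB), so the
 * flsl((PART_SIZE >> 12) - 1) expression used at bootstrap below
 * evaluates to 4 for this 64 KB table.
 */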

static bool moea64_crop_tlbie;
static bool moea64_need_lock;

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;
	bool need_lock = moea64_need_lock;

	vpn <<= ADDR_PIDX_SHFT;

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	if (need_lock) {
		while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
		isync(); /* Flush instruction queue once lock acquired */
	}
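	/*
	 * Before ISA 3.0 the architecture permits at most one tlbie
	 * sequence in flight system-wide, which is why the hand-rolled
	 * spinlock above is used instead of a mutex that could block
	 * or migrate the thread mid-sequence.
	 */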

	if (moea64_crop_tlbie)
		vpn &= ~(0xffffULL << 48);

#ifdef __powerpc64__
	/*
	 * Explicitly clobber r0. The tlbie instruction has two forms: an old
	 * one used by PowerISA 2.03 and prior, and a newer one used by PowerISA
	 * 2.06 (maybe 2.05?) and later. We need to support both, and it just
	 * so happens that since we use 4k pages we can simply zero out r0, and
	 * clobber it, and the assembler will interpret the single-operand form
	 * of tlbie as having RB set, and everything else as 0. The RS operand
	 * in the newer form is in the same position as the L(page size) bit of
	 * the old form, so as long as RS is 0, we're good on both sides.
	 */
	__asm __volatile("li 0, 0 \n tlbie %0" :: "r"(vpn) : "r0", "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	if (need_lock)
		tlbie_lock = 0;
}

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)
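
/*
 * Clearing PSL_DR puts the CPU in real (untranslated) data addressing
 * mode, letting bootstrap code touch the page table's physical memory
 * before any mapping for it exists; ENABLE_TRANS() restores the saved MSR.
 */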

/*
 * PTEG data.
 */
static volatile struct lpte *moea64_pteg_table;
static struct rwlock	moea64_eviction_lock;

static volatile struct pate *moea64_part_table;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_synch_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
static int64_t	moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
static int64_t	moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_replace,	moea64_pte_replace_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);
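
/*
 * Each of the PTE manipulation routines below returns -1 if the target
 * PTE has been evicted from the hash table in the meantime, and otherwise
 * returns the referenced/changed (RC) bits collected from the entry.
 */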

static int64_t
moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	PTESYNC();
	ptelo = be64toh(pt->pte_lo);

	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	if (ptebit == LPTE_REF) {
		/* See "Resetting the Reference Bit" in arch manual */
		PTESYNC();
		/* 2-step here safe: precision is not guaranteed */
		ptelo = be64toh(pt->pte_lo);

		/* One-byte store to avoid touching the C bit */
		((volatile uint8_t *)(&pt->pte_lo))[6] =
#if BYTE_ORDER == BIG_ENDIAN
		    ((uint8_t *)(&properpt.pte_lo))[6];
#else
		    ((uint8_t *)(&properpt.pte_lo))[1];
#endif
		rw_runlock(&moea64_eviction_lock);

		critical_enter();
		TLBIE(pvo->pvo_vpn);
		critical_exit();
	} else {
		rw_runlock(&moea64_eviction_lock);
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo & (LPTE_REF | LPTE_CHG));
}
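
/*
 * Note that clearing bits other than R cannot use the one-byte store
 * above: the architecture's "Modifying a Page Table Entry" recipe
 * requires invalidating and re-inserting the entry, which is why the
 * else path round-trips through unset/insert.
 */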

static int64_t
moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		STAT_MOEA64(moea64_pte_overflow--);
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = htobe64((be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	/* Keep statistics */
	STAT_MOEA64(moea64_pte_valid--);

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_inval_native(mmu_t mmu, struct pvo_entry *pvo,
    volatile struct lpte *pt)
{
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		STAT_MOEA64(moea64_pte_overflow--);
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Replace the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = htobe64((be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	EIEIO();
	pt->pte_lo = htobe64(properpt.pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(properpt.pte_hi); /* Release lock */
	PTESYNC();
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = htobe64(properpt.pte_hi);
		ptelo = be64toh(pt->pte_lo);
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need reinsertion and deletion */
		ptelo = moea64_pte_replace_inval_native(mmu, pvo, pt);
	}

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	switch (mfpvr() >> 16) {
	case IBMPOWER9:
		mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_RADIX);
		break;
	}

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < n_slbs; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00)
		mtspr(SPR_PTCR,
		    ((uintptr_t)moea64_part_table & ~DMAP_BASE_ADDRESS) |
		     flsl((PART_SIZE >> 12) - 1));
	else
		__asm __volatile ("ptesync; mtsdr1 %0; isync"
		    :: "r"(((uintptr_t)moea64_pteg_table & ~DMAP_BASE_ADDRESS)
			     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	switch (mfpvr() >> 16) {
	case IBMPOWER9:
		moea64_need_lock = false;
		break;
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBM970:
	case IBM970FX:
	case IBM970GX:
	case IBM970MP:
		moea64_crop_tlbie = true;
	default:
		moea64_need_lock = true;
	}

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %lu PTEGs, %lu bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory, and that memory has to reside in
	 * the page table we are about to allocate. We don't have BATs, so
	 * drop to data real mode for a minute as a measure of last resort.
	 * We do this a couple of times.
	 *
	 * PTEG table must be aligned on a 256k boundary, but can be placed
	 * anywhere with that alignment on POWER ISA 3+ systems. On earlier
	 * systems, offset addition is done by the CPU with bitwise OR rather
	 * than addition, so the table must also be aligned on a boundary of
	 * its own size. Pick the larger of the two, which works on all
	 * systems.
	 */
	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size,
	    MAX(256*1024, size));
	if (hw_direct_map)
		moea64_pteg_table =
		    (struct lpte *)PHYS_TO_DMAP((vm_offset_t)moea64_pteg_table);
	/* Allocate partition table (ISA 3.0). */
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		moea64_part_table =
		    (struct pate *)moea64_bootstrap_alloc(PART_SIZE, PART_SIZE);
		if (hw_direct_map)
			moea64_part_table =
			    (struct pate *)PHYS_TO_DMAP((vm_offset_t)moea64_part_table);
	}
	DISABLE_TRANS(msr);
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		bzero(__DEVOLATILE(void *, moea64_part_table), PART_SIZE);
		moea64_part_table[0].pagetab =
		    (DMAP_TO_PHYS((vm_offset_t)moea64_pteg_table)) |
		    (uintptr_t)(flsl((moea64_pteg_count - 1) >> 11));
	}
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	i = 0xc00; /* IS = 11 */
	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
	case IBM970GX:
	case IBMPOWER4:
	case IBMPOWER4PLUS:
	case IBMPOWER5:
	case IBMPOWER5PLUS:
		i = 0; /* IS not supported */
		break;
	}

	TLBSYNC();

	for (; i < 0x400000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}
static __inline int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
	uint32_t oldhihalf;

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */

	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

	return (ret);
}

static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 */
		va = oldptehi >> (ADDR_SR_SHFT - ADDR_API_SHFT64);
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    (ADDR_PIDX >> ADDR_PIDX_SHFT);
		else
			va = ((k >> 3) ^ va) & (ADDR_PIDX >> ADDR_PIDX_SHFT);
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va);
		STAT_MOEA64(moea64_pte_valid--);
		STAT_MOEA64(moea64_pte_overflow++);
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = htobe64(pvo_pt->pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(pvo_pt->pte_hi);
	PTESYNC();

	/* Keep statistics */
	STAT_MOEA64(moea64_pte_valid++);

	return (k);
}
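
/*
 * Insertion policy: try for a free slot in the primary PTEG, then the
 * secondary PTEG, and only then upgrade to the write lock and evict a
 * valid (but unwired and unlocked) entry from either group.
 */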
static int
moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
{
	struct lpte insertpt;
	uintptr_t slot;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */

	/* Lock out all insertions for a bit */
	if (!rw_try_upgrade(&moea64_eviction_lock)) {
		rw_runlock(&moea64_eviction_lock);
		rw_wlock(&moea64_eviction_lock);
	}

	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* Try other hash table. Now we're getting desperate... */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* No freeable slots in either PTEG? We're hosed. */
	rw_wunlock(&moea64_eviction_lock);
	panic("moea64_pte_insert: overflow");
	return (-1);
}