/*
 * Copyright (C) 2010 Andreas Tobler
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <powerpc/aim/mmu_oea64.h>

#include "mmu_if.h"
#include "moea64_if.h"

#include "phyp-hvcall.h"
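/*
 * phyp-hvcall.h provides the hypervisor call wrappers used below
 * (phyp_hcall()/phyp_pft_hcall()) and the H_ENTER/H_READ/H_REMOVE/
 * H_CLEAR_MOD/H_CLEAR_REF tokens: under PAPR the hashed page table is
 * owned by the hypervisor, so every PTE update goes through an hcall
 * rather than a direct store.
 */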

extern int n_slbs;

/*
 * Kernel MMU interface
 */

static void	mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
		    vm_offset_t kernelend);
static void	mphyp_cpu_bootstrap(mmu_t mmup, int ap);
static void	mphyp_pte_synch(mmu_t, uintptr_t pt, struct lpte *pvo_pt);
static void	mphyp_pte_clear(mmu_t, uintptr_t pt, struct lpte *pvo_pt,
		    uint64_t vpn, u_int64_t ptebit);
static void	mphyp_pte_unset(mmu_t, uintptr_t pt, struct lpte *pvo_pt,
		    uint64_t vpn);
static void	mphyp_pte_change(mmu_t, uintptr_t pt, struct lpte *pvo_pt,
		    uint64_t vpn);
static int	mphyp_pte_insert(mmu_t, u_int ptegidx, struct lpte *pvo_pt);
static uintptr_t mphyp_pvo_to_pte(mmu_t, const struct pvo_entry *pvo);

#define VSID_HASH_MASK		0x0000007fffffffffULL

static mmu_method_t mphyp_methods[] = {
	MMUMETHOD(mmu_bootstrap,	mphyp_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap,	mphyp_cpu_bootstrap),

	MMUMETHOD(moea64_pte_synch,	mphyp_pte_synch),
	MMUMETHOD(moea64_pte_clear,	mphyp_pte_clear),
	MMUMETHOD(moea64_pte_unset,	mphyp_pte_unset),
	MMUMETHOD(moea64_pte_change,	mphyp_pte_change),
	MMUMETHOD(moea64_pte_insert,	mphyp_pte_insert),
	MMUMETHOD(moea64_pvo_to_pte,	mphyp_pvo_to_pte),

	{ 0, 0 }
};

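/*
 * Only bootstrap and the low-level PTE operations are overridden here;
 * everything else is inherited from the generic 64-bit OEA pmap class
 * (oea64_mmu) by the MMU_DEF_INHERIT registration below.
 */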
MMU_DEF_INHERIT(pseries_mmu, "mmu_phyp", mphyp_methods, 0, oea64_mmu);

static void
mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	uint64_t final_pteg_count = 0;
	char buf[8];
	cell_t prop[2];
	uint32_t nptlp, shift = 0, slb_encoding = 0;
	phandle_t dev, node, root;
	int idx, len, res;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	root = OF_peer(0);

	dev = OF_child(root);
	while (dev != 0) {
		res = OF_getprop(dev, "name", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpus") == 0)
			break;
		dev = OF_peer(dev);
	}

	node = OF_child(dev);
	while (node != 0) {
		res = OF_getprop(node, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0)
			break;
		node = OF_peer(node);
	}

	res = OF_getprop(node, "ibm,pft-size", prop, sizeof(prop));
	if (res <= 0)
		panic("mmu_phyp: unknown PFT size");
	final_pteg_count = (uint64_t)1 << prop[1];
	res = OF_getprop(node, "ibm,slb-size", prop, sizeof(prop[0]));
	if (res > 0)
		n_slbs = prop[0];

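	/*
	 * The second cell of "ibm,pft-size" gives the log2 of the page
	 * table size in bytes, so final_pteg_count above is really a byte
	 * count; each PTEG (struct lpteg) is 8 PTEs of 16 bytes each, and
	 * dividing by its size yields the PTEG count the moea64 code uses.
	 */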
	moea64_pteg_count = final_pteg_count / sizeof(struct lpteg);

	/*
	 * Scan the large page size property for PAPR compatible machines.
	 * See PAPR D.5 Changes to Section 5.1.4, 'CPU Node Properties'
	 * for the encoding of the property.
	 */
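	/*
	 * Each entry in the property is: base page shift, SLB encoding,
	 * nptlp, then nptlp pairs of { page shift, PTE LP encoding }.
	 * A 16 MB segment entry, for example, is commonly encoded as
	 * { 0x18, 0x100, 1, 0x18, 0x0 } (illustrative values only).
	 */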
	len = OF_getproplen(node, "ibm,segment-page-sizes");
	if (len > 0) {
		/*
		 * We have to use a variable length array on the stack
		 * since we have very limited stack space.
		 */
		cell_t arr[len/sizeof(cell_t)];

		res = OF_getprop(node, "ibm,segment-page-sizes", &arr,
		    sizeof(arr));
		len /= sizeof(cell_t);
		idx = 0;
		while (len > 0) {
			shift = arr[idx];
			slb_encoding = arr[idx + 1];
			nptlp = arr[idx + 2];
			idx += 3;
			len -= 3;
			while (len > 0 && nptlp) {
				/* Skip the page-size/PTE-encoding pairs. */
				idx += 2;
				len -= 2;
				nptlp--;
			}
			/* For now we allow shift only to be <= 0x18. */
			if (shift >= 0x18)
				break;
		}

		moea64_large_page_shift = shift;
		moea64_large_page_size = 1 << shift;
	}

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
mphyp_cpu_bootstrap(mmu_t mmup, int ap)
{
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
	int i;

	/*
	 * Install kernel SLB entries
	 */
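	/*
	 * slbia leaves SLB entry 0 intact, so the segment it maps is
	 * invalidated explicitly with slbie before the kernel entries
	 * from the per-CPU SLB cache are reinstalled.
	 */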
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0));
	for (i = 0; i < 64; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
}

static void
mphyp_pte_synch(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt)
{
	struct lpte pte;
	uint64_t junk;

	__asm __volatile("ptesync");
	phyp_pft_hcall(H_READ, 0, slot, 0, 0, &pte.pte_hi, &pte.pte_lo,
	    &junk);

	pvo_pt->pte_lo |= pte.pte_lo & (LPTE_CHG | LPTE_REF);
}

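/*
 * The hypervisor keeps the authoritative referenced/changed bits, so
 * clearing them goes through the H_CLEAR_REF/H_CLEAR_MOD hcalls instead
 * of rewriting the PTE directly.
 */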
static void
mphyp_pte_clear(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt, uint64_t vpn,
    u_int64_t ptebit)
{

	if (ptebit & LPTE_CHG)
		phyp_hcall(H_CLEAR_MOD, 0, slot);
	if (ptebit & LPTE_REF)
		phyp_hcall(H_CLEAR_REF, 0, slot);
}

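/*
 * The "1UL << 31" flag passed to H_REMOVE below corresponds to the PAPR
 * AVPN flag: the hypervisor verifies the abbreviated VPN in the slot
 * before removing it, so the wrong mapping is never torn down.
 */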
static void
mphyp_pte_unset(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt, uint64_t vpn)
{
	struct lpte pte;
	uint64_t junk;
	int err;

	err = phyp_pft_hcall(H_REMOVE, 1UL << 31, slot,
	    pvo_pt->pte_hi & LPTE_AVPN_MASK, 0, &pte.pte_hi, &pte.pte_lo,
	    &junk);
	KASSERT(err == H_SUCCESS, ("Error removing page: %d", err));

	pvo_pt->pte_lo |= pte.pte_lo & (LPTE_CHG | LPTE_REF);
}

static void
mphyp_pte_change(mmu_t mmu, uintptr_t slot, struct lpte *pvo_pt, uint64_t vpn)
{
	struct lpte evicted;
	uint64_t index, junk;
	int64_t result;

	/*
	 * NB: this is protected by the global table lock, so this two-step
	 * is safe, except for the scratch-page case. No CPUs on which we run
	 * this code should be using scratch pages.
	 */
	KASSERT(!(pvo_pt->pte_hi & LPTE_LOCKED),
	    ("Locked pages not supported on PHYP"));

	/* XXX: optimization using H_PROTECT for common case? */
	mphyp_pte_unset(mmu, slot, pvo_pt, vpn);
	result = phyp_pft_hcall(H_ENTER, H_EXACT, slot, pvo_pt->pte_hi,
	    pvo_pt->pte_lo, &index, &evicted.pte_lo, &junk);
	if (result != H_SUCCESS)
		panic("mphyp_pte_change() insertion failure: %ld\n", result);
}

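/*
 * Pick a PTE in the given PTEG that may be evicted: skip entries with
 * software bookkeeping bits set, remember the first usable slot, and
 * prefer one whose referenced bit is clear.
 */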
static __inline int64_t
mphyp_pte_spillable_ident(u_int ptegidx, struct lpte *to_evict)
{
	uint64_t slot, junk, k;
	struct lpte pt;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		slot = (ptegidx << 3) + (i + j) % 8;
		phyp_pft_hcall(H_READ, 0, slot, 0, 0, &pt.pte_hi, &pt.pte_lo,
		    &junk);

		if (pt.pte_hi & LPTE_SWBITS)
			continue;

		/* This is a candidate, so remember it */
		k = slot;

		/* Try to get a page that has not been used lately */
		if (!(pt.pte_lo & LPTE_REF)) {
			memcpy(to_evict, &pt, sizeof(struct lpte));
			return (k);
		}
	}

	if (k == -1)
		return (k);

	/* Fall back to the remembered candidate and fetch its contents. */
	phyp_pft_hcall(H_READ, 0, k, 0, 0, &to_evict->pte_hi,
	    &to_evict->pte_lo, &junk);
	return (k);
}

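/*
 * Insertion strategy: try the primary PTEG, then the secondary; if both
 * are full, evict a spillable PTE, detach it from its PVO, and claim the
 * freed slot with an H_EXACT insertion.
 */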
static int
mphyp_pte_insert(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
{
	int64_t result;
	struct lpte evicted;
	struct pvo_entry *pvo;
	uint64_t index, junk;
	u_int pteg_bktidx;

	/* Check for locked pages, which we can't support on this system */
	KASSERT(!(pvo_pt->pte_hi & LPTE_LOCKED),
	    ("Locked pages not supported on PHYP"));

	pvo_pt->pte_hi |= LPTE_VALID;
	pvo_pt->pte_hi &= ~LPTE_HID;
	evicted.pte_hi = 0;

	/*
	 * First try primary hash.
	 */
	pteg_bktidx = ptegidx;
	result = phyp_pft_hcall(H_ENTER, 0, pteg_bktidx << 3, pvo_pt->pte_hi,
	    pvo_pt->pte_lo, &index, &evicted.pte_lo, &junk);
	if (result == H_SUCCESS)
		return (index & 0x07);
	KASSERT(result == H_PTEG_FULL, ("Page insertion error: %ld "
	    "(ptegidx: %#x/%#x, PTE %#lx/%#lx", result, ptegidx,
	    moea64_pteg_count, pvo_pt->pte_hi, pvo_pt->pte_lo));

	/*
	 * Next try secondary hash.
	 */
	pteg_bktidx ^= moea64_pteg_mask;
	pvo_pt->pte_hi |= LPTE_HID;
	result = phyp_pft_hcall(H_ENTER, 0, pteg_bktidx << 3,
	    pvo_pt->pte_hi, pvo_pt->pte_lo, &index, &evicted.pte_lo, &junk);
	if (result == H_SUCCESS)
		return (index & 0x07);
	KASSERT(result == H_PTEG_FULL, ("Secondary page insertion error: %ld",
	    result));

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
	pteg_bktidx = ptegidx;
	index = mphyp_pte_spillable_ident(pteg_bktidx, &evicted);
	if (index == -1L) {
		pteg_bktidx ^= moea64_pteg_mask;
		index = mphyp_pte_spillable_ident(pteg_bktidx, &evicted);
	}

	if (index == -1L) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("mphyp_pte_insert: overflow");
		return (-1);
	}

	if (pteg_bktidx == ptegidx)
		pvo_pt->pte_hi &= ~LPTE_HID;
	else
		pvo_pt->pte_hi |= LPTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	if (evicted.pte_hi & LPTE_HID)
		pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */

	LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) {
		if (pvo->pvo_pte.lpte.pte_hi == evicted.pte_hi) {
			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
			    ("Invalid PVO for valid PTE!"));
			mphyp_pte_unset(mmu, index, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
			PVO_PTEGIDX_CLR(pvo);
			moea64_pte_overflow++;
			break;
		}
	}

	KASSERT(pvo->pvo_pte.lpte.pte_hi == evicted.pte_hi,
	    ("Unable to find PVO for spilled PTE"));

	/*
	 * Insert the new PTE in the now-free slot.
	 */
	result = phyp_pft_hcall(H_ENTER, H_EXACT, index, pvo_pt->pte_hi,
	    pvo_pt->pte_lo, &index, &evicted.pte_lo, &junk);
	if (result == H_SUCCESS)
		return (index & 0x07);

	panic("Page replacement error: %ld", result);
	return (-1);
}

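/*
 * Standard hashed page table hash: the PTEG index is the masked VSID
 * XORed with the page index of the address, truncated to the table size.
 */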
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static uintptr_t
mphyp_pvo_to_pte(mmu_t mmu, const struct pvo_entry *pvo)
{
	uint64_t vsid;
	u_int ptegidx;

	/* If the PTEG index is not set, then there is no page table entry */
	if (!PVO_PTEGIDX_ISSET(pvo))
		return (-1);

	vsid = PVO_VSID(pvo);
	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), pvo->pvo_vaddr & PVO_LARGE);

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	return ((ptegidx << 3) | PVO_PTEGIDX_GET(pvo));
}