/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/vmparam.h>
#include <machine/trap.h>

#include "mmu_oea64.h"

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
int n_slbs = 64;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);
struct slbtnode {
        uint16_t        ua_alloc;
        uint8_t         ua_level;
        /* Only 36 bits needed for full 64-bit address space. */
        uint64_t        ua_base;
        union {
                struct slbtnode *ua_child[16];
                struct slb      slb_entries[16];
        } u;
};
/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so 8 levels, with the leaf being at level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    | esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210| bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  |  7 |  6 |  5 |  4 |  3 |  2 |  1 |  0 | level
 */
#define UAD_ROOT_LEVEL  8
#define UAD_LEAF_LEVEL  0
static inline int
esid2idx(uint64_t esid, int level)
{
        int shift;

        shift = level * 4;
        return ((esid >> shift) & 0xF);
}

/*
 * The ua_base field should have 0 in its low 4*(level+1) bits; i.e. only
 * the esid bits that select nodes above this level may be set.
 */
#define uad_baseok(ua)                          \
        (esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
        uint64_t mask;
        int shift;

        shift = (level + 1) * 4;
        mask = ~((1ULL << shift) - 1);
        return (esid & mask);
}
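
/*
 * Worked example: for esid 0x123456789, esid2idx() picks the nibble for a
 * level (level 0 -> 0x9, level 2 -> 0x7, level 8 -> 0x1), while esid2base()
 * clears everything the level covers (level 0 -> 0x123456780,
 * level 2 -> 0x123456000).
 */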
/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb*
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
        struct slbtnode *child;
        struct slb *retval;
        int idx;

        idx = esid2idx(esid, parent->ua_level);
        KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

        /* unlock and M_WAITOK and loop? */
        child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
        KASSERT(child != NULL, ("unhandled NULL case"));

        child->ua_level = UAD_LEAF_LEVEL;
        child->ua_base = esid2base(esid, child->ua_level);
        idx = esid2idx(esid, child->ua_level);
        child->u.slb_entries[idx].slbv = slbv;
        child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
        setbit(&child->ua_alloc, idx);

        retval = &child->u.slb_entries[idx];

        /*
         * The above stores must be visible before the next one, so
         * that a lockless searcher always sees a valid path through
         * the tree.
         */
        powerpc_lwsync();

        idx = esid2idx(esid, parent->ua_level);
        parent->u.ua_child[idx] = child;
        setbit(&parent->ua_alloc, idx);

        return (retval);
}
/*
 * Allocate a new intermediate node to fit between the parent and
 * the new leaf for esid.
 */
static struct slbtnode*
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
        struct slbtnode *child, *inter;
        int idx, level;

        idx = esid2idx(esid, parent->ua_level);
        child = parent->u.ua_child[idx];
        KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
            ("No need for an intermediate node?"));

        /*
         * Find the level where the existing child and our new esid
         * meet.  It must be lower than parent->ua_level or we would
         * have chosen a different index in parent.
         */
        level = child->ua_level + 1;
        while (esid2base(esid, level) !=
            esid2base(child->ua_base, level))
                level++;
        KASSERT(level < parent->ua_level,
            ("Found splitting level %d for %09jx and %09jx, "
            "but it's the same as %p's",
            level, esid, child->ua_base, parent));

        /* unlock and M_WAITOK and loop? */
        inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
        KASSERT(inter != NULL, ("unhandled NULL case"));

        /* Set up intermediate node to point to child ... */
        inter->ua_level = level;
        inter->ua_base = esid2base(esid, inter->ua_level);
        idx = esid2idx(child->ua_base, inter->ua_level);
        inter->u.ua_child[idx] = child;
        setbit(&inter->ua_alloc, idx);
        powerpc_lwsync();

        /* Set up parent to point to intermediate node ... */
        idx = esid2idx(inter->ua_base, parent->ua_level);
        parent->u.ua_child[idx] = inter;
        setbit(&parent->ua_alloc, idx);

        return (inter);
}
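
/*
 * Example for the splitting logic above: with a lone leaf of ua_base
 * 0x123456780 hanging off the root, inserting esid 0x123456790 finds the
 * bases first agreeing at level 1 (0x123456700), so a level-1 intermediate
 * node is created holding the old leaf at index 8; the new leaf is then
 * attached at index 9 by make_new_leaf().
 */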
uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
        uint64_t slbv;

        /* Set kernel VSID to deterministic value */
        slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

        /*
         * Figure out if this is a large-page mapping.
         */
        if (hw_direct_map && va > DMAP_BASE_ADDRESS && va < DMAP_MAX_ADDRESS) {
                /*
                 * XXX: If we have set up a direct map, assume all physical
                 * memory is mapped with large pages.
                 */
                if (mem_valid(DMAP_TO_PHYS(va), 0) == 0)
                        slbv |= SLBV_L;
        } else if (moea64_large_page_size != 0 &&
            va >= (vm_offset_t)vm_page_array &&
            va <= (uintptr_t)(&vm_page_array[vm_page_array_size]))
                slbv |= SLBV_L;

        return (slbv);
}
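
/*
 * An SLB entry is the (slbe, slbv) pair: slbe carries the effective segment
 * id (esid), a valid bit, and the slot index, while slbv carries the virtual
 * segment id (vsid) plus per-segment flags such as SLBV_L for large pages.
 */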
struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
        uint64_t esid = va >> ADDR_SR_SHFT;
        struct slbtnode *ua;
        int idx;

        ua = pm->pm_slb_tree_root;

        for (;;) {
                KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
                    ua->ua_base, ua->ua_level));
                idx = esid2idx(esid, ua->ua_level);

                /*
                 * This code is specific to ppc64 where a load is
                 * atomic, so no need for atomic_load macro.
                 */
                if (ua->ua_level == UAD_LEAF_LEVEL)
                        return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
                            &ua->u.slb_entries[idx] : NULL);

                /*
                 * The following accesses are implicitly ordered under the POWER
                 * ISA by load dependencies (the store ordering is provided by
                 * the powerpc_lwsync() calls elsewhere) and so are run without
                 * barriers.
                 */
                ua = ua->u.ua_child[idx];
                if (ua == NULL ||
                    esid2base(esid, ua->ua_level) != ua->ua_base)
                        return (NULL);
        }
}
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
        struct slb *entry;

        /* Shortcut kernel case */
        if (pm == kernel_pmap)
                return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

        /*
         * If there is no vsid for this VA, we need to add a new entry
         * to the PMAP's segment table.
         */
        entry = user_va_to_slb_entry(pm, va);

        if (entry == NULL)
                return (allocate_user_vsid(pm,
                    (uintptr_t)va >> ADDR_SR_SHFT, 0));

        return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}
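
/*
 * Typical use of va_to_vsid() from the pmap layer: "vsid = va_to_vsid(pm, va);"
 * yields the segment translation for any address, allocating a vsid on the
 * first touch of a user segment and computing the kernel vsid arithmetically.
 */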
uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
        uint64_t vsid, slbv;
        struct slbtnode *ua, *next, *inter;
        struct slb *slb;
        int idx;

        KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

        PMAP_LOCK_ASSERT(pm, MA_OWNED);
        vsid = moea64_get_unique_vsid();

        slbv = vsid << SLBV_VSID_SHIFT;
        if (large)
                slbv |= SLBV_L;

        ua = pm->pm_slb_tree_root;

        /* Descend to the correct leaf or NULL pointer. */
        for (;;) {
                KASSERT(uad_baseok(ua),
                    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
                idx = esid2idx(esid, ua->ua_level);

                if (ua->ua_level == UAD_LEAF_LEVEL) {
                        ua->u.slb_entries[idx].slbv = slbv;
                        eieio();
                        ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
                            | SLBE_VALID;
                        setbit(&ua->ua_alloc, idx);
                        slb = &ua->u.slb_entries[idx];
                        break;
                }

                next = ua->u.ua_child[idx];
                if (next == NULL) {
                        slb = make_new_leaf(esid, slbv, ua);
                        break;
                }

                /*
                 * Check if the next item down has an okay ua_base.
                 * If not, we need to allocate an intermediate node.
                 */
                if (esid2base(esid, next->ua_level) != next->ua_base) {
                        inter = make_intermediate(esid, ua);
                        slb = make_new_leaf(esid, slbv, inter);
                        break;
                }

                ua = next;
        }

        /*
         * Someone probably wants this soon, and it may be a wired
         * SLB mapping, so pre-spill this entry.
         */
        eieio();
        slb_insert_user(pm, slb);

        return (vsid);
}
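
/*
 * The descent above ends in one of three ways: an existing leaf gains the
 * entry in place, a NULL child slot grows a fresh leaf, or a ua_base
 * mismatch forces an intermediate node before the new leaf is attached.
 */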
void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
        struct slbtnode *ua;
        int idx;

        PMAP_LOCK_ASSERT(pm, MA_OWNED);

        ua = pm->pm_slb_tree_root;
        /* Descend to the correct leaf. */
        for (;;) {
                KASSERT(uad_baseok(ua),
                    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

                idx = esid2idx(esid, ua->ua_level);
                if (ua->ua_level == UAD_LEAF_LEVEL) {
                        ua->u.slb_entries[idx].slbv = 0;
                        eieio();
                        ua->u.slb_entries[idx].slbe = 0;
                        clrbit(&ua->ua_alloc, idx);
                        return;
                }

                ua = ua->u.ua_child[idx];
                if (ua == NULL ||
                    esid2base(esid, ua->ua_level) != ua->ua_base) {
                        /* Perhaps just return instead of assert? */
                        KASSERT(0,
                            ("Asked to remove an entry that was never inserted!"));
                        return;
                }
        }
}
static void
free_slb_tree_node(struct slbtnode *ua)
{
        int idx;

        for (idx = 0; idx < 16; idx++) {
                if (ua->ua_level != UAD_LEAF_LEVEL) {
                        if (ua->u.ua_child[idx] != NULL)
                                free_slb_tree_node(ua->u.ua_child[idx]);
                } else {
                        if (ua->u.slb_entries[idx].slbv != 0)
                                moea64_release_vsid(ua->u.slb_entries[idx].slbv
                                    >> SLBV_VSID_SHIFT);
                }
        }

        uma_zfree(slbt_zone, ua);
}
void
slb_free_tree(pmap_t pm)
{

        free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
        struct slbtnode *root;

        root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
        KASSERT(root != NULL, ("unhandled NULL case"));
        root->ua_level = UAD_ROOT_LEVEL;

        return (root);
}
/* Lock entries mapping kernel text and stacks */

void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
        struct slb *slbcache;
        int i;

        /* We don't want to be preempted while modifying the kernel map */
        critical_enter();

        slbcache = PCPU_GET(aim.slb);

        /* Check for an unused slot, abusing the user slot as a full flag */
        if (slbcache[USER_SLB_SLOT].slbe == 0) {
                for (i = 0; i < n_slbs; i++) {
                        if (i == USER_SLB_SLOT)
                                continue;
                        if (!(slbcache[i].slbe & SLBE_VALID))
                                goto fillkernslb;
                }

                if (i == n_slbs)
                        slbcache[USER_SLB_SLOT].slbe = 1;
        }

        i = mftb() % n_slbs;
        if (i == USER_SLB_SLOT)
                i = (i+1) % n_slbs;

fillkernslb:
        KASSERT(i != USER_SLB_SLOT,
            ("Filling user SLB slot with a kernel mapping"));
        slbcache[i].slbv = slbv;
        slbcache[i].slbe = slbe | (uint64_t)i;

        /* If it is for this CPU, put it in the SLB right away */
        if (pmap_bootstrapped) {
                /* slbie not required */
                __asm __volatile ("slbmte %0, %1" ::
                    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
        }

        critical_exit();
}
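
/*
 * One slot (USER_SLB_SLOT) is reserved for the current user segment mapping;
 * kernel insertions skip it, and its slbe field doubles as the "cache is
 * full" flag checked above.  When no slot is free, a victim is picked from
 * the low bits of the timebase, a cheap pseudo-random choice that needs no
 * replacement state.
 */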
void
slb_insert_user(pmap_t pm, struct slb *slb)
{
        int i;

        PMAP_LOCK_ASSERT(pm, MA_OWNED);

        if (pm->pm_slb_len < n_slbs) {
                i = pm->pm_slb_len;
                pm->pm_slb_len++;
        } else {
                i = mftb() % n_slbs;
        }

        /* Note that this replacement is atomic with respect to trap_subr */
        pm->pm_slb[i] = slb;
}
static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    u_int8_t *flags, int wait)
{
        static vm_offset_t realmax = 0;
        void *va;
        vm_page_t m;

        if (realmax == 0)
                realmax = platform_real_maxaddr();

        *flags = UMA_SLAB_PRIV;
        m = vm_page_alloc_contig_domain(NULL, 0, domain,
            malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
            1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
        if (m == NULL)
                return (NULL);

        if (hw_direct_map)
                va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
        else {
                va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
                pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
        }

        if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
                bzero(va, PAGE_SIZE);

        return (va);
}
static void
slb_zone_init(void *dummy)
{
        slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
            UMA_ZONE_CONTIG | UMA_ZONE_VM);
        slb_cache_zone = uma_zcreate("SLB cache",
            (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, UMA_ZONE_CONTIG | UMA_ZONE_VM);

        if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
                uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
                uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
        }
}
struct slb **
slb_alloc_user_cache(void)
{
        return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
        uma_zfree(slb_cache_zone, slb);
}
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
        struct slb *slbcache;
        uint64_t slbe, slbv;
        uint64_t esid, addr;
        int i;

        addr = (type == EXC_ISE) ? srr0 : dar;
        slbcache = PCPU_GET(aim.slb);
        esid = (uintptr_t)addr >> ADDR_SR_SHFT;
        slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

        /* See if the hardware flushed this somehow (can happen in LPARs) */
        for (i = 0; i < n_slbs; i++)
                if (slbcache[i].slbe == (slbe | (uint64_t)i))
                        return;

        /* Not in the map, needs to actually be added */
        slbv = kernel_va_to_slbv(addr);
        if (slbcache[USER_SLB_SLOT].slbe == 0) {
                for (i = 0; i < n_slbs; i++) {
                        if (i == USER_SLB_SLOT)
                                continue;
                        if (!(slbcache[i].slbe & SLBE_VALID))
                                goto fillkernslb;
                }

                if (i == n_slbs)
                        slbcache[USER_SLB_SLOT].slbe = 1;
        }

        /* Sacrifice a random SLB entry that is not the user entry */
        i = mftb() % n_slbs;
        if (i == USER_SLB_SLOT)
                i = (i+1) % n_slbs;

fillkernslb:
        /* Write new entry */
        slbcache[i].slbv = slbv;
        slbcache[i].slbe = slbe | (uint64_t)i;

        /* Trap handler will restore from cache on exit */
}
int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
        struct slb *user_entry;
        uint64_t esid;
        int i;

        if (pm->pm_slb == NULL)
                return (-1);

        esid = (uintptr_t)addr >> ADDR_SR_SHFT;

        PMAP_LOCK(pm);
        user_entry = user_va_to_slb_entry(pm, addr);

        if (user_entry == NULL) {
                /* allocate_vsid auto-spills it */
                (void)allocate_user_vsid(pm, esid, 0);
        } else {
                /*
                 * Check that another CPU has not already mapped this.
                 * XXX: Per-thread SLB caches would be better.
                 */
                for (i = 0; i < pm->pm_slb_len; i++)
                        if (pm->pm_slb[i] == user_entry)
                                break;

                if (i == pm->pm_slb_len)
                        slb_insert_user(pm, user_entry);
        }
        PMAP_UNLOCK(pm);

        return (0);
}
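
/*
 * A user-mode miss either allocates a vsid for a never-touched segment
 * (allocate_user_vsid pre-spills the new entry) or re-inserts an entry that
 * already exists in the tree but is absent from the pmap's spill cache,
 * e.g. after eviction or when another CPU raced to create it.
 */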