/*-
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
int n_slbs = 64;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so 8 levels, with the leaf being at level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    | esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210| bits
 * +----+----+----+----+----+----+----+----+----+--------
 * |  8 |  7 |  6 |  5 |  4 |  3 |  2 |  1 |  0 | level
 */
#define UAD_ROOT_LEVEL	8
#define UAD_LEAF_LEVEL	0

static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}

/*
 * The low 4*(level+1) bits of ua_base should all be 0; i.e. ua_base
 * must be invariant under esid2base() at the node's own level, which
 * is exactly what uad_baseok() checks.
 */
#define uad_baseok(ua)					\
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}
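
/*
 * Illustrative example (editorial, not from the original source): for
 * the 36-bit esid 0x123456789, esid2idx() selects one nibble per level
 * (level 0 -> 0x9, level 4 -> 0x5, level 8 -> 0x1), and esid2base()
 * clears every nibble below the given level, so
 * esid2base(0x123456789, 2) == 0x123456000.  A node at level 2 with
 * ua_base 0x123456000 therefore satisfies uad_baseok().
 */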

/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb*
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	mb();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * the esid.
 */
static struct slbtnode*
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	mb();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}
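
/*
 * Worked example (editorial, not from the original source): if a leaf
 * for esid 0x123456789 (ua_base 0x123456780, level 0) already hangs
 * off the root and esid 0x123400000 is then inserted, the two bases
 * first agree at level 4 (esid2base() of both is 0x123400000), so the
 * intermediate node is created there; the old leaf sits at index 5
 * (nibble 4 of 0x123456780) and the new leaf goes in at index 0.
 */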

uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/* Figure out if this is a large-page mapping */
	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, assume
		 * all physical memory is mapped with large pages.
		 */
		if (mem_valid(va, 0) == 0)
			slbv |= SLBV_L;
	}

	return (slbv);
}

struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}
}

uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */
	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}
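
/*
 * Illustrative note (editorial, not from the original source): segments
 * are 256 MB here (ADDR_SR_SHFT == 28), so e.g. user VA 0x10012345678
 * lies in segment (esid) 0x1001, and every VA in that segment shares
 * the VSID that allocate_user_vsid() assigned to it.
 */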

uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}

void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}

static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */
void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random entry, taking care to avoid the user slot */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i + 1) % n_slbs;

fillkernslb:
	KASSERT(i != USER_SLB_SLOT,
	    ("Filling user SLB slot with a kernel mapping"));
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}

void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < n_slbs) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
		i = mftb() % n_slbs;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}

static void *
slb_uma_real_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	static vm_offset_t realmax = 0;
	void *va;
	vm_page_t m;

	if (realmax == 0)
		realmax = platform_real_maxaddr();

	*flags = UMA_SLAB_PRIV;

	for (;;) {
		m = vm_phys_alloc_contig(1, 0, realmax, PAGE_SIZE,
		    PAGE_SIZE);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = (void *) VM_PAGE_TO_PHYS(m);

	if (!hw_direct_map)
		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);

	/* vm_phys_alloc_contig does not track wiring */
	atomic_add_int(&cnt.v_wire_count, 1);
	m->wire_count = 1;

	return (va);
}

static void
slb_zone_init(void *dummy)
{

	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache",
	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);

	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
	}
}
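
/*
 * Design note (editorial, not from the original source): the SLB fault
 * path runs with address translation disabled, so when physical memory
 * extends beyond the real-mode accessible region these zones are given
 * the custom allocator above, which keeps their backing pages below
 * platform_real_maxaddr().
 */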

struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}