2 * Copyright (c) 2006 Kip Macy
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/queue.h>
38 #include <sys/mutex.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
44 #include <vm/vm_extern.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_pageout.h>
48 #include <machine/cpufunc.h>
49 #include <machine/hypervisorvar.h>
50 #include <machine/smp.h>
51 #include <machine/mmu.h>
52 #include <machine/tte.h>
53 #include <machine/tte_hash.h>
54 #include <machine/tsb.h>
55 #include <machine/vmparam.h>
56 #include <machine/tlb.h>
/* The TSB stores raw 64-bit TTE data words; fail the build if tte_t differs. */
CTASSERT(sizeof(tte_t) == sizeof(uint64_t));
/*
 * Index mask for a TSB: the entry count is a power of two, so
 * (ntte - 1) wraps an index into range.  The macro argument is
 * parenthesized so expansions like TSB_MASK(a + b) stay well-formed
 * (the original expanded the bare argument, which mis-binds `->`
 * for any non-trivial argument expression).
 */
#define TSB_MASK(tsb) (((tsb)->hti_ntte) - 1)
/* make TSB start off at the same size as the hash */

#define DPRINTF printf
/* Forward declaration; the definition is elsewhere in this file. */
void tsb_sysinit(void);
/*
 * Initialize a hypervisor TSB descriptor.
 *
 * Allocates 2^page_shift zeroed, physically contiguous pages for the
 * TSB, fills in the geometry fields of *hvtsb, and writes a scratch
 * value — the TSB's kernel address with page_shift encoded in the low
 * bits — through *scratchval for the caller.
 */
tsb_init(hv_tsb_info_t *hvtsb, uint64_t *scratchval, uint64_t page_shift)
	int npages = (1 << page_shift);
	ptr = pmap_alloc_zeroed_contig_pages(npages, npages*PAGE_SIZE);
	/* The TSB base must be naturally aligned to its own total size. */
	if ((((uint64_t)ptr) & (npages*PAGE_SIZE - 1)) != 0)
		panic("vm_page_alloc_contig allocated unaligned pages: %p",
	/* Index the TSB with 8K-page granularity. */
	hvtsb->hti_idxpgsz = TTE8K;
	/* One entry per 2^TTE_SHIFT bytes of TSB memory. */
	hvtsb->hti_ntte = (npages*PAGE_SIZE >> TTE_SHIFT);
	hvtsb->hti_ctx_index = -1; /* TSBs aren't shared so if we don't
				    * set the context in the TTEs we can
				    * simplify miss handling slightly
	hvtsb->hti_pgszs = TSB8K;
	/* The hypervisor addresses the TSB by its real (physical) address. */
	hvtsb->hti_ra = TLB_DIRECT_TO_PHYS((vm_offset_t)ptr);
	/*
	 * Pack the size into the pointer's low bits; alignment guarantees
	 * they are zero.  tsb_page_shift() recovers page_shift from this.
	 */
	*scratchval = ((uint64_t) ptr) | page_shift;
/*
 * Tear down a TSB: walk the vm_pages backing it and release their
 * wiring, dropping the global wired-page count for each.
 */
tsb_deinit(hv_tsb_info_t *hvtsb)
	/* Recover the first backing vm_page from the TSB's real address. */
	m = PHYS_TO_VM_PAGE((vm_paddr_t)hvtsb->hti_ra);
	/*
	 * NOTE(review): the loop bound is the fixed TSB_SIZE rather than a
	 * value derived from hvtsb->hti_ntte — confirm every TSB passed
	 * here was created at exactly that size.
	 */
	for (i = 0, tm = m; i < TSB_SIZE; i++, m++) {
		atomic_subtract_int(&cnt.v_wire_count, 1);
/*
 * Debug helper: panic unless the TSB slot that va hashes to is empty,
 * i.e. both the tag and the data word of the entry read back as zero.
 */
tsb_assert_invalid(hv_tsb_info_t *tsb, vm_offset_t va)
	vm_paddr_t tsb_load_pa;
	uint64_t tsb_index, tsb_shift, tte_tag, tte_data;
	/* Hash va into the TSB: shift by the index page size, mask to range. */
	tsb_shift = TTE_PAGE_SHIFT(tsb->hti_idxpgsz);
	tsb_index = (va >> tsb_shift) & TSB_MASK(tsb);
	/* Each entry is a (tag, data) pair of 64-bit words. */
	tsb_load_pa = tsb->hti_ra + 2*tsb_index*sizeof(uint64_t);
	/* Fetch both words of the entry through its real address. */
	load_real_dw(tsb_load_pa, &tte_tag, &tte_data);
	if (tte_tag == 0 && tte_data == 0)
	/* Entry was live: dump the lookup state before panicking. */
	printf("tsb_shift=0x%lx tsb_index=0x%lx\n", tsb_shift, tsb_index);
	printf("tte_tag=0x%lx tte_data=0x%lx TSB_MASK=%lx\n", tte_tag, tte_data, (uint64_t)TSB_MASK(tsb));
	panic("non-zero entry found where not expected");
/*
 * Install a TTE into a TSB through its real (physical) address.
 *
 * index_va selects the TSB slot; tag_va (combined with ctx) forms the
 * tag that lookups compare against.  The two VAs are passed separately
 * and may differ.
 */
tsb_set_tte_real(hv_tsb_info_t *tsb, vm_offset_t index_va, vm_offset_t tag_va,
    uint64_t tte_data, uint64_t ctx)
	vm_paddr_t tsb_store_pa;
	uint64_t tsb_index, tsb_shift, tte_tag;
	DPRINTF("tsb_set_tte index_va: 0x%lx tag_va: 0x%lx idxpgsz: %x ",
	    index_va, tag_va, tsb->hti_idxpgsz);
	/* Hash index_va into the TSB. */
	tsb_shift = TTE_PAGE_SHIFT(tsb->hti_idxpgsz);
	tsb_index = (index_va >> tsb_shift) & TSB_MASK(tsb);
	DPRINTF("tsb_index_absolute: 0x%lx tsb_index: 0x%lx\n", (index_va >> tsb_shift), tsb_index);
	/* Each entry is a (tag, data) pair of 64-bit words. */
	tsb_store_pa = tsb->hti_ra + 2*tsb_index*sizeof(uint64_t);
	/* store new value with valid bit cleared
	 * to avoid invalid intermediate value;
	 */
	/* Data word first, tag second — a lookup never sees a stale pair. */
	store_real(tsb_store_pa + sizeof(uint64_t), tte_data);
	tte_tag = (ctx << TTARGET_CTX_SHIFT) | (tag_va >> TTARGET_VA_SHIFT);
	store_real(tsb_store_pa, tte_tag);
/*
 * Install a TTE into a TSB through the kernel's direct (1:1) mapping
 * rather than real-address stores; indexing and tag formation match
 * tsb_set_tte_real(), with va used for both the slot and the tag.
 */
tsb_set_tte(hv_tsb_info_t *tsb, vm_offset_t va, uint64_t tte_data, uint64_t ctx)
	uint64_t tsb_index, tsb_shift, tte_tag;
	/* Hash va into the TSB. */
	tsb_shift = TTE_PAGE_SHIFT(tsb->hti_idxpgsz);
	tsb_index = (va >> tsb_shift) & TSB_MASK(tsb);
	/* Direct-map pointer to the (tag, data) pair for this slot. */
	entry = (tte_t *)TLB_PHYS_TO_DIRECT(tsb->hti_ra + 2*tsb_index*sizeof(uint64_t));
	tte_tag = (ctx << TTARGET_CTX_SHIFT) | (va >> TTARGET_VA_SHIFT);
	/* store new value with valid bit cleared
	 * to avoid invalid intermediate value;
	 */
	/* entry[1] is the data word; entry[0] (the tag) is written after. */
	*(entry + 1) = tte_data;
/*
 * Zero the entire TSB via its direct mapping: ntte entries at
 * 2^TTE_SHIFT bytes each.
 */
tsb_clear(hv_tsb_info_t *tsb)
	hwblkclr((void *)TLB_PHYS_TO_DIRECT(tsb->hti_ra), tsb->hti_ntte << TTE_SHIFT);
/*
 * Invalidate the single TSB slot that maps va.  Locates the entry
 * through the direct mapping using the same hash as the setters.
 */
tsb_clear_tte(hv_tsb_info_t *tsb, vm_offset_t va)
	uint64_t tsb_index, tsb_shift;
	/* Hash va into the TSB. */
	tsb_shift = TTE_PAGE_SHIFT(tsb->hti_idxpgsz);
	tsb_index = (va >> tsb_shift) & TSB_MASK(tsb);
	/* Direct-map pointer to the (tag, data) pair for this slot. */
	entry = (tte_t *)TLB_PHYS_TO_DIRECT(tsb->hti_ra + 2*tsb_index*sizeof(uint64_t));
/*
 * Invalidate every TSB slot that could map a page in [sva, eva).
 * Walks the range one PAGE_SIZE step at a time, hashing each page
 * address to its slot; the mask and shift are hoisted out of the loop.
 */
tsb_clear_range(hv_tsb_info_t *tsb, vm_offset_t sva, vm_offset_t eva)
	uint64_t tsb_index, tsb_shift, tsb_mask;
	tsb_mask = TSB_MASK(tsb);
	tsb_shift = TTE_PAGE_SHIFT(tsb->hti_idxpgsz);
	for (tva = sva; tva < eva; tva += PAGE_SIZE) {
		tsb_index = (tva >> tsb_shift) & tsb_mask;
		/* Direct-map pointer to this page's (tag, data) pair. */
		entry = (tte_t *)TLB_PHYS_TO_DIRECT(tsb->hti_ra + 2*tsb_index*sizeof(uint64_t));
/*
 * Look up va in the TSB.  Reads the slot's (tag, data) pair through
 * the direct mapping and checks whether the stored tag corresponds
 * to va's 4M-aligned address.
 */
tsb_get_tte(hv_tsb_info_t *tsb, vm_offset_t va)
	uint64_t tsb_index, tsb_shift, tte_tag, tte_data;
	/* Hash va into the TSB. */
	tsb_shift = TTE_PAGE_SHIFT(tsb->hti_idxpgsz);
	tsb_index = (va >> tsb_shift) & TSB_MASK(tsb);
	entry = (tte_t *)TLB_PHYS_TO_DIRECT(tsb->hti_ra + 2*tsb_index*sizeof(uint64_t));
	/* entry[1] is the data word, read alongside the tag. */
	tte_data = *(entry + 1);
	/*
	 * NOTE(review): shifting the tag left by TTARGET_VA_SHIFT assumes
	 * any context bits stored above the VA field fall off the top of
	 * the 64-bit word — confirm against the tag layout.
	 */
	if ((tte_tag << TTARGET_VA_SHIFT) == (va & ~PAGE_MASK_4M))
/*
 * Resolve va to a TTE by consulting the kernel TSBs; the 4M TSB is
 * checked first.  (The ctx parameter is unused in the visible portion
 * of this function.)
 */
tsb_lookup_tte(vm_offset_t va, uint64_t ctx)
	if ((tte_data = tsb_get_tte(&kernel_td[TSB4M_INDEX], va)) != 0)
/*
 * Publish the kernel TSB in its scratchpad register: the value is the
 * TSB's direct-map address with log2(size in pages) packed into the
 * low bits.  ntte >> (PAGE_SHIFT - TTE_SHIFT) is the page count, a
 * power of two, so ffs() - 1 yields its log2.
 */
tsb_set_scratchpad_kernel(hv_tsb_info_t *tsb)
	uint64_t tsb_shift, tsb_scratch;
	tsb_shift = ffs(tsb->hti_ntte >> (PAGE_SHIFT - TTE_SHIFT)) - 1;
	tsb_scratch = TLB_PHYS_TO_DIRECT(tsb->hti_ra) | tsb_shift;
	set_tsb_kernel_scratchpad(tsb_scratch);
/*
 * User-TSB counterpart of tsb_set_scratchpad_kernel(): same packed
 * address|log2-pages encoding, written to the user scratchpad register.
 */
tsb_set_scratchpad_user(hv_tsb_info_t *tsb)
	uint64_t tsb_shift, tsb_scratch;
	tsb_shift = ffs(tsb->hti_ntte >> (PAGE_SHIFT - TTE_SHIFT)) - 1;
	tsb_scratch = TLB_PHYS_TO_DIRECT(tsb->hti_ra) | tsb_shift;
	set_tsb_user_scratchpad(tsb_scratch);
/*
 * Return the TSB's size in pages: ntte entries of 2^TTE_SHIFT bytes
 * each, divided by the page size.
 */
tsb_size(hv_tsb_info_t *hvtsb)
	return (hvtsb->hti_ntte >> (PAGE_SHIFT - TTE_SHIFT));
/*
 * Recover the page_shift that tsb_init() packed into the low bits of
 * the pmap's TSB scratch value.
 */
tsb_page_shift(pmap_t pmap)
	return (pmap->pm_tsbscratch & PAGE_MASK);