2 * Copyright (c) 2004-2010 Juli Mallett <jmallett@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/systm.h>
40 #include <machine/pte.h>
41 #include <machine/tlb.h>
/*
 * Compile-time upper bound on the number of TLB entries we snapshot per
 * CPU into tlb_state[] below, sized by CPU family.
 * NOTE(review): the "#else" (and closing "#endif") between the CPU_NLM
 * case and the 64-entry default are not visible in this excerpt --
 * confirm against the full file.
 */
43 #if defined(CPU_CNMIPS)
44 #define MIPS_MAX_TLB_ENTRIES 128
45 #elif defined(CPU_NLM)
46 #define MIPS_MAX_TLB_ENTRIES (2048 + 128)
48 #define MIPS_MAX_TLB_ENTRIES 64
58 } entry[MIPS_MAX_TLB_ENTRIES];	/* saved EntryHi/Lo0/Lo1/PageMask per slot */

/*
 * Per-CPU snapshot of TLB contents.  Written by the save routine below
 * and read by the "show tlb" DDB command at the bottom of this file.
 */
61 static struct tlb_state tlb_state[MAXCPU];
65 * PageMask must increment in steps of 2 bits.
/* Reject a TLBMASK_MASK with an odd popcount at build time. */
67 COMPILE_TIME_ASSERT(POPCNT(TLBMASK_MASK) % 2 == 0);
/*
 * Thin wrappers around the CP0 TLB maintenance instructions.  The
 * "memory" clobber prevents the compiler from moving memory accesses
 * across the instruction.  (Per the MIPS PRA: tlbp probes for the entry
 * matching EntryHi and sets Index; tlbr reads the indexed entry into
 * EntryHi/EntryLo0/EntryLo1/PageMask; tlbwi writes the indexed entry;
 * tlbwr writes a random entry.)
 */
73 __asm __volatile ("tlbp" : : : "memory");
80 __asm __volatile ("tlbr" : : : "memory");
85 tlb_write_indexed(void)
87 __asm __volatile ("tlbwi" : : : "memory");
92 tlb_write_random(void)
94 __asm __volatile ("tlbwr" : : : "memory");
/* Forward declaration; defined near the bottom of the file. */
98 static void tlb_invalidate_one(unsigned);
/*
 * Install a wired translation at TLB slot "i" for the even/odd page pair
 * at "va", with EntryLo values pte0/pte1.  EntryHi is built with ASID 0
 * (a kernel/global mapping), and the caller's live ASID is saved and
 * restored around the CP0 writes.
 * NOTE(review): the Index register write and the tlbwi issue are elided
 * from this excerpt -- confirm against the full file.
 */
101 tlb_insert_wired(unsigned i, vm_offset_t va, pt_entry_t pte0, pt_entry_t pte1)
/* Remember the current ASID so it can be restored on exit. */
109 asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
113 mips_wr_entryhi(TLBHI_ENTRY(va, 0));
114 mips_wr_entrylo0(pte0);
115 mips_wr_entrylo1(pte1);
/* Restore the previously-live ASID. */
118 mips_wr_entryhi(asid);
/*
 * Invalidate any TLB entry mapping "va" under "pmap"'s ASID: load EntryHi
 * with (va, ASID), probe, and if a matching slot was found invalidate it.
 * The caller's live ASID is saved and restored.
 * NOTE(review): the tlbp issue and the Index-register check that yields
 * "i" are elided from this excerpt.
 */
123 tlb_invalidate_address(struct pmap *pmap, vm_offset_t va)
132 asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
/* Probe key: the target VA tagged with this pmap's ASID. */
135 mips_wr_entryhi(TLBHI_ENTRY(va, pmap_asid(pmap)));
139 tlb_invalidate_one(i);
141 mips_wr_entryhi(asid);
/*
 * Invalidate every non-wired TLB entry on this CPU.  Wired slots
 * (indices below CP0 Wired) are left intact.  The live ASID is saved
 * and restored around the loop.
 */
146 tlb_invalidate_all(void)
153 asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
155 for (i = mips_rd_wired(); i < num_tlbentries; i++)
156 tlb_invalidate_one(i);
158 mips_wr_entryhi(asid);
/*
 * Invalidate user TLB entries: walk every non-wired slot, read its
 * EntryHi, and invalidate it based on the entry's ASID ("uasid").
 * NOTE(review): the tlbr issue, the kernel-entry test, and the NULL-pmap
 * branch ("invalidate all non-kernel entries" vs. "only this pmap's
 * entries") are partly elided from this excerpt.
 */
163 tlb_invalidate_all_user(struct pmap *pmap)
170 asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
172 for (i = mips_rd_wired(); i < num_tlbentries; i++) {
/* ASID stamped on the entry currently in slot "i". */
178 uasid = mips_rd_entryhi() & TLBHI_ASID_MASK;
181 * Invalidate all non-kernel entries.
187 * Invalidate this pmap's entries.
/* Skip entries belonging to other address spaces. */
189 if (uasid != pmap_asid(pmap))
192 tlb_invalidate_one(i);
195 mips_wr_entryhi(asid);
200 * Invalidates any TLB entries that map a virtual page from the specified
201 * address range. If "end" is zero, then every virtual page is considered to
202 * be within the address range's upper bound.
/*
 * Invalidate all TLB entries for "pmap" covering [start, end); end == 0
 * means "to the top of the address space".  Two strategies, chosen by
 * which requires fewer TLB operations: probe per page pair, or scan
 * every non-wired slot.
 * NOTE(review): the tlbp/tlbr issues, Index reads, interrupt
 * disable/restore ("s"), and several closing braces are elided from
 * this excerpt.
 */
205 tlb_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
207 register_t asid, end_hi, hi, hi_pagemask, s, save_asid, start_hi;
/* An empty or inverted range is a caller bug. */
210 KASSERT(start < end || (end == 0 && start > 0),
211 ("tlb_invalidate_range: invalid range"));
214 * Truncate the virtual address "start" to an even page frame number,
215 * and round the virtual address "end" to an even page frame number.
217 start &= ~((1 << TLBMASK_SHIFT) - 1);
218 end = (end + (1 << TLBMASK_SHIFT) - 1) & ~((1 << TLBMASK_SHIFT) - 1);
/* Remember the live ASID; restored at the bottom. */
221 save_asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
223 asid = pmap_asid(pmap);
/* Probe keys for the first and one-past-last even page frames. */
224 start_hi = TLBHI_ENTRY(start, asid);
225 end_hi = TLBHI_ENTRY(end, asid);
228 * Select the fastest method for invalidating the TLB entries.
230 if (end - start < num_tlbentries << TLBMASK_SHIFT || (end == 0 &&
231 start >= -(num_tlbentries << TLBMASK_SHIFT))) {
233 * The virtual address range is small compared to the size of
234 * the TLB. Probe the TLB for each even numbered page frame
235 * within the virtual address range.
237 for (hi = start_hi; hi != end_hi; hi += 1 << TLBMASK_SHIFT) {
243 tlb_invalidate_one(i);
247 * The virtual address range is large compared to the size of
248 * the TLB. Test every non-wired TLB entry.
250 for (i = mips_rd_wired(); i < num_tlbentries; i++) {
251 hi = mips_rd_entryhi();
/* Same ASID and VA below "end_hi" (or end wraps to 0)? */
254 if ((hi & TLBHI_ASID_MASK) == asid && (hi < end_hi ||
257 * If "hi" is a large page that spans
258 * "start_hi", then it must be invalidated.
260 hi_pagemask = mips_rd_pagemask();
261 if (hi >= (start_hi & ~(hi_pagemask <<
263 tlb_invalidate_one(i);
268 mips_wr_entryhi(save_asid);
/*
 * Snapshot this CPU's TLB (up to MIPS_MAX_TLB_ENTRIES slots) into
 * tlb_state[] for later inspection by the "show tlb" DDB command.
 * NOTE(review): the function header line and the per-iteration
 * Index-write/tlbr sequence are elided from this excerpt -- in FreeBSD
 * this routine is tlb_save(void); confirm against the full file.
 */
272 /* XXX Only if DDB? */
276 unsigned ntlb, i, cpu;
278 cpu = PCPU_GET(cpuid);
/* Clamp to the size of the save area. */
279 if (num_tlbentries > MIPS_MAX_TLB_ENTRIES)
280 ntlb = MIPS_MAX_TLB_ENTRIES;
282 ntlb = num_tlbentries;
283 tlb_state[cpu].wired = mips_rd_wired();
284 for (i = 0; i < ntlb; i++) {
/* Copy the CP0 view of slot "i" into the snapshot. */
288 tlb_state[cpu].entry[i].entryhi = mips_rd_entryhi();
289 tlb_state[cpu].entry[i].pagemask = mips_rd_pagemask();
290 tlb_state[cpu].entry[i].entrylo0 = mips_rd_entrylo0();
291 tlb_state[cpu].entry[i].entrylo1 = mips_rd_entrylo1();
/*
 * Update the TLB entry (if any) mapping "va" in "pmap" with the new PTE.
 * Software-only PTE bits are stripped first; "va"'s page parity selects
 * which half (EntryLo0 for the even page, EntryLo1 for the odd page) of
 * the matched pair gets the new value.
 * NOTE(review): the tlbp/tlbr, Index check, and tlbwi issue are elided
 * from this excerpt.
 */
296 tlb_update(struct pmap *pmap, vm_offset_t va, pt_entry_t pte)
/* Hardware must not see the software-defined PTE bits. */
303 pte &= ~TLBLO_SWBITS_MASK;
306 asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
309 mips_wr_entryhi(TLBHI_ENTRY(va, pmap_asid(pmap)));
/* Even page -> Lo0; odd page -> Lo1. */
315 if ((va & PAGE_SIZE) == 0) {
316 mips_wr_entrylo0(pte);
318 mips_wr_entrylo1(pte);
/* Restore the previously-live ASID. */
323 mips_wr_entryhi(asid);
/*
 * Overwrite TLB slot "i" with a harmless unique mapping: an EntryHi in
 * KSEG0 (unmapped space), distinct per slot so no two entries conflict.
 * NOTE(review): the EntryLo/PageMask clears and the tlbwi issue are
 * elided from this excerpt.
 */
328 tlb_invalidate_one(unsigned i)
330 /* XXX an invalid ASID? */
331 mips_wr_entryhi(TLBHI_ENTRY(MIPS_KSEG0_START + (2 * i * PAGE_SIZE), 0));
/*
 * DDB "show tlb [cpu]" command: dump the snapshot saved in tlb_state[]
 * for the given CPU (default: the current CPU).  Entries whose Lo0 and
 * Lo1 are both zero are skipped as empty.
 * NOTE(review): several else/brace lines and the "have_addr" test that
 * selects between the two "cpu =" assignments are elided from this
 * excerpt.
 */
342 DB_SHOW_COMMAND(tlb, ddb_dump_tlb)
344 register_t ehi, elo0, elo1, epagemask;
345 unsigned i, cpu, ntlb;
349 * The worst conversion from hex to decimal ever.
/* Treat a hex-typed argument (e.g. 0x12) as decimal 12. */
352 cpu = ((addr >> 4) % 16) * 10 + (addr % 16);
354 cpu = PCPU_GET(cpuid);
356 if (cpu < 0 || cpu >= mp_ncpus) {
357 db_printf("Invalid CPU %u\n", cpu);
/* Only the first MIPS_MAX_TLB_ENTRIES slots were snapshotted. */
360 if (num_tlbentries > MIPS_MAX_TLB_ENTRIES) {
361 ntlb = MIPS_MAX_TLB_ENTRIES;
362 db_printf("Warning: Only %d of %d TLB entries saved!\n",
363 ntlb, num_tlbentries);
365 ntlb = num_tlbentries;
367 if (cpu == PCPU_GET(cpuid))
370 db_printf("Beginning TLB dump for CPU %u...\n", cpu);
371 for (i = 0; i < ntlb; i++) {
/* Print a marker at the wired/non-wired boundary. */
372 if (i == tlb_state[cpu].wired) {
374 db_printf("^^^ WIRED ENTRIES ^^^\n");
376 db_printf("(No wired entries.)\n");
380 ehi = tlb_state[cpu].entry[i].entryhi;
381 elo0 = tlb_state[cpu].entry[i].entrylo0;
382 elo1 = tlb_state[cpu].entry[i].entrylo1;
383 epagemask = tlb_state[cpu].entry[i].pagemask;
/* Skip slots with no valid translation in either half. */
385 if (elo0 == 0 && elo1 == 0)
388 db_printf("#%u\t=> %jx (pagemask %jx)\n", i, (intmax_t)ehi, (intmax_t) epagemask);
389 db_printf(" Lo0\t%jx\t(%#jx)\n", (intmax_t)elo0, (intmax_t)TLBLO_PTE_TO_PA(elo0));
390 db_printf(" Lo1\t%jx\t(%#jx)\n", (intmax_t)elo1, (intmax_t)TLBLO_PTE_TO_PA(elo1));
392 db_printf("Finished.\n");