/*	$NetBSD: cache_mipsNN.c,v 1.10 2005/12/24 20:07:19 perry Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cputype.h"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>

#include <machine/cache.h>
#include <machine/cache_r4k.h>
#include <machine/cpuinfo.h>

#define	round_line16(x)	(((x) + 15) & ~15)
#define	trunc_line16(x)	((x) & ~15)

#define	round_line32(x)	(((x) + 31) & ~31)
#define	trunc_line32(x)	((x) & ~31)
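
/*
 * For example, round_line16(0x19) == 0x20 and trunc_line16(0x19) == 0x10,
 * so the span [trunc_line16(va), round_line16(va + size)) covers every
 * 16-byte line the original range touches.
 */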

#if defined(SB1250_PASS1)
#define	SYNC	__asm volatile("sync; sync")
#elif defined(CPU_NLM)
#define	SYNC	xlp_sync()
#else
#define	SYNC	__asm volatile("sync")
#endif

#if defined(CPU_CNMIPS)
#define	SYNCI	mips_sync_icache();
#elif defined(CPU_NLM)
#define	SYNCI	xlp_sync()
#else
#define	SYNCI
#endif

/*
 * Exported variables for consumers like bus_dma code
 */
int mips_picache_linesize;
int mips_pdcache_linesize;
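
/*
 * A consumer sketch (hypothetical, not from this file): bus_dma-style code
 * typically uses the exported line size to round a buffer out to whole
 * cache lines before operating on it, e.g.
 *
 *	vm_offset_t start = addr & ~(mips_pdcache_linesize - 1);
 *	vm_offset_t end = roundup2(addr + len, mips_pdcache_linesize);
 *	mips_dcache_wbinv_range(start, end - start);
 */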

static int picache_size;
static int picache_stride;
static int picache_loopcount;
static int picache_way_mask;
static int pdcache_size;
static int pdcache_stride;
static int pdcache_loopcount;
static int pdcache_way_mask;

void
mipsNN_cache_init(struct mips_cpuinfo *cpuinfo)
{
	int flush_multiple_lines_per_way;

	/* True when one way of the I-cache spans more than one page. */
	flush_multiple_lines_per_way =
	    cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize > PAGE_SIZE;
	if (cpuinfo->icache_virtual) {
		/*
		 * With a virtual Icache we don't need to flush
		 * multiples of the page size with index ops; we just
		 * need to flush one page's worth.
		 */
		flush_multiple_lines_per_way = 0;
	}

	if (flush_multiple_lines_per_way) {
		picache_stride = PAGE_SIZE;
		picache_loopcount = (cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize / PAGE_SIZE) *
		    cpuinfo->l1.ic_nways;
	} else {
		picache_stride = cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize;
		picache_loopcount = cpuinfo->l1.ic_nways;
	}

	if (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize < PAGE_SIZE) {
		pdcache_stride = cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize;
		pdcache_loopcount = cpuinfo->l1.dc_nways;
	} else {
		pdcache_stride = PAGE_SIZE;
		pdcache_loopcount = (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize / PAGE_SIZE) *
		    cpuinfo->l1.dc_nways;
	}

	mips_picache_linesize = cpuinfo->l1.ic_linesize;
	mips_pdcache_linesize = cpuinfo->l1.dc_linesize;

	picache_size = cpuinfo->l1.ic_size;
	/* Mask of the address bits that select a line within one way. */
	picache_way_mask = cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize - 1;
	pdcache_size = cpuinfo->l1.dc_size;
	pdcache_way_mask = cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize - 1;
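
	/*
	 * Worked example (hypothetical geometry): a 32 KB, 4-way D-cache
	 * with 32-byte lines has an 8 KB way.  With 4 KB pages the way is
	 * larger than a page, so pdcache_stride = 4096 and
	 * pdcache_loopcount = (8192 / 4096) * 4 = 8: the index loops below
	 * step one page at a time and take eight passes to touch every
	 * location a page can alias to.  pdcache_way_mask would be
	 * 8192 - 1 = 0x1fff.
	 */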

#define	CACHE_DEBUG
#ifdef CACHE_DEBUG
	printf("Cache info:\n");
	if (cpuinfo->icache_virtual)
		printf("  icache is virtual\n");
	printf("  picache_stride    = %d\n", picache_stride);
	printf("  picache_loopcount = %d\n", picache_loopcount);
	printf("  pdcache_stride    = %d\n", pdcache_stride);
	printf("  pdcache_loopcount = %d\n", pdcache_loopcount);
#endif
}

void
mipsNN_icache_sync_all_16(void)
{
	vm_offset_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + picache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */

	mips_intern_dcache_wbinv_all();

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	SYNC;
}

void
mipsNN_icache_sync_all_32(void)
{
	vm_offset_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + picache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */

	mips_intern_dcache_wbinv_all();

	while (va < eva) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}

	SYNC;
}

void
mipsNN_icache_sync_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	mips_intern_dcache_wb_range(va, (eva - va));

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}

	SYNC;
}

void
mipsNN_icache_sync_range_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	mips_intern_dcache_wb_range(va, (eva - va));

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 32;
	}

	SYNC;
}

void
mipsNN_icache_sync_range_index_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);
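
	/*
	 * Illustration (hypothetical geometry): with a 16 KB, 4-way
	 * I-cache the way size is 4 KB and picache_way_mask is 0xfff,
	 * so a physical address of 0x12345678 masks down to 0x678 and
	 * becomes the KSEG0 address 0x80000678 -- an address we can
	 * legally dereference that has the same index bits as the
	 * original.
	 */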

	eva = round_line16(va + size);
	va = trunc_line16(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = picache_stride;
	loopcount = picache_loopcount;

	mips_intern_dcache_wbinv_range_index(va, (eva - va));

	while ((eva - va) >= (8 * 16)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_16(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 8 * 16;
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}

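/*
 * Worked example of the way walk above (hypothetical geometry): with a
 * 4-way I-cache whose 4 KB way fits in one page, picache_stride is 4096
 * and picache_loopcount is 4, so each outer step issues the index op at
 * tmpva = va, va + 0x1000, va + 0x2000 and va + 0x3000 -- the same set
 * in each of the four ways.
 */
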
void
mipsNN_icache_sync_range_index_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);

	eva = round_line32(va + size);
	va = trunc_line32(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = picache_stride;
	loopcount = picache_loopcount;

	mips_intern_dcache_wbinv_range_index(va, (eva - va));

	while ((eva - va) >= (8 * 32)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_32(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 8 * 32;
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 32;
	}
}

void
mipsNN_pdcache_wbinv_all_16(void)
{
	vm_offset_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + pdcache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	SYNC;
}

void
mipsNN_pdcache_wbinv_all_32(void)
{
	vm_offset_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + pdcache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	SYNC;
}

void
mipsNN_pdcache_wbinv_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}

	SYNC;
}

void
mipsNN_pdcache_wbinv_range_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}

	SYNC;
}

void
mipsNN_pdcache_wbinv_range_index_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);

	eva = round_line16(va + size);
	va = trunc_line16(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = pdcache_stride;
	loopcount = pdcache_loopcount;

	while ((eva - va) >= (8 * 16)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_16(tmpva,
			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 8 * 16;
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

void
mipsNN_pdcache_wbinv_range_index_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);

	eva = round_line32(va + size);
	va = trunc_line32(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = pdcache_stride;
	loopcount = pdcache_loopcount;

	while ((eva - va) >= (8 * 32)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_32(tmpva,
			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 8 * 32;
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
mipsNN_pdcache_inv_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}

	SYNC;
}

void
mipsNN_pdcache_inv_range_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}

	SYNC;
}

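/*
 * The two inv_range routines above use CACHEOP_R4K_HIT_INV, which discards
 * matching lines without writing dirty data back to memory.  They are only
 * safe when the cached contents are disposable, e.g. (hypothetical driver
 * sketch) before reading a buffer a device has filled by DMA:
 *
 *	mipsNN_pdcache_inv_range_32((vm_offset_t)buf, len);
 *	... CPU reads of buf now fetch the DMA'd data from memory ...
 */
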
void
mipsNN_pdcache_wb_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}

	SYNC;
}

void
mipsNN_pdcache_wb_range_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 32;
	}

	SYNC;
}

#ifdef CPU_CNMIPS

/*
 * On Octeon (CNMIPS) the L1 D-cache is write-through and the hardware
 * keeps the caches coherent, so the 128-byte-line variants reduce to at
 * most a SYNC/SYNCI barrier; the bodies below are assumed on that basis.
 */

void
mipsNN_icache_sync_all_128(void)
{
	SYNCI
}

void
mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size)
{
	SYNC;
}

void
mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size)
{
	SYNCI
}

void
mipsNN_pdcache_wbinv_all_128(void)
{
	SYNC;
}

void
mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size)
{
	SYNC;
}

void
mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size)
{
}

void
mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size)
{
}

void
mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
{
	SYNC;
}

#endif	/* CPU_CNMIPS */