/*	$NetBSD: cache_mipsNN.c,v 1.10 2005/12/24 20:07:19 perry Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>

#include <machine/cache.h>
#include <machine/cache_r4k.h>
#include <machine/cpuinfo.h>
#define	round_line16(x)		(((x) + 15) & ~15)
#define	trunc_line16(x)		((x) & ~15)

#define	round_line32(x)		(((x) + 31) & ~31)
#define	trunc_line32(x)		((x) & ~31)
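
/*
 * For example (illustrative values): with 16-byte lines,
 * trunc_line16(0x2a) == 0x20 and round_line16(0x2a) == 0x30, so the
 * half-open range [trunc_line16(va), round_line16(va + size)) covers
 * every cache line that overlaps the caller's range [va, va + size).
 */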

#ifdef SB1250_PASS1
#define	SYNC	__asm volatile("sync; sync")
#else
#define	SYNC	__asm volatile("sync")
#endif

#ifdef CPU_CNMIPS
#define	SYNCI	mips_sync_icache();
#else
#define	SYNCI
#endif
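
/*
 * SYNC orders the CACHE ops issued by the loops below ahead of any
 * subsequent memory accesses; pass-1 BCM1250 parts need it issued
 * twice.  SYNCI expands to a whole-icache flush on CNMIPS cores and
 * to nothing elsewhere.
 */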

static int	picache_size;
static int	picache_stride;
static int	picache_loopcount;
static int	picache_way_mask;
static int	pdcache_size;
static int	pdcache_stride;
static int	pdcache_loopcount;
static int	pdcache_way_mask;

void
mipsNN_cache_init(struct mips_cpuinfo * cpuinfo)
{
	int flush_multiple_lines_per_way;

	/* True if one way's worth of lines spans more than a page. */
	flush_multiple_lines_per_way =
	    cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize > PAGE_SIZE;
	if (cpuinfo->icache_virtual) {
		/*
		 * With a virtual Icache we don't need to flush
		 * multiples of the page size with index ops; we just
		 * need to flush one page's worth.
		 */
		flush_multiple_lines_per_way = 0;
	}

	if (flush_multiple_lines_per_way) {
		picache_stride = PAGE_SIZE;
		picache_loopcount = (cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize / PAGE_SIZE) *
		    cpuinfo->l1.ic_nways;
	} else {
		picache_stride = cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize;
		picache_loopcount = cpuinfo->l1.ic_nways;
	}

	if (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize < PAGE_SIZE) {
		pdcache_stride = cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize;
		pdcache_loopcount = cpuinfo->l1.dc_nways;
	} else {
		pdcache_stride = PAGE_SIZE;
		pdcache_loopcount = (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize / PAGE_SIZE) *
		    cpuinfo->l1.dc_nways;
	}

	picache_size = cpuinfo->l1.ic_size;
	/* Mask of the address bits that select a line within one way. */
	picache_way_mask = cpuinfo->l1.ic_size / cpuinfo->l1.ic_nways - 1;
	pdcache_size = cpuinfo->l1.dc_size;
	pdcache_way_mask = cpuinfo->l1.dc_size / cpuinfo->l1.dc_nways - 1;

#ifdef CACHE_DEBUG
	if (cpuinfo->icache_virtual)
		printf(" icache is virtual\n");
	printf(" picache_stride = %d\n", picache_stride);
	printf(" picache_loopcount = %d\n", picache_loopcount);
	printf(" pdcache_stride = %d\n", pdcache_stride);
	printf(" pdcache_loopcount = %d\n", pdcache_loopcount);
#endif
}
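
/*
 * Worked example with illustrative (not vendor-specific) numbers: a
 * 32 KB, 4-way set-associative Icache with 16-byte lines has 512 sets,
 * so one way spans 512 * 16 = 8 KB.  With 4 KB pages that is two pages
 * per way, so picache_stride = PAGE_SIZE and picache_loopcount =
 * (8 KB / 4 KB) * 4 = 8; picache_way_mask = 8 KB - 1 = 0x1fff, the
 * address bits that select a set.
 */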

void
mipsNN_icache_sync_all_16(void)
{
	vm_offset_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + picache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */

	mips_intern_dcache_wbinv_all();

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	SYNC;
}

void
mipsNN_icache_sync_all_32(void)
{
	vm_offset_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + picache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */

	mips_intern_dcache_wbinv_all();

	while (va < eva) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}

	SYNC;
}

void
mipsNN_icache_sync_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	mips_intern_dcache_wb_range(va, (eva - va));

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}

	SYNC;
}

void
mipsNN_icache_sync_range_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	mips_intern_dcache_wb_range(va, (eva - va));

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 32;
	}

	SYNC;
}
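
/*
 * The range ops above and below share one pattern: a fast loop that
 * issues an unrolled batch of 32 CACHE ops per iteration while at
 * least 32 lines remain, then a tail loop that finishes the residue
 * one line at a time.
 */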

void
mipsNN_icache_sync_range_index_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);

	eva = round_line16(va + size);
	va = trunc_line16(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = picache_stride;
	loopcount = picache_loopcount;

	mips_intern_dcache_wbinv_range_index(va, (eva - va));

	while ((eva - va) >= (8 * 16)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_16(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (8 * 16);
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}
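
/*
 * In the index variants, the masked va selects a set within a single
 * way; the inner for-loop then replays the op at stride-byte offsets
 * so that all loopcount ways (or page-sized slices of ways) holding
 * that index are invalidated.
 */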

void
mipsNN_icache_sync_range_index_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);

	eva = round_line32(va + size);
	va = trunc_line32(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = picache_stride;
	loopcount = picache_loopcount;

	mips_intern_dcache_wbinv_range_index(va, (eva - va));

	while ((eva - va) >= (8 * 32)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_32(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (8 * 32);
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 32;
	}
}

void
mipsNN_pdcache_wbinv_all_16(void)
{
	vm_offset_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + pdcache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	SYNC;
}

void
mipsNN_pdcache_wbinv_all_32(void)
{
	vm_offset_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + pdcache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	SYNC;
}

void
mipsNN_pdcache_wbinv_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}

	SYNC;
}

void
mipsNN_pdcache_wbinv_range_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}

	SYNC;
}

void
mipsNN_pdcache_wbinv_range_index_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);

	eva = round_line16(va + size);
	va = trunc_line16(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = pdcache_stride;
	loopcount = pdcache_loopcount;

	while ((eva - va) >= (8 * 16)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_16(tmpva,
			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (8 * 16);
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

void
mipsNN_pdcache_wbinv_range_index_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);

	eva = round_line32(va + size);
	va = trunc_line32(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = pdcache_stride;
	loopcount = pdcache_loopcount;

	while ((eva - va) >= (8 * 32)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_32(tmpva,
			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (8 * 32);
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
mipsNN_pdcache_inv_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}

	SYNC;
}

void
mipsNN_pdcache_inv_range_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}

	SYNC;
}
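
/*
 * Note that the inv_range ops above drop dirty lines without writing
 * them back, and the edge rounding means bytes that merely share the
 * first or last line with [va, va + size) are discarded as well;
 * callers (e.g. DMA completion paths) must keep such buffers
 * cache-line aligned.
 */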

void
mipsNN_pdcache_wb_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}

	SYNC;
}

void
mipsNN_pdcache_wb_range_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 32;
	}

	SYNC;
}
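
/*
 * 128-byte-line variants for CNMIPS (Cavium Octeon) cores.  The Octeon
 * L1 data cache is write-through, so there is never dirty data to push
 * out: the data-cache ops need at most an ordering SYNC, and icache
 * synchronization falls back to a whole-icache flush via SYNCI.
 */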

#ifdef CPU_CNMIPS

void
mipsNN_icache_sync_all_128(void)
{
	SYNCI
}

void
mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size)
{
	SYNCI
}

void
mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size)
{
	SYNCI
}

void
mipsNN_pdcache_wbinv_all_128(void)
{
	SYNC;
}

void
mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size)
{
	SYNC;
}

void
mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size)
{
}

void
mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size)
{
}

void
mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
{
	SYNC;
}

#endif