/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
37 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
44 #include <machine/cache.h>
45 #include <machine/cpufunc.h>
46 #include <machine/lsu.h>
47 #include <machine/smp.h>
48 #include <machine/tlb.h>
/* Spitfire (UltraSPARC I/II) ITLB and DTLB each hold 64 entries. */
#define	SPITFIRE_TLB_ENTRIES	64

/* Optional pmap statistics counters (compiled out unless PMAP_STATS). */
PMAP_STATS_VAR(spitfire_dcache_npage_inval);
PMAP_STATS_VAR(spitfire_dcache_npage_inval_match);
PMAP_STATS_VAR(spitfire_icache_npage_inval);
PMAP_STATS_VAR(spitfire_icache_npage_inval_match);
58 * Enable the level 1 caches.
61 spitfire_cache_enable(u_int cpu_impl __unused)
65 lsu = ldxa(0, ASI_LSU_CTL_REG);
66 stxa_sync(0, ASI_LSU_CTL_REG, lsu | LSU_IC | LSU_DC);
70 * Flush all lines from the level 1 caches.
73 spitfire_cache_flush(void)
77 for (addr = 0; addr < PCPU_GET(cache.dc_size);
78 addr += PCPU_GET(cache.dc_linesize))
79 stxa_sync(addr, ASI_DCACHE_TAG, 0);
80 for (addr = 0; addr < PCPU_GET(cache.ic_size);
81 addr += PCPU_GET(cache.ic_linesize))
82 stxa_sync(addr, ASI_ICACHE_TAG, 0);
86 * Flush a physical page from the data cache.
89 spitfire_dcache_page_inval(vm_paddr_t pa)
96 KASSERT((pa & PAGE_MASK) == 0, ("%s: pa not page aligned", __func__));
97 PMAP_STATS_INC(spitfire_dcache_npage_inval);
98 target = pa >> (PAGE_SHIFT - DC_TAG_SHIFT);
99 cookie = ipi_dcache_page_inval(tl_ipi_spitfire_dcache_page_inval, pa);
100 for (addr = 0; addr < PCPU_GET(cache.dc_size);
101 addr += PCPU_GET(cache.dc_linesize)) {
102 tag = ldxa(addr, ASI_DCACHE_TAG);
103 if (((tag >> DC_VALID_SHIFT) & DC_VALID_MASK) == 0)
105 tag &= DC_TAG_MASK << DC_TAG_SHIFT;
107 PMAP_STATS_INC(spitfire_dcache_npage_inval_match);
108 stxa_sync(addr, ASI_DCACHE_TAG, tag);
115 * Flush a physical page from the instruction cache.
118 spitfire_icache_page_inval(vm_paddr_t pa)
120 register u_long tag __asm("%g1");
125 KASSERT((pa & PAGE_MASK) == 0, ("%s: pa not page aligned", __func__));
126 PMAP_STATS_INC(spitfire_icache_npage_inval);
127 target = pa >> (PAGE_SHIFT - IC_TAG_SHIFT);
128 cookie = ipi_icache_page_inval(tl_ipi_spitfire_icache_page_inval, pa);
129 for (addr = 0; addr < PCPU_GET(cache.ic_size);
130 addr += PCPU_GET(cache.ic_linesize)) {
131 __asm __volatile("ldda [%1] %2, %%g0" /*, %g1 */
132 : "=r" (tag) : "r" (addr), "n" (ASI_ICACHE_TAG));
133 if (((tag >> IC_VALID_SHIFT) & IC_VALID_MASK) == 0)
135 tag &= (u_long)IC_TAG_MASK << IC_TAG_SHIFT;
137 PMAP_STATS_INC(spitfire_icache_npage_inval_match);
138 stxa_sync(addr, ASI_ICACHE_TAG, tag);
145 * Flush all non-locked mappings from the TLBs.
148 spitfire_tlb_flush_nonlocked(void)
153 for (i = 0; i < SPITFIRE_TLB_ENTRIES; i++) {
154 slot = TLB_DAR_SLOT(TLB_DAR_T32, i);
155 if ((ldxa(slot, ASI_DTLB_DATA_ACCESS_REG) & TD_L) == 0)
156 stxa_sync(slot, ASI_DTLB_DATA_ACCESS_REG, 0);
157 if ((ldxa(slot, ASI_ITLB_DATA_ACCESS_REG) & TD_L) == 0)
158 stxa_sync(slot, ASI_ITLB_DATA_ACCESS_REG, 0);
163 * Flush all user mappings from the TLBs.
166 spitfire_tlb_flush_user(void)
173 for (i = 0; i < SPITFIRE_TLB_ENTRIES; i++) {
174 slot = TLB_DAR_SLOT(TLB_DAR_T32, i);
175 data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
176 tag = ldxa(slot, ASI_DTLB_TAG_READ_REG);
177 if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
178 TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
179 stxa_sync(slot, ASI_DTLB_DATA_ACCESS_REG, 0);
180 data = ldxa(slot, ASI_ITLB_DATA_ACCESS_REG);
181 tag = ldxa(slot, ASI_ITLB_TAG_READ_REG);
182 if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
183 TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
184 stxa_sync(slot, ASI_ITLB_DATA_ACCESS_REG, 0);