/*	$NetBSD: cache_r4k.h,v 1.10 2003/03/08 04:43:26 rafal Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Cache definitions/operations for R4000-style caches.
 */

#define CACHE_R4K_I                     0
#define CACHE_R4K_D                     1
#define CACHE_R4K_SI                    2
#define CACHE_R4K_SD                    3

#define CACHEOP_R4K_INDEX_INV           (0 << 2)        /* I, SI */
#define CACHEOP_R4K_INDEX_WB_INV        (0 << 2)        /* D, SD */
#define CACHEOP_R4K_INDEX_LOAD_TAG      (1 << 2)        /* all */
#define CACHEOP_R4K_INDEX_STORE_TAG     (2 << 2)        /* all */
#define CACHEOP_R4K_CREATE_DIRTY_EXCL   (3 << 2)        /* D, SD */
#define CACHEOP_R4K_HIT_INV             (4 << 2)        /* all */
#define CACHEOP_R4K_HIT_WB_INV          (5 << 2)        /* D, SD */
#define CACHEOP_R4K_FILL                (5 << 2)        /* I */
#define CACHEOP_R4K_HIT_WB              (6 << 2)        /* I, D, SD */
#define CACHEOP_R4K_HIT_SET_VIRTUAL     (7 << 2)        /* SI, SD */

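/*
 * The 5-bit op field of the MIPS "cache" instruction selects the target
 * cache in bits [1:0] and the operation in bits [4:2], which is why the
 * CACHEOP_R4K_* codes above are pre-shifted left by two.  A complete op
 * argument for the macros below is formed by OR-ing one CACHE_R4K_*
 * selector with one CACHEOP_R4K_* code, e.g.
 * (CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV).
 */
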
#if !defined(LOCORE)

/*
 * cache_op_r4k_line:
 *
 *      Perform the specified cache operation on a single line.
 */
#define cache_op_r4k_line(va, op)                                       \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %1, 0(%0)                                \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va), "i" (op)                                        \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

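/*
 * For example, to write back and invalidate the D-cache line holding the
 * word at "va" (an illustrative use, not anything defined in this header):
 *
 *      cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 */
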
/*
 * cache_r4k_op_8lines_16:
 *
 *      Perform the specified cache operation on 8 16-byte cache lines.
 */
#define cache_r4k_op_8lines_16(va, op)                                  \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %1, 0x00(%0); cache %1, 0x10(%0)         \n\t"   \
                "cache %1, 0x20(%0); cache %1, 0x30(%0)         \n\t"   \
                "cache %1, 0x40(%0); cache %1, 0x50(%0)         \n\t"   \
                "cache %1, 0x60(%0); cache %1, 0x70(%0)         \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va), "i" (op)                                        \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32:
 *
 *      Perform the specified cache operation on 8 32-byte cache lines.
 */
#define cache_r4k_op_8lines_32(va, op)                                  \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %1, 0x00(%0); cache %1, 0x20(%0)         \n\t"   \
                "cache %1, 0x40(%0); cache %1, 0x60(%0)         \n\t"   \
                "cache %1, 0x80(%0); cache %1, 0xa0(%0)         \n\t"   \
                "cache %1, 0xc0(%0); cache %1, 0xe0(%0)         \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va), "i" (op)                                        \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_64:
 *
 *      Perform the specified cache operation on 8 64-byte cache lines.
 */
#define cache_r4k_op_8lines_64(va, op)                                  \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %1, 0x000(%0); cache %1, 0x040(%0)       \n\t"   \
                "cache %1, 0x080(%0); cache %1, 0x0c0(%0)       \n\t"   \
                "cache %1, 0x100(%0); cache %1, 0x140(%0)       \n\t"   \
                "cache %1, 0x180(%0); cache %1, 0x1c0(%0)       \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va), "i" (op)                                        \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_16:
 *
 *      Perform the specified cache operation on 32 16-byte
 *      cache lines.
 */
#define cache_r4k_op_32lines_16(va, op)                                 \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %1, 0x000(%0); cache %1, 0x010(%0);      \n\t"   \
                "cache %1, 0x020(%0); cache %1, 0x030(%0);      \n\t"   \
                "cache %1, 0x040(%0); cache %1, 0x050(%0);      \n\t"   \
                "cache %1, 0x060(%0); cache %1, 0x070(%0);      \n\t"   \
                "cache %1, 0x080(%0); cache %1, 0x090(%0);      \n\t"   \
                "cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);      \n\t"   \
                "cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);      \n\t"   \
                "cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);      \n\t"   \
                "cache %1, 0x100(%0); cache %1, 0x110(%0);      \n\t"   \
                "cache %1, 0x120(%0); cache %1, 0x130(%0);      \n\t"   \
                "cache %1, 0x140(%0); cache %1, 0x150(%0);      \n\t"   \
                "cache %1, 0x160(%0); cache %1, 0x170(%0);      \n\t"   \
                "cache %1, 0x180(%0); cache %1, 0x190(%0);      \n\t"   \
                "cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);      \n\t"   \
                "cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);      \n\t"   \
                "cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);      \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va), "i" (op)                                        \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

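/*
 * The unrolled macros are typically combined with the single-line macro to
 * cover an arbitrary range.  A minimal sketch of a write-back/invalidate of
 * a 16-byte-line D-cache over [va, va + size); the rounding expressions and
 * the use of vm_offset_t are illustrative assumptions, not part of this
 * header:
 *
 *      eva = (va + size + 15) & ~(vm_offset_t)15;    // round up to a line
 *      va &= ~(vm_offset_t)15;                       // round down to a line
 *      while ((eva - va) >= (32 * 16)) {
 *              cache_r4k_op_32lines_16(va,
 *                  CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *              va += (32 * 16);
 *      }
 *      while (va < eva) {
 *              cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *              va += 16;
 *      }
 */
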
/*
 * cache_r4k_op_32lines_32:
 *
 *      Perform the specified cache operation on 32 32-byte
 *      cache lines.
 */
#define cache_r4k_op_32lines_32(va, op)                                 \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %1, 0x000(%0); cache %1, 0x020(%0);      \n\t"   \
                "cache %1, 0x040(%0); cache %1, 0x060(%0);      \n\t"   \
                "cache %1, 0x080(%0); cache %1, 0x0a0(%0);      \n\t"   \
                "cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);      \n\t"   \
                "cache %1, 0x100(%0); cache %1, 0x120(%0);      \n\t"   \
                "cache %1, 0x140(%0); cache %1, 0x160(%0);      \n\t"   \
                "cache %1, 0x180(%0); cache %1, 0x1a0(%0);      \n\t"   \
                "cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);      \n\t"   \
                "cache %1, 0x200(%0); cache %1, 0x220(%0);      \n\t"   \
                "cache %1, 0x240(%0); cache %1, 0x260(%0);      \n\t"   \
                "cache %1, 0x280(%0); cache %1, 0x2a0(%0);      \n\t"   \
                "cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);      \n\t"   \
                "cache %1, 0x300(%0); cache %1, 0x320(%0);      \n\t"   \
                "cache %1, 0x340(%0); cache %1, 0x360(%0);      \n\t"   \
                "cache %1, 0x380(%0); cache %1, 0x3a0(%0);      \n\t"   \
                "cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);      \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va), "i" (op)                                        \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_64:
 *
 *      Perform the specified cache operation on 32 64-byte
 *      cache lines.
 */
#define cache_r4k_op_32lines_64(va, op)                                 \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %1, 0x000(%0); cache %1, 0x040(%0);      \n\t"   \
                "cache %1, 0x080(%0); cache %1, 0x0c0(%0);      \n\t"   \
                "cache %1, 0x100(%0); cache %1, 0x140(%0);      \n\t"   \
                "cache %1, 0x180(%0); cache %1, 0x1c0(%0);      \n\t"   \
                "cache %1, 0x200(%0); cache %1, 0x240(%0);      \n\t"   \
                "cache %1, 0x280(%0); cache %1, 0x2c0(%0);      \n\t"   \
                "cache %1, 0x300(%0); cache %1, 0x340(%0);      \n\t"   \
                "cache %1, 0x380(%0); cache %1, 0x3c0(%0);      \n\t"   \
                "cache %1, 0x400(%0); cache %1, 0x440(%0);      \n\t"   \
                "cache %1, 0x480(%0); cache %1, 0x4c0(%0);      \n\t"   \
                "cache %1, 0x500(%0); cache %1, 0x540(%0);      \n\t"   \
                "cache %1, 0x580(%0); cache %1, 0x5c0(%0);      \n\t"   \
                "cache %1, 0x600(%0); cache %1, 0x640(%0);      \n\t"   \
                "cache %1, 0x680(%0); cache %1, 0x6c0(%0);      \n\t"   \
                "cache %1, 0x700(%0); cache %1, 0x740(%0);      \n\t"   \
                "cache %1, 0x780(%0); cache %1, 0x7c0(%0);      \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va), "i" (op)                                        \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_128:
 *
 *      Perform the specified cache operation on 32 128-byte
 *      cache lines.
 */
#define cache_r4k_op_32lines_128(va, op)                                \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %1, 0x0000(%0); cache %1, 0x0080(%0);    \n\t"   \
                "cache %1, 0x0100(%0); cache %1, 0x0180(%0);    \n\t"   \
                "cache %1, 0x0200(%0); cache %1, 0x0280(%0);    \n\t"   \
                "cache %1, 0x0300(%0); cache %1, 0x0380(%0);    \n\t"   \
                "cache %1, 0x0400(%0); cache %1, 0x0480(%0);    \n\t"   \
                "cache %1, 0x0500(%0); cache %1, 0x0580(%0);    \n\t"   \
                "cache %1, 0x0600(%0); cache %1, 0x0680(%0);    \n\t"   \
                "cache %1, 0x0700(%0); cache %1, 0x0780(%0);    \n\t"   \
                "cache %1, 0x0800(%0); cache %1, 0x0880(%0);    \n\t"   \
                "cache %1, 0x0900(%0); cache %1, 0x0980(%0);    \n\t"   \
                "cache %1, 0x0a00(%0); cache %1, 0x0a80(%0);    \n\t"   \
                "cache %1, 0x0b00(%0); cache %1, 0x0b80(%0);    \n\t"   \
                "cache %1, 0x0c00(%0); cache %1, 0x0c80(%0);    \n\t"   \
                "cache %1, 0x0d00(%0); cache %1, 0x0d80(%0);    \n\t"   \
                "cache %1, 0x0e00(%0); cache %1, 0x0e80(%0);    \n\t"   \
                "cache %1, 0x0f00(%0); cache %1, 0x0f80(%0);    \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va), "i" (op)                                        \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_16_2way:
 *
 *      Perform the specified cache operation on 16 16-byte
 *      cache lines, 2-ways.
 */
#define cache_r4k_op_16lines_16_2way(va1, va2, op)                      \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %2, 0x000(%0); cache %2, 0x000(%1);      \n\t"   \
                "cache %2, 0x010(%0); cache %2, 0x010(%1);      \n\t"   \
                "cache %2, 0x020(%0); cache %2, 0x020(%1);      \n\t"   \
                "cache %2, 0x030(%0); cache %2, 0x030(%1);      \n\t"   \
                "cache %2, 0x040(%0); cache %2, 0x040(%1);      \n\t"   \
                "cache %2, 0x050(%0); cache %2, 0x050(%1);      \n\t"   \
                "cache %2, 0x060(%0); cache %2, 0x060(%1);      \n\t"   \
                "cache %2, 0x070(%0); cache %2, 0x070(%1);      \n\t"   \
                "cache %2, 0x080(%0); cache %2, 0x080(%1);      \n\t"   \
                "cache %2, 0x090(%0); cache %2, 0x090(%1);      \n\t"   \
                "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);      \n\t"   \
                "cache %2, 0x0b0(%0); cache %2, 0x0b0(%1);      \n\t"   \
                "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);      \n\t"   \
                "cache %2, 0x0d0(%0); cache %2, 0x0d0(%1);      \n\t"   \
                "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);      \n\t"   \
                "cache %2, 0x0f0(%0); cache %2, 0x0f0(%1);      \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va1), "r" (va2), "i" (op)                            \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_32_2way:
 *
 *      Perform the specified cache operation on 16 32-byte
 *      cache lines, 2-ways.
 */
#define cache_r4k_op_16lines_32_2way(va1, va2, op)                      \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %2, 0x000(%0); cache %2, 0x000(%1);      \n\t"   \
                "cache %2, 0x020(%0); cache %2, 0x020(%1);      \n\t"   \
                "cache %2, 0x040(%0); cache %2, 0x040(%1);      \n\t"   \
                "cache %2, 0x060(%0); cache %2, 0x060(%1);      \n\t"   \
                "cache %2, 0x080(%0); cache %2, 0x080(%1);      \n\t"   \
                "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);      \n\t"   \
                "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);      \n\t"   \
                "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);      \n\t"   \
                "cache %2, 0x100(%0); cache %2, 0x100(%1);      \n\t"   \
                "cache %2, 0x120(%0); cache %2, 0x120(%1);      \n\t"   \
                "cache %2, 0x140(%0); cache %2, 0x140(%1);      \n\t"   \
                "cache %2, 0x160(%0); cache %2, 0x160(%1);      \n\t"   \
                "cache %2, 0x180(%0); cache %2, 0x180(%1);      \n\t"   \
                "cache %2, 0x1a0(%0); cache %2, 0x1a0(%1);      \n\t"   \
                "cache %2, 0x1c0(%0); cache %2, 0x1c0(%1);      \n\t"   \
                "cache %2, 0x1e0(%0); cache %2, 0x1e0(%1);      \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va1), "r" (va2), "i" (op)                            \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_16_4way:
 *
 *      Perform the specified cache operation on 8 16-byte
 *      cache lines, 4-ways.
 */
#define cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op)             \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %4, 0x000(%0); cache %4, 0x000(%1);      \n\t"   \
                "cache %4, 0x000(%2); cache %4, 0x000(%3);      \n\t"   \
                "cache %4, 0x010(%0); cache %4, 0x010(%1);      \n\t"   \
                "cache %4, 0x010(%2); cache %4, 0x010(%3);      \n\t"   \
                "cache %4, 0x020(%0); cache %4, 0x020(%1);      \n\t"   \
                "cache %4, 0x020(%2); cache %4, 0x020(%3);      \n\t"   \
                "cache %4, 0x030(%0); cache %4, 0x030(%1);      \n\t"   \
                "cache %4, 0x030(%2); cache %4, 0x030(%3);      \n\t"   \
                "cache %4, 0x040(%0); cache %4, 0x040(%1);      \n\t"   \
                "cache %4, 0x040(%2); cache %4, 0x040(%3);      \n\t"   \
                "cache %4, 0x050(%0); cache %4, 0x050(%1);      \n\t"   \
                "cache %4, 0x050(%2); cache %4, 0x050(%3);      \n\t"   \
                "cache %4, 0x060(%0); cache %4, 0x060(%1);      \n\t"   \
                "cache %4, 0x060(%2); cache %4, 0x060(%3);      \n\t"   \
                "cache %4, 0x070(%0); cache %4, 0x070(%1);      \n\t"   \
                "cache %4, 0x070(%2); cache %4, 0x070(%3);      \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)      \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32_4way:
 *
 *      Perform the specified cache operation on 8 32-byte
 *      cache lines, 4-ways.
 */
#define cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op)             \
do {                                                                    \
        __asm __volatile(                                               \
                ".set noreorder                                 \n\t"   \
                "cache %4, 0x000(%0); cache %4, 0x000(%1);      \n\t"   \
                "cache %4, 0x000(%2); cache %4, 0x000(%3);      \n\t"   \
                "cache %4, 0x020(%0); cache %4, 0x020(%1);      \n\t"   \
                "cache %4, 0x020(%2); cache %4, 0x020(%3);      \n\t"   \
                "cache %4, 0x040(%0); cache %4, 0x040(%1);      \n\t"   \
                "cache %4, 0x040(%2); cache %4, 0x040(%3);      \n\t"   \
                "cache %4, 0x060(%0); cache %4, 0x060(%1);      \n\t"   \
                "cache %4, 0x060(%2); cache %4, 0x060(%3);      \n\t"   \
                "cache %4, 0x080(%0); cache %4, 0x080(%1);      \n\t"   \
                "cache %4, 0x080(%2); cache %4, 0x080(%3);      \n\t"   \
                "cache %4, 0x0a0(%0); cache %4, 0x0a0(%1);      \n\t"   \
                "cache %4, 0x0a0(%2); cache %4, 0x0a0(%3);      \n\t"   \
                "cache %4, 0x0c0(%0); cache %4, 0x0c0(%1);      \n\t"   \
                "cache %4, 0x0c0(%2); cache %4, 0x0c0(%3);      \n\t"   \
                "cache %4, 0x0e0(%0); cache %4, 0x0e0(%1);      \n\t"   \
                "cache %4, 0x0e0(%2); cache %4, 0x0e0(%3);      \n\t"   \
                ".set reorder"                                          \
            :                                                           \
            : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)      \
            : "memory");                                                \
} while (/*CONSTCOND*/0)

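/*
 * The *_2way and *_4way variants apply the operation at the same offsets
 * in each way of a set-associative cache.  A minimal sketch of an index
 * write-back/invalidate sweep over a 2-way, 16-byte-line D-cache; the
 * "way_size" variable (per-way size in bytes) and the KSEG0 starting
 * address are illustrative assumptions, not part of this header:
 *
 *      w1 = MIPS_PHYS_TO_KSEG0(0);
 *      w2 = w1 + way_size;
 *      for (off = 0; off < way_size; off += (16 * 16)) {
 *              cache_r4k_op_16lines_16_2way(w1 + off, w2 + off,
 *                  CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
 *      }
 */
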
void    r4k_icache_sync_all_16(void);
void    r4k_icache_sync_range_16(vm_paddr_t, vm_size_t);
void    r4k_icache_sync_range_index_16(vm_paddr_t, vm_size_t);

void    r4k_icache_sync_all_32(void);
void    r4k_icache_sync_range_32(vm_paddr_t, vm_size_t);
void    r4k_icache_sync_range_index_32(vm_paddr_t, vm_size_t);

void    r4k_pdcache_wbinv_all_16(void);
void    r4k_pdcache_wbinv_range_16(vm_paddr_t, vm_size_t);
void    r4k_pdcache_wbinv_range_index_16(vm_paddr_t, vm_size_t);

void    r4k_pdcache_inv_range_16(vm_paddr_t, vm_size_t);
void    r4k_pdcache_wb_range_16(vm_paddr_t, vm_size_t);

void    r4k_pdcache_wbinv_all_32(void);
void    r4k_pdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void    r4k_pdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void    r4k_pdcache_inv_range_32(vm_paddr_t, vm_size_t);
void    r4k_pdcache_wb_range_32(vm_paddr_t, vm_size_t);

void    r4k_sdcache_wbinv_all_32(void);
void    r4k_sdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void    r4k_sdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void    r4k_sdcache_inv_range_32(vm_paddr_t, vm_size_t);
void    r4k_sdcache_wb_range_32(vm_paddr_t, vm_size_t);

void    r4k_sdcache_wbinv_all_128(void);
void    r4k_sdcache_wbinv_range_128(vm_paddr_t, vm_size_t);
void    r4k_sdcache_wbinv_range_index_128(vm_paddr_t, vm_size_t);

void    r4k_sdcache_inv_range_128(vm_paddr_t, vm_size_t);
void    r4k_sdcache_wb_range_128(vm_paddr_t, vm_size_t);

void    r4k_sdcache_wbinv_all_generic(void);
void    r4k_sdcache_wbinv_range_generic(vm_paddr_t, vm_size_t);
void    r4k_sdcache_wbinv_range_index_generic(vm_paddr_t, vm_size_t);

void    r4k_sdcache_inv_range_generic(vm_paddr_t, vm_size_t);
void    r4k_sdcache_wb_range_generic(vm_paddr_t, vm_size_t);

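/*
 * The routines declared above are the R4K-style cache primitives built
 * from the macros in this file; the numeric suffix gives the cache line
 * size they assume.  A minimal sketch of how a whole-I-cache invalidate
 * such as r4k_icache_sync_all_16() is typically structured; the
 * "icache_size" variable is an illustrative assumption, the real routine
 * takes the size from the probed cache configuration:
 *
 *      va = MIPS_PHYS_TO_KSEG0(0);
 *      eva = va + icache_size;
 *      while (va < eva) {
 *              cache_r4k_op_32lines_16(va,
 *                  CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
 *              va += (32 * 16);
 *      }
 */
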
#endif /* !LOCORE */