/*      $OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $    */

/*-
 * Copyright (c) 2002-2004 Juli Mallett.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1995-1999 Per Fogelstrom.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Per Fogelstrom.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *      JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

#include <sys/types.h>
#include <machine/cpuregs.h>

/*
 * These functions are required by user-land atomic ops.
 * (See the illustrative usage sketch after mips_wbflush() below.)
 */

static __inline void
mips_barrier(void)
{
#if defined(CPU_CNMIPS) || defined(CPU_RMI) || defined(CPU_NLM)
        __compiler_membar();
#else
        __asm __volatile (".set noreorder\n\t"
                          "nop\n\t"
                          "nop\n\t"
                          "nop\n\t"
                          "nop\n\t"
                          "nop\n\t"
                          "nop\n\t"
                          "nop\n\t"
                          "nop\n\t"
                          ".set reorder\n\t"
                          : : : "memory");
#endif
}

static __inline void
mips_cp0_sync(void)
{
        __asm __volatile (__XSTRING(COP0_SYNC));
}

static __inline void
mips_wbflush(void)
{
#if defined(CPU_CNMIPS)
        __asm __volatile (".set noreorder\n\t"
                        "syncw\n\t"
                        ".set reorder\n"
                        : : : "memory");
#else
        __asm __volatile ("sync" : : : "memory");
        mips_barrier();
#endif
}
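
/*
 * Illustrative sketch (not compiled): one way the ordering helpers above
 * might be used when publishing data to another observer.  The structure,
 * field names, and producer function are hypothetical and not part of this
 * header.
 */
#if 0
struct example_msg {
        uint32_t                payload;
        volatile uint32_t       ready;
};

static __inline void
example_publish(struct example_msg *m, uint32_t v)
{
        m->payload = v;         /* store the data first */
        mips_barrier();         /* keep the two stores ordered */
        m->ready = 1;           /* then publish the flag */
        mips_wbflush();         /* drain the write buffer */
}
#endif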

#ifdef _KERNEL
/*
 * XXX
 * It would be nice to add variants that read/write register_t, to avoid some
 * ABI checks.
 */
#if defined(__mips_n32) || defined(__mips_n64)
#define MIPS_RW64_COP0(n,r)                                     \
static __inline uint64_t                                        \
mips_rd_ ## n (void)                                            \
{                                                               \
        int v0;                                                 \
        __asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";"       \
                          : [v0] "=&r"(v0));                    \
        mips_barrier();                                         \
        return (v0);                                            \
}                                                               \
static __inline void                                            \
mips_wr_ ## n (uint64_t a0)                                     \
{                                                               \
        __asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";"       \
                         __XSTRING(COP0_SYNC)";"                \
                         "nop;"                                 \
                         "nop;"                                 \
                         :                                      \
                         : [a0] "r"(a0));                       \
        mips_barrier();                                         \
} struct __hack

#define MIPS_RW64_COP0_SEL(n,r,s)                               \
static __inline uint64_t                                        \
mips_rd_ ## n(void)                                             \
{                                                               \
        int v0;                                                 \
        __asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"       \
                          : [v0] "=&r"(v0));                    \
        mips_barrier();                                         \
        return (v0);                                            \
}                                                               \
static __inline void                                            \
mips_wr_ ## n(uint64_t a0)                                      \
{                                                               \
        __asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"       \
                         __XSTRING(COP0_SYNC)";"                \
                         :                                      \
                         : [a0] "r"(a0));                       \
        mips_barrier();                                         \
} struct __hack

#if defined(__mips_n64)
MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC);
MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#ifdef CPU_CNMIPS
MIPS_RW64_COP0_SEL(cvmcount, MIPS_COP_0_COUNT, 6);
MIPS_RW64_COP0_SEL(cvmctl, MIPS_COP_0_COUNT, 7);
MIPS_RW64_COP0_SEL(cvmmemctl, MIPS_COP_0_COMPARE, 7);
MIPS_RW64_COP0_SEL(icache_err, MIPS_COP_0_CACHE_ERR, 0);
MIPS_RW64_COP0_SEL(dcache_err, MIPS_COP_0_CACHE_ERR, 1);
#endif
#endif
#if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */
MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);

#undef  MIPS_RW64_COP0
#undef  MIPS_RW64_COP0_SEL
#endif

#define MIPS_RW32_COP0(n,r)                                     \
static __inline uint32_t                                        \
mips_rd_ ## n (void)                                            \
{                                                               \
        int v0;                                                 \
        __asm __volatile ("mfc0 %[v0], $"__XSTRING(r)";"        \
                          : [v0] "=&r"(v0));                    \
        mips_barrier();                                         \
        return (v0);                                            \
}                                                               \
static __inline void                                            \
mips_wr_ ## n (uint32_t a0)                                     \
{                                                               \
        __asm __volatile ("mtc0 %[a0], $"__XSTRING(r)";"        \
                         __XSTRING(COP0_SYNC)";"                \
                         "nop;"                                 \
                         "nop;"                                 \
                         :                                      \
                         : [a0] "r"(a0));                       \
        mips_barrier();                                         \
} struct __hack

#define MIPS_RW32_COP0_SEL(n,r,s)                               \
static __inline uint32_t                                        \
mips_rd_ ## n(void)                                             \
{                                                               \
        int v0;                                                 \
        __asm __volatile ("mfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"        \
                          : [v0] "=&r"(v0));                    \
        mips_barrier();                                         \
        return (v0);                                            \
}                                                               \
static __inline void                                            \
mips_wr_ ## n(uint32_t a0)                                      \
{                                                               \
        __asm __volatile ("mtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"        \
                         __XSTRING(COP0_SYNC)";"                \
                         "nop;"                                 \
                         "nop;"                                 \
                         :                                      \
                         : [a0] "r"(a0));                       \
        mips_barrier();                                         \
} struct __hack

#ifdef CPU_CNMIPS
static __inline void mips_sync_icache (void)
{
        __asm __volatile (
                ".set push\n"
                ".set mips64\n"
                ".word 0x041f0000\n"            /* xxx ICACHE */
                "nop\n"
                ".set pop\n"
                : : );
}
#endif

MIPS_RW32_COP0(compare, MIPS_COP_0_COMPARE);
MIPS_RW32_COP0(config, MIPS_COP_0_CONFIG);
MIPS_RW32_COP0_SEL(config1, MIPS_COP_0_CONFIG, 1);
MIPS_RW32_COP0_SEL(config2, MIPS_COP_0_CONFIG, 2);
MIPS_RW32_COP0_SEL(config3, MIPS_COP_0_CONFIG, 3);
#ifdef CPU_CNMIPS
MIPS_RW32_COP0_SEL(config4, MIPS_COP_0_CONFIG, 4);
#endif
#ifdef CPU_NLM
MIPS_RW32_COP0_SEL(config6, MIPS_COP_0_CONFIG, 6);
MIPS_RW32_COP0_SEL(config7, MIPS_COP_0_CONFIG, 7);
#endif
MIPS_RW32_COP0(count, MIPS_COP_0_COUNT);
MIPS_RW32_COP0(index, MIPS_COP_0_TLB_INDEX);
MIPS_RW32_COP0(wired, MIPS_COP_0_TLB_WIRED);
MIPS_RW32_COP0(cause, MIPS_COP_0_CAUSE);
#if !defined(__mips_n64)
MIPS_RW32_COP0(excpc, MIPS_COP_0_EXC_PC);
#endif
MIPS_RW32_COP0(status, MIPS_COP_0_STATUS);

/* XXX: Some of these registers are specific to MIPS32. */
#if !defined(__mips_n64)
MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#endif
#ifdef CPU_NLM
MIPS_RW32_COP0_SEL(pagegrain, MIPS_COP_0_TLB_PG_MASK, 1);
#endif
#if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */
MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW32_COP0(prid, MIPS_COP_0_PRID);
/* XXX 64-bit?  */
MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1);
MIPS_RW32_COP0(watchlo, MIPS_COP_0_WATCH_LO);
MIPS_RW32_COP0_SEL(watchlo1, MIPS_COP_0_WATCH_LO, 1);
MIPS_RW32_COP0_SEL(watchlo2, MIPS_COP_0_WATCH_LO, 2);
MIPS_RW32_COP0_SEL(watchlo3, MIPS_COP_0_WATCH_LO, 3);
MIPS_RW32_COP0(watchhi, MIPS_COP_0_WATCH_HI);
MIPS_RW32_COP0_SEL(watchhi1, MIPS_COP_0_WATCH_HI, 1);
MIPS_RW32_COP0_SEL(watchhi2, MIPS_COP_0_WATCH_HI, 2);
MIPS_RW32_COP0_SEL(watchhi3, MIPS_COP_0_WATCH_HI, 3);

MIPS_RW32_COP0_SEL(perfcnt0, MIPS_COP_0_PERFCNT, 0);
MIPS_RW32_COP0_SEL(perfcnt1, MIPS_COP_0_PERFCNT, 1);
MIPS_RW32_COP0_SEL(perfcnt2, MIPS_COP_0_PERFCNT, 2);
MIPS_RW32_COP0_SEL(perfcnt3, MIPS_COP_0_PERFCNT, 3);

#undef  MIPS_RW32_COP0
#undef  MIPS_RW32_COP0_SEL

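/*
 * Illustrative sketch (not compiled): the accessors generated above are
 * plain read/write wrappers around mfc0/mtc0.  As one hypothetical use,
 * the CP0 Count/Compare pair could be programmed for a timer tick; the
 * function name and the delta argument are examples, not part of this
 * header.
 */
#if 0
static __inline void
example_arm_clock(uint32_t delta)
{
        uint32_t now;

        now = mips_rd_count();          /* free-running CP0 Count */
        mips_wr_compare(now + delta);   /* interrupt when Count == Compare */
}
#endif
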
static __inline register_t
intr_disable(void)
{
        register_t s;

        s = mips_rd_status();
        mips_wr_status(s & ~MIPS_SR_INT_IE);

        return (s & MIPS_SR_INT_IE);
}

static __inline register_t
intr_enable(void)
{
        register_t s;

        s = mips_rd_status();
        mips_wr_status(s | MIPS_SR_INT_IE);

        return (s);
}

static __inline void
intr_restore(register_t ie)
{
        if (ie == MIPS_SR_INT_IE) {
                intr_enable();
        }
}
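
/*
 * Illustrative sketch (not compiled): the usual pairing of the helpers
 * above is to bracket a short critical section.  The counter variable and
 * function name are hypothetical and only here for illustration.
 */
#if 0
static unsigned int example_counter;

static __inline void
example_bump_counter(void)
{
        register_t s;

        s = intr_disable();     /* returns the prior MIPS_SR_INT_IE state */
        example_counter++;      /* interrupts are off on this CPU here */
        intr_restore(s);        /* re-enable only if they were on before */
}
#endif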

static __inline uint32_t
set_intr_mask(uint32_t mask)
{
        uint32_t ostatus;

        ostatus = mips_rd_status();
        mask = (ostatus & ~MIPS_SR_INT_MASK) | (mask & MIPS_SR_INT_MASK);
        mips_wr_status(mask);
        return (ostatus);
}

static __inline uint32_t
get_intr_mask(void)
{

        return (mips_rd_status() & MIPS_SR_INT_MASK);
}
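
/*
 * Illustrative sketch (not compiled): set_intr_mask() replaces only the
 * interrupt-mask bits of the status register and returns the previous
 * status word, so a caller can apply a temporary mask and later put the
 * old one back.  The function name and new_mask argument are hypothetical.
 */
#if 0
static __inline void
example_with_mask(uint32_t new_mask)
{
        uint32_t ostatus;

        ostatus = set_intr_mask(new_mask);
        /* ... run with the temporary interrupt mask ... */
        set_intr_mask(ostatus & MIPS_SR_INT_MASK);
}
#endif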

static __inline void
breakpoint(void)
{
        __asm __volatile ("break");
}

#if defined(__GNUC__) && !defined(__mips_o32)
#define mips3_ld(a)     (*(const volatile uint64_t *)(a))
#define mips3_sd(a, v)  (*(volatile uint64_t *)(a) = (v))
#else
uint64_t mips3_ld(volatile uint64_t *va);
void mips3_sd(volatile uint64_t *, uint64_t);
#endif  /* __GNUC__ */

#endif /* _KERNEL */

#define readb(va)       (*(volatile uint8_t *) (va))
#define readw(va)       (*(volatile uint16_t *) (va))
#define readl(va)       (*(volatile uint32_t *) (va))
#if defined(__GNUC__) && !defined(__mips_o32)
#define readq(a)        (*(volatile uint64_t *)(a))
#endif

#define writeb(va, d)   (*(volatile uint8_t *) (va) = (d))
#define writew(va, d)   (*(volatile uint16_t *) (va) = (d))
#define writel(va, d)   (*(volatile uint32_t *) (va) = (d))
#if defined(__GNUC__) && !defined(__mips_o32)
#define writeq(va, d)   (*(volatile uint64_t *) (va) = (d))
#endif
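
/*
 * Illustrative sketch (not compiled): readl()/writel() are simple volatile
 * accesses to a mapped device register, and pairing a write with
 * mips_wbflush() pushes it out of the write buffer before continuing.
 * The register offsets, base address argument, and function name are
 * hypothetical.
 */
#if 0
#define EXAMPLE_REG_CTRL        0x00    /* hypothetical control register */
#define EXAMPLE_REG_STATUS      0x04    /* hypothetical status register */

static __inline uint32_t
example_start_device(volatile uint8_t *base)
{
        writel(base + EXAMPLE_REG_CTRL, 0x1);   /* kick off the device */
        mips_wbflush();                         /* drain the write buffer */
        return (readl(base + EXAMPLE_REG_STATUS));
}
#endif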

#endif /* !_MACHINE_CPUFUNC_H_ */