2 * Copyright (c) 2003-2009 RMI Corporation
5 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
32 #ifndef __MIPS_EXTS_H__
33 #define __MIPS_EXTS_H__
/*
 * Block IDs of the on-chip CPU sub-units, used as the "block" argument
 * of read_xlr_ctrl_register()/write_xlr_ctrl_register() below
 * (selector is (block << 8) | reg).
 */
#define	CPU_BLOCKID_IFU		0	/* instruction fetch unit */
#define	CPU_BLOCKID_ICU		1	/* instruction cache unit */
#define	CPU_BLOCKID_IEU		2	/* instruction execution unit */
#define	CPU_BLOCKID_LSU		3	/* load/store unit */
#define	CPU_BLOCKID_MMU		4	/* memory management unit */
#define	CPU_BLOCKID_PRF		5	/* NOTE(review): presumably prefetch — confirm */

/* Register id of the LSU cache error log register */
#define	LSU_CERRLOG_REGID	9
44 #if defined(__mips_n64) || defined(__mips_n32)
45 static __inline uint64_t
46 read_xlr_ctrl_register(int block, int reg)
54 ".word 0x71280018\n\t" /* mfcr $8, $9 */
57 : "=r" (res) : "r"((block << 8) | reg)
64 write_xlr_ctrl_register(int block, int reg, uint64_t value)
71 ".word 0x71280019\n" /* mtcr $8, $9 */
74 : "r" (value), "r" ((block << 8) | reg)
79 #else /* !(defined(__mips_n64) || defined(__mips_n32)) */
81 static __inline uint64_t
82 read_xlr_ctrl_register(int block, int reg)
91 ".word 0x71280018\n" /* "mfcr $8, $9\n" */
92 "dsra32 %0, $8, 0\n\t"
95 : "=r" (high), "=r"(low)
96 : "r" ((block << 8) | reg)
99 return ( (((uint64_t)high) << 32) | low);
103 write_xlr_ctrl_register(int block, int reg, uint64_t value)
107 low = value & 0xffffffff;
109 __asm__ __volatile__(
113 "dsll32 $9, %0, 0\n\t"
114 "dsll32 $8, %1, 0\n\t"
115 "dsrl32 $8, $8, 0\n\t"
118 ".word 0x71280019\n\t" /* mtcr $8, $9 */
121 : "r" (high), "r" (low), "r"((block << 8) | reg)
124 #endif /* defined(__mips_n64) || defined(__mips_n32) */
/*
 * 32 bit read write for c0 and c2 registers.  These are GNU statement
 * expressions / bare asm statements; reg and sel must be compile-time
 * constants ("i" constraints).
 */
#define	read_c0_register32(reg, sel)				\
({								\
	uint32_t __rv;						\
	__asm__ __volatile__(					\
	    ".set	push\n\t"				\
	    ".set	mips32\n\t"				\
	    "mfc0	%0, $%1, %2\n\t"			\
	    ".set	pop\n"					\
	    : "=r" (__rv) : "i" (reg), "i" (sel) );		\
	__rv;							\
})

#define	write_c0_register32(reg, sel, value)			\
	__asm__ __volatile__(					\
	    ".set	push\n\t"				\
	    ".set	mips32\n\t"				\
	    "mtc0	%0, $%1, %2\n\t"			\
	    ".set	pop\n"					\
	    : : "r" (value), "i" (reg), "i" (sel) );

#define	read_c2_register32(reg, sel)				\
({								\
	uint32_t __rv;						\
	__asm__ __volatile__(					\
	    ".set	push\n\t"				\
	    ".set	mips32\n\t"				\
	    "mfc2	%0, $%1, %2\n\t"			\
	    ".set	pop\n"					\
	    : "=r" (__rv) : "i" (reg), "i" (sel) );		\
	__rv;							\
})

#define	write_c2_register32(reg, sel, value)			\
	__asm__ __volatile__(					\
	    ".set	push\n\t"				\
	    ".set	mips32\n\t"				\
	    "mtc2	%0, $%1, %2\n\t"			\
	    ".set	pop\n"					\
	    : : "r" (value), "i" (reg), "i" (sel) );
169 #if defined(__mips_n64) || defined(__mips_n32)
171 * On 64 bit compilation, the operations are simple
173 #define read_c0_register64(reg, sel) \
176 __asm__ __volatile__( \
179 "dmfc0 %0, $%1, %2\n\t" \
181 : "=r" (__rv) : "i" (reg), "i" (sel) ); \
185 #define write_c0_register64(reg, sel, value) \
186 __asm__ __volatile__( \
189 "dmtc0 %0, $%1, %2\n\t" \
191 : : "r" (value), "i" (reg), "i" (sel) );
193 #define read_c2_register64(reg, sel) \
196 __asm__ __volatile__( \
199 "dmfc2 %0, $%1, %2\n\t" \
201 : "=r" (__rv) : "i" (reg), "i" (sel) ); \
205 #define write_c2_register64(reg, sel, value) \
206 __asm__ __volatile__( \
209 "dmtc2 %0, $%1, %2\n\t" \
211 : : "r" (value), "i" (reg), "i" (sel) );
213 #else /* ! (defined(__mips_n64) || defined(__mips_n32)) */
216 * 32 bit compilation, 64 bit values has to split
218 #define read_c0_register64(reg, sel) \
220 uint32_t __high, __low; \
221 __asm__ __volatile__( \
223 ".set noreorder\n\t" \
225 "dmfc0 $8, $%2, %3\n\t" \
226 "dsra32 %0, $8, 0\n\t" \
227 "sll %1, $8, 0\n\t" \
229 : "=r"(__high), "=r"(__low): "i"(reg), "i"(sel) \
231 ((uint64_t)__high << 32) | __low; \
234 #define write_c0_register64(reg, sel, value) \
236 uint32_t __high = value >> 32; \
237 uint32_t __low = value & 0xffffffff; \
238 __asm__ __volatile__( \
240 ".set noreorder\n\t" \
242 "dsll32 $8, %1, 0\n\t" \
243 "dsll32 $9, %0, 0\n\t" \
244 "dsrl32 $8, $8, 0\n\t" \
245 "or $8, $8, $9\n\t" \
246 "dmtc0 $8, $%2, %3\n\t" \
248 :: "r"(__high), "r"(__low), "i"(reg), "i"(sel) \
252 #define read_c2_register64(reg, sel) \
254 uint32_t __high, __low; \
255 __asm__ __volatile__( \
257 ".set noreorder\n\t" \
259 "dmfc2 $8, $%2, %3\n\t" \
260 "dsra32 %0, $8, 0\n\t" \
261 "sll %1, $8, 0\n\t" \
263 : "=r"(__high), "=r"(__low): "i"(reg), "i"(sel) \
265 ((uint64_t)__high << 32) | __low; \
268 #define write_c2_register64(reg, sel, value) \
270 uint32_t __high = value >> 32; \
271 uint32_t __low = value & 0xffffffff; \
272 __asm__ __volatile__( \
274 ".set noreorder\n\t" \
276 "dsll32 $8, %1, 0\n\t" \
277 "dsll32 $9, %0, 0\n\t" \
278 "dsrl32 $8, $8, 0\n\t" \
279 "or $8, $8, $9\n\t" \
280 "dmtc2 $8, $%2, %3\n\t" \
282 :: "r"(__high), "r"(__low), "i"(reg), "i"(sel) \
286 #endif /* defined(__mips_n64) || defined(__mips_n32) */
/*
 * Hardware thread (cpu) id from the EBase register (cp0 reg 15 sel 1),
 * low 5 bits.
 */
static __inline int
xlr_cpu_id(void)
{

	return (read_c0_register32(15, 1) & 0x1f);
}

/* Core number: four hardware threads per core. */
static __inline int
xlr_core_id(void)
{

	return (xlr_cpu_id() / 4);
}

/* Thread number within the core: low 2 bits of the hw thread id. */
static __inline int
xlr_thr_id(void)
{

	return (read_c0_register32(15, 1) & 0x3);
}
/* Additional registers on the XLR */
#define	MIPS_COP_0_OSSCRATCH	22	/* cp0 OS scratch register number */
#define	XLR_CACHELINE_SIZE	32	/* L1 cache line size in bytes */
/*
 * Functions to write to and read from the extended interrupt
 * registers:
 * EIRR : Extended Interrupt Request Register
 *        cp0 register 9 sel 6
 *        bits 0...7 are same as cause register 8...15
 * EIMR : Extended Interrupt Mask Register
 *        cp0 register 9 sel 7
 *        bits 0...7 are same as status register 8...15
 */
static __inline uint64_t
read_c0_eirr64(void)
{

	return (read_c0_register64(9, 6));
}

static __inline void
write_c0_eirr64(uint64_t val)
{

	write_c0_register64(9, 6, val);
}

static __inline uint64_t
read_c0_eimr64(void)
{

	return (read_c0_register64(9, 7));
}

static __inline void
write_c0_eimr64(uint64_t val)
{

	write_c0_register64(9, 7, val);
}
/*
 * Atomically set *lock to 1 using the XLR "swapw" instruction
 * (emitted as .word since the assembler does not know it) and return
 * 1 if the lock was previously free (old value 0), 0 otherwise.
 */
static __inline int
xlr_test_and_set(int *lock)
{
	int oldval;

	__asm__ __volatile__(
	    ".set	push\n"
	    ".set	noreorder\n"
	    "move	$9, %2\n"
	    "li		$8, 1\n"
	    ".word	0x71280014\n"	/* "swapw $8, $9\n" */
	    "move	%1, $8\n"
	    ".set	pop\n"
	    : "+m"(*lock), "=r"(oldval)
	    : "r"((unsigned long)lock)
	    : "$8", "$9", "memory");

	return (oldval == 0 ? 1 /* success */ : 0 /* failure */);
}
/*
 * Read a 32-bit processor control register selected by reg
 * (mfcr emitted as .word).
 */
static __inline uint32_t
xlr_mfcr(uint32_t reg)
{
	uint32_t res;

	__asm__ __volatile__(
	    "move	$8, %1\n"
	    ".word	0x71090018\n"	/* mfcr $9, $8 */
	    "move	%0, $9\n"
	    : "=r"(res)
	    : "r"(reg):"$8", "$9");

	return (res);
}

/*
 * Write val to the 32-bit processor control register selected by reg
 * (mtcr emitted as .word).
 */
static __inline void
xlr_mtcr(uint32_t reg, uint32_t val)
{
	__asm__ __volatile__(
	    "move	$8, %0\n"
	    "move	$9, %1\n"
	    ".word	0x71280019\n"	/* mtcr $8, $9 */
	    :: "r"(val), "r"(reg)
	    : "$8", "$9");
}
/*
 * Atomic increment of an unsigned int: the XLR "ldaddwu" instruction
 * (emitted as .word) adds "value" to *addr and returns the previous
 * value of *addr.
 */
static __inline unsigned int
xlr_ldaddwu(unsigned int value, unsigned int *addr)
{
	__asm__ __volatile__(
	    ".set	push\n"
	    ".set	noreorder\n"
	    "move	$8, %2\n"
	    "move	$9, %3\n"
	    ".word	0x71280011\n"	/* ldaddwu $8, $9 */
	    "move	%0, $8\n"
	    ".set	pop\n"
	    : "=&r"(value), "+m"(*addr)
	    : "0"(value), "r" ((unsigned long)addr)
	    : "$8", "$9");

	return (value);
}
419 #if defined(__mips_n64)
420 static __inline uint32_t
421 xlr_paddr_lw(uint64_t paddr)
424 paddr |= 0x9800000000000000ULL;
425 return (*(uint32_t *)(uintptr_t)paddr);
428 static __inline uint64_t
429 xlr_paddr_ld(uint64_t paddr)
432 paddr |= 0x9800000000000000ULL;
433 return (*(uint64_t *)(uintptr_t)paddr);
436 #elif defined(__mips_n32)
437 static __inline uint32_t
438 xlr_paddr_lw(uint64_t paddr)
442 paddr |= 0x9800000000000000ULL;
443 __asm__ __volatile__(
454 static __inline uint64_t
455 xlr_paddr_ld(uint64_t paddr)
459 paddr |= 0x9800000000000000ULL;
460 __asm__ __volatile__(
471 #else /* o32 compilation */
472 static __inline uint32_t
473 xlr_paddr_lw(uint64_t paddr)
475 uint32_t addrh, addrl;
478 addrh = 0x98000000 | (paddr >> 32);
479 addrl = paddr & 0xffffffff;
481 __asm__ __volatile__(
484 "dsll32 $8, %1, 0 \n\t"
485 "dsll32 $9, %2, 0 \n\t" /* get rid of the */
486 "dsrl32 $9, $9, 0 \n\t" /* sign extend */
491 : "r"(addrh), "r"(addrl)
497 static __inline uint64_t
498 xlr_paddr_ld(uint64_t paddr)
500 uint32_t addrh, addrl;
503 addrh = 0x98000000 | (paddr >> 32);
504 addrl = paddr & 0xffffffff;
506 __asm__ __volatile__(
509 "dsll32 %0, %2, 0 \n\t"
510 "dsll32 %1, %3, 0 \n\t" /* get rid of the */
511 "dsrl32 %1, %1, 0 \n\t" /* sign extend */
516 : "=&r"(valh), "=&r"(vall)
517 : "r"(addrh), "r"(addrl));
519 return (((uint64_t)valh << 32) | vall);
524 * XXX: Not really needed in n32 or n64, retain for now
526 #if defined(__mips_n64) || defined(__mips_n32)
527 static __inline uint32_t
535 xlr_restore_kx(uint32_t sr)
539 #else /* !defined(__mips_n64) && !defined(__mips_n32) */
541 * o32 compilation, we will disable interrupts and enable
542 * the KX bit so that we can use XKPHYS to access any 40bit
545 static __inline uint32_t
548 uint32_t sr = mips_rd_status();
550 mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_KX);
555 xlr_restore_kx(uint32_t sr)
560 #endif /* defined(__mips_n64) || defined(__mips_n32) */
/*
 * XLR/XLS processors have maximum 8 cores, and maximum 4 threads
 * per core.
 */
#define	XLR_MAX_CORES	8
#define	XLR_NTHREADS	4

/*
 * FreeBSD can be started with few threads and cores turned off,
 * so have a hardware thread id to FreeBSD cpuid mapping.
 */
extern int xlr_ncores;			/* number of active cores */
extern int xlr_threads_per_core;	/* active threads per core */
extern uint32_t xlr_hw_thread_mask;	/* bitmask of active hw threads */
extern int xlr_cpuid_to_hwtid[];	/* FreeBSD cpuid -> hw thread id */
extern int xlr_hwtid_to_cpuid[];	/* hw thread id -> FreeBSD cpuid */