1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
43 * Interface to the Mips interrupts.
45 * <hr>$Revision: 70030 $<hr>
49 /* Backtrace is only available with the new toolchain. */
52 #endif /* __U_BOOT__ */
53 #include "cvmx-config.h"
55 #include "cvmx-interrupt.h"
56 #include "cvmx-sysinfo.h"
57 #include "cvmx-uart.h"
59 #include "cvmx-ebt3000.h"
60 #include "cvmx-coremask.h"
61 #include "cvmx-spinlock.h"
62 #include "cvmx-atomic.h"
63 #include "cvmx-app-init.h"
64 #include "cvmx-error.h"
65 #include "cvmx-app-hotplug.h"
66 #include "cvmx-profiler.h"
68 # include <octeon_mem_map.h>
70 # include <asm/arch/octeon_mem_map.h>
/* Low-level entry stubs implemented in assembly elsewhere in the project. */
72 EXTERN_ASM void cvmx_interrupt_stage1(void);
73 EXTERN_ASM void cvmx_debug_handler_stage1(void);
74 EXTERN_ASM void cvmx_interrupt_cache_error(void);
/* Global flag; initialized to 0 here. Presumably set while servicing an
   interrupt — not modified in the visible code; TODO confirm against the
   assembly stubs. */
76 int cvmx_interrupt_in_isr = 0;
/* Per-IRQ registration record kept in the shared interrupt state. */
78 struct __cvmx_interrupt_handler {
79 cvmx_interrupt_func_t handler; /**< One function to call per interrupt */
80 void *data; /**< User data per interrupt */
81 int handler_data; /**< Used internally: encodes (en_register << 6) | bit, see __cvmx_interrupt_set_mapping */
85 * Internal status the interrupt registration
/* Handler table for every IRQ plus the single handler used for
   non-interrupt exceptions (CAUSE.ExcCode != 0). */
89 struct __cvmx_interrupt_handler handlers[CVMX_IRQ_MAX];
90 cvmx_interrupt_exception_t exception_handler;
91 } cvmx_interrupt_state_t;
94 * Internal state the interrupt registration
/* Registration state shared by all cores (CVMX_SHARED). */
97 static CVMX_SHARED cvmx_interrupt_state_t cvmx_interrupt_state;
/* Serializes the default exception handler's console output across cores. */
98 static CVMX_SHARED cvmx_spinlock_t cvmx_interrupt_default_lock;
99 /* Incremented once first core processing is finished. */
100 static CVMX_SHARED int32_t cvmx_interrupt_initialize_flag;
101 #endif /* __U_BOOT__ */
103 #define ULL unsigned long long
/* Split a 64-bit value into printable 32-bit halves (the safe printf path
   prints 64-bit values as two %08x fields). */
105 #define HI32(data64) ((uint32_t)(data64 >> 32))
106 #define LO32(data64) ((uint32_t)(data64 & 0xFFFFFFFF))
/* MIPS ABI mnemonics for general-purpose registers $0..$31, indexed by
   register number; used when dumping the saved register frame. */
108 static const char reg_names[][32] = { "r0","at","v0","v1","a0","a1","a2","a3",
109 "t0","t1","t2","t3","t4","t5","t6","t7",
110 "s0","s1","s2","s3","s4","s5", "s6","s7",
111 "t8","t9", "k0","k1","gp","sp","s8","ra" };
114 * version of printf that works better in exception context.
/**
 * printf variant safe to call from exception/interrupt context: formats into
 * a local buffer, then writes the bytes directly to UART0 by polling the LSR,
 * bypassing any locking/buffered stdio the normal path might use.
 *
 * @param format printf-style format string
 * @param ...    Arguments for the format string
 */
118 void cvmx_safe_printf(const char *format, ...)
125 va_start(args, format);
127 count = vsnprintf(buffer, sizeof(buffer), format, args);
129 count = vsprintf(buffer, format, args);
135 cvmx_uart_lsr_t lsrval;
137 /* Spin until there is room */
140 lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(0));
141 #if !defined(CONFIG_OCTEON_SIM_SPEED)
/* Transmitter not empty yet: back off briefly instead of hammering the CSR. */
142 if (lsrval.s.temt == 0)
143 cvmx_wait(10000); /* Just to reduce the load on the system */
146 while (lsrval.s.temt == 0);
/* Emit CR before LF-style output; then the next character of the buffer. */
149 cvmx_write_csr(CVMX_MIO_UARTX_THR(0), '\r');
150 cvmx_write_csr(CVMX_MIO_UARTX_THR(0), *ptr++);
154 /* Textual descriptions of cause codes */
/* Human-readable descriptions indexed by COP0 CAUSE.ExcCode, i.e.
   (cause >> 2) & 0x1f — see __cvmx_interrupt_default_exception_handler.
   Gaps in the numbering are reserved codes (empty strings in the full table). */
155 static const char cause_names[][128] = {
157 /* 1 */ "TLB modification",
158 /* 2 */ "tlb load/fetch",
160 /* 4 */ "address exc, load/fetch",
161 /* 5 */ "address exc, store",
162 /* 6 */ "bus error, instruction fetch",
163 /* 7 */ "bus error, load/store",
165 /* 9 */ "breakpoint",
166 /* 10 */ "reserved instruction",
167 /* 11 */ "cop unusable",
168 /* 12 */ "arithmetic overflow",
171 /* 15 */ "floating point exc",
174 /* 18 */ "cop2 exception",
178 /* 22 */ "mdmx unusable",
180 /* 24 */ "machine check",
186 /* 30 */ "cache error",
193 * @param name Name of the value to print
194 * @param reg Value to print
/**
 * Print one 64-bit register value with its name, right-aligned, as two
 * 32-bit hex halves (the exception-safe printf path prints 64-bit values
 * as a pair of %08x fields).
 *
 * @param name Name of the value to print
 * @param reg  Value to print
 */
static inline void print_reg64(const char *name, uint64_t reg)
{
    unsigned int hi = (unsigned int)HI32(reg);
    unsigned int lo = (unsigned int)LO32(reg);

    cvmx_safe_printf("%16s: 0x%08x%08x\n", name, hi, lo);
}
203 * Dump all useful registers to the console
205 * @param registers CPU register to dump
/**
 * Dump all useful registers to the console: the 32 saved GPRs two per line
 * (register N alongside register N+16), then key COP0 registers.
 *
 * @param registers Saved register frame; indices 0-31 are the MIPS GPRs.
 */
207 static void __cvmx_interrupt_dump_registers(uint64_t *registers)
211 for (reg=0; reg<16; reg++)
213 r1 = registers[reg]; r2 = registers[reg+16];
214 cvmx_safe_printf("%3s ($%02d): 0x%08x%08x \t %3s ($%02d): 0x%08x%08x\n",
215 reg_names[reg], reg, (unsigned int)HI32(r1), (unsigned int)LO32(r1),
216 reg_names[reg+16], reg+16, (unsigned int)HI32(r2), (unsigned int)LO32(r2));
/* COP0 state read directly from the coprocessor, not from the saved frame. */
218 CVMX_MF_COP0 (r1, COP0_CAUSE);
219 print_reg64 ("COP0_CAUSE", r1);
220 CVMX_MF_COP0 (r2, COP0_STATUS);
221 print_reg64 ("COP0_STATUS", r2);
222 CVMX_MF_COP0 (r1, COP0_BADVADDR);
223 print_reg64 ("COP0_BADVADDR", r1);
224 CVMX_MF_COP0 (r2, COP0_EPC);
225 print_reg64 ("COP0_EPC", r2);
230 * Default exception handler. Prints out the exception
231 * cause decode and all relevant registers.
233 * @param registers Registers at time of the exception
237 #endif /* __U_BOOT__ */
/**
 * Default exception handler: decodes CAUSE.ExcCode to a textual description,
 * dumps the saved registers, and (toolchain permitting) prints a backtrace.
 * Output is serialized across cores with cvmx_interrupt_default_lock.
 *
 * @param registers Registers at time of the exception
 */
238 void __cvmx_interrupt_default_exception_handler(uint64_t *registers)
240 uint64_t trap_print_cause;
243 int modified_zero_pc = 0;
/* Show "Trap" on the EBT3000 LED display so a hung board is diagnosable. */
245 ebt3000_str_write("Trap");
246 cvmx_spinlock_lock(&cvmx_interrupt_default_lock);
248 CVMX_MF_COP0 (trap_print_cause, COP0_CAUSE);
/* Index the description table by CAUSE.ExcCode (bits 6:2). */
249 str = cause_names [(trap_print_cause >> 2) & 0x1f];
250 cvmx_safe_printf("Core %d: Unhandled Exception. Cause register decodes to:\n%s\n", (int)cvmx_get_core_num(), str && *str ? str : "Reserved exception cause");
251 cvmx_safe_printf("******************************************************************\n");
252 __cvmx_interrupt_dump_registers(registers);
256 cvmx_safe_printf("******************************************************************\n");
257 #if __GNUC__ >= 4 && !defined(OCTEON_DISABLE_BACKTRACE)
258 cvmx_safe_printf("Backtrace:\n\n");
/* registers[35] holds the saved PC here — TODO confirm frame layout. */
259 if (registers[35] == 0) {
260 modified_zero_pc = 1;
261 /* If PC is zero we probably did jalr $zero, in which case $31 - 8 is the call site. */
262 registers[35] = registers[31] - 8;
264 __octeon_print_backtrace_func ((__octeon_backtrace_printf_t)cvmx_safe_printf);
/* Undo the PC patch-up so the frame is left as we found it. */
265 if (modified_zero_pc)
267 cvmx_safe_printf("******************************************************************\n");
270 cvmx_spinlock_unlock(&cvmx_interrupt_default_lock);
272 if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
277 /* Interrupts are suppressed when we are in the exception
278 handler (because of SR[EXL]). Spin and poll the uart
279 status and see if the debugger is trying to stop us. */
280 cvmx_uart_lsr_t lsrval;
281 lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(cvmx_debug_uart));
285 /* Pulse the MCD0 signal. */
297 #endif /* __U_BOOT__ */
303 * Default interrupt handler if the user doesn't register one.
305 * @param irq_number IRQ that caused this interrupt
306 * @param registers Register at the time of the interrupt
307 * @param user_arg Unused optional user data
/**
 * Fallback handler installed for IRQs with no user-registered handler.
 * Logs the unexpected interrupt number and dumps the saved registers.
 *
 * @param irq_number IRQ that caused this interrupt
 * @param registers  Register state at the time of the interrupt
 * @param user_arg   Unused optional user data
 */
static void __cvmx_interrupt_default(int irq_number, uint64_t *registers, void *user_arg)
{
    cvmx_safe_printf("cvmx_interrupt_default: Received interrupt %d\n", irq_number);
    __cvmx_interrupt_dump_registers(registers);
}
316 * Map a ciu bit to an irq number. 0xff for invalid.
/* Reverse-lookup table: cvmx_ciu_to_irq[enable_register][bit] -> IRQ number.
   The same storage is aliased two ways: rows 0/1 are CIU EN0/EN1 on classic
   CIU chips, while rows 0..7 are the CIU2 summary categories on 68XX. */
321 static CVMX_SHARED uint8_t cvmx_ciu_to_irq[8][64];
322 #define cvmx_ciu_en0_to_irq cvmx_ciu_to_irq[0]
323 #define cvmx_ciu_en1_to_irq cvmx_ciu_to_irq[1]
324 #define cvmx_ciu2_wrkq_to_irq cvmx_ciu_to_irq[0]
325 #define cvmx_ciu2_wdog_to_irq cvmx_ciu_to_irq[1]
326 #define cvmx_ciu2_rml_to_irq cvmx_ciu_to_irq[2]
327 #define cvmx_ciu2_mio_to_irq cvmx_ciu_to_irq[3]
328 #define cvmx_ciu2_io_to_irq cvmx_ciu_to_irq[4]
329 #define cvmx_ciu2_mem_to_irq cvmx_ciu_to_irq[5]
330 #define cvmx_ciu2_eth_to_irq cvmx_ciu_to_irq[6]
331 #define cvmx_ciu2_gpio_to_irq cvmx_ciu_to_irq[7]
/* Separate tables for the CIU2 mailbox bits and the 61XX extra-timer bits,
   which do not fit the 8x64 aliased table above. */
333 static CVMX_SHARED uint8_t cvmx_ciu2_mbox_to_irq[64];
334 static CVMX_SHARED uint8_t cvmx_ciu_61xx_timer_to_irq[64];
/**
 * Record the two-way mapping between an IRQ number and its (enable register,
 * bit) position: handler_data packs (en << 6) | bit for the mask/unmask
 * paths, and the reverse tables let the dispatchers translate a set bit back
 * to an IRQ. Which reverse table is written depends on 'en' — the selecting
 * conditionals are not visible here; presumably en==8 selects the 61xx timer
 * table and the mbox case is CIU2-specific — TODO confirm.
 *
 * @param irq IRQ number being mapped
 * @param en  Enable-register index (see cvmx_ciu_to_irq rows)
 * @param bit Bit position within that register
 */
336 static void __cvmx_interrupt_set_mapping(int irq, unsigned int en, unsigned int bit)
338 cvmx_interrupt_state.handlers[irq].handler_data = (en << 6) | bit;
340 cvmx_ciu_to_irq[en][bit] = irq;
342 cvmx_ciu_61xx_timer_to_irq[bit] = irq;
344 cvmx_ciu2_mbox_to_irq[bit] = irq;
/* Software mirrors of the CIU enable registers. The dispatchers AND the
   hardware summary registers with these mirrors, and the mask/unmask code
   updates both the mirror and the CSR together. */
347 static uint64_t cvmx_interrupt_ciu_en0_mirror;
348 static uint64_t cvmx_interrupt_ciu_en1_mirror;
349 static uint64_t cvmx_interrupt_ciu_61xx_timer_mirror;
353 * Called for all Performance Counter interrupts. Handler for
356 * @param irq_number Interrupt number that we're being called for
357 * @param registers Registers at the time of the interrupt
358 * @param user_arg Unused user argument*
/**
 * Handler for performance-counter interrupts: reads COP0 performance counter
 * 0 and, if its overflow bit (bit 63) is set, collects a profiler sample.
 *
 * @param irq_number Interrupt number that we're being called for
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused user argument
 */
360 static void __cvmx_interrupt_perf(int irq_number, uint64_t *registers, void *user_arg)
362 uint64_t perf_counter;
363 CVMX_MF_COP0(perf_counter, COP0_PERFVALUE0);
364 if (perf_counter & (1ull << 63))
365 cvmx_collect_sample();
370 * Handler for interrupt lines 2 and 3. These are directly tied
371 * to the CIU. The handler queries the status of the CIU and
372 * calls the secondary handler for the CIU interrupt that
375 * @param irq_number Interrupt number that fired (2 or 3)
376 * @param registers Registers at the time of the interrupt
377 * @param user_arg Unused user argument
/**
 * Dispatcher for MIPS interrupt lines 2 and 3 on classic-CIU chips. Reads the
 * relevant CIU summary register, masks it with the software enable mirror,
 * picks the highest set bit (CVMX_DCLZ), translates it to an IRQ via the
 * reverse tables, and invokes the registered handler. Unmapped bits (0xff)
 * are masked off in both mirror and CSR to stop interrupt storms.
 *
 * @param irq_number Interrupt number that fired (2 or 3)
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused user argument
 */
379 static void __cvmx_interrupt_ciu(int irq_number, uint64_t *registers, void *user_arg)
385 int core = cvmx_get_core_num();
387 if (irq_number == CVMX_IRQ_MIPS2) {
388 /* Handle EN0 sources */
/* Two CIU entries per core: even index = IP2/EN0 bank for this core. */
389 ciu_offset = core * 2;
390 irq_mask = cvmx_read_csr(CVMX_CIU_INTX_SUM0(ciu_offset)) & cvmx_interrupt_ciu_en0_mirror;
391 CVMX_DCLZ(bit, irq_mask);
393 /* If ciu_int_sum1<sum2> is set, means its a timer interrupt */
394 if (bit == 51 && (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2))) {
/* SUM0 bit 51 is a second-level summary on 61XX/66XX: re-read SUM2 to find
   which of the extra timers fired. */
397 irq_mask = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP2(core)) & cvmx_interrupt_ciu_61xx_timer_mirror;
398 CVMX_DCLZ(bit, irq_mask);
400 /* Handle TIMER(4..9) interrupts */
401 if (bit <= 9 && bit >= 4) {
402 uint64_t irq = cvmx_ciu_61xx_timer_to_irq[bit];
403 if (cvmx_unlikely(irq == 0xff)) {
/* No handler mapped: disable the source so it cannot fire again. */
405 cvmx_interrupt_ciu_61xx_timer_mirror &= ~(1ull << bit);
406 cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2(core), cvmx_interrupt_ciu_61xx_timer_mirror);
409 struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
410 h->handler(irq, registers, h->data);
416 irq = cvmx_ciu_en0_to_irq[bit];
417 if (cvmx_unlikely(irq == 0xff)) {
/* Unmapped EN0 bit: mask it in mirror and hardware. */
419 cvmx_interrupt_ciu_en0_mirror &= ~(1ull << bit);
420 cvmx_write_csr(CVMX_CIU_INTX_EN0(ciu_offset), cvmx_interrupt_ciu_en0_mirror);
423 struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
424 h->handler(irq, registers, h->data);
428 /* Handle EN1 sources */
/* Odd index = IP3/EN1 bank for this core. */
429 ciu_offset = cvmx_get_core_num() * 2 + 1;
430 irq_mask = cvmx_read_csr(CVMX_CIU_INT_SUM1) & cvmx_interrupt_ciu_en1_mirror;
431 CVMX_DCLZ(bit, irq_mask);
434 irq = cvmx_ciu_en1_to_irq[bit];
435 if (cvmx_unlikely(irq == 0xff)) {
437 cvmx_interrupt_ciu_en1_mirror &= ~(1ull << bit);
438 cvmx_write_csr(CVMX_CIU_INTX_EN1(ciu_offset), cvmx_interrupt_ciu_en1_mirror);
441 struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
442 h->handler(irq, registers, h->data);
450 * Handler for interrupt line 3, the DPI_DMA will have different value
451 * per core, all other fields values are identical for different cores.
452 * These are directly tied to the CIU. The handler queries the status of
453 * the CIU and calls the secondary handler for the CIU interrupt that
456 * @param irq_number Interrupt number that fired (2 or 3)
457 * @param registers Registers at the time of the interrupt
458 * @param user_arg Unused user argument
/**
 * IP3 (interrupt line 3) dispatcher used on CN61XX-class chips, where the
 * SUM1 register is per-core. Same mirror-mask / highest-bit / reverse-table
 * dispatch scheme as __cvmx_interrupt_ciu, restricted to EN1 sources.
 *
 * @param irq_number Interrupt number that fired
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused user argument
 */
460 static void __cvmx_interrupt_ciu_cn61xx(int irq_number, uint64_t *registers, void *user_arg)
462 /* Handle EN1 sources */
463 int core = cvmx_get_core_num();
/* Odd CIU index = EN1 bank for this core. */
469 ciu_offset = core * 2 + 1;
470 irq_mask = cvmx_read_csr(CVMX_CIU_SUM1_PPX_IP3(core)) & cvmx_interrupt_ciu_en1_mirror;
471 CVMX_DCLZ(bit, irq_mask);
474 irq = cvmx_ciu_en1_to_irq[bit];
475 if (cvmx_unlikely(irq == 0xff)) {
/* Unmapped bit: mask in mirror and hardware so it cannot re-fire. */
477 cvmx_interrupt_ciu_en1_mirror &= ~(1ull << bit);
478 cvmx_write_csr(CVMX_CIU_INTX_EN1(ciu_offset), cvmx_interrupt_ciu_en1_mirror);
481 struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + irq;
482 h->handler(irq, registers, h->data);
489 * Handler for interrupt line 2 on 68XX. These are directly tied
490 * to the CIU2. The handler queries the status of the CIU and
491 * calls the secondary handler for the CIU interrupt that
494 * @param irq_number Interrupt number that fired (2 or 3)
495 * @param registers Registers at the time of the interrupt
496 * @param user_arg Unused user argument
/**
 * Interrupt line 2 dispatcher for 68XX-class chips with CIU2. Two-level
 * decode: the per-core SUM register selects a category (top set bit), then
 * either the mailbox bits (60+) are handled directly or the category's SRC
 * register is read and its top set bit is translated through
 * cvmx_ciu_to_irq[category][bit]. Relies on the SRC/EN registers for
 * consecutive categories being 0x1000 apart (asserted in
 * cvmx_interrupt_initialize).
 *
 * @param irq_number Interrupt number that fired
 * @param registers  Registers at the time of the interrupt
 * @param user_arg   Unused user argument
 */
498 static void __cvmx_interrupt_ciu2(int irq_number, uint64_t *registers, void *user_arg)
500 int sum_bit, src_bit;
502 uint64_t src_reg, src_val;
503 struct __cvmx_interrupt_handler *h;
504 int core = cvmx_get_core_num();
505 uint64_t sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core));
/* DCLZ counts leading zeros; convert to a bit index from the LSB. */
507 CVMX_DCLZ(sum_bit, sum);
508 sum_bit = 63 - sum_bit;
/* Mailbox interrupts live at summary bits 60..63 and have no SRC register. */
516 irq = cvmx_ciu2_mbox_to_irq[sum_bit - 60];
517 if (cvmx_unlikely(irq == 0xff)) {
/* Unmapped mailbox bit: disable via the write-1-to-clear enable register. */
519 uint64_t mask_reg = CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(core);
520 cvmx_write_csr(mask_reg, 1ull << (sum_bit - 60));
523 h = cvmx_interrupt_state.handlers + irq;
524 h->handler(irq, registers, h->data);
/* Non-mailbox category: locate its SRC register by the 0x1000 stride. */
535 src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core) + (0x1000 * sum_bit);
536 src_val = cvmx_read_csr(src_reg);
539 CVMX_DCLZ(src_bit, src_val);
540 src_bit = 63 - src_bit;
541 irq = cvmx_ciu_to_irq[sum_bit][src_bit];
542 if (cvmx_unlikely(irq == 0xff)) {
/* Unmapped source: disable it via the category's W1C enable register. */
544 uint64_t mask_reg = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(core) + (0x1000 * sum_bit);
545 cvmx_write_csr(mask_reg, 1ull << src_bit);
548 h = cvmx_interrupt_state.handlers + irq;
549 h->handler(irq, registers, h->data);
553 cvmx_safe_printf("Unknown CIU2 bit: %d\n", sum_bit);
557 /* Clear the source to reduce the chance for spurious interrupts. */
559 /* CN68XX has an CIU-15786 errata that accessing the ACK registers
560 * can stop interrupts from propagating
/* Errata workaround: read CIU_READY instead of ACK on CN68XX. */
563 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
564 cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
566 cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core));
572 * Called for all RML interrupts. This is usually an ECC error
574 * @param irq_number Interrupt number that we're being called for
575 * @param registers Registers at the time of the interrupt
576 * @param user_arg Unused user argument
/* Handler registered for RML interrupts (usually ECC errors, per the block
   comment above). Body not visible in this view — presumably defers to the
   cvmx-error reporting layer; TODO confirm. */
578 static void __cvmx_interrupt_ecc(int irq_number, uint64_t *registers, void *user_arg)
585 * Process an interrupt request
587 * @param registers Registers at time of interrupt / exception
588 * Registers 0-31 are standard MIPS, others specific to this routine
/**
 * Stage-2 interrupt/exception entry, called from the assembly stubs.
 * Classifies the trap via COP0 CAUSE: cache errors and bus errors are
 * reported specially, non-interrupt exceptions go to the registered
 * exception handler, and pending interrupt lines (CAUSE.IP & STATUS.IM)
 * are dispatched to their per-line handlers.
 *
 * @param registers Registers at time of interrupt / exception.
 *                  Registers 0-31 are standard MIPS, others specific to this
 *                  routine (e.g. [27]=k1 vector address, [34]=saved
 *                  DCACHE_ERR copy, [35]=PC).
 */
591 void cvmx_interrupt_do_irq(uint64_t *registers);
592 void cvmx_interrupt_do_irq(uint64_t *registers)
600 /* Determine the cause of the interrupt */
601 asm volatile ("dmfc0 %0,$13,0" : "=r" (cause));
602 asm volatile ("dmfc0 %0,$12,0" : "=r" (status));
603 /* In case of exception, clear all interrupts to avoid recursive interrupts.
604 Also clear EXL bit to display the correct PC value. */
605 if ((cause & 0x7c) == 0)
607 asm volatile ("dmtc0 %0, $12, 0" : : "r" (status & ~(0xff02)));
609 /* The assembly stub at each exception vector saves its address in k1 when
610 ** it calls the stage 2 handler. We use this to compute the exception vector
611 ** that brought us here */
612 exc_vec = (uint32_t)(registers[27] & 0x780); /* Mask off bits we need to ignore */
614 /* Check for cache errors. The cache errors go to a separate exception vector,
615 ** so we will only check these if we got here from a cache error exception, and
616 ** the ERL (error level) bit is set. */
617 i = cvmx_get_core_num();
618 if (exc_vec == 0x100 && (status & 0x4))
620 CVMX_MF_CACHE_ERR(cache_err);
622 /* Use copy of DCACHE_ERR register that early exception stub read */
/* Field layouts differ between CN3XXX/CN5XXX and later models. */
623 if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
625 if (registers[34] & 0x1)
626 cvmx_safe_printf("Dcache error detected: core: %d, way: %d, va 7:3: 0x%x\n", i, (int)(registers[34] >> 8) & 0x3f, (int)(registers[34] >> 3) & 0x1f);
627 else if (cache_err & 0x1)
628 cvmx_safe_printf("Icache error detected: core: %d, set: %d, way : %d, va 6:3 = 0x%x\n", i, (int)(cache_err >> 5) & 0x3f, (int)(cache_err >> 3) & 0x3, (int)(cache_err >> 11) & 0xf);
630 cvmx_safe_printf("Cache error exception: core %d\n", i);
634 if (registers[34] & 0x1)
635 cvmx_safe_printf("Dcache error detected: core: %d, way: %d, va 9:7: 0x%x\n", i, (int)(registers[34] >> 10) & 0x1f, (int)(registers[34] >> 7) & 0x3);
636 else if (cache_err & 0x1)
637 cvmx_safe_printf("Icache error detected: core: %d, way : %d, va 9:3 = 0x%x\n", i, (int)(cache_err >> 10) & 0x3f, (int)(cache_err >> 3) & 0x7f);
639 cvmx_safe_printf("Cache error exception: core %d\n", i);
/* Write-1-to-clear the error latches so the condition doesn't re-trap. */
641 CVMX_MT_DCACHE_ERR(1);
642 CVMX_MT_CACHE_ERR(0);
645 /* The bus error exceptions can occur due to DID timeout or write buffer,
646 check by reading COP0_CACHEERRD */
647 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
649 i = cvmx_get_core_num();
650 if (registers[34] & 0x4)
652 cvmx_safe_printf("Bus error detected due to DID timeout: core: %d\n", i);
653 CVMX_MT_DCACHE_ERR(4);
655 else if (registers[34] & 0x2)
657 cvmx_safe_printf("Bus error detected due to write buffer parity: core: %d\n", i);
658 CVMX_MT_DCACHE_ERR(2);
/* Non-zero ExcCode: a real exception, not an interrupt — hand it off. */
662 if ((cause & 0x7c) != 0)
664 cvmx_interrupt_state.exception_handler(registers);
665 goto return_from_interrupt;
668 /* Convert the cause into an active mask */
/* Pending AND enabled interrupt lines: (CAUSE.IP & STATUS.IM) bits 15:8. */
669 mask = ((cause & status) >> 8) & 0xff;
672 goto return_from_interrupt; /* Spurious interrupt */
679 struct __cvmx_interrupt_handler *h = cvmx_interrupt_state.handlers + i;
680 h->handler(i, registers, h->data);
681 goto return_from_interrupt;
685 /* We should never get here */
686 __cvmx_interrupt_default_exception_handler(registers);
688 return_from_interrupt:
689 /* Restore Status register before returning from exception. */
690 asm volatile ("dmtc0 %0, $12, 0" : : "r" (status));
/* Public mask/unmask entry points, bound at init time to either the classic
   CIU or the CIU2 implementation depending on the chip model. */
693 void (*cvmx_interrupt_mask_irq)(int irq_number);
694 void (*cvmx_interrupt_unmask_irq)(int irq_number);
/* Helper macro: clear (O set) or set (O clear) mask bit M in value V —
   continuation lines not visible here; TODO confirm exact expansion. */
696 #define CLEAR_OR_MASK(V,M,O) ({\
/**
 * Common CIU2 mask/unmask implementation. MIPS lines 0-7 are handled by
 * flipping the corresponding IM bit in COP0 Status; all other IRQs are
 * translated via handler_data to a (category, bit) pair and written to the
 * per-core W1C (mask) or W1S (unmask) enable register.
 *
 * @param irq_number IRQ to change
 * @param op         1 = mask (disable), 0 = unmask (enable)
 */
703 static void __cvmx_interrupt_ciu2_mask_unmask_irq(int irq_number, int op)
706 if (irq_number < 0 || irq_number >= CVMX_IRQ_MAX)
709 if (irq_number <= CVMX_IRQ_MIPS7) {
710 uint32_t flags, mask;
/* IM bits live in Status; update under disabled interrupts. */
712 flags = cvmx_interrupt_disable_save();
713 asm volatile ("mfc0 %0,$12,0" : "=r" (mask));
714 CLEAR_OR_MASK(mask, 1 << (8 + irq_number), op);
715 asm volatile ("mtc0 %0,$12,0" : : "r" (mask));
716 cvmx_interrupt_restore(flags);
720 int core = cvmx_get_core_num();
/* handler_data packs (enable register index << 6) | bit. */
722 int bit = cvmx_interrupt_state.handlers[irq_number].handler_data;
732 reg = CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(core);
734 reg = CVMX_CIU2_EN_PPX_IP2_MBOX_W1S(core);
737 reg = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(core) + (0x1000 * idx);
739 reg = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(core) + (0x1000 * idx);
741 cvmx_write_csr(reg, 1ull << bit);
/**
 * Disable delivery of the given IRQ on this core's CIU2.
 *
 * @param irq_number IRQ to mask
 */
static void __cvmx_interrupt_ciu2_mask_irq(int irq_number)
{
    __cvmx_interrupt_ciu2_mask_unmask_irq(irq_number, 1);
}
/**
 * Enable delivery of the given IRQ on this core's CIU2.
 *
 * @param irq_number IRQ to unmask
 */
static void __cvmx_interrupt_ciu2_unmask_irq(int irq_number)
{
    __cvmx_interrupt_ciu2_mask_unmask_irq(irq_number, 0);
}
/**
 * Common classic-CIU mask/unmask implementation. MIPS lines 0-7 toggle the
 * Status.IM bit; other IRQs update the appropriate software enable mirror
 * (EN0, EN1, or the 61xx timer EN2) and write it back to the CSR. The whole
 * update runs with interrupts disabled.
 *
 * @param irq_number IRQ to change
 * @param op         1 = mask (disable), 0 = unmask (enable)
 */
755 static void __cvmx_interrupt_ciu_mask_unmask_irq(int irq_number, int op)
759 if (irq_number < 0 || irq_number >= CVMX_IRQ_MAX)
762 flags = cvmx_interrupt_disable_save();
763 if (irq_number <= CVMX_IRQ_MIPS7) {
765 asm volatile ("mfc0 %0,$12,0" : "=r" (mask));
766 CLEAR_OR_MASK(mask, 1 << (8 + irq_number), op);
767 asm volatile ("mtc0 %0,$12,0" : : "r" (mask));
769 int ciu_bit, ciu_offset;
/* handler_data packs (enable register index << 6) | bit. */
770 int bit = cvmx_interrupt_state.handlers[irq_number].handler_data;
771 int is_timer_intr = bit >> 6;
772 int core = cvmx_get_core_num();
777 ciu_bit = bit & 0x3f;
778 ciu_offset = core * 2;
/* Index 8 marks the 61xx extra timers behind SUM2/EN2. */
780 if (is_timer_intr == 8)
782 CLEAR_OR_MASK(cvmx_interrupt_ciu_61xx_timer_mirror, 1ull << ciu_bit, op);
783 CLEAR_OR_MASK(cvmx_interrupt_ciu_en0_mirror, 1ull << 51, op); // SUM2 bit
784 cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2(core), cvmx_interrupt_ciu_61xx_timer_mirror);
786 else if (bit & 0x40) {
/* Bit 6 set means the source lives in EN1. */
789 CLEAR_OR_MASK(cvmx_interrupt_ciu_en1_mirror, 1ull << ciu_bit, op);
790 cvmx_write_csr(CVMX_CIU_INTX_EN1(ciu_offset), cvmx_interrupt_ciu_en1_mirror);
793 CLEAR_OR_MASK(cvmx_interrupt_ciu_en0_mirror, 1ull << ciu_bit, op);
794 cvmx_write_csr(CVMX_CIU_INTX_EN0(ciu_offset), cvmx_interrupt_ciu_en0_mirror);
798 cvmx_interrupt_restore(flags);
/**
 * Disable delivery of the given IRQ on this core's classic CIU.
 *
 * @param irq_number IRQ to mask
 */
static void __cvmx_interrupt_ciu_mask_irq(int irq_number)
{
    __cvmx_interrupt_ciu_mask_unmask_irq(irq_number, 1);
}
/**
 * Enable delivery of the given IRQ on this core's classic CIU.
 *
 * @param irq_number IRQ to unmask
 */
static void __cvmx_interrupt_ciu_unmask_irq(int irq_number)
{
    __cvmx_interrupt_ciu_mask_unmask_irq(irq_number, 0);
}
812 * Register an interrupt handler for the specified interrupt number.
814 * @param irq_number Interrupt number to register for See
815 * cvmx-interrupt.h for enumeration and description of sources.
816 * @param func Function to call on interrupt.
817 * @param user_arg User data to pass to the interrupt handler
/**
 * Register an interrupt handler for the specified interrupt number.
 * Out-of-range IRQ numbers are reported via cvmx_warn and the table is
 * left unchanged.
 *
 * @param irq_number Interrupt number to register for (see cvmx-interrupt.h)
 * @param func       Function to call on interrupt
 * @param user_arg   User data to pass to the interrupt handler
 */
819 void cvmx_interrupt_register(int irq_number, cvmx_interrupt_func_t func, void *user_arg)
821 if (irq_number >= CVMX_IRQ_MAX || irq_number < 0) {
822 cvmx_warn("cvmx_interrupt_register: Illegal irq_number %d\n", irq_number);
825 cvmx_interrupt_state.handlers[irq_number].handler = func;
826 cvmx_interrupt_state.handlers[irq_number].data = user_arg;
/**
 * Per-core classic-CIU setup: every core clears its enable registers and the
 * software mirrors; the first core (and only when not hot-plugging) also
 * builds the IRQ <-> (enable register, bit) mapping tables shared by all
 * cores.
 *
 * @param sys_info_ptr System info, used for the first-core test
 */
831 static void cvmx_interrupt_ciu_initialize(cvmx_sysinfo_t *sys_info_ptr)
834 int core = cvmx_get_core_num();
836 /* Disable all CIU interrupts by default */
837 cvmx_interrupt_ciu_en0_mirror = 0;
838 cvmx_interrupt_ciu_en1_mirror = 0;
839 cvmx_interrupt_ciu_61xx_timer_mirror = 0;
/* Clear both the IP2 (even index) and IP3 (odd index) banks for this core. */
840 cvmx_write_csr(CVMX_CIU_INTX_EN0(core * 2), cvmx_interrupt_ciu_en0_mirror);
841 cvmx_write_csr(CVMX_CIU_INTX_EN0((core * 2)+1), cvmx_interrupt_ciu_en0_mirror);
842 cvmx_write_csr(CVMX_CIU_INTX_EN1(core * 2), cvmx_interrupt_ciu_en1_mirror);
843 cvmx_write_csr(CVMX_CIU_INTX_EN1((core * 2)+1), cvmx_interrupt_ciu_en1_mirror);
844 if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2))
845 cvmx_write_csr(CVMX_CIU_EN2_PPX_IP2(cvmx_get_core_num()), cvmx_interrupt_ciu_61xx_timer_mirror)/* Only the first (non-hot-plugged) core builds the shared maps below. */;
847 if (!cvmx_coremask_first_core(sys_info_ptr->core_mask)|| is_core_being_hot_plugged())
850 /* On the first core, set up the maps */
851 for (i = 0; i < 64; i++) {
852 cvmx_ciu_en0_to_irq[i] = 0xff;
853 cvmx_ciu_en1_to_irq[i] = 0xff;
854 cvmx_ciu_61xx_timer_to_irq[i] = 0xff;
/* EN0 sources: workqueue groups, GPIO, mailboxes, UARTs, PCI, etc. */
858 for (i = 0; i < 16; i++)
859 __cvmx_interrupt_set_mapping(CVMX_IRQ_WORKQ0 + i, 0, i);
861 for (i = 0; i < 16; i++)
862 __cvmx_interrupt_set_mapping(CVMX_IRQ_GPIO0 + i, 0, i + 16);
865 for (i = 0; i < 2; i++)
866 __cvmx_interrupt_set_mapping(CVMX_IRQ_MBOX0 + i, 0, i + 32);
869 __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + 0, 0, 34);
870 __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + 1, 0, 35);
871 __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + 2, 1, 16);
874 for (i = 0; i < 4; i++)
875 __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_INT0 + i, 0, i + 36);
878 for (i = 0; i < 4; i++)
879 __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_MSI0 + i, 0, i + 40);
882 __cvmx_interrupt_set_mapping(CVMX_IRQ_TWSI0 + 0, 0, 45);
883 __cvmx_interrupt_set_mapping(CVMX_IRQ_TWSI0 + 1, 0, 59);
886 __cvmx_interrupt_set_mapping(CVMX_IRQ_RML, 0, 46);
887 __cvmx_interrupt_set_mapping(CVMX_IRQ_TRACE0, 0, 47);
890 for (i = 0; i < 2; i++)
891 __cvmx_interrupt_set_mapping(CVMX_IRQ_GMX_DRP0 + i, 0, i + 48);
893 __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD_DRP, 0, 50);
894 __cvmx_interrupt_set_mapping(CVMX_IRQ_KEY_ZERO, 0, 51);
897 for (i = 0; i < 4; i++)
898 __cvmx_interrupt_set_mapping(CVMX_IRQ_TIMER0 + i, 0, i + 52);
/* 61xx extra timers: 'en' index 8 routes them through the SUM2/EN2 path. */
901 for(i = 0; i < 6; i++)
902 __cvmx_interrupt_set_mapping(CVMX_IRQ_TIMER4 + i, 8, i + 4);
904 __cvmx_interrupt_set_mapping(CVMX_IRQ_USB0 + 0, 0, 56);
905 __cvmx_interrupt_set_mapping(CVMX_IRQ_USB0 + 1, 1, 17);
906 __cvmx_interrupt_set_mapping(CVMX_IRQ_PCM, 0, 57);
907 __cvmx_interrupt_set_mapping(CVMX_IRQ_MPI, 0, 58);
908 __cvmx_interrupt_set_mapping(CVMX_IRQ_POWIQ, 0, 60);
909 __cvmx_interrupt_set_mapping(CVMX_IRQ_IPDPPTHR, 0, 61);
910 __cvmx_interrupt_set_mapping(CVMX_IRQ_MII0 + 0, 0, 62);
911 __cvmx_interrupt_set_mapping(CVMX_IRQ_MII0 + 1, 1, 18);
912 __cvmx_interrupt_set_mapping(CVMX_IRQ_BOOTDMA, 0, 63);
/* EN1 sources: per-core watchdogs then block-level units. */
915 for (i = 0; i < 16; i++)
916 __cvmx_interrupt_set_mapping(CVMX_IRQ_WDOG0 + i, 1, i);
918 __cvmx_interrupt_set_mapping(CVMX_IRQ_NAND, 1, 19);
919 __cvmx_interrupt_set_mapping(CVMX_IRQ_MIO, 1, 20);
920 __cvmx_interrupt_set_mapping(CVMX_IRQ_IOB, 1, 21);
921 __cvmx_interrupt_set_mapping(CVMX_IRQ_FPA, 1, 22);
922 __cvmx_interrupt_set_mapping(CVMX_IRQ_POW, 1, 23);
923 __cvmx_interrupt_set_mapping(CVMX_IRQ_L2C, 1, 24);
924 __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD, 1, 25);
925 __cvmx_interrupt_set_mapping(CVMX_IRQ_PIP, 1, 26);
926 __cvmx_interrupt_set_mapping(CVMX_IRQ_PKO, 1, 27);
927 __cvmx_interrupt_set_mapping(CVMX_IRQ_ZIP, 1, 28);
928 __cvmx_interrupt_set_mapping(CVMX_IRQ_TIM, 1, 29);
929 __cvmx_interrupt_set_mapping(CVMX_IRQ_RAD, 1, 30);
930 __cvmx_interrupt_set_mapping(CVMX_IRQ_KEY, 1, 31);
931 __cvmx_interrupt_set_mapping(CVMX_IRQ_DFA, 1, 32);
932 __cvmx_interrupt_set_mapping(CVMX_IRQ_USBCTL, 1, 33);
933 __cvmx_interrupt_set_mapping(CVMX_IRQ_SLI, 1, 34);
934 __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI, 1, 35);
935 __cvmx_interrupt_set_mapping(CVMX_IRQ_AGX0, 1, 36);
936 __cvmx_interrupt_set_mapping(CVMX_IRQ_AGX0 + 1, 1, 37);
937 __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI_DMA, 1, 40);
938 __cvmx_interrupt_set_mapping(CVMX_IRQ_AGL, 1, 46);
939 __cvmx_interrupt_set_mapping(CVMX_IRQ_PTP, 1, 47);
940 __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM0, 1, 48);
941 __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM1, 1, 49);
942 __cvmx_interrupt_set_mapping(CVMX_IRQ_SRIO0, 1, 50);
943 __cvmx_interrupt_set_mapping(CVMX_IRQ_SRIO1, 1, 51);
944 __cvmx_interrupt_set_mapping(CVMX_IRQ_LMC0, 1, 52);
945 __cvmx_interrupt_set_mapping(CVMX_IRQ_DFM, 1, 56);
946 __cvmx_interrupt_set_mapping(CVMX_IRQ_SRIO2, 1, 60);
947 __cvmx_interrupt_set_mapping(CVMX_IRQ_RST, 1, 63);
/**
 * Per-core CIU2 (68XX) setup: every core zeroes all of its IP2/IP3/IP4
 * enable registers across every category; the first core (and only when not
 * hot-plugging) also builds the IRQ <-> (category, bit) mapping tables.
 *
 * @param sys_info_ptr System info, used for the first-core test
 */
950 static void cvmx_interrupt_ciu2_initialize(cvmx_sysinfo_t *sys_info_ptr)
954 /* Disable all CIU2 interrupts by default */
956 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_WRKQ(cvmx_get_core_num()), 0);
957 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_WRKQ(cvmx_get_core_num()), 0);
958 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_WRKQ(cvmx_get_core_num()), 0);
959 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_WDOG(cvmx_get_core_num()), 0);
960 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_WDOG(cvmx_get_core_num()), 0);
961 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_WDOG(cvmx_get_core_num()), 0);
962 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_RML(cvmx_get_core_num()), 0);
963 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_RML(cvmx_get_core_num()), 0);
964 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_RML(cvmx_get_core_num()), 0);
965 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MIO(cvmx_get_core_num()), 0);
966 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_MIO(cvmx_get_core_num()), 0);
967 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_MIO(cvmx_get_core_num()), 0);
968 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_IO(cvmx_get_core_num()), 0);
969 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_IO(cvmx_get_core_num()), 0);
970 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_IO(cvmx_get_core_num()), 0);
971 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MEM(cvmx_get_core_num()), 0);
972 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_MEM(cvmx_get_core_num()), 0);
973 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_MEM(cvmx_get_core_num()), 0);
974 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_PKT(cvmx_get_core_num()), 0);
975 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_PKT(cvmx_get_core_num()), 0);
976 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_PKT(cvmx_get_core_num()), 0);
977 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_GPIO(cvmx_get_core_num()), 0);
978 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_GPIO(cvmx_get_core_num()), 0);
979 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_GPIO(cvmx_get_core_num()), 0);
980 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP2_MBOX(cvmx_get_core_num()), 0);
981 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP3_MBOX(cvmx_get_core_num()), 0);
982 cvmx_write_csr(CVMX_CIU2_EN_PPX_IP4_MBOX(cvmx_get_core_num()), 0);
/* Only the first (non-hot-plugged) core builds the shared maps below. */
984 if (!cvmx_coremask_first_core(sys_info_ptr->core_mask) || is_core_being_hot_plugged())
987 /* On the first core, set up the maps */
988 for (i = 0; i < 64; i++) {
989 cvmx_ciu2_wrkq_to_irq[i] = 0xff;
990 cvmx_ciu2_wdog_to_irq[i] = 0xff;
991 cvmx_ciu2_rml_to_irq[i] = 0xff;
992 cvmx_ciu2_mio_to_irq[i] = 0xff;
993 cvmx_ciu2_io_to_irq[i] = 0xff;
994 cvmx_ciu2_mem_to_irq[i] = 0xff;
995 cvmx_ciu2_eth_to_irq[i] = 0xff;
996 cvmx_ciu2_gpio_to_irq[i] = 0xff;
997 cvmx_ciu2_mbox_to_irq[i] = 0xff;
/* 'en' here is the CIU2 category index (see cvmx_ciu_to_irq rows);
   60 is the special mailbox category handled by __cvmx_interrupt_ciu2. */
1001 for (i = 0; i < 64; i++)
1002 __cvmx_interrupt_set_mapping(CVMX_IRQ_WORKQ0 + i, 0, i);
1005 for (i = 0; i < 16; i++)
1006 __cvmx_interrupt_set_mapping(CVMX_IRQ_GPIO0 + i, 7, i);
1009 for (i = 0; i < 4; i++)
1010 __cvmx_interrupt_set_mapping(CVMX_IRQ_MBOX0 + i, 60, i);
1013 for (i = 0; i < 2; i++)
1014 __cvmx_interrupt_set_mapping(CVMX_IRQ_UART0 + i, 3, 36 + i);
1017 for (i = 0; i < 4; i++)
1018 __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_INT0 + i, 4, 16 + i);
1021 for (i = 0; i < 4; i++)
1022 __cvmx_interrupt_set_mapping(CVMX_IRQ_PCI_MSI0 + i, 4, 8 + i);
1025 for (i = 0; i < 2; i++)
1026 __cvmx_interrupt_set_mapping(CVMX_IRQ_TWSI0 + i, 3, 32 + i);
1029 for (i = 0; i < 4; i++)
1030 __cvmx_interrupt_set_mapping(CVMX_IRQ_TRACE0 + i, 2, 52 + i);
1033 for (i = 0; i < 5; i++)
1034 __cvmx_interrupt_set_mapping(CVMX_IRQ_GMX_DRP0 + i, 6, 8 + i);
1036 __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD_DRP, 3, 2);
1039 for (i = 0; i < 4; i++)
1040 __cvmx_interrupt_set_mapping(CVMX_IRQ_TIMER0 + i, 3, 8 + i);
1042 __cvmx_interrupt_set_mapping(CVMX_IRQ_USB0, 3, 44);
1043 __cvmx_interrupt_set_mapping(CVMX_IRQ_IPDPPTHR, 3, 0);
1044 __cvmx_interrupt_set_mapping(CVMX_IRQ_MII0, 6, 40);
1045 __cvmx_interrupt_set_mapping(CVMX_IRQ_BOOTDMA, 3, 18);
1048 for (i = 0; i < 32; i++)
1049 __cvmx_interrupt_set_mapping(CVMX_IRQ_WDOG0 + i, 1, i);
1051 __cvmx_interrupt_set_mapping(CVMX_IRQ_NAND, 3, 16);
1052 __cvmx_interrupt_set_mapping(CVMX_IRQ_MIO, 3, 17);
1053 __cvmx_interrupt_set_mapping(CVMX_IRQ_IOB, 2, 0);
1054 __cvmx_interrupt_set_mapping(CVMX_IRQ_FPA, 2, 4);
1055 __cvmx_interrupt_set_mapping(CVMX_IRQ_POW, 2, 16);
1056 __cvmx_interrupt_set_mapping(CVMX_IRQ_L2C, 2, 48);
1057 __cvmx_interrupt_set_mapping(CVMX_IRQ_IPD, 2, 5);
1058 __cvmx_interrupt_set_mapping(CVMX_IRQ_PIP, 2, 6);
1059 __cvmx_interrupt_set_mapping(CVMX_IRQ_PKO, 2, 7);
1060 __cvmx_interrupt_set_mapping(CVMX_IRQ_ZIP, 2, 24);
1061 __cvmx_interrupt_set_mapping(CVMX_IRQ_TIM, 2, 28);
1062 __cvmx_interrupt_set_mapping(CVMX_IRQ_RAD, 2, 29);
1063 __cvmx_interrupt_set_mapping(CVMX_IRQ_KEY, 2, 30);
1064 __cvmx_interrupt_set_mapping(CVMX_IRQ_DFA, 2, 40);
1065 __cvmx_interrupt_set_mapping(CVMX_IRQ_USBCTL, 3, 40);
1066 __cvmx_interrupt_set_mapping(CVMX_IRQ_SLI, 2, 32);
1067 __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI, 2, 33);
1068 __cvmx_interrupt_set_mapping(CVMX_IRQ_DPI_DMA, 2, 36);
1071 for (i = 0; i < 5; i++)
1072 __cvmx_interrupt_set_mapping(CVMX_IRQ_AGX0 + i, 6, i);
1074 __cvmx_interrupt_set_mapping(CVMX_IRQ_AGL, 6, 32);
1075 __cvmx_interrupt_set_mapping(CVMX_IRQ_PTP, 3, 48);
1076 __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM0, 4, 32);
/* NOTE(review): PEM0 and PEM1 both map to (4, 32) here — looks like a
   copy/paste slip (PEM1 is plausibly bit 33); confirm against the 68XX HRM. */
1077 __cvmx_interrupt_set_mapping(CVMX_IRQ_PEM1, 4, 32);
1080 for (i = 0; i < 4; i++)
1081 __cvmx_interrupt_set_mapping(CVMX_IRQ_LMC0 + i, 5, i);
1083 __cvmx_interrupt_set_mapping(CVMX_IRQ_RST, 3, 63);
1084 __cvmx_interrupt_set_mapping(CVMX_IRQ_ILK, 6, 48);
1088 * Initialize the interrupt routine and copy the low level
1089 * stub into the correct interrupt vector. This is called
1090 * automatically during application startup.
void cvmx_interrupt_initialize(void)
{
    void *low_level_loc;
    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
    int i;

    /* One-time global setup is done only by the first core of the coremask
       (and never by a core that is being hot plugged back in). */
    if (cvmx_coremask_first_core(sys_info_ptr->core_mask) && !is_core_being_hot_plugged()) {
#ifndef CVMX_ENABLE_CSR_ADDRESS_CHECKING
        /* We assume this relationship between the registers. */
        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x1000 == CVMX_CIU2_SRC_PPX_IP2_WDOG(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x2000 == CVMX_CIU2_SRC_PPX_IP2_RML(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x3000 == CVMX_CIU2_SRC_PPX_IP2_MIO(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x4000 == CVMX_CIU2_SRC_PPX_IP2_IO(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x5000 == CVMX_CIU2_SRC_PPX_IP2_MEM(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x6000 == CVMX_CIU2_SRC_PPX_IP2_PKT(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_SRC_PPX_IP2_WRKQ(0) + 0x7000 == CVMX_CIU2_SRC_PPX_IP2_GPIO(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x1000 == CVMX_CIU2_EN_PPX_IP2_WDOG_W1C(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x2000 == CVMX_CIU2_EN_PPX_IP2_RML_W1C(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x3000 == CVMX_CIU2_EN_PPX_IP2_MIO_W1C(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x4000 == CVMX_CIU2_EN_PPX_IP2_IO_W1C(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x5000 == CVMX_CIU2_EN_PPX_IP2_MEM_W1C(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x6000 == CVMX_CIU2_EN_PPX_IP2_PKT_W1C(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(0) + 0x7000 == CVMX_CIU2_EN_PPX_IP2_GPIO_W1C(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x1000 == CVMX_CIU2_EN_PPX_IP2_WDOG_W1S(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x2000 == CVMX_CIU2_EN_PPX_IP2_RML_W1S(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x3000 == CVMX_CIU2_EN_PPX_IP2_MIO_W1S(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x4000 == CVMX_CIU2_EN_PPX_IP2_IO_W1S(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x5000 == CVMX_CIU2_EN_PPX_IP2_MEM_W1S(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x6000 == CVMX_CIU2_EN_PPX_IP2_PKT_W1S(0));
        CVMX_BUILD_ASSERT(CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(0) + 0x7000 == CVMX_CIU2_EN_PPX_IP2_GPIO_W1S(0));
#endif /* !CVMX_ENABLE_CSR_ADDRESS_CHECKING */

        /* Point every IRQ at the default (spurious) handler; handler_data of
           -1 marks "no mapping assigned yet". */
        for (i = 0; i < CVMX_IRQ_MAX; i++) {
            cvmx_interrupt_state.handlers[i].handler = __cvmx_interrupt_default;
            cvmx_interrupt_state.handlers[i].data = NULL;
            cvmx_interrupt_state.handlers[i].handler_data = -1;
        }
    }

    /* Select the CIU flavor for this chip and hook the chained CIU
       dispatchers onto the MIPS CP0 interrupt lines. */
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
    {
        cvmx_interrupt_mask_irq = __cvmx_interrupt_ciu2_mask_irq;
        cvmx_interrupt_unmask_irq = __cvmx_interrupt_ciu2_unmask_irq;
        cvmx_interrupt_ciu2_initialize(sys_info_ptr);
        /* Add an interrupt handler for the chained CIU interrupt */
        cvmx_interrupt_register(CVMX_IRQ_MIPS2, __cvmx_interrupt_ciu2, NULL);
    }
    else if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2))
    {
        cvmx_interrupt_mask_irq = __cvmx_interrupt_ciu_mask_irq;
        cvmx_interrupt_unmask_irq = __cvmx_interrupt_ciu_unmask_irq;
        cvmx_interrupt_ciu_initialize(sys_info_ptr);

        /* Add interrupt handlers for the chained CIU interrupts */
        cvmx_interrupt_register(CVMX_IRQ_MIPS2, __cvmx_interrupt_ciu, NULL);
        cvmx_interrupt_register(CVMX_IRQ_MIPS3, __cvmx_interrupt_ciu_cn61xx, NULL);
    }
    else
    {
        cvmx_interrupt_mask_irq = __cvmx_interrupt_ciu_mask_irq;
        cvmx_interrupt_unmask_irq = __cvmx_interrupt_ciu_unmask_irq;
        cvmx_interrupt_ciu_initialize(sys_info_ptr);

        /* Add interrupt handlers for the chained CIU interrupts */
        cvmx_interrupt_register(CVMX_IRQ_MIPS2, __cvmx_interrupt_ciu, NULL);
        cvmx_interrupt_register(CVMX_IRQ_MIPS3, __cvmx_interrupt_ciu, NULL);
    }

    /* Move performance counter interrupts to IRQ 6*/
    cvmx_update_perfcnt_irq();

    /* Add an interrupt handler for Perf counter interrupts */
    cvmx_interrupt_register(CVMX_IRQ_MIPS6, __cvmx_interrupt_perf, NULL);

    if (cvmx_coremask_first_core(sys_info_ptr->core_mask) && !is_core_being_hot_plugged())
    {
        cvmx_interrupt_state.exception_handler = __cvmx_interrupt_default_exception_handler;

        /* Copy the low-level stage-1 stub over the MIPS exception vectors in
           KSEG0 (0x80 TLB refill, 0x100 cache error, 0x180 general exception,
           0x200 interrupt). */
        low_level_loc = CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,sys_info_ptr->exception_base_addr));
        memcpy(low_level_loc + 0x80, (void*)cvmx_interrupt_stage1, 0x80);
        memcpy(low_level_loc + 0x100, (void*)cvmx_interrupt_cache_error, 0x80);
        memcpy(low_level_loc + 0x180, (void*)cvmx_interrupt_stage1, 0x80);
        memcpy(low_level_loc + 0x200, (void*)cvmx_interrupt_stage1, 0x80);

        /* Make sure the locations used to count Icache and Dcache exceptions
           starts out as zero */
        cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 8), 0);
        cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 16), 0);
        cvmx_write64_uint64(CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, 24), 0);

        /* Add an interrupt handler for ECC failures */
        if (cvmx_error_initialize(0 /* || CVMX_ERROR_FLAGS_ECC_SINGLE_BIT */))
            cvmx_warn("cvmx_error_initialize() failed\n");

        /* Enable PIP/IPD, POW, PKO, FPA, NAND, KEY, RAD, L2C, LMC, GMX, AGL,
           DFM, DFA, error handling interrupts. */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        {
            for (i = 0; i < 5; i++)
            {
                cvmx_interrupt_register(CVMX_IRQ_AGX0+i, __cvmx_interrupt_ecc, NULL);
                cvmx_interrupt_unmask_irq(CVMX_IRQ_AGX0+i);
            }
            cvmx_interrupt_register(CVMX_IRQ_NAND, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_NAND);
            cvmx_interrupt_register(CVMX_IRQ_MIO, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_MIO);
            cvmx_interrupt_register(CVMX_IRQ_FPA, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_FPA);
            cvmx_interrupt_register(CVMX_IRQ_IPD, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_IPD);
            cvmx_interrupt_register(CVMX_IRQ_PIP, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_PIP);
            cvmx_interrupt_register(CVMX_IRQ_POW, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_POW);
            cvmx_interrupt_register(CVMX_IRQ_L2C, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_L2C);
            cvmx_interrupt_register(CVMX_IRQ_PKO, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_PKO);
            cvmx_interrupt_register(CVMX_IRQ_ZIP, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_ZIP);
            cvmx_interrupt_register(CVMX_IRQ_RAD, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_RAD);
            cvmx_interrupt_register(CVMX_IRQ_KEY, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_KEY);
            /* Before enabling SLI interrupt clear any RML_TO interrupt */
            if (cvmx_read_csr(CVMX_PEXP_SLI_INT_SUM) & 0x1)
            {
                cvmx_safe_printf("clearing pending SLI_INT_SUM[RML_TO] interrupt (ignore)\n");
                cvmx_write_csr(CVMX_PEXP_SLI_INT_SUM, 1);
            }
            cvmx_interrupt_register(CVMX_IRQ_SLI, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_SLI);
            cvmx_interrupt_register(CVMX_IRQ_DPI, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_DPI);
            cvmx_interrupt_register(CVMX_IRQ_DFA, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_DFA);
            cvmx_interrupt_register(CVMX_IRQ_AGL, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_AGL);
            for (i = 0; i < 4; i++)
            {
                cvmx_interrupt_register(CVMX_IRQ_LMC0+i, __cvmx_interrupt_ecc, NULL);
                cvmx_interrupt_unmask_irq(CVMX_IRQ_LMC0+i);
            }
            cvmx_interrupt_register(CVMX_IRQ_DFM, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_DFM);
            cvmx_interrupt_register(CVMX_IRQ_RST, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_RST);
            cvmx_interrupt_register(CVMX_IRQ_ILK, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_ILK);
        }
        else
        {
            /* Pre-CN68XX chips report RML errors on a single summary IRQ. */
            cvmx_interrupt_register(CVMX_IRQ_RML, __cvmx_interrupt_ecc, NULL);
            cvmx_interrupt_unmask_irq(CVMX_IRQ_RML);
        }

        /* Publish completion so the other cores can proceed. */
        cvmx_atomic_set32(&cvmx_interrupt_initialize_flag, 1);
    }
    else
    {
        while (!cvmx_atomic_get32(&cvmx_interrupt_initialize_flag))
            ; /* Wait for first core to finish above. */
    }

    /* Every core unmasks its own chained CIU line(s). */
    if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
        cvmx_interrupt_unmask_irq(CVMX_IRQ_MIPS2);
    } else {
        cvmx_interrupt_unmask_irq(CVMX_IRQ_MIPS2);
        cvmx_interrupt_unmask_irq(CVMX_IRQ_MIPS3);
    }

    /* Invalidate Icache so this core fetches the freshly copied vectors. */
    CVMX_ICACHE_INVALIDATE;

    /* Enable interrupts for each core (bit0 of COP0 Status) */
    cvmx_interrupt_restore(1);
}
1274 * Set the exception handler for all non interrupt sources.
1276 * @param handler New exception handler
1277 * @return Old exception handler
1279 cvmx_interrupt_exception_t cvmx_interrupt_set_exception(cvmx_interrupt_exception_t handler)
1281 cvmx_interrupt_exception_t result = cvmx_interrupt_state.exception_handler;
1282 cvmx_interrupt_state.exception_handler = handler;
1286 #endif /* !__U_BOOT__ */