2 * Copyright (c) 2013 Qualcomm Atheros, Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
10 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
12 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
13 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
14 * PERFORMANCE OF THIS SOFTWARE.
20 #include "ah_internal.h"
22 #include "ar9300/ar9300.h"
23 #include "ar9300/ar9300reg.h"
24 #include "ar9300/ar9300phy.h"
/*
 * Poll both interrupt-cause registers to decide whether this NIC is the
 * source of a pending interrupt.  Reads are screened against
 * AR_INTR_SPURIOUS: some platforms deliver our ISR before the card is
 * powered, in which case register reads return all-ones garbage.
 */
27 * Checks to see if an interrupt is pending on our NIC
29 * Returns: TRUE if an interrupt is pending
33 ar9300_is_interrupt_pending(struct ath_hal *ah)
35 u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
39 * Some platforms trigger our ISR before applying power to
40 * the card, so make sure.
42 host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
43 if ((host_isr & AR_INTR_ASYNC_USED) && (host_isr != AR_INTR_SPURIOUS)) {
/*
 * No valid async cause; fall back to the synchronous cause register,
 * masked with the per-chip default sync-enable bits plus the GPIO bits.
 */
47 host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE));
/* Poseidon excludes HOST1 parity errors; Wasp (AR9340) has its own mask. */
48 if (AR_SREV_POSEIDON(ah)) {
49 sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
51 else if (AR_SREV_WASP(ah)) {
52 sync_en_def = AR9340_INTR_SYNC_DEFAULT;
55 if ((host_isr & (sync_en_def | AR_INTR_SYNC_MASK_GPIO)) &&
56 (host_isr != AR_INTR_SPURIOUS)) {
64 * Reads the Interrupt Status Register value from the NIC, thus deasserting
65 * the interrupt line, and returns both the masked and unmasked mapped ISR
66 * values. The value returned is mapped to abstract the hw-specific bit
67 * locations in the Interrupt Status Register.
69 * Returns: A hardware-abstracted bitmap of all non-masked-out
70 * interrupts pending, as well as an unmasked value
/*
 * Shift counts used to map AR_ISR_S2 hardware bits onto HAL_INT_* bits.
 * Note GTT/CST shift LEFT while the others shift RIGHT (see the mask2
 * translation below), so the map value alone does not tell direction.
 */
72 #define MAP_ISR_S2_HAL_CST 6 /* Carrier sense timeout */
73 #define MAP_ISR_S2_HAL_GTT 6 /* Global transmit timeout */
74 #define MAP_ISR_S2_HAL_TIM 3 /* TIM */
75 #define MAP_ISR_S2_HAL_CABEND 0 /* CABEND */
76 #define MAP_ISR_S2_HAL_DTIMSYNC 7 /* DTIMSYNC */
77 #define MAP_ISR_S2_HAL_DTIM 7 /* DTIM */
78 #define MAP_ISR_S2_HAL_TSFOOR 4 /* Rx TSF out of range */
79 #define MAP_ISR_S2_HAL_BBPANIC 6 /* Panic watchdog IRQ from BB */
81 ar9300_get_pending_interrupts(
88 struct ath_hal_9300 *ahp = AH9300(ah);
89 HAL_BOOL ret_val = AH_TRUE;
92 u_int32_t sync_cause = 0;
93 u_int32_t async_cause;
94 u_int32_t msi_pend_addr_mask = 0;
95 u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
96 HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;
/*
 * Fast path for dedicated MSI vectors: the vector identity alone tells
 * us the cause, so acknowledge the corresponding AR_ISR bits and return
 * without reading the full cause registers.  Only the "misc" vector
 * falls through to the full cause-determination path below.
 */
101 if (HAL_INT_MSI == type) {
102 if (msi == HAL_MSIVEC_RXHP) {
103 OS_REG_WRITE(ah, AR_ISR, AR_ISR_HP_RXOK);
104 *masked = HAL_INT_RXHP;
106 } else if (msi == HAL_MSIVEC_RXLP) {
107 OS_REG_WRITE(ah, AR_ISR,
108 (AR_ISR_LP_RXOK | AR_ISR_RXMINTR | AR_ISR_RXINTM));
109 *masked = HAL_INT_RXLP;
111 } else if (msi == HAL_MSIVEC_TX) {
112 OS_REG_WRITE(ah, AR_ISR, AR_ISR_TXOK);
113 *masked = HAL_INT_TX;
115 } else if (msi == HAL_MSIVEC_MISC) {
117 * For the misc MSI event fall through and determine the cause.
123 /* Make sure mac interrupt is pending in async interrupt cause register */
124 async_cause = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
125 if (async_cause & AR_INTR_ASYNC_USED) {
127 * RTC may not be on since it runs on a slow 32khz clock
128 * so check its status to be sure
131 (OS_REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
134 isr = OS_REG_READ(ah, AR_ISR);
/* Select the per-chip default sync-enable mask (same logic as above). */
138 if (AR_SREV_POSEIDON(ah)) {
139 sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
141 else if (AR_SREV_WASP(ah)) {
142 sync_en_def = AR9340_INTR_SYNC_DEFAULT;
146 OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE)) &
147 (sync_en_def | AR_INTR_SYNC_MASK_GPIO);
/* Nothing pending anywhere: bail out early. */
149 if (!isr && !sync_cause && !async_cause) {
154 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
155 "%s: isr=0x%x, sync_cause=0x%x, async_cause=0x%x\n",
/*
 * BCNMISC covers the beacon-related secondary causes in AR_ISR_S2;
 * translate each hardware bit into its HAL_INT_* position using the
 * MAP_ISR_S2_HAL_* shifts defined above.
 */
162 if (isr & AR_ISR_BCNMISC) {
164 isr2 = OS_REG_READ(ah, AR_ISR_S2);
166 /* Translate ISR bits to HAL values */
167 mask2 |= ((isr2 & AR_ISR_S2_TIM) >> MAP_ISR_S2_HAL_TIM);
168 mask2 |= ((isr2 & AR_ISR_S2_DTIM) >> MAP_ISR_S2_HAL_DTIM);
169 mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >> MAP_ISR_S2_HAL_DTIMSYNC);
170 mask2 |= ((isr2 & AR_ISR_S2_CABEND) >> MAP_ISR_S2_HAL_CABEND);
171 mask2 |= ((isr2 & AR_ISR_S2_GTT) << MAP_ISR_S2_HAL_GTT);
172 mask2 |= ((isr2 & AR_ISR_S2_CST) << MAP_ISR_S2_HAL_CST);
173 mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >> MAP_ISR_S2_HAL_TSFOOR);
174 mask2 |= ((isr2 & AR_ISR_S2_BBPANIC) >> MAP_ISR_S2_HAL_BBPANIC);
176 if (!p_cap->halIsrRacSupport) {
178 * EV61133 (missing interrupts due to ISR_RAC):
179 * If not using ISR_RAC, clear interrupts by writing to ISR_S2.
180 * This avoids a race condition where a new BCNMISC interrupt
181 * could come in between reading the ISR and clearing the
182 * interrupt via the primary ISR. We therefore clear the
183 * interrupt via the secondary, which avoids this race.
185 OS_REG_WRITE(ah, AR_ISR_S2, isr2);
186 isr &= ~AR_ISR_BCNMISC;
190 /* Use AR_ISR_RAC only if chip supports it.
191 * See EV61133 (missing interrupts due to ISR_RAC)
193 if (p_cap->halIsrRacSupport) {
194 isr = OS_REG_READ(ah, AR_ISR_RAC);
/* All-ones read: the device is gone (e.g. ejected) or bus error. */
196 if (isr == 0xffffffff) {
202 *masked = isr & HAL_INT_COMMON;
205 * When interrupt mitigation is switched on, we fake a normal RX or TX
206 * interrupt when we received a mitigated interrupt. This way, the upper
207 * layer do not need to know about feature.
209 if (ahp->ah_intr_mitigation_rx) {
210 /* Only Rx interrupt mitigation. No Tx intr. mitigation. */
211 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) {
212 *masked |= HAL_INT_RXLP;
215 if (ahp->ah_intr_mitigation_tx) {
216 if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM)) {
217 *masked |= HAL_INT_TX;
221 if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR)) {
222 *masked |= HAL_INT_RXLP;
224 if (isr & AR_ISR_HP_RXOK) {
225 *masked |= HAL_INT_RXHP;
227 if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
228 *masked |= HAL_INT_TX;
230 if (!p_cap->halIsrRacSupport) {
233 * EV61133 (missing interrupts due to ISR_RAC):
234 * If not using ISR_RAC, clear interrupts by writing to
236 * This avoids a race condition where a new interrupt
237 * could come in between reading the ISR and clearing the
238 * interrupt via the primary ISR. We therefore clear the
239 * interrupt via the secondary, which avoids this race.
/* Read-then-write-back clears only the S0/S1 bits we observed. */
241 s0 = OS_REG_READ(ah, AR_ISR_S0);
242 OS_REG_WRITE(ah, AR_ISR_S0, s0);
243 s1 = OS_REG_READ(ah, AR_ISR_S1);
244 OS_REG_WRITE(ah, AR_ISR_S1, s1);
/* Already acked via S0/S1; don't ack the TX bits again via AR_ISR. */
246 isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL);
251 * Do not treat receive overflows as fatal for owl.
253 if (isr & AR_ISR_RXORN) {
254 #if __PKT_SERIOUS_ERRORS__
255 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
256 "%s: receive FIFO overrun interrupt\n", __func__);
261 /* XXX Verify if this is fixed for Osprey */
262 if (!p_cap->halAutoSleepSupport) {
263 u_int32_t isr5 = OS_REG_READ(ah, AR_ISR_S5_S);
264 if (isr5 & AR_ISR_S5_TIM_TIMER) {
265 *masked |= HAL_INT_TIM_TIMER;
269 if (isr & AR_ISR_GENTMR) {
272 if (p_cap->halIsrRacSupport) {
273 /* Use secondary shadow registers if using ISR_RAC */
274 s5 = OS_REG_READ(ah, AR_ISR_S5_S);
276 s5 = OS_REG_READ(ah, AR_ISR_S5);
278 if (isr & AR_ISR_GENTMR) {
280 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
281 "%s: GENTIMER, ISR_RAC=0x%x ISR_S2_S=0x%x\n", __func__,
/* Record which generic timers fired / crossed threshold for the caller. */
283 ahp->ah_intr_gen_timer_trigger =
284 MS(s5, AR_ISR_S5_GENTIMER_TRIG);
285 ahp->ah_intr_gen_timer_thresh =
286 MS(s5, AR_ISR_S5_GENTIMER_THRESH);
287 if (ahp->ah_intr_gen_timer_trigger) {
288 *masked |= HAL_INT_GENTIMER;
291 if (!p_cap->halIsrRacSupport) {
293 * EV61133 (missing interrupts due to ISR_RAC):
294 * If not using ISR_RAC, clear interrupts by writing to ISR_S5.
295 * This avoids a race condition where a new interrupt
296 * could come in between reading the ISR and clearing the
297 * interrupt via the primary ISR. We therefore clear the
298 * interrupt via the secondary, which avoids this race.
300 OS_REG_WRITE(ah, AR_ISR_S5, s5);
301 isr &= ~AR_ISR_GENTMR;
307 if (!p_cap->halIsrRacSupport) {
309 * EV61133 (missing interrupts due to ISR_RAC):
310 * If not using ISR_RAC, clear the interrupts we've read by
311 * writing back ones in these locations to the primary ISR
312 * (except for interrupts that have a secondary isr register -
315 OS_REG_WRITE(ah, AR_ISR, isr);
317 /* Flush prior write */
318 (void) OS_REG_READ(ah, AR_ISR);
321 #ifdef AH_SUPPORT_AR9300
322 if (*masked & HAL_INT_BBPANIC) {
323 ar9300_handle_bb_panic(ah);
/* Ack the async causes we consumed and flush the posted write. */
331 AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR), async_cause);
332 /* Flush prior write */
333 (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR));
335 #ifdef ATH_GPIO_USE_ASYNC_CAUSE
336 if (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) {
337 ahp->ah_gpio_cause = (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) >>
338 AR_INTR_ASYNC_ENABLE_GPIO_S;
339 *masked |= HAL_INT_GPIO;
/*
 * MCI (BT coexistence message) interrupt: latch the raw and rx-msg
 * causes into the per-softc shadow fields, then ack them in hardware.
 */
345 if ((async_cause & AR_INTR_ASYNC_CAUSE_MCI) &&
346 p_cap->halMciSupport)
348 u_int32_t int_raw, int_rx_msg;
350 int_rx_msg = OS_REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
351 int_raw = OS_REG_READ(ah, AR_MCI_INTERRUPT_RAW);
/* 0xdeadbeef reads indicate a bad bus read during MCI processing. */
353 if ((int_raw == 0xdeadbeef) || (int_rx_msg == 0xdeadbeef))
355 HALDEBUG(ah, HAL_DEBUG_BT_COEX,
356 "(MCI) Get 0xdeadbeef during MCI int processing"
357 "new int_raw=0x%08x, new rx_msg_raw=0x%08x, "
358 "int_raw=0x%08x, rx_msg_raw=0x%08x\n",
359 int_raw, int_rx_msg, ahp->ah_mci_int_raw,
360 ahp->ah_mci_int_rx_msg);
/* Accumulate into any causes not yet consumed, else start fresh. */
363 if (ahp->ah_mci_int_raw || ahp->ah_mci_int_rx_msg) {
364 ahp->ah_mci_int_rx_msg |= int_rx_msg;
365 ahp->ah_mci_int_raw |= int_raw;
368 ahp->ah_mci_int_rx_msg = int_rx_msg;
369 ahp->ah_mci_int_raw = int_raw;
372 *masked |= HAL_INT_MCI;
373 ahp->ah_mci_rx_status = OS_REG_READ(ah, AR_MCI_RX_STATUS);
374 if (int_rx_msg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
375 ahp->ah_mci_cont_status =
376 OS_REG_READ(ah, AR_MCI_CONT_STATUS);
378 OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
380 OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, int_raw);
382 HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s:AR_INTR_SYNC_MCI\n", __func__);
/*
 * Sync-cause handling: the bit positions differ between Wasp (AR9340)
 * and the other AR9300 parts, so resolve them per-chip first.  Wasp
 * has no RADM completion-timeout bit (0x0 disables that check).
 */
389 int host1_fatal, host1_perr, radm_cpl_timeout, local_timeout;
391 host1_fatal = AR_SREV_WASP(ah) ?
392 AR9340_INTR_SYNC_HOST1_FATAL : AR9300_INTR_SYNC_HOST1_FATAL;
393 host1_perr = AR_SREV_WASP(ah) ?
394 AR9340_INTR_SYNC_HOST1_PERR : AR9300_INTR_SYNC_HOST1_PERR;
395 radm_cpl_timeout = AR_SREV_WASP(ah) ?
396 0x0 : AR9300_INTR_SYNC_RADM_CPL_TIMEOUT;
397 local_timeout = AR_SREV_WASP(ah) ?
398 AR9340_INTR_SYNC_LOCAL_TIMEOUT : AR9300_INTR_SYNC_LOCAL_TIMEOUT;
400 if (sync_cause & host1_fatal) {
401 #if __PKT_SERIOUS_ERRORS__
402 HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
403 "%s: received PCI FATAL interrupt\n", __func__);
405 *masked |= HAL_INT_FATAL; /* Set FATAL INT flag here;*/
407 if (sync_cause & host1_perr) {
408 #if __PKT_SERIOUS_ERRORS__
409 HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
410 "%s: received PCI PERR interrupt\n", __func__);
414 if (sync_cause & radm_cpl_timeout) {
415 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
416 "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n",
/* Pulse the host-interface reset, then report the condition as fatal. */
419 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), AR_RC_HOSTIF);
420 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), 0);
421 *masked |= HAL_INT_FATAL;
423 if (sync_cause & local_timeout) {
424 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
425 "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n",
429 #ifndef ATH_GPIO_USE_ASYNC_CAUSE
430 if (sync_cause & AR_INTR_SYNC_MASK_GPIO) {
431 ahp->ah_gpio_cause = (sync_cause & AR_INTR_SYNC_MASK_GPIO) >>
432 AR_INTR_SYNC_ENABLE_GPIO_S;
433 *masked |= HAL_INT_GPIO;
434 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
435 "%s: AR_INTR_SYNC_GPIO\n", __func__);
439 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR), sync_cause);
440 /* Flush prior write */
441 (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR));
445 if (HAL_INT_MSI == type) {
447 * WAR for Bug EV#75887
448 * In normal case, SW read HOST_INTF_PCIE_MSI (0x40A4) and write
449 * into ah_msi_reg. Then use value of ah_msi_reg to set bit#25
450 * when want to enable HW write the cfg_msi_pending.
451 * Sometimes, driver get MSI interrupt before read 0x40a4 and
452 * ah_msi_reg is initialization value (0x0).
453 * We don't know why "MSI interrupt earlier than driver read" now...
455 if (!ahp->ah_msi_reg) {
456 ahp->ah_msi_reg = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI));
/* 64-vector Poseidon uses a different pending-address mask. */
458 if (AR_SREV_POSEIDON(ah)) {
459 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
461 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
463 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
464 ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));
/*
 * Return the software shadow of the interrupt mask — the HAL_INT bits
 * last stored by ar9300_set_interrupts() — without touching hardware.
 */
472 ar9300_get_interrupts(struct ath_hal *ah)
474 return AH9300(ah)->ah_mask_reg;
478 * Atomically enables NIC interrupts. Interrupts are passed in
479 * via the enumerated bitmask in ints.
/*
 * Sequence: disable the global enable first, compute the new primary
 * (AR_IMR) and secondary (AR_IMR_S2) masks from the HAL_INT bits, write
 * them, then re-enable.  The shadow copies in ahp are kept in sync so
 * ar9300_get_interrupts() can report the mask without a register read.
 * The `nortc` parameter name is part of the public signature; its use,
 * if any, is not visible in this view.
 */
482 ar9300_set_interrupts(struct ath_hal *ah, HAL_INT ints, HAL_BOOL nortc)
484 struct ath_hal_9300 *ahp = AH9300(ah);
485 u_int32_t omask = ahp->ah_mask_reg;
486 u_int32_t mask, mask2, msi_mask = 0;
487 u_int32_t msi_pend_addr_mask = 0;
488 u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
489 HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;
491 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
492 "%s: 0x%x => 0x%x\n", __func__, omask, ints);
/* Quiesce all interrupt sources before reprogramming the masks. */
494 if (omask & HAL_INT_GLOBAL) {
495 HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: disable IER\n", __func__);
497 if (ah->ah_config.ath_hal_enable_msi) {
498 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE), 0);
499 /* flush write to HW */
500 (void)OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE));
504 OS_REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
505 (void) OS_REG_READ(ah, AR_IER); /* flush write to HW */
508 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE), 0);
509 /* flush write to HW */
510 (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE));
511 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), 0);
512 /* flush write to HW */
513 (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE));
517 /* reference count for global IER */
518 if (ints & HAL_INT_GLOBAL) {
520 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
521 "%s: Request HAL_INT_GLOBAL ENABLED\n", __func__);
/* Warn on an enable request when the refcount is already balanced. */
523 if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0) {
524 HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
525 "%s: WARNING: ah_ier_ref_count is 0 "
526 "and attempting to enable IER\n",
532 if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) > 0) {
533 OS_ATOMIC_DEC(&ahp->ah_ier_ref_count);
537 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
538 "%s: Request HAL_INT_GLOBAL DISABLED\n", __func__);
539 OS_ATOMIC_INC(&ahp->ah_ier_ref_count);
541 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
542 "%s: ah_ier_ref_count = %d\n", __func__, ahp->ah_ier_ref_count);
/* Start the primary IMR from the HAL_INT bits shared with AR_IMR. */
544 mask = ints & HAL_INT_COMMON;
548 if (ints & HAL_INT_TX) {
/* With TX mitigation enabled, only the mitigation timer bits fire. */
549 if (ahp->ah_intr_mitigation_tx) {
550 mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
551 } else if (ahp->ah_tx_ok_interrupt_mask) {
554 msi_mask |= AR_INTR_PRIO_TX;
555 if (ahp->ah_tx_err_interrupt_mask) {
556 mask |= AR_IMR_TXERR;
558 if (ahp->ah_tx_eol_interrupt_mask) {
559 mask |= AR_IMR_TXEOL;
562 if (ints & HAL_INT_RX) {
563 mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
/* RX mitigation replaces LP-RXOK with the mitigation timer bits. */
564 if (ahp->ah_intr_mitigation_rx) {
565 mask &= ~(AR_IMR_RXOK_LP);
566 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
568 mask |= AR_IMR_RXOK_LP;
570 msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
571 if (! p_cap->halAutoSleepSupport) {
572 mask |= AR_IMR_GENTMR;
/*
 * Beacon-related causes live in AR_IMR_S2 but require BCNMISC to be
 * set in the primary IMR for the secondary bits to be delivered.
 */
576 if (ints & (HAL_INT_BMISC)) {
577 mask |= AR_IMR_BCNMISC;
578 if (ints & HAL_INT_TIM) {
579 mask2 |= AR_IMR_S2_TIM;
581 if (ints & HAL_INT_DTIM) {
582 mask2 |= AR_IMR_S2_DTIM;
584 if (ints & HAL_INT_DTIMSYNC) {
585 mask2 |= AR_IMR_S2_DTIMSYNC;
587 if (ints & HAL_INT_CABEND) {
588 mask2 |= (AR_IMR_S2_CABEND);
590 if (ints & HAL_INT_TSFOOR) {
591 mask2 |= AR_IMR_S2_TSFOOR;
595 if (ints & (HAL_INT_GTT | HAL_INT_CST)) {
596 mask |= AR_IMR_BCNMISC;
597 if (ints & HAL_INT_GTT) {
598 mask2 |= AR_IMR_S2_GTT;
600 if (ints & HAL_INT_CST) {
601 mask2 |= AR_IMR_S2_CST;
605 if (ints & HAL_INT_BBPANIC) {
606 /* EV92527 - MAC secondary interrupt must enable AR_IMR_BCNMISC */
607 mask |= AR_IMR_BCNMISC;
608 mask2 |= AR_IMR_S2_BBPANIC;
611 if (ints & HAL_INT_GENTIMER) {
612 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
613 "%s: enabling gen timer\n", __func__);
614 mask |= AR_IMR_GENTMR;
617 /* Write the new IMR and store off our SW copy. */
618 HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: new IMR 0x%x\n", __func__, mask);
619 OS_REG_WRITE(ah, AR_IMR, mask);
/* Clear the managed S2 bits from the shadow, then fold in the new ones. */
620 ahp->ah_mask2Reg &= ~(AR_IMR_S2_TIM |
629 ahp->ah_mask2Reg |= mask2;
630 OS_REG_WRITE(ah, AR_IMR_S2, ahp->ah_mask2Reg );
631 ahp->ah_mask_reg = ints;
633 if (! p_cap->halAutoSleepSupport) {
634 if (ints & HAL_INT_TIM_TIMER) {
635 OS_REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
638 OS_REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
643 /* Re-enable interrupts if they were enabled before. */
644 #if HAL_INTR_REFCOUNT_DISABLE
645 if ((ints & HAL_INT_GLOBAL)) {
647 if ((ints & HAL_INT_GLOBAL) && (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0)) {
649 HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: enable IER\n", __func__);
652 OS_REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
/* Build and program the async enable/mask (MAC IRQ plus optional GPIO/MCI). */
655 mask = AR_INTR_MAC_IRQ;
656 #ifdef ATH_GPIO_USE_ASYNC_CAUSE
657 if (ints & HAL_INT_GPIO) {
658 if (ahp->ah_gpio_mask) {
659 mask |= SM(ahp->ah_gpio_mask, AR_INTR_ASYNC_MASK_GPIO);
665 if (ints & HAL_INT_MCI) {
666 mask |= AR_INTR_ASYNC_MASK_MCI;
670 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), mask);
671 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_MASK), mask);
673 if (ah->ah_config.ath_hal_enable_msi) {
674 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE),
676 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_MASK),
678 if (AR_SREV_POSEIDON(ah)) {
679 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
681 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
683 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
684 ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));
688 * debug - enable to see all synchronous interrupts status
689 * Enable synchronous GPIO interrupts as well, since some async
690 * GPIO interrupts don't wake the chip up.
693 #ifndef ATH_GPIO_USE_ASYNC_CAUSE
694 if (ints & HAL_INT_GPIO) {
695 mask |= SM(ahp->ah_gpio_mask, AR_INTR_SYNC_MASK_GPIO);
/* Per-chip default sync-enable bits (same selection as elsewhere). */
698 if (AR_SREV_POSEIDON(ah)) {
699 sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
701 else if (AR_SREV_WASP(ah)) {
702 sync_en_def = AR9340_INTR_SYNC_DEFAULT;
705 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE),
706 (sync_en_def | mask));
707 OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_MASK),
708 (sync_en_def | mask));
710 HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
711 "AR_IMR 0x%x IER 0x%x\n",
712 OS_REG_READ(ah, AR_IMR), OS_REG_READ(ah, AR_IER));
/*
 * Program one of the interrupt-mitigation timer fields selected by
 * `reg`: the global threshold (AR_MIRT), or the first/last-packet
 * fields of the RX (AR_RIMT) / TX (AR_TIMT) mitigation registers.
 * Compiled to a no-op unless AR5416_INT_MITIGATION is defined.
 * NOTE(review): the HAL_INT_THRESHOLD case writes 0 to AR_MIRT rather
 * than `value` — confirm against the full source whether intentional.
 */
719 ar9300_set_intr_mitigation_timer(
721 HAL_INT_MITIGATION reg,
724 #ifdef AR5416_INT_MITIGATION
726 case HAL_INT_THRESHOLD:
727 OS_REG_WRITE(ah, AR_MIRT, 0);
729 case HAL_INT_RX_LASTPKT:
730 OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, value);
732 case HAL_INT_RX_FIRSTPKT:
733 OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, value);
735 case HAL_INT_TX_LASTPKT:
736 OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, value);
738 case HAL_INT_TX_FIRSTPKT:
739 OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, value);
748 ar9300_get_intr_mitigation_timer(struct ath_hal* ah, HAL_INT_MITIGATION reg)
751 #ifdef AR5416_INT_MITIGATION
753 case HAL_INT_THRESHOLD:
754 val = OS_REG_READ(ah, AR_MIRT);
756 case HAL_INT_RX_LASTPKT:
757 val = OS_REG_READ(ah, AR_RIMT) & 0xFFFF;
759 case HAL_INT_RX_FIRSTPKT:
760 val = OS_REG_READ(ah, AR_RIMT) >> 16;
762 case HAL_INT_TX_LASTPKT:
763 val = OS_REG_READ(ah, AR_TIMT) & 0xFFFF;
765 case HAL_INT_TX_FIRSTPKT:
766 val = OS_REG_READ(ah, AR_TIMT) >> 16;