2 * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
31 #include "efx_types.h"
/*
 * Per-queue event statistics increment hook: bumps ee_stat[_stat] on the
 * event queue.  NOTE(review): corrupted extraction -- the two #define lines
 * are the EFSYS_OPT_QSTATS-enabled and disabled variants, but the
 * surrounding #if/#else/#endif and the do/while(0) wrapper lines are
 * missing, and original line numbers are fused into the text.
 */
36 #define EFX_EV_QSTAT_INCR(_eep, _stat) \
38 (_eep)->ee_stat[_stat]++; \
39 _NOTE(CONSTANTCONDITION) \
42 #define EFX_EV_QSTAT_INCR(_eep, _stat)
/*
 * Fragment of the event-module init routine (presumably efx_ev_init() --
 * the signature, early-exit body, and fail labels are missing from this
 * extraction).  Asserts the NIC magic and that interrupts are initialized,
 * then points the flush event queue at EVQ 0 via FR_AZ_DP_CTRL_REG and
 * marks the EV module initialized.
 */
52 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
53 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
/* Already-initialized case; body lost in extraction -- presumably bails out */
55 if (enp->en_mod_flags & EFX_MOD_EV) {
60 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
63 * Program the event queue for receive and transmit queue
66 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
67 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
68 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
70 enp->en_mod_flags |= EFX_MOD_EV;
/* Error path probe; the fail1 label and rc declaration are not visible */
74 EFSYS_PROBE1(fail1, int, rc);
/*
 * Decode the error bits of an RX completion event whose PKT_OK indication
 * is clear, adjusting *flagsp (EFX_ADDR_MISMATCH / EFX_DISCARD / checksum
 * flags) and returning whether the event should be ignored.  Name inferred
 * from the call site below (efx_ev_rx_not_ok).  NOTE(review): corrupted
 * extraction -- the name line, some parameters, braces, and the final
 * return are missing, and original line numbers are fused into the text.
 */
79 static __checkReturn boolean_t
82 __in efx_qword_t *eqp,
85 __inout uint16_t *flagsp)
87 boolean_t ignore = B_FALSE;
89 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
90 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
91 EFSYS_PROBE(tobe_disc);
93 * Assume this is a unicast address mismatch, unless below
94 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
95 * EV_RX_PAUSE_FRM_ERR is set.
97 (*flagsp) |= EFX_ADDR_MISMATCH;
100 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
101 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
102 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
103 (*flagsp) |= EFX_DISCARD;
105 #if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER)
107 * Lookout for payload queue ran dry errors and ignore them.
109 * Sadly for the header/data split cases, the descriptor
110 * pointer in this event refers to the header queue and
111 * therefore cannot be easily detected as duplicate.
112 * So we drop these and rely on the receive processing seeing
113 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
114 * the partially received packet.
/* Truncation with no SOP, no jumbo continuation and zero byte count is
 * treated as a payload-queue-ran-dry event (the 'ignore = B_TRUE' style
 * consequent is missing from this extraction). */
116 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
117 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
118 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
120 #endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */
/* CRC error supersedes the address-mismatch assumption made above */
123 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
124 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
125 EFSYS_PROBE(crc_err);
126 (*flagsp) &= ~EFX_ADDR_MISMATCH;
127 (*flagsp) |= EFX_DISCARD;
130 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
131 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
132 EFSYS_PROBE(pause_frm_err);
133 (*flagsp) &= ~EFX_ADDR_MISMATCH;
134 (*flagsp) |= EFX_DISCARD;
137 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
138 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
139 EFSYS_PROBE(owner_id_err);
140 (*flagsp) |= EFX_DISCARD;
/* Checksum errors only invalidate the corresponding checksum flag;
 * the packet itself is not discarded */
143 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
144 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
145 EFSYS_PROBE(ipv4_err);
146 (*flagsp) &= ~EFX_CKSUM_IPV4;
149 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
150 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
151 EFSYS_PROBE(udp_chk_err);
152 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
155 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
156 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
159 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
160 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
163 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
/*
 * efx_ev_rx (name grounded in the handler table registration below):
 * decode one RX completion event, build the EFX_PKT_*/EFX_CKSUM_* flags
 * word, and deliver it to the client via eecp->eec_rx().  Returns the
 * callback's should-abort indication.  NOTE(review): corrupted extraction
 * -- the name line, several declarations, switch/if scaffolding and the
 * 'deliver'/'discard' labels are missing, and original line numbers are
 * fused into the text.
 */
169 static __checkReturn boolean_t
172 __in efx_qword_t *eqp,
173 __in const efx_ev_callbacks_t *eecp,
176 efx_nic_t *enp = eep->ee_enp;
181 #if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER)
183 boolean_t jumbo_cont;
184 #endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */
189 boolean_t should_abort;
191 EFX_EV_QSTAT_INCR(eep, EV_RX);
193 /* Basic packet information */
194 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
195 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
196 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
197 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
199 #if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER)
200 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
201 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
202 #endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */
204 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
/* The IPv6 indication exists only on post-Falcon (Siena) silicon */
206 is_v6 = (enp->en_family != EFX_FAMILY_FALCON &&
207 EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
210 * If packet is marked as OK and packet type is TCP/IP or
211 * UDP/IP or other IP, then we can rely on the hardware checksums.
/* switch (hdr_type) -- the switch line itself is missing here */
214 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
215 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
217 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
218 flags |= EFX_PKT_IPV6;
220 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
221 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
225 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
226 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
228 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
229 flags |= EFX_PKT_IPV6;
231 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
232 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
236 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
238 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
239 flags = EFX_PKT_IPV6;
241 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
242 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
246 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
247 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
/* default case: unknown header type should be unreachable */
252 EFSYS_ASSERT(B_FALSE);
257 #if EFSYS_OPT_RX_SCATTER || EFSYS_OPT_RX_HDR_SPLIT
258 /* Report scatter and header/lookahead split buffer flags */
260 flags |= EFX_PKT_START;
262 flags |= EFX_PKT_CONT;
263 #endif /* EFSYS_OPT_RX_SCATTER || EFSYS_OPT_RX_HDR_SPLIT */
265 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
267 ignore = efx_ev_rx_not_ok(eep, eqp, label, id, &flags);
269 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
270 uint32_t, size, uint16_t, flags);
276 /* If we're not discarding the packet then it is ok */
277 if (~flags & EFX_DISCARD)
278 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
280 /* Detect multicast packets that didn't match the filter */
281 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
282 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
284 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
285 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
287 EFSYS_PROBE(mcast_mismatch);
288 flags |= EFX_ADDR_MISMATCH;
291 flags |= EFX_PKT_UNICAST;
295 * The packet parser in Siena can abort parsing packets under
296 * certain error conditions, setting the PKT_NOT_PARSED bit
297 * (which clears PKT_OK). If this is set, then don't trust
298 * the PKT_TYPE field.
300 if (enp->en_family != EFX_FAMILY_FALCON && !ok) {
303 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
/* When parsing was aborted, the caller must do its own VLAN detection */
305 flags |= EFX_CHECK_VLAN;
308 if (~flags & EFX_CHECK_VLAN) {
311 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
312 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
313 flags |= EFX_PKT_VLAN_TAGGED;
316 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
317 uint32_t, size, uint16_t, flags);
319 EFSYS_ASSERT(eecp->eec_rx != NULL);
320 should_abort = eecp->eec_rx(arg, label, id, size, flags);
322 return (should_abort);
/*
 * efx_ev_tx (name grounded in the handler table registration below):
 * decode one TX completion event.  The clean-completion fast path calls
 * eecp->eec_tx() with the queue label and descriptor id; any error bit
 * falls through to per-error statistics.  NOTE(review): corrupted
 * extraction -- the name line, declarations, braces and the final return
 * are missing, and original line numbers are fused into the text.
 */
325 static __checkReturn boolean_t
328 __in efx_qword_t *eqp,
329 __in const efx_ev_callbacks_t *eecp,
334 boolean_t should_abort;
336 EFX_EV_QSTAT_INCR(eep, EV_TX);
/* Fast path: completion with no error bits set */
338 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
339 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
340 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
341 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
343 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
344 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
346 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
348 EFSYS_ASSERT(eecp->eec_tx != NULL);
349 should_abort = eecp->eec_tx(arg, label, id);
351 return (should_abort);
/* Slow path: log the raw event and count the specific error condition */
354 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
355 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
356 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
357 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
359 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
360 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
362 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
363 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
365 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
366 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
368 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
/*
 * efx_ev_global (name grounded in the handler table registration below):
 * handle a global event.  The only case visible here is the XGMAC
 * management interrupt, which flags the port for a MAC poll.
 * NOTE(review): corrupted extraction -- the name line, braces and
 * intermediate lines are missing, and original line numbers are fused
 * into the text.
 */
372 static __checkReturn boolean_t
375 __in efx_qword_t *eqp,
376 __in const efx_ev_callbacks_t *eecp,
379 efx_nic_t *enp = eep->ee_enp;
380 efx_port_t *epp = &(enp->en_port);
381 boolean_t should_abort;
383 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
384 should_abort = B_FALSE;
386 /* Check for a link management event */
387 if (EFX_QWORD_FIELD(*eqp, FSF_BZ_GLB_EV_XG_MNT_INTR) != 0) {
388 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL_MNT);
/* Defer actual MAC handling to the next periodic/explicit MAC poll */
392 epp->ep_mac_poll_needed = B_TRUE;
395 return (should_abort);
/*
 * efx_ev_driver (name grounded in the handler table registration below):
 * dispatch a driver event on FSF_AZ_DRIVER_EV_SUBCODE to the matching
 * client callback (flush-done, initialized, sram, wake-up, timer,
 * descriptor-error exception, ...).  NOTE(review): corrupted extraction --
 * the name line, several break statements, braces and the default case
 * are missing, and original line numbers are fused into the text.
 */
398 static __checkReturn boolean_t
401 __in efx_qword_t *eqp,
402 __in const efx_ev_callbacks_t *eecp,
405 boolean_t should_abort;
407 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
408 should_abort = B_FALSE;
410 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
411 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
414 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
416 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
418 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
420 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
421 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
425 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
429 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
430 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
432 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
433 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
/* RX flush reports success and failure through distinct callbacks */
436 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
438 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
440 should_abort = eecp->eec_rxq_flush_failed(arg, rxq_index);
442 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
444 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
446 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
451 case FSE_AZ_EVQ_INIT_DONE_EV:
452 EFSYS_ASSERT(eecp->eec_initialized != NULL);
453 should_abort = eecp->eec_initialized(arg);
457 case FSE_AZ_EVQ_NOT_EN_EV:
458 EFSYS_PROBE(evq_not_en);
461 case FSE_AZ_SRM_UPD_DONE_EV: {
464 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
466 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
468 EFSYS_ASSERT(eecp->eec_sram != NULL);
469 should_abort = eecp->eec_sram(arg, code);
473 case FSE_AZ_WAKE_UP_EV: {
476 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
478 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
479 should_abort = eecp->eec_wake_up(arg, id);
483 case FSE_AZ_TX_PKT_NON_TCP_UDP:
484 EFSYS_PROBE(tx_pkt_non_tcp_udp);
487 case FSE_AZ_TIMER_EV: {
490 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
492 EFSYS_ASSERT(eecp->eec_timer != NULL);
493 should_abort = eecp->eec_timer(arg, id);
/* Descriptor errors are surfaced as exceptions to the client */
497 case FSE_AZ_RX_DSC_ERROR_EV:
498 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
500 EFSYS_PROBE(rx_dsc_error);
502 EFSYS_ASSERT(eecp->eec_exception != NULL);
503 should_abort = eecp->eec_exception(arg,
504 EFX_EXCEPTION_RX_DSC_ERROR, 0);
508 case FSE_AZ_TX_DSC_ERROR_EV:
509 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
511 EFSYS_PROBE(tx_dsc_error);
513 EFSYS_ASSERT(eecp->eec_exception != NULL);
514 should_abort = eecp->eec_exception(arg,
515 EFX_EXCEPTION_TX_DSC_ERROR, 0);
523 return (should_abort);
/*
 * efx_ev_drv_gen (name grounded in the handler table registration below):
 * handle a driver-generated (software) event.  Data wider than 16 bits is
 * logged as a bad event; otherwise the low 16 bits are delivered to
 * eecp->eec_software().  NOTE(review): corrupted extraction -- the name
 * line, declarations and braces are missing, and original line numbers
 * are fused into the text.
 */
526 static __checkReturn boolean_t
529 __in efx_qword_t *eqp,
530 __in const efx_ev_callbacks_t *eecp,
534 boolean_t should_abort;
536 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
538 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
/* Only 16 bits of payload are valid for a software event */
539 if (data >= ((uint32_t)1 << 16)) {
540 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
541 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
542 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
546 EFSYS_ASSERT(eecp->eec_software != NULL);
547 should_abort = eecp->eec_software(arg, (uint16_t)data);
549 return (should_abort);
/*
 * efx_ev_mcdi (name grounded in the handler table registration below;
 * Siena only): dispatch an MCDI firmware event on MCDI_EVENT_CODE --
 * command completion, link change, sensor event, reboot/assert (treated
 * as MC death), MAC stats DMA, firmware alert.  NOTE(review): corrupted
 * extraction -- the name line, many break statements, 'out:' label and
 * braces are missing, and original line numbers are fused into the text.
 */
554 static __checkReturn boolean_t
557 __in efx_qword_t *eqp,
558 __in const efx_ev_callbacks_t *eecp,
561 efx_nic_t *enp = eep->ee_enp;
563 boolean_t should_abort = B_FALSE;
565 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
/* Defensive runtime check mirroring the assertion above */
567 if (enp->en_family != EFX_FAMILY_SIENA)
570 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
572 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
574 case MCDI_EVENT_CODE_BADSSERT:
575 efx_mcdi_ev_death(enp, EINTR);
578 case MCDI_EVENT_CODE_CMDDONE:
580 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
581 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
582 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
585 case MCDI_EVENT_CODE_LINKCHANGE: {
586 efx_link_mode_t link_mode;
588 siena_phy_link_ev(enp, eqp, &link_mode);
589 should_abort = eecp->eec_link_change(arg, link_mode);
592 case MCDI_EVENT_CODE_SENSOREVT: {
593 #if EFSYS_OPT_MON_STATS
595 efx_mon_stat_value_t value;
598 if ((rc = siena_mon_ev(enp, eqp, &id, &value)) == 0)
599 should_abort = eecp->eec_monitor(arg, id, value);
600 else if (rc == ENOTSUP) {
601 should_abort = eecp->eec_exception(arg,
602 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
603 MCDI_EV_FIELD(eqp, DATA));
605 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
607 should_abort = B_FALSE;
611 case MCDI_EVENT_CODE_SCHEDERR:
612 /* Informational only */
615 case MCDI_EVENT_CODE_REBOOT:
616 efx_mcdi_ev_death(enp, EIO);
619 case MCDI_EVENT_CODE_MAC_STATS_DMA:
620 #if EFSYS_OPT_MAC_STATS
/* MAC stats callback is optional -- skip silently when unset */
621 if (eecp->eec_mac_stats != NULL) {
622 eecp->eec_mac_stats(arg,
623 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
628 case MCDI_EVENT_CODE_FWALERT: {
629 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
631 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
632 should_abort = eecp->eec_exception(arg,
633 EFX_EXCEPTION_FWALERT_SRAM,
634 MCDI_EV_FIELD(eqp, FWALERT_DATA));
636 should_abort = eecp->eec_exception(arg,
637 EFX_EXCEPTION_UNKNOWN_FWALERT,
638 MCDI_EV_FIELD(eqp, DATA));
/* default: unrecognized MCDI event code */
643 EFSYS_PROBE1(mc_pcol_error, int, code);
648 return (should_abort);
651 #endif /* EFSYS_OPT_SIENA */
/*
 * EVQ read-pointer prime fragment (presumably efx_ev_qprime() -- the name
 * line is missing from this extraction): writes the masked event count
 * into the per-queue FR_AZ_EVQ_RPTR_REG entry to re-arm the queue.
 * Requires the interrupt module to be initialized.  NOTE(review):
 * corrupted extraction -- original line numbers are fused into the text.
 */
656 __in unsigned int count)
658 efx_nic_t *enp = eep->ee_enp;
663 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
665 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
/* Mask the count into the ring so the pointer wraps with the queue */
670 rptr = count & eep->ee_mask;
672 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
674 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
680 EFSYS_PROBE1(fail1, int, rc);
/*
 * EVQ pending-check fragment (presumably efx_ev_qpending() -- the name
 * line is missing from this extraction): reads the qword at the masked
 * count offset and reports whether it differs from the all-ones
 * "empty slot" pattern, i.e. whether an event is waiting.
 */
685 __checkReturn boolean_t
688 __in unsigned int count)
693 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
695 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
696 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
/* Hardware writes events over the 0xffffffff fill pattern */
698 return (EFX_QWORD_FIELD(qword, EFX_DWORD_0) != 0xffffffff &&
699 EFX_QWORD_FIELD(qword, EFX_DWORD_1) != 0xffffffff);
/*
 * Optional EVQ prefetch helper (name line missing -- presumably
 * efx_ev_qprefetch()): prefetches the event-queue memory at the masked
 * count offset to warm the cache ahead of the poll loop.
 */
702 #if EFSYS_OPT_EV_PREFETCH
707 __in unsigned int count)
711 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
713 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
714 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
717 #endif /* EFSYS_OPT_EV_PREFETCH */
/*
 * Poll-loop helpers: events are read in batches of EFX_EV_BATCH, and a
 * slot holds a real event iff it is not the all-ones fill pattern.
 * NOTE(review): original line numbers are fused into the text.
 */
719 #define EFX_EV_BATCH 8
721 #define EFX_EV_PRESENT(_qword) \
722 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
723 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
/*
 * EVQ poll loop fragment (presumably efx_ev_qpoll() -- the name line,
 * outer do/while opening, some declarations and the count update are
 * missing from this extraction): read events in batches of EFX_EV_BATCH,
 * dispatch each through the queue's handler table, then overwrite
 * processed slots with the all-ones fill pattern.  *countp tracks the
 * running event count across calls.
 */
728 __inout unsigned int *countp,
729 __in const efx_ev_callbacks_t *eecp,
732 efx_qword_t ev[EFX_EV_BATCH];
739 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
740 EFSYS_ASSERT(countp != NULL);
741 EFSYS_ASSERT(eecp != NULL);
745 /* Read up until the end of the batch period */
746 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
747 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
748 for (total = 0; total < batch; ++total) {
749 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
/* Stop at the first empty (all-ones) slot */
751 if (!EFX_EV_PRESENT(ev[total]))
754 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
755 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
756 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
758 offset += sizeof (efx_qword_t);
761 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
763 * Prefetch the next batch when we get within PREFETCH_PERIOD
764 * of a completed batch. If the batch is smaller, then prefetch
767 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
768 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
769 #endif /* EFSYS_OPT_EV_PREFETCH */
771 /* Process the batch of events */
772 for (index = 0; index < total; ++index) {
773 boolean_t should_abort;
775 efx_ev_handler_t handler;
777 #if EFSYS_OPT_EV_PREFETCH
778 /* Prefetch if we've now reached the batch period */
779 if (total == batch &&
780 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
781 offset = (count + batch) & eep->ee_mask;
782 offset *= sizeof (efx_qword_t);
784 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
786 #endif /* EFSYS_OPT_EV_PREFETCH */
788 EFX_EV_QSTAT_INCR(eep, EV_ALL);
/* Dispatch on the event code via the per-queue handler table */
790 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
791 handler = eep->ee_handler[code];
792 EFSYS_ASSERT(handler != NULL);
793 should_abort = handler(eep, &(ev[index]), eecp, arg);
795 /* Ignore subsequent events */
802 * Now that the hardware has most likely moved onto dma'ing
803 * into the next cache line, clear the processed events. Take
804 * care to only clear out events that we've processed
806 EFX_SET_QWORD(ev[0]);
807 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
808 for (index = 0; index < total; ++index) {
809 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
810 offset += sizeof (efx_qword_t);
/* Keep going while full batches are being consumed */
815 } while (total == batch);
/*
 * Driver-generated event post fragment (presumably efx_ev_qpost() -- the
 * signature is missing from this extraction): builds a DRV_GEN event
 * carrying 'data' and injects it into this event queue via
 * FR_AZ_DRV_EV_REG.  NOTE(review): original line numbers are fused into
 * the text.
 */
825 efx_nic_t *enp = eep->ee_enp;
829 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
831 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
832 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
/* Wrap the event qword plus target queue id into the doorbell oword */
834 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
835 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
836 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
838 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
/*
 * Interrupt moderation fragment (presumably efx_ev_qmoderate() -- the
 * name line, range-check body, and fail labels are missing from this
 * extraction): converts a microsecond holdoff into timer quanta and
 * programs the per-queue timer register, with Falcon and Siena using
 * different field names.  us == 0 disables the timer.
 */
844 __in unsigned int us)
846 efx_nic_t *enp = eep->ee_enp;
851 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
/* Reject values beyond the NIC's supported moderation maximum */
853 if (us > enp->en_nic_cfg.enc_evq_moderation_max) {
858 /* If the value is zero then disable the timer */
860 if (enp->en_family == EFX_FAMILY_FALCON)
861 EFX_POPULATE_DWORD_2(dword,
862 FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_DIS,
863 FRF_AB_TC_TIMER_VAL, 0);
865 EFX_POPULATE_DWORD_2(dword,
866 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
867 FRF_CZ_TC_TIMER_VAL, 0);
871 /* Calculate the timer value in quanta */
872 us -= (us % EFX_EV_TIMER_QUANTUM);
873 if (us < EFX_EV_TIMER_QUANTUM)
874 us = EFX_EV_TIMER_QUANTUM;
876 timer_val = us / EFX_EV_TIMER_QUANTUM;
878 /* Moderation value is base 0 so we need to deduct 1 */
879 if (enp->en_family == EFX_FAMILY_FALCON)
880 EFX_POPULATE_DWORD_2(dword,
881 FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_INT_HLDOFF,
882 FRF_AB_TIMER_VAL, timer_val - 1);
884 EFX_POPULATE_DWORD_2(dword,
885 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
886 FRF_CZ_TC_TIMER_VAL, timer_val - 1);
/* Queue 0's timer register needs the locked write variant */
889 locked = (eep->ee_index == 0) ? 1 : 0;
891 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
892 eep->ee_index, &dword, locked);
897 EFSYS_PROBE1(fail1, int, rc);
/*
 * EVQ create fragment (presumably efx_ev_qcreate() -- the name line,
 * several parameter lines, fail labels and the qcount/output assignments
 * are missing from this extraction): validates the requested size and
 * index, allocates the efx_evq_t, installs the per-event-code handler
 * table, and programs the hardware timer and EVQ pointer tables.
 */
905 __in unsigned int index,
906 __in efsys_mem_t *esmp,
909 __deref_out efx_evq_t **eepp)
911 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
917 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
918 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
920 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
/* Queue depth must be a power of two within the supported range */
922 if (!ISP2(n) || !(n & EFX_EVQ_NEVS_MASK)) {
926 if (index >= encp->enc_evq_limit) {
930 #if EFSYS_OPT_RX_SCALE
/* Legacy line interrupts only support a limited RSS queue range */
931 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
932 index >= EFX_MAXRSS_LEGACY) {
/* Encode the queue depth as a power-of-two 'size' for the EVQ_SIZE field */
937 for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
939 if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
941 if (id + (1 << size) >= encp->enc_buftbl_limit) {
946 /* Allocate an EVQ object */
947 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
953 eep->ee_magic = EFX_EVQ_MAGIC;
955 eep->ee_index = index;
956 eep->ee_mask = n - 1;
959 /* Set up the handler table */
960 eep->ee_handler[FSE_AZ_EV_CODE_RX_EV] = efx_ev_rx;
961 eep->ee_handler[FSE_AZ_EV_CODE_TX_EV] = efx_ev_tx;
962 eep->ee_handler[FSE_AZ_EV_CODE_DRIVER_EV] = efx_ev_driver;
963 eep->ee_handler[FSE_AZ_EV_CODE_GLOBAL_EV] = efx_ev_global;
964 eep->ee_handler[FSE_AZ_EV_CODE_DRV_GEN_EV] = efx_ev_drv_gen;
966 eep->ee_handler[FSE_AZ_EV_CODE_MCDI_EVRESPONSE] = efx_ev_mcdi;
967 #endif /* EFSYS_OPT_SIENA */
969 /* Set up the new event queue */
970 if (enp->en_family != EFX_FAMILY_FALCON) {
971 EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1);
972 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword);
975 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
976 FRF_AZ_EVQ_BUF_BASE_ID, id);
978 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword);
988 #if EFSYS_OPT_RX_SCALE
995 EFSYS_PROBE1(fail1, int, rc);
/*
 * Generated table of event-queue statistic names (indexed by the
 * EV_* stat ids) plus the lookup accessor (presumably
 * efx_ev_qstat_name() -- its name line is missing from this extraction).
 * NOTE(review): most table entries and the EFSYS_OPT_NAMES guard opening
 * are missing; do not hand-edit the MKCONFIG-generated span.
 */
1000 #if EFSYS_OPT_QSTATS
1002 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock 67e9bdcd920059bd */
1003 static const char __cs * __cs __efx_ev_qstat_name[] = {
1011 "rx_buf_owner_id_err",
1012 "rx_ipv4_hdr_chksum_err",
1013 "rx_tcp_udp_chksum_err",
1017 "rx_mcast_hash_match",
1034 "global_rx_recovery",
1036 "driver_srm_upd_done",
1037 "driver_tx_descq_fls_done",
1038 "driver_rx_descq_fls_done",
1039 "driver_rx_descq_fls_failed",
1040 "driver_rx_dsc_error",
1041 "driver_tx_dsc_error",
1045 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
1049 __in efx_nic_t *enp,
1050 __in unsigned int id)
1052 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1053 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1055 return (__efx_ev_qstat_name[id]);
1057 #endif /* EFSYS_OPT_NAMES */
1058 #endif /* EFSYS_OPT_QSTATS */
/*
 * efx_ev_qstats_update: fold this queue's accumulated event counters
 * into the caller-supplied stat array and reset the per-queue counters
 * to zero.  NOTE(review): the return-type line, braces and declarations
 * are missing from this extraction, and original line numbers are fused
 * into the text.
 */
1060 #if EFSYS_OPT_QSTATS
1062 efx_ev_qstats_update(
1063 __in efx_evq_t *eep,
1064 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1068 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
1070 for (id = 0; id < EV_NQSTATS; id++) {
1071 efsys_stat_t *essp = &stat[id];
/* Accumulate into the caller's total, then restart local counting */
1073 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1074 eep->ee_stat[id] = 0;
1077 #endif /* EFSYS_OPT_QSTATS */
/*
 * EVQ destroy fragment (presumably efx_ev_qdestroy() -- the name line is
 * missing from this extraction): decrements the NIC's queue count, zeroes
 * the hardware EVQ pointer (and, on post-Falcon parts, the timer) table
 * entries, and frees the efx_evq_t.
 */
1081 __in efx_evq_t *eep)
1083 efx_nic_t *enp = eep->ee_enp;
1086 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
1088 EFSYS_ASSERT(enp->en_ev_qcount != 0);
1089 --enp->en_ev_qcount;
1091 /* Purge event queue */
1092 EFX_ZERO_OWORD(oword);
1094 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1095 eep->ee_index, &oword);
1097 if (enp->en_family != EFX_FAMILY_FALCON) {
1098 EFX_ZERO_OWORD(oword);
1099 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL,
1100 eep->ee_index, &oword);
1103 /* Free the EVQ object */
1104 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
/*
 * Event-module teardown fragment (presumably efx_ev_fini() -- the name
 * line is missing from this extraction): asserts all queues are gone and
 * that RX/TX modules are already down, then clears the EV module flag.
 */
1109 __in efx_nic_t *enp)
1111 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1112 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
1113 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
1114 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
1115 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
1116 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
1118 enp->en_mod_flags &= ~EFX_MOD_EV;