2 * Copyright (c) 2007-2016 Solarflare Communications Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * The views and conclusions contained in the software and documentation are
27 * those of the authors and should not be interpreted as representing official
28 * policies, either expressed or implied, of the FreeBSD Project.
/* Preprocessor setup for the common event-queue module. */
/* NOTE(review): this extraction is missing lines (inner numbering jumps); the
 * remaining #include lines and the #if/#else/#endif structure around the
 * EFX_EV_QSTAT_INCR macro are not visible here — confirm against the repo. */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
36 #if EFSYS_OPT_MON_MCDI
/* Increment a per-queue event statistic counter on the EVQ object. */
41 #define EFX_EV_QSTAT_INCR(_eep, _stat) \
43 (_eep)->ee_stat[_stat]++; \
44 _NOTE(CONSTANTCONDITION) \
/* Stats-disabled variant: expands to nothing. */
47 #define EFX_EV_QSTAT_INCR(_eep, _stat)
/*
 * An event is present unless both 32-bit halves of the event qword still
 * hold the all-ones pattern written back after an event is processed.
 */
50 #define EFX_EV_PRESENT(_qword) \
51 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
52 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
/*
 * Forward declarations for the Siena-family implementations of the
 * event-queue method table (init/qcreate/qprime/qpoll/qmoderate/qstats).
 * NOTE(review): extraction gaps — several declaration names and parameter
 * lines are missing from this view; only fragments remain.
 */
58 static __checkReturn efx_rc_t
66 static __checkReturn efx_rc_t
69 __in unsigned int index,
70 __in efsys_mem_t *esmp,
80 static __checkReturn efx_rc_t
83 __in unsigned int count);
88 __inout unsigned int *countp,
89 __in const efx_ev_callbacks_t *eecp,
97 static __checkReturn efx_rc_t
100 __in unsigned int us);
104 siena_ev_qstats_update(
106 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
110 #endif /* EFSYS_OPT_SIENA */
/*
 * Per-family event-queue method tables. The generic efx_ev_*() entry
 * points dispatch through one of these based on the NIC family.
 */
113 static const efx_ev_ops_t __efx_ev_siena_ops = {
114 siena_ev_init, /* eevo_init */
115 siena_ev_fini, /* eevo_fini */
116 siena_ev_qcreate, /* eevo_qcreate */
117 siena_ev_qdestroy, /* eevo_qdestroy */
118 siena_ev_qprime, /* eevo_qprime */
119 siena_ev_qpost, /* eevo_qpost */
120 siena_ev_qmoderate, /* eevo_qmoderate */
122 siena_ev_qstats_update, /* eevo_qstats_update */
125 #endif /* EFSYS_OPT_SIENA */
127 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
/* Huntington and Medford share the EF10 implementations. */
128 static const efx_ev_ops_t __efx_ev_ef10_ops = {
129 ef10_ev_init, /* eevo_init */
130 ef10_ev_fini, /* eevo_fini */
131 ef10_ev_qcreate, /* eevo_qcreate */
132 ef10_ev_qdestroy, /* eevo_qdestroy */
133 ef10_ev_qprime, /* eevo_qprime */
134 ef10_ev_qpost, /* eevo_qpost */
135 ef10_ev_qmoderate, /* eevo_qmoderate */
137 ef10_ev_qstats_update, /* eevo_qstats_update */
140 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
/*
 * efx_ev_init(): select the per-family event method table and initialise
 * the event module; sets en_eevop and the EFX_MOD_EV module flag on
 * success, and clears them on the (partially visible) failure path.
 * NOTE(review): extraction gaps — the function name line, switch breaks,
 * default case, goto fail labels and returns are missing from this view.
 */
143 __checkReturn efx_rc_t
147 const efx_ev_ops_t *eevop;
150 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
151 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
/* Re-initialisation appears to be rejected here (body not visible). */
153 if (enp->en_mod_flags & EFX_MOD_EV) {
158 switch (enp->en_family) {
160 case EFX_FAMILY_SIENA:
161 eevop = &__efx_ev_siena_ops;
163 #endif /* EFSYS_OPT_SIENA */
165 #if EFSYS_OPT_HUNTINGTON
166 case EFX_FAMILY_HUNTINGTON:
167 eevop = &__efx_ev_ef10_ops;
169 #endif /* EFSYS_OPT_HUNTINGTON */
171 #if EFSYS_OPT_MEDFORD
172 case EFX_FAMILY_MEDFORD:
173 eevop = &__efx_ev_ef10_ops;
175 #endif /* EFSYS_OPT_MEDFORD */
/* No event queues may exist before the module is initialised. */
183 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
185 if ((rc = eevop->eevo_init(enp)) != 0)
188 enp->en_eevop = eevop;
189 enp->en_mod_flags |= EFX_MOD_EV;
/* Failure path: undo module registration. */
196 EFSYS_PROBE1(fail1, efx_rc_t, rc);
198 enp->en_eevop = NULL;
199 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * Body of (presumably) efx_ev_fini(): tear down the event module after
 * asserting RX/TX are already down and no event queues remain, then
 * clear the method pointer and module flag.
 * NOTE(review): the function signature line is missing from this extraction.
 */
207 const efx_ev_ops_t *eevop = enp->en_eevop;
209 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
210 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
211 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
/* RX/TX modules must be finished first; all queues must be destroyed. */
212 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
213 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
214 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
216 eevop->eevo_fini(enp);
218 enp->en_eevop = NULL;
219 enp->en_mod_flags &= ~EFX_MOD_EV;
/*
 * efx_ev_qcreate(): allocate an EVQ object, initialise its index/mask,
 * and dispatch to the family-specific eevo_qcreate. The output pointer
 * is set before hardware creation because interrupts may arrive
 * immediately (see the bug58606 comment below).
 * NOTE(review): extraction gaps — the name line, some parameters, the
 * queue-count increment, *eepp assignment and returns are not visible.
 */
223 __checkReturn efx_rc_t
226 __in unsigned int index,
227 __in efsys_mem_t *esmp,
231 __deref_out efx_evq_t **eepp)
233 const efx_ev_ops_t *eevop = enp->en_eevop;
234 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
238 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
239 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
241 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
243 /* Allocate an EVQ object */
244 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
250 eep->ee_magic = EFX_EVQ_MAGIC;
252 eep->ee_index = index;
/* n is asserted/required to be a power of two elsewhere, so n-1 masks. */
253 eep->ee_mask = n - 1;
257 * Set outputs before the queue is created because interrupts may be
258 * raised for events immediately after the queue is created, before the
259 * function call below returns. See bug58606.
261 * The eepp pointer passed in by the client must therefore point to data
262 * shared with the client's event processing context.
267 if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, us, eep)) != 0)
/* Failure path: free the EVQ object allocated above. */
277 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
279 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Body of (presumably) efx_ev_qdestroy(): dispatch hardware teardown to
 * the family-specific method, then free the EVQ object.
 * NOTE(review): signature and the en_ev_qcount decrement are missing
 * from this extraction.
 */
287 efx_nic_t *enp = eep->ee_enp;
288 const efx_ev_ops_t *eevop = enp->en_eevop;
290 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
292 EFSYS_ASSERT(enp->en_ev_qcount != 0);
295 eevop->eevo_qdestroy(eep);
297 /* Free the EVQ object */
298 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
/*
 * efx_ev_qprime(): re-arm the event queue interrupt after processing
 * `count` events; rejected if the interrupt module is not initialised.
 * NOTE(review): name line, error assignment in the !EFX_MOD_INTR branch
 * and returns are missing from this extraction.
 */
301 __checkReturn efx_rc_t
304 __in unsigned int count)
306 efx_nic_t *enp = eep->ee_enp;
307 const efx_ev_ops_t *eevop = enp->en_eevop;
310 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
/* Priming requires interrupts to be up. */
312 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
317 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
325 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_ev_qpending(): peek the event ring entry at position `count` and
 * report whether an unprocessed event is present (EFX_EV_PRESENT).
 */
329 __checkReturn boolean_t
332 __in unsigned int count)
337 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
/* Ring position -> byte offset within the queue's DMA memory. */
339 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
340 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
342 return (EFX_EV_PRESENT(qword));
345 #if EFSYS_OPT_EV_PREFETCH
/*
 * Prefetch helper (name line missing in this extraction): warm the cache
 * line holding the event ring entry at position `count`.
 */
350 __in unsigned int count)
354 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
356 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
357 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
360 #endif /* EFSYS_OPT_EV_PREFETCH */
/*
 * efx_ev_qpoll(): poll the queue and deliver events through the caller's
 * callback table. The static asserts prove that EF10 (DZ) event code
 * layout matches Falcon/Siena (AZ), so the Siena poll loop is reused for
 * all families.
 * NOTE(review): signature lines and parts of the body are missing from
 * this extraction.
 */
365 __inout unsigned int *countp,
366 __in const efx_ev_callbacks_t *eecp,
369 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
372 * FIXME: Huntington will require support for hardware event batching
373 * and merging, which will need a different ev_qpoll implementation.
375 * Without those features the Falcon/Siena code can be used unchanged.
377 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
378 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
380 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
381 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
382 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
383 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
384 FSE_AZ_EV_CODE_DRV_GEN_EV);
386 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
387 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
389 siena_ev_qpoll(eep, countp, eecp, arg);
/*
 * Body of (presumably) efx_ev_qpost(): post a software (driver-generated)
 * event via the family-specific method; asserts the method exists.
 * NOTE(review): signature lines are missing from this extraction.
 */
397 efx_nic_t *enp = eep->ee_enp;
398 const efx_ev_ops_t *eevop = enp->en_eevop;
400 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
402 EFSYS_ASSERT(eevop != NULL &&
403 eevop->eevo_qpost != NULL);
405 eevop->eevo_qpost(eep, data);
/*
 * efx_ev_usecs_to_ticks(): convert a moderation interval in microseconds
 * to hardware timer ticks using the per-NIC timer quantum (ns), rounding
 * any non-zero interval up to at least one tick.
 * NOTE(review): the us == 0 branch, *ticksp store and return are missing
 * from this extraction.
 */
408 __checkReturn efx_rc_t
409 efx_ev_usecs_to_ticks(
411 __in unsigned int us,
412 __out unsigned int *ticksp)
414 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
417 /* Convert microseconds to a timer tick count */
420 else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
421 ticks = 1; /* Never round down to zero */
423 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
/*
 * efx_ev_qmoderate(): set interrupt moderation on a queue by dispatching
 * to the family-specific eevo_qmoderate.
 * NOTE(review): name line, goto fail label and returns are missing from
 * this extraction.
 */
429 __checkReturn efx_rc_t
432 __in unsigned int us)
434 efx_nic_t *enp = eep->ee_enp;
435 const efx_ev_ops_t *eevop = enp->en_eevop;
438 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
440 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
446 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_ev_qstats_update(): harvest the queue's software event statistics
 * into the caller-supplied array via the family-specific method.
 * Compiled only when EFSYS_OPT_QSTATS is enabled.
 */
452 efx_ev_qstats_update(
454 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
456 { efx_nic_t *enp = eep->ee_enp;
457 const efx_ev_ops_t *eevop = enp->en_eevop;
459 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
461 eevop->eevo_qstats_update(eep, stat);
464 #endif /* EFSYS_OPT_QSTATS */
/*
 * Siena event module init (name line missing in this extraction):
 * read-modify-write FR_AZ_DP_CTRL_REG to program event-queue flush
 * behaviour for receive and transmit queues.
 */
468 static __checkReturn efx_rc_t
475 * Program the event queue for receive and transmit queue
478 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
479 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
480 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
/*
 * siena_ev_rx_not_ok (name line missing in this extraction): decode the
 * error bits of an RX completion event whose PKT_OK flag was clear,
 * updating *flagsp (EFX_DISCARD / EFX_ADDR_MISMATCH / checksum flags)
 * and returning whether the event should be ignored entirely.
 */
486 static __checkReturn boolean_t
489 __in efx_qword_t *eqp,
492 __inout uint16_t *flagsp)
494 boolean_t ignore = B_FALSE;
496 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
497 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
498 EFSYS_PROBE(tobe_disc);
500 * Assume this is a unicast address mismatch, unless below
501 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
502 * EV_RX_PAUSE_FRM_ERR is set.
504 (*flagsp) |= EFX_ADDR_MISMATCH;
507 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
508 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
509 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
510 (*flagsp) |= EFX_DISCARD;
512 #if EFSYS_OPT_RX_SCATTER
514 * Lookout for payload queue ran dry errors and ignore them.
516 * Sadly for the header/data split cases, the descriptor
517 * pointer in this event refers to the header queue and
518 * therefore cannot be easily detected as duplicate.
519 * So we drop these and rely on the receive processing seeing
520 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
521 * the partially received packet.
523 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
524 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
525 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
527 #endif /* EFSYS_OPT_RX_SCATTER */
/* CRC error: definitely not an address mismatch; discard the frame. */
530 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
531 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
532 EFSYS_PROBE(crc_err);
533 (*flagsp) &= ~EFX_ADDR_MISMATCH;
534 (*flagsp) |= EFX_DISCARD;
537 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
538 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
539 EFSYS_PROBE(pause_frm_err);
540 (*flagsp) &= ~EFX_ADDR_MISMATCH;
541 (*flagsp) |= EFX_DISCARD;
544 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
545 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
546 EFSYS_PROBE(owner_id_err);
547 (*flagsp) |= EFX_DISCARD;
/* Checksum errors only clear the corresponding checksum-ok flags. */
550 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
551 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
552 EFSYS_PROBE(ipv4_err);
553 (*flagsp) &= ~EFX_CKSUM_IPV4;
556 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
557 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
558 EFSYS_PROBE(udp_chk_err);
559 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
562 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
563 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
566 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
567 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
570 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
/*
 * siena_ev_rx (name line missing in this extraction): decode an RX
 * completion event — descriptor id, byte count, queue label, packet-type
 * and checksum flags — then deliver it via eecp->eec_rx. Returns the
 * callback's abort request.
 * NOTE(review): extraction gaps — several declarations, breaks and the
 * hdr_type switch scaffolding are missing from this view.
 */
576 static __checkReturn boolean_t
579 __in efx_qword_t *eqp,
580 __in const efx_ev_callbacks_t *eecp,
587 #if EFSYS_OPT_RX_SCATTER
589 boolean_t jumbo_cont;
590 #endif /* EFSYS_OPT_RX_SCATTER */
595 boolean_t should_abort;
597 EFX_EV_QSTAT_INCR(eep, EV_RX);
599 /* Basic packet information */
600 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
601 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
602 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
603 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
605 #if EFSYS_OPT_RX_SCATTER
606 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
607 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
608 #endif /* EFSYS_OPT_RX_SCATTER */
610 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
612 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
615 * If packet is marked as OK and packet type is TCP/IP or
616 * UDP/IP or other IP, then we can rely on the hardware checksums.
/* Map the hardware header type to packet/checksum flags. */
619 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
620 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
622 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
623 flags |= EFX_PKT_IPV6;
625 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
626 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
630 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
631 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
633 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
634 flags |= EFX_PKT_IPV6;
636 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
637 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
641 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
643 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
644 flags = EFX_PKT_IPV6;
646 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
647 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
651 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
652 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
657 EFSYS_ASSERT(B_FALSE);
662 #if EFSYS_OPT_RX_SCATTER
663 /* Report scatter and header/lookahead split buffer flags */
665 flags |= EFX_PKT_START;
667 flags |= EFX_PKT_CONT;
668 #endif /* EFSYS_OPT_RX_SCATTER */
670 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
672 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
674 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
675 uint32_t, size, uint16_t, flags);
681 /* If we're not discarding the packet then it is ok */
682 if (~flags & EFX_DISCARD)
683 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
685 /* Detect multicast packets that didn't match the filter */
686 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
687 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
689 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
690 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
692 EFSYS_PROBE(mcast_mismatch);
693 flags |= EFX_ADDR_MISMATCH;
696 flags |= EFX_PKT_UNICAST;
700 * The packet parser in Siena can abort parsing packets under
701 * certain error conditions, setting the PKT_NOT_PARSED bit
702 * (which clears PKT_OK). If this is set, then don't trust
703 * the PKT_TYPE field.
708 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
710 flags |= EFX_CHECK_VLAN;
/* Only trust PKT_TYPE when the parser did not abort. */
713 if (~flags & EFX_CHECK_VLAN) {
716 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
717 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
718 flags |= EFX_PKT_VLAN_TAGGED;
721 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
722 uint32_t, size, uint16_t, flags);
724 EFSYS_ASSERT(eecp->eec_rx != NULL);
725 should_abort = eecp->eec_rx(arg, label, id, size, flags);
727 return (should_abort);
/*
 * siena_ev_tx (name line missing in this extraction): handle a TX
 * completion event. A clean completion is delivered via eecp->eec_tx;
 * error bits are counted as stats, and an unexpected event is probed as
 * a bad event.
 */
730 static __checkReturn boolean_t
733 __in efx_qword_t *eqp,
734 __in const efx_ev_callbacks_t *eecp,
739 boolean_t should_abort;
741 EFX_EV_QSTAT_INCR(eep, EV_TX);
/* Fast path: completion with no error bits set. */
743 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
744 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
745 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
746 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
748 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
749 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
751 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
753 EFSYS_ASSERT(eecp->eec_tx != NULL);
754 should_abort = eecp->eec_tx(arg, label, id);
756 return (should_abort);
/* Error path: record which error bits were observed. */
759 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
760 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
761 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
762 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
764 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
765 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
767 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
768 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
770 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
771 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
773 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
/*
 * siena_ev_global (name line missing in this extraction): count a global
 * event; the arguments are otherwise unused here.
 */
777 static __checkReturn boolean_t
780 __in efx_qword_t *eqp,
781 __in const efx_ev_callbacks_t *eecp,
784 _NOTE(ARGUNUSED(eqp, eecp, arg))
786 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
/*
 * siena_ev_driver (name line missing in this extraction): demultiplex a
 * driver event by its subcode — TX/RX flush completion, EVQ init done,
 * SRAM update, wake-up, timer and descriptor-error events — and invoke
 * the matching client callback.
 * NOTE(review): extraction gaps — some case bodies, breaks and the
 * default case are missing from this view.
 */
791 static __checkReturn boolean_t
794 __in efx_qword_t *eqp,
795 __in const efx_ev_callbacks_t *eecp,
798 boolean_t should_abort;
800 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
801 should_abort = B_FALSE;
803 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
804 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
807 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
809 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
811 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
813 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
814 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
818 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
/* RX flush events carry a success/failure bit. */
822 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
823 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
825 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
826 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
829 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
831 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
833 should_abort = eecp->eec_rxq_flush_failed(arg,
836 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
838 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
840 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
845 case FSE_AZ_EVQ_INIT_DONE_EV:
846 EFSYS_ASSERT(eecp->eec_initialized != NULL);
847 should_abort = eecp->eec_initialized(arg);
851 case FSE_AZ_EVQ_NOT_EN_EV:
852 EFSYS_PROBE(evq_not_en);
855 case FSE_AZ_SRM_UPD_DONE_EV: {
858 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
860 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
862 EFSYS_ASSERT(eecp->eec_sram != NULL);
863 should_abort = eecp->eec_sram(arg, code);
867 case FSE_AZ_WAKE_UP_EV: {
870 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
872 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
873 should_abort = eecp->eec_wake_up(arg, id);
877 case FSE_AZ_TX_PKT_NON_TCP_UDP:
878 EFSYS_PROBE(tx_pkt_non_tcp_udp);
881 case FSE_AZ_TIMER_EV: {
884 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
886 EFSYS_ASSERT(eecp->eec_timer != NULL);
887 should_abort = eecp->eec_timer(arg, id);
/* Descriptor errors are raised as exceptions to the client. */
891 case FSE_AZ_RX_DSC_ERROR_EV:
892 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
894 EFSYS_PROBE(rx_dsc_error);
896 EFSYS_ASSERT(eecp->eec_exception != NULL);
897 should_abort = eecp->eec_exception(arg,
898 EFX_EXCEPTION_RX_DSC_ERROR, 0);
902 case FSE_AZ_TX_DSC_ERROR_EV:
903 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
905 EFSYS_PROBE(tx_dsc_error);
907 EFSYS_ASSERT(eecp->eec_exception != NULL);
908 should_abort = eecp->eec_exception(arg,
909 EFX_EXCEPTION_TX_DSC_ERROR, 0);
917 return (should_abort);
/*
 * siena_ev_drv_gen (name line missing in this extraction): handle a
 * driver-generated software event. The payload must fit in 16 bits;
 * anything larger is probed as a bad event. Valid payloads are passed
 * to eecp->eec_software.
 */
920 static __checkReturn boolean_t
923 __in efx_qword_t *eqp,
924 __in const efx_ev_callbacks_t *eecp,
928 boolean_t should_abort;
930 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
932 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
933 if (data >= ((uint32_t)1 << 16)) {
934 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
935 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
936 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
940 EFSYS_ASSERT(eecp->eec_software != NULL);
941 should_abort = eecp->eec_software(arg, (uint16_t)data);
943 return (should_abort);
/*
 * siena_ev_mcdi (name line missing in this extraction): decode an MCDI
 * event from the management controller — command completion, link
 * change, sensor event, reboot/assertion, MAC stats DMA and firmware
 * alerts — and route it to the MCDI core or the client callbacks.
 * NOTE(review): extraction gaps — some declarations, breaks and the
 * efx_mcdi_ev_cpl() call line are missing from this view.
 */
948 static __checkReturn boolean_t
951 __in efx_qword_t *eqp,
952 __in const efx_ev_callbacks_t *eecp,
955 efx_nic_t *enp = eep->ee_enp;
957 boolean_t should_abort = B_FALSE;
959 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
961 if (enp->en_family != EFX_FAMILY_SIENA)
964 EFSYS_ASSERT(eecp->eec_link_change != NULL);
965 EFSYS_ASSERT(eecp->eec_exception != NULL);
966 #if EFSYS_OPT_MON_STATS
967 EFSYS_ASSERT(eecp->eec_monitor != NULL);
970 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
972 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
/* Firmware assertion: mark the MCDI channel dead with EINTR. */
974 case MCDI_EVENT_CODE_BADSSERT:
975 efx_mcdi_ev_death(enp, EINTR);
978 case MCDI_EVENT_CODE_CMDDONE:
980 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
981 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
982 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
985 case MCDI_EVENT_CODE_LINKCHANGE: {
986 efx_link_mode_t link_mode;
988 siena_phy_link_ev(enp, eqp, &link_mode);
989 should_abort = eecp->eec_link_change(arg, link_mode);
992 case MCDI_EVENT_CODE_SENSOREVT: {
993 #if EFSYS_OPT_MON_STATS
995 efx_mon_stat_value_t value;
/* Known sensors go to the monitor callback; unknown ones raise an
 * exception; ENODEV means the event was for the other port. */
998 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
999 should_abort = eecp->eec_monitor(arg, id, value);
1000 else if (rc == ENOTSUP) {
1001 should_abort = eecp->eec_exception(arg,
1002 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1003 MCDI_EV_FIELD(eqp, DATA));
1005 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
1007 should_abort = B_FALSE;
1011 case MCDI_EVENT_CODE_SCHEDERR:
1012 /* Informational only */
1015 case MCDI_EVENT_CODE_REBOOT:
1016 efx_mcdi_ev_death(enp, EIO);
1019 case MCDI_EVENT_CODE_MAC_STATS_DMA:
1020 #if EFSYS_OPT_MAC_STATS
1021 if (eecp->eec_mac_stats != NULL) {
1022 eecp->eec_mac_stats(arg,
1023 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1028 case MCDI_EVENT_CODE_FWALERT: {
1029 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1031 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1032 should_abort = eecp->eec_exception(arg,
1033 EFX_EXCEPTION_FWALERT_SRAM,
1034 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1036 should_abort = eecp->eec_exception(arg,
1037 EFX_EXCEPTION_UNKNOWN_FWALERT,
1038 MCDI_EV_FIELD(eqp, DATA));
/* Unrecognised MCDI event code — protocol error probe only. */
1043 EFSYS_PROBE1(mc_pcol_error, int, code);
1048 return (should_abort);
/*
 * siena_ev_qprime (name line missing in this extraction): write the
 * masked read pointer to the per-queue FR_AZ_EVQ_RPTR_REG entry to
 * re-arm the queue's interrupt.
 */
1053 static __checkReturn efx_rc_t
1055 __in efx_evq_t *eep,
1056 __in unsigned int count)
1058 efx_nic_t *enp = eep->ee_enp;
1062 rptr = count & eep->ee_mask;
1064 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1066 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
/* Events are fetched from DMA memory in batches of this many qwords. */
1072 #define EFX_EV_BATCH 8
/*
 * siena_ev_qpoll (signature name line missing in this extraction): the
 * core poll loop. Reads events in batches of EFX_EV_BATCH, dispatches
 * each via the per-event-code handler installed on the EVQ, then writes
 * the all-ones "empty" pattern back over processed entries so
 * EFX_EV_PRESENT() reports them consumed. Stops early when a handler
 * requests abort or a batch comes up short.
 * NOTE(review): extraction gaps — the outer do/while scaffolding, breaks
 * and the final *countp update are only partially visible.
 */
1076 __in efx_evq_t *eep,
1077 __inout unsigned int *countp,
1078 __in const efx_ev_callbacks_t *eecp,
1081 efx_qword_t ev[EFX_EV_BATCH];
1088 EFSYS_ASSERT(countp != NULL);
1089 EFSYS_ASSERT(eecp != NULL);
1093 /* Read up until the end of the batch period */
1094 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
1095 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1096 for (total = 0; total < batch; ++total) {
1097 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
1099 if (!EFX_EV_PRESENT(ev[total]))
1102 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
1103 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
1104 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
1106 offset += sizeof (efx_qword_t);
1109 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
1111 * Prefetch the next batch when we get within PREFETCH_PERIOD
1112 * of a completed batch. If the batch is smaller, then prefetch
1115 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
1116 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1117 #endif /* EFSYS_OPT_EV_PREFETCH */
1119 /* Process the batch of events */
1120 for (index = 0; index < total; ++index) {
1121 boolean_t should_abort;
1124 #if EFSYS_OPT_EV_PREFETCH
1125 /* Prefetch if we've now reached the batch period */
1126 if (total == batch &&
1127 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
1128 offset = (count + batch) & eep->ee_mask;
1129 offset *= sizeof (efx_qword_t);
1131 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1133 #endif /* EFSYS_OPT_EV_PREFETCH */
1135 EFX_EV_QSTAT_INCR(eep, EV_ALL);
/* Dispatch on the event code to the handler installed at qcreate. */
1137 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
1139 case FSE_AZ_EV_CODE_RX_EV:
1140 should_abort = eep->ee_rx(eep,
1141 &(ev[index]), eecp, arg);
1143 case FSE_AZ_EV_CODE_TX_EV:
1144 should_abort = eep->ee_tx(eep,
1145 &(ev[index]), eecp, arg);
1147 case FSE_AZ_EV_CODE_DRIVER_EV:
1148 should_abort = eep->ee_driver(eep,
1149 &(ev[index]), eecp, arg);
1151 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1152 should_abort = eep->ee_drv_gen(eep,
1153 &(ev[index]), eecp, arg);
1156 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
1157 should_abort = eep->ee_mcdi(eep,
1158 &(ev[index]), eecp, arg);
1161 case FSE_AZ_EV_CODE_GLOBAL_EV:
1162 if (eep->ee_global) {
1163 should_abort = eep->ee_global(eep,
1164 &(ev[index]), eecp, arg);
1167 /* else fallthrough */
/* Unknown event code: probe it and raise an exception. */
1169 EFSYS_PROBE3(bad_event,
1170 unsigned int, eep->ee_index,
1172 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
1174 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
1176 EFSYS_ASSERT(eecp->eec_exception != NULL);
1177 (void) eecp->eec_exception(arg,
1178 EFX_EXCEPTION_EV_ERROR, code);
1179 should_abort = B_TRUE;
1182 /* Ignore subsequent events */
1189 * Now that the hardware has most likely moved onto dma'ing
1190 * into the next cache line, clear the processed events. Take
1191 * care to only clear out events that we've processed
1193 EFX_SET_QWORD(ev[0]);
1194 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1195 for (index = 0; index < total; ++index) {
1196 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
1197 offset += sizeof (efx_qword_t);
/* Keep polling while full batches are being consumed. */
1202 } while (total == batch);
/*
 * siena_ev_qpost (signature name line missing in this extraction): build
 * a driver-generated event carrying `data` and inject it into the queue
 * through the FR_AZ_DRV_EV_REG doorbell register.
 */
1209 __in efx_evq_t *eep,
1212 efx_nic_t *enp = eep->ee_enp;
1216 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1217 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1219 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1220 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1221 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1223 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
/*
 * siena_ev_qmoderate (name line missing in this extraction): program the
 * per-queue interrupt hold-off timer. us == 0 disables the timer;
 * otherwise the interval is converted to ticks and the hold-off mode is
 * set. Rejects intervals beyond the NIC's maximum.
 * NOTE(review): the error assignment for the range check, goto labels
 * and returns are missing from this extraction.
 */
1226 static __checkReturn efx_rc_t
1228 __in efx_evq_t *eep,
1229 __in unsigned int us)
1231 efx_nic_t *enp = eep->ee_enp;
1232 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1233 unsigned int locked;
1237 if (us > encp->enc_evq_timer_max_us) {
1242 /* If the value is zero then disable the timer */
1244 EFX_POPULATE_DWORD_2(dword,
1245 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1246 FRF_CZ_TC_TIMER_VAL, 0);
1250 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
1253 EFSYS_ASSERT(ticks > 0);
1254 EFX_POPULATE_DWORD_2(dword,
1255 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
/* Hardware counts from 0, so program ticks - 1. */
1256 FRF_CZ_TC_TIMER_VAL, ticks - 1);
1259 locked = (eep->ee_index == 0) ? 1 : 0;
1261 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1262 eep->ee_index, &dword, locked);
1269 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * siena_ev_qcreate (name line missing in this extraction): validate the
 * requested queue size (power of two within [EFX_EVQ_MINNEVS,
 * EFX_EVQ_MAXNEVS]), index and buffer-table range, install the software
 * event handler table on the EVQ, program the timer and EVQ pointer
 * tables, and apply the initial moderation setting.
 * NOTE(review): extraction gaps — error assignments, goto labels,
 * returns and some scaffolding are missing from this view.
 */
1274 static __checkReturn efx_rc_t
1276 __in efx_nic_t *enp,
1277 __in unsigned int index,
1278 __in efsys_mem_t *esmp,
1282 __in efx_evq_t *eep)
1284 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1289 _NOTE(ARGUNUSED(esmp))
1291 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
1292 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
1294 if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
1298 if (index >= encp->enc_evq_limit) {
1302 #if EFSYS_OPT_RX_SCALE
/* Legacy line interrupts only support a limited RSS queue range. */
1303 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1304 index >= EFX_MAXRSS_LEGACY) {
/* Encode the queue size as a power-of-two exponent for the hardware. */
1309 for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
1311 if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
1313 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1318 /* Set up the handler table */
1319 eep->ee_rx = siena_ev_rx;
1320 eep->ee_tx = siena_ev_tx;
1321 eep->ee_driver = siena_ev_driver;
1322 eep->ee_global = siena_ev_global;
1323 eep->ee_drv_gen = siena_ev_drv_gen;
1325 eep->ee_mcdi = siena_ev_mcdi;
1326 #endif /* EFSYS_OPT_MCDI */
1328 /* Set up the new event queue */
1329 EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1);
1330 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1332 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1333 FRF_AZ_EVQ_BUF_BASE_ID, id);
1335 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1337 /* Set initial interrupt moderation */
1338 siena_ev_qmoderate(eep, us);
1344 #if EFSYS_OPT_RX_SCALE
1351 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1358 #if EFSYS_OPT_QSTATS
/*
 * Generated table mapping event-queue statistic ids to display names.
 * NOTE(review): extraction gaps — many entries of the generated table
 * are missing from this view; do not edit by hand (MKCONFIG generated).
 */
1360 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock b693ddf85aee1bfd */
1361 static const char *__efx_ev_qstat_name[] = {
1368 "rx_buf_owner_id_err",
1369 "rx_ipv4_hdr_chksum_err",
1370 "rx_tcp_udp_chksum_err",
1374 "rx_mcast_hash_match",
1391 "driver_srm_upd_done",
1392 "driver_tx_descq_fls_done",
1393 "driver_rx_descq_fls_done",
1394 "driver_rx_descq_fls_failed",
1395 "driver_rx_dsc_error",
1396 "driver_tx_dsc_error",
1400 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
/*
 * efx_ev_qstat_name (name line missing in this extraction): look up the
 * display name for a statistic id; id is asserted in range.
 */
1404 __in efx_nic_t *enp,
1405 __in unsigned int id)
1407 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1408 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1410 return (__efx_ev_qstat_name[id]);
1412 #endif /* EFSYS_OPT_NAMES */
1413 #endif /* EFSYS_OPT_QSTATS */
1417 #if EFSYS_OPT_QSTATS
/*
 * siena_ev_qstats_update(): accumulate each per-queue software counter
 * into the caller's stats array and reset the queue-local counter.
 */
1419 siena_ev_qstats_update(
1420 __in efx_evq_t *eep,
1421 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1425 for (id = 0; id < EV_NQSTATS; id++) {
1426 efsys_stat_t *essp = &stat[id];
1428 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1429 eep->ee_stat[id] = 0;
1432 #endif /* EFSYS_OPT_QSTATS */
/*
 * siena_ev_qdestroy (signature name line missing in this extraction):
 * disable the queue by zeroing its EVQ pointer table and timer table
 * entries.
 */
1436 __in efx_evq_t *eep)
1438 efx_nic_t *enp = eep->ee_enp;
1441 /* Purge event queue */
1442 EFX_ZERO_OWORD(oword);
1444 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1445 eep->ee_index, &oword, B_TRUE);
1447 EFX_ZERO_OWORD(oword);
1448 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
/* siena_ev_fini (name line missing): no per-family teardown required. */
1453 __in efx_nic_t *enp)
1455 _NOTE(ARGUNUSED(enp))
1458 #endif /* EFSYS_OPT_SIENA */