2 * Copyright (c) 2007-2016 Solarflare Communications Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * The views and conclusions contained in the software and documentation are
27 * those of the authors and should not be interpreted as representing official
28 * policies, either expressed or implied, of the FreeBSD Project.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
36 #if EFSYS_OPT_MON_MCDI
41 #define EFX_EV_QSTAT_INCR(_eep, _stat) \
43 (_eep)->ee_stat[_stat]++; \
44 _NOTE(CONSTANTCONDITION) \
47 #define EFX_EV_QSTAT_INCR(_eep, _stat)
50 #define EFX_EV_PRESENT(_qword) \
51 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
52 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
58 static __checkReturn efx_rc_t
66 static __checkReturn efx_rc_t
69 __in unsigned int index,
70 __in efsys_mem_t *esmp,
79 static __checkReturn efx_rc_t
82 __in unsigned int count);
87 __inout unsigned int *countp,
88 __in const efx_ev_callbacks_t *eecp,
96 static __checkReturn efx_rc_t
99 __in unsigned int us);
103 siena_ev_qstats_update(
105 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
109 #endif /* EFSYS_OPT_SIENA */
112 static const efx_ev_ops_t __efx_ev_siena_ops = {
113 siena_ev_init, /* eevo_init */
114 siena_ev_fini, /* eevo_fini */
115 siena_ev_qcreate, /* eevo_qcreate */
116 siena_ev_qdestroy, /* eevo_qdestroy */
117 siena_ev_qprime, /* eevo_qprime */
118 siena_ev_qpost, /* eevo_qpost */
119 siena_ev_qmoderate, /* eevo_qmoderate */
121 siena_ev_qstats_update, /* eevo_qstats_update */
124 #endif /* EFSYS_OPT_SIENA */
126 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
127 static const efx_ev_ops_t __efx_ev_ef10_ops = {
128 ef10_ev_init, /* eevo_init */
129 ef10_ev_fini, /* eevo_fini */
130 ef10_ev_qcreate, /* eevo_qcreate */
131 ef10_ev_qdestroy, /* eevo_qdestroy */
132 ef10_ev_qprime, /* eevo_qprime */
133 ef10_ev_qpost, /* eevo_qpost */
134 ef10_ev_qmoderate, /* eevo_qmoderate */
136 ef10_ev_qstats_update, /* eevo_qstats_update */
139 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
142 __checkReturn efx_rc_t
146 const efx_ev_ops_t *eevop;
149 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
150 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
152 if (enp->en_mod_flags & EFX_MOD_EV) {
157 switch (enp->en_family) {
159 case EFX_FAMILY_SIENA:
160 eevop = &__efx_ev_siena_ops;
162 #endif /* EFSYS_OPT_SIENA */
164 #if EFSYS_OPT_HUNTINGTON
165 case EFX_FAMILY_HUNTINGTON:
166 eevop = &__efx_ev_ef10_ops;
168 #endif /* EFSYS_OPT_HUNTINGTON */
170 #if EFSYS_OPT_MEDFORD
171 case EFX_FAMILY_MEDFORD:
172 eevop = &__efx_ev_ef10_ops;
174 #endif /* EFSYS_OPT_MEDFORD */
182 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
184 if ((rc = eevop->eevo_init(enp)) != 0)
187 enp->en_eevop = eevop;
188 enp->en_mod_flags |= EFX_MOD_EV;
195 EFSYS_PROBE1(fail1, efx_rc_t, rc);
197 enp->en_eevop = NULL;
198 enp->en_mod_flags &= ~EFX_MOD_EV;
206 const efx_ev_ops_t *eevop = enp->en_eevop;
208 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
209 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
210 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
211 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
212 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
213 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
215 eevop->eevo_fini(enp);
217 enp->en_eevop = NULL;
218 enp->en_mod_flags &= ~EFX_MOD_EV;
222 __checkReturn efx_rc_t
225 __in unsigned int index,
226 __in efsys_mem_t *esmp,
229 __deref_out efx_evq_t **eepp)
231 const efx_ev_ops_t *eevop = enp->en_eevop;
232 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
236 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
237 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
239 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
241 /* Allocate an EVQ object */
242 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
248 eep->ee_magic = EFX_EVQ_MAGIC;
250 eep->ee_index = index;
251 eep->ee_mask = n - 1;
255 * Set outputs before the queue is created because interrupts may be
256 * raised for events immediately after the queue is created, before the
257 * function call below returns. See bug58606.
259 * The eepp pointer passed in by the client must therefore point to data
260 * shared with the client's event processing context.
265 if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, eep)) != 0)
275 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
277 EFSYS_PROBE1(fail1, efx_rc_t, rc);
285 efx_nic_t *enp = eep->ee_enp;
286 const efx_ev_ops_t *eevop = enp->en_eevop;
288 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
290 EFSYS_ASSERT(enp->en_ev_qcount != 0);
293 eevop->eevo_qdestroy(eep);
295 /* Free the EVQ object */
296 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
299 __checkReturn efx_rc_t
302 __in unsigned int count)
304 efx_nic_t *enp = eep->ee_enp;
305 const efx_ev_ops_t *eevop = enp->en_eevop;
308 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
310 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
315 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
323 EFSYS_PROBE1(fail1, efx_rc_t, rc);
327 __checkReturn boolean_t
330 __in unsigned int count)
335 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
337 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
338 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
340 return (EFX_EV_PRESENT(qword));
343 #if EFSYS_OPT_EV_PREFETCH
348 __in unsigned int count)
350 efx_nic_t *enp = eep->ee_enp;
353 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
355 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
356 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
359 #endif /* EFSYS_OPT_EV_PREFETCH */
364 __inout unsigned int *countp,
365 __in const efx_ev_callbacks_t *eecp,
368 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
371 * FIXME: Huntington will require support for hardware event batching
372 * and merging, which will need a different ev_qpoll implementation.
374 * Without those features the Falcon/Siena code can be used unchanged.
376 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
377 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
379 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
380 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
381 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
382 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
383 FSE_AZ_EV_CODE_DRV_GEN_EV);
385 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
386 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
388 siena_ev_qpoll(eep, countp, eecp, arg);
396 efx_nic_t *enp = eep->ee_enp;
397 const efx_ev_ops_t *eevop = enp->en_eevop;
399 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
401 EFSYS_ASSERT(eevop != NULL &&
402 eevop->eevo_qpost != NULL);
404 eevop->eevo_qpost(eep, data);
407 __checkReturn efx_rc_t
410 __in unsigned int us)
412 efx_nic_t *enp = eep->ee_enp;
413 const efx_ev_ops_t *eevop = enp->en_eevop;
416 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
418 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
424 EFSYS_PROBE1(fail1, efx_rc_t, rc);
430 efx_ev_qstats_update(
432 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
434 { efx_nic_t *enp = eep->ee_enp;
435 const efx_ev_ops_t *eevop = enp->en_eevop;
437 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
439 eevop->eevo_qstats_update(eep, stat);
442 #endif /* EFSYS_OPT_QSTATS */
446 static __checkReturn efx_rc_t
453 * Program the event queue for receive and transmit queue
456 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
457 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
458 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
464 static __checkReturn boolean_t
467 __in efx_qword_t *eqp,
470 __inout uint16_t *flagsp)
472 boolean_t ignore = B_FALSE;
474 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
475 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
476 EFSYS_PROBE(tobe_disc);
478 * Assume this is a unicast address mismatch, unless below
479 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
480 * EV_RX_PAUSE_FRM_ERR is set.
482 (*flagsp) |= EFX_ADDR_MISMATCH;
485 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
486 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
487 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
488 (*flagsp) |= EFX_DISCARD;
490 #if EFSYS_OPT_RX_SCATTER
492 * Lookout for payload queue ran dry errors and ignore them.
494 * Sadly for the header/data split cases, the descriptor
495 * pointer in this event refers to the header queue and
496 * therefore cannot be easily detected as duplicate.
497 * So we drop these and rely on the receive processing seeing
498 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
499 * the partially received packet.
501 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
502 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
503 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
505 #endif /* EFSYS_OPT_RX_SCATTER */
508 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
509 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
510 EFSYS_PROBE(crc_err);
511 (*flagsp) &= ~EFX_ADDR_MISMATCH;
512 (*flagsp) |= EFX_DISCARD;
515 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
516 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
517 EFSYS_PROBE(pause_frm_err);
518 (*flagsp) &= ~EFX_ADDR_MISMATCH;
519 (*flagsp) |= EFX_DISCARD;
522 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
523 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
524 EFSYS_PROBE(owner_id_err);
525 (*flagsp) |= EFX_DISCARD;
528 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
529 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
530 EFSYS_PROBE(ipv4_err);
531 (*flagsp) &= ~EFX_CKSUM_IPV4;
534 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
535 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
536 EFSYS_PROBE(udp_chk_err);
537 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
540 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
541 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
544 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
545 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
548 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
554 static __checkReturn boolean_t
557 __in efx_qword_t *eqp,
558 __in const efx_ev_callbacks_t *eecp,
565 #if EFSYS_OPT_RX_SCATTER
567 boolean_t jumbo_cont;
568 #endif /* EFSYS_OPT_RX_SCATTER */
573 boolean_t should_abort;
575 EFX_EV_QSTAT_INCR(eep, EV_RX);
577 /* Basic packet information */
578 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
579 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
580 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
581 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
583 #if EFSYS_OPT_RX_SCATTER
584 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
585 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
586 #endif /* EFSYS_OPT_RX_SCATTER */
588 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
590 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
593 * If packet is marked as OK and packet type is TCP/IP or
594 * UDP/IP or other IP, then we can rely on the hardware checksums.
597 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
598 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
600 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
601 flags |= EFX_PKT_IPV6;
603 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
604 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
608 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
609 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
611 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
612 flags |= EFX_PKT_IPV6;
614 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
615 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
619 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
621 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
622 flags = EFX_PKT_IPV6;
624 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
625 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
629 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
630 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
635 EFSYS_ASSERT(B_FALSE);
640 #if EFSYS_OPT_RX_SCATTER
641 /* Report scatter and header/lookahead split buffer flags */
643 flags |= EFX_PKT_START;
645 flags |= EFX_PKT_CONT;
646 #endif /* EFSYS_OPT_RX_SCATTER */
648 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
650 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
652 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
653 uint32_t, size, uint16_t, flags);
659 /* If we're not discarding the packet then it is ok */
660 if (~flags & EFX_DISCARD)
661 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
663 /* Detect multicast packets that didn't match the filter */
664 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
665 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
667 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
668 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
670 EFSYS_PROBE(mcast_mismatch);
671 flags |= EFX_ADDR_MISMATCH;
674 flags |= EFX_PKT_UNICAST;
678 * The packet parser in Siena can abort parsing packets under
679 * certain error conditions, setting the PKT_NOT_PARSED bit
680 * (which clears PKT_OK). If this is set, then don't trust
681 * the PKT_TYPE field.
686 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
688 flags |= EFX_CHECK_VLAN;
691 if (~flags & EFX_CHECK_VLAN) {
694 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
695 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
696 flags |= EFX_PKT_VLAN_TAGGED;
699 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
700 uint32_t, size, uint16_t, flags);
702 EFSYS_ASSERT(eecp->eec_rx != NULL);
703 should_abort = eecp->eec_rx(arg, label, id, size, flags);
705 return (should_abort);
708 static __checkReturn boolean_t
711 __in efx_qword_t *eqp,
712 __in const efx_ev_callbacks_t *eecp,
717 boolean_t should_abort;
719 EFX_EV_QSTAT_INCR(eep, EV_TX);
721 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
722 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
723 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
724 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
726 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
727 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
729 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
731 EFSYS_ASSERT(eecp->eec_tx != NULL);
732 should_abort = eecp->eec_tx(arg, label, id);
734 return (should_abort);
737 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
738 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
739 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
740 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
742 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
743 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
745 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
746 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
748 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
749 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
751 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
755 static __checkReturn boolean_t
758 __in efx_qword_t *eqp,
759 __in const efx_ev_callbacks_t *eecp,
762 _NOTE(ARGUNUSED(eqp, eecp, arg))
764 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
769 static __checkReturn boolean_t
772 __in efx_qword_t *eqp,
773 __in const efx_ev_callbacks_t *eecp,
776 boolean_t should_abort;
778 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
779 should_abort = B_FALSE;
781 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
782 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
785 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
787 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
789 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
791 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
792 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
796 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
800 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
801 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
803 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
804 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
807 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
809 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
811 should_abort = eecp->eec_rxq_flush_failed(arg,
814 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
816 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
818 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
823 case FSE_AZ_EVQ_INIT_DONE_EV:
824 EFSYS_ASSERT(eecp->eec_initialized != NULL);
825 should_abort = eecp->eec_initialized(arg);
829 case FSE_AZ_EVQ_NOT_EN_EV:
830 EFSYS_PROBE(evq_not_en);
833 case FSE_AZ_SRM_UPD_DONE_EV: {
836 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
838 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
840 EFSYS_ASSERT(eecp->eec_sram != NULL);
841 should_abort = eecp->eec_sram(arg, code);
845 case FSE_AZ_WAKE_UP_EV: {
848 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
850 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
851 should_abort = eecp->eec_wake_up(arg, id);
855 case FSE_AZ_TX_PKT_NON_TCP_UDP:
856 EFSYS_PROBE(tx_pkt_non_tcp_udp);
859 case FSE_AZ_TIMER_EV: {
862 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
864 EFSYS_ASSERT(eecp->eec_timer != NULL);
865 should_abort = eecp->eec_timer(arg, id);
869 case FSE_AZ_RX_DSC_ERROR_EV:
870 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
872 EFSYS_PROBE(rx_dsc_error);
874 EFSYS_ASSERT(eecp->eec_exception != NULL);
875 should_abort = eecp->eec_exception(arg,
876 EFX_EXCEPTION_RX_DSC_ERROR, 0);
880 case FSE_AZ_TX_DSC_ERROR_EV:
881 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
883 EFSYS_PROBE(tx_dsc_error);
885 EFSYS_ASSERT(eecp->eec_exception != NULL);
886 should_abort = eecp->eec_exception(arg,
887 EFX_EXCEPTION_TX_DSC_ERROR, 0);
895 return (should_abort);
898 static __checkReturn boolean_t
901 __in efx_qword_t *eqp,
902 __in const efx_ev_callbacks_t *eecp,
906 boolean_t should_abort;
908 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
910 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
911 if (data >= ((uint32_t)1 << 16)) {
912 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
913 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
914 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
918 EFSYS_ASSERT(eecp->eec_software != NULL);
919 should_abort = eecp->eec_software(arg, (uint16_t)data);
921 return (should_abort);
926 static __checkReturn boolean_t
929 __in efx_qword_t *eqp,
930 __in const efx_ev_callbacks_t *eecp,
933 efx_nic_t *enp = eep->ee_enp;
935 boolean_t should_abort = B_FALSE;
937 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
939 if (enp->en_family != EFX_FAMILY_SIENA)
942 EFSYS_ASSERT(eecp->eec_link_change != NULL);
943 EFSYS_ASSERT(eecp->eec_exception != NULL);
944 #if EFSYS_OPT_MON_STATS
945 EFSYS_ASSERT(eecp->eec_monitor != NULL);
948 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
950 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
952 case MCDI_EVENT_CODE_BADSSERT:
953 efx_mcdi_ev_death(enp, EINTR);
956 case MCDI_EVENT_CODE_CMDDONE:
958 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
959 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
960 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
963 case MCDI_EVENT_CODE_LINKCHANGE: {
964 efx_link_mode_t link_mode;
966 siena_phy_link_ev(enp, eqp, &link_mode);
967 should_abort = eecp->eec_link_change(arg, link_mode);
970 case MCDI_EVENT_CODE_SENSOREVT: {
971 #if EFSYS_OPT_MON_STATS
973 efx_mon_stat_value_t value;
976 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
977 should_abort = eecp->eec_monitor(arg, id, value);
978 else if (rc == ENOTSUP) {
979 should_abort = eecp->eec_exception(arg,
980 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
981 MCDI_EV_FIELD(eqp, DATA));
983 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
985 should_abort = B_FALSE;
989 case MCDI_EVENT_CODE_SCHEDERR:
990 /* Informational only */
993 case MCDI_EVENT_CODE_REBOOT:
994 efx_mcdi_ev_death(enp, EIO);
997 case MCDI_EVENT_CODE_MAC_STATS_DMA:
998 #if EFSYS_OPT_MAC_STATS
999 if (eecp->eec_mac_stats != NULL) {
1000 eecp->eec_mac_stats(arg,
1001 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1006 case MCDI_EVENT_CODE_FWALERT: {
1007 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1009 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1010 should_abort = eecp->eec_exception(arg,
1011 EFX_EXCEPTION_FWALERT_SRAM,
1012 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1014 should_abort = eecp->eec_exception(arg,
1015 EFX_EXCEPTION_UNKNOWN_FWALERT,
1016 MCDI_EV_FIELD(eqp, DATA));
1021 EFSYS_PROBE1(mc_pcol_error, int, code);
1026 return (should_abort);
1029 #endif /* EFSYS_OPT_MCDI */
1031 static __checkReturn efx_rc_t
1033 __in efx_evq_t *eep,
1034 __in unsigned int count)
1036 efx_nic_t *enp = eep->ee_enp;
1040 rptr = count & eep->ee_mask;
1042 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1044 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
1050 #define EFX_EV_BATCH 8
1054 __in efx_evq_t *eep,
1055 __inout unsigned int *countp,
1056 __in const efx_ev_callbacks_t *eecp,
1059 efx_qword_t ev[EFX_EV_BATCH];
1066 EFSYS_ASSERT(countp != NULL);
1067 EFSYS_ASSERT(eecp != NULL);
1071 /* Read up until the end of the batch period */
1072 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
1073 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1074 for (total = 0; total < batch; ++total) {
1075 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
1077 if (!EFX_EV_PRESENT(ev[total]))
1080 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
1081 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
1082 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
1084 offset += sizeof (efx_qword_t);
1087 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
1089 * Prefetch the next batch when we get within PREFETCH_PERIOD
1090 * of a completed batch. If the batch is smaller, then prefetch
1093 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
1094 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1095 #endif /* EFSYS_OPT_EV_PREFETCH */
1097 /* Process the batch of events */
1098 for (index = 0; index < total; ++index) {
1099 boolean_t should_abort;
1102 #if EFSYS_OPT_EV_PREFETCH
1103 /* Prefetch if we've now reached the batch period */
1104 if (total == batch &&
1105 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
1106 offset = (count + batch) & eep->ee_mask;
1107 offset *= sizeof (efx_qword_t);
1109 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
1111 #endif /* EFSYS_OPT_EV_PREFETCH */
1113 EFX_EV_QSTAT_INCR(eep, EV_ALL);
1115 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
1117 case FSE_AZ_EV_CODE_RX_EV:
1118 should_abort = eep->ee_rx(eep,
1119 &(ev[index]), eecp, arg);
1121 case FSE_AZ_EV_CODE_TX_EV:
1122 should_abort = eep->ee_tx(eep,
1123 &(ev[index]), eecp, arg);
1125 case FSE_AZ_EV_CODE_DRIVER_EV:
1126 should_abort = eep->ee_driver(eep,
1127 &(ev[index]), eecp, arg);
1129 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1130 should_abort = eep->ee_drv_gen(eep,
1131 &(ev[index]), eecp, arg);
1134 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
1135 should_abort = eep->ee_mcdi(eep,
1136 &(ev[index]), eecp, arg);
1139 case FSE_AZ_EV_CODE_GLOBAL_EV:
1140 if (eep->ee_global) {
1141 should_abort = eep->ee_global(eep,
1142 &(ev[index]), eecp, arg);
1145 /* else fallthrough */
1147 EFSYS_PROBE3(bad_event,
1148 unsigned int, eep->ee_index,
1150 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
1152 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
1154 EFSYS_ASSERT(eecp->eec_exception != NULL);
1155 (void) eecp->eec_exception(arg,
1156 EFX_EXCEPTION_EV_ERROR, code);
1157 should_abort = B_TRUE;
1160 /* Ignore subsequent events */
1167 * Now that the hardware has most likely moved onto dma'ing
1168 * into the next cache line, clear the processed events. Take
1169 * care to only clear out events that we've processed
1171 EFX_SET_QWORD(ev[0]);
1172 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
1173 for (index = 0; index < total; ++index) {
1174 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
1175 offset += sizeof (efx_qword_t);
1180 } while (total == batch);
1187 __in efx_evq_t *eep,
1190 efx_nic_t *enp = eep->ee_enp;
1194 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1195 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1197 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1198 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1199 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1201 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
1204 static __checkReturn efx_rc_t
1206 __in efx_evq_t *eep,
1207 __in unsigned int us)
1209 efx_nic_t *enp = eep->ee_enp;
1210 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1211 unsigned int locked;
1215 if (us > encp->enc_evq_timer_max_us) {
1220 /* If the value is zero then disable the timer */
1222 EFX_POPULATE_DWORD_2(dword,
1223 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1224 FRF_CZ_TC_TIMER_VAL, 0);
1228 /* Calculate the timer value in quanta */
1229 timer_val = us * 1000 / encp->enc_evq_timer_quantum_ns;
1231 /* Moderation value is base 0 so we need to deduct 1 */
1235 EFX_POPULATE_DWORD_2(dword,
1236 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
1237 FRF_CZ_TC_TIMER_VAL, timer_val);
1240 locked = (eep->ee_index == 0) ? 1 : 0;
1242 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1243 eep->ee_index, &dword, locked);
1248 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1253 static __checkReturn efx_rc_t
1255 __in efx_nic_t *enp,
1256 __in unsigned int index,
1257 __in efsys_mem_t *esmp,
1260 __in efx_evq_t *eep)
1262 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1267 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
1268 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
1270 if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
1274 if (index >= encp->enc_evq_limit) {
1278 #if EFSYS_OPT_RX_SCALE
1279 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1280 index >= EFX_MAXRSS_LEGACY) {
1285 for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
1287 if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
1289 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1294 /* Set up the handler table */
1295 eep->ee_rx = siena_ev_rx;
1296 eep->ee_tx = siena_ev_tx;
1297 eep->ee_driver = siena_ev_driver;
1298 eep->ee_global = siena_ev_global;
1299 eep->ee_drv_gen = siena_ev_drv_gen;
1301 eep->ee_mcdi = siena_ev_mcdi;
1302 #endif /* EFSYS_OPT_MCDI */
1304 /* Set up the new event queue */
1305 EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1);
1306 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1308 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1309 FRF_AZ_EVQ_BUF_BASE_ID, id);
1311 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1317 #if EFSYS_OPT_RX_SCALE
1324 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1329 #endif /* EFSYS_OPT_SIENA */
1331 #if EFSYS_OPT_QSTATS
1333 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock b693ddf85aee1bfd */
1334 static const char *__efx_ev_qstat_name[] = {
1341 "rx_buf_owner_id_err",
1342 "rx_ipv4_hdr_chksum_err",
1343 "rx_tcp_udp_chksum_err",
1347 "rx_mcast_hash_match",
1364 "driver_srm_upd_done",
1365 "driver_tx_descq_fls_done",
1366 "driver_rx_descq_fls_done",
1367 "driver_rx_descq_fls_failed",
1368 "driver_rx_dsc_error",
1369 "driver_tx_dsc_error",
1373 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
1377 __in efx_nic_t *enp,
1378 __in unsigned int id)
1380 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1381 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1383 return (__efx_ev_qstat_name[id]);
1385 #endif /* EFSYS_OPT_NAMES */
1386 #endif /* EFSYS_OPT_QSTATS */
1390 #if EFSYS_OPT_QSTATS
1392 siena_ev_qstats_update(
1393 __in efx_evq_t *eep,
1394 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1398 for (id = 0; id < EV_NQSTATS; id++) {
1399 efsys_stat_t *essp = &stat[id];
1401 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1402 eep->ee_stat[id] = 0;
1405 #endif /* EFSYS_OPT_QSTATS */
1409 __in efx_evq_t *eep)
1411 efx_nic_t *enp = eep->ee_enp;
1414 /* Purge event queue */
1415 EFX_ZERO_OWORD(oword);
1417 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1418 eep->ee_index, &oword, B_TRUE);
1420 EFX_ZERO_OWORD(oword);
1421 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
1426 __in efx_nic_t *enp)
1428 _NOTE(ARGUNUSED(enp))
1431 #endif /* EFSYS_OPT_SIENA */