2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2007-2016 Solarflare Communications Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
18 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
26 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * The views and conclusions contained in the software and documentation are
29 * those of the authors and should not be interpreted as representing official
30 * policies, either expressed or implied, of the FreeBSD Project.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
38 #if EFSYS_OPT_MON_MCDI
#if EFSYS_OPT_QSTATS
/* Bump one per-queue event statistic; compiled out without QSTATS. */
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif

/*
 * Event ring entries are initialised to all-ones, so an entry whose two
 * dwords are not both 0xffffffff contains a valid (pending) event.
 */
#define	EFX_EV_PRESENT(_qword)						\
	(EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff &&	\
	EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
#if EFSYS_OPT_SIENA

/* Forward declarations for the Siena eevo_* method implementations below. */

static	__checkReturn	efx_rc_t
siena_ev_init(
	__in		efx_nic_t *enp);

static			void
siena_ev_fini(
	__in		efx_nic_t *enp);

static	__checkReturn	efx_rc_t
siena_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep);

static			void
siena_ev_qdestroy(
	__in		efx_evq_t *eep);

static	__checkReturn	efx_rc_t
siena_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count);

static			void
siena_ev_qpost(
	__in		efx_evq_t *eep,
	__in		uint16_t data);

static	__checkReturn	efx_rc_t
siena_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us);

#if EFSYS_OPT_QSTATS
static			void
siena_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat);

#endif

#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_SIENA
/* Method table binding the generic efx_ev_* entry points to Siena. */
static const efx_ev_ops_t	__efx_ev_siena_ops = {
	siena_ev_init,			/* eevo_init */
	siena_ev_fini,			/* eevo_fini */
	siena_ev_qcreate,		/* eevo_qcreate */
	siena_ev_qdestroy,		/* eevo_qdestroy */
	siena_ev_qprime,		/* eevo_qprime */
	siena_ev_qpost,			/* eevo_qpost */
	siena_ev_qmoderate,		/* eevo_qmoderate */
#if EFSYS_OPT_QSTATS
	siena_ev_qstats_update,		/* eevo_qstats_update */
#endif /* EFSYS_OPT_QSTATS */
};
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/* Method table shared by all EF10-architecture controllers. */
static const efx_ev_ops_t	__efx_ev_ef10_ops = {
	ef10_ev_init,			/* eevo_init */
	ef10_ev_fini,			/* eevo_fini */
	ef10_ev_qcreate,		/* eevo_qcreate */
	ef10_ev_qdestroy,		/* eevo_qdestroy */
	ef10_ev_qprime,			/* eevo_qprime */
	ef10_ev_qpost,			/* eevo_qpost */
	ef10_ev_qmoderate,		/* eevo_qmoderate */
#if EFSYS_OPT_QSTATS
	ef10_ev_qstats_update,		/* eevo_qstats_update */
#endif /* EFSYS_OPT_QSTATS */
};
#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
136 __checkReturn efx_rc_t
140 const efx_ev_ops_t *eevop;
143 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
144 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
146 if (enp->en_mod_flags & EFX_MOD_EV) {
151 switch (enp->en_family) {
153 case EFX_FAMILY_SIENA:
154 eevop = &__efx_ev_siena_ops;
156 #endif /* EFSYS_OPT_SIENA */
158 #if EFSYS_OPT_HUNTINGTON
159 case EFX_FAMILY_HUNTINGTON:
160 eevop = &__efx_ev_ef10_ops;
162 #endif /* EFSYS_OPT_HUNTINGTON */
164 #if EFSYS_OPT_MEDFORD
165 case EFX_FAMILY_MEDFORD:
166 eevop = &__efx_ev_ef10_ops;
168 #endif /* EFSYS_OPT_MEDFORD */
170 #if EFSYS_OPT_MEDFORD2
171 case EFX_FAMILY_MEDFORD2:
172 eevop = &__efx_ev_ef10_ops;
174 #endif /* EFSYS_OPT_MEDFORD2 */
182 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
184 if ((rc = eevop->eevo_init(enp)) != 0)
187 enp->en_eevop = eevop;
188 enp->en_mod_flags |= EFX_MOD_EV;
195 EFSYS_PROBE1(fail1, efx_rc_t, rc);
197 enp->en_eevop = NULL;
198 enp->en_mod_flags &= ~EFX_MOD_EV;
206 const efx_ev_ops_t *eevop = enp->en_eevop;
208 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
209 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
210 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
211 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
212 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
213 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
215 eevop->eevo_fini(enp);
217 enp->en_eevop = NULL;
218 enp->en_mod_flags &= ~EFX_MOD_EV;
221 __checkReturn efx_rc_t
224 __in unsigned int index,
225 __in efsys_mem_t *esmp,
230 __deref_out efx_evq_t **eepp)
232 const efx_ev_ops_t *eevop = enp->en_eevop;
236 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
237 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
239 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
240 enp->en_nic_cfg.enc_evq_limit);
242 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
243 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
245 case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
256 /* Allocate an EVQ object */
257 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
263 eep->ee_magic = EFX_EVQ_MAGIC;
265 eep->ee_index = index;
266 eep->ee_mask = ndescs - 1;
267 eep->ee_flags = flags;
271 * Set outputs before the queue is created because interrupts may be
272 * raised for events immediately after the queue is created, before the
273 * function call below returns. See bug58606.
275 * The eepp pointer passed in by the client must therefore point to data
276 * shared with the client's event processing context.
281 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
292 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
298 EFSYS_PROBE1(fail1, efx_rc_t, rc);
306 efx_nic_t *enp = eep->ee_enp;
307 const efx_ev_ops_t *eevop = enp->en_eevop;
309 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
311 EFSYS_ASSERT(enp->en_ev_qcount != 0);
314 eevop->eevo_qdestroy(eep);
316 /* Free the EVQ object */
317 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
320 __checkReturn efx_rc_t
323 __in unsigned int count)
325 efx_nic_t *enp = eep->ee_enp;
326 const efx_ev_ops_t *eevop = enp->en_eevop;
329 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
331 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
336 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
344 EFSYS_PROBE1(fail1, efx_rc_t, rc);
348 __checkReturn boolean_t
351 __in unsigned int count)
356 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
358 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
359 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
361 return (EFX_EV_PRESENT(qword));
#if EFSYS_OPT_EV_PREFETCH

/* Prefetch the ring entry at "count" into cache ahead of polling. */
			void
efx_ev_qprefetch(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	size_t offset;

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

	offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
	EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
}

#endif	/* EFSYS_OPT_EV_PREFETCH */
381 #define EFX_EV_BATCH 8
386 __inout unsigned int *countp,
387 __in const efx_ev_callbacks_t *eecp,
390 efx_qword_t ev[EFX_EV_BATCH];
397 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
398 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
399 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
401 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
402 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
403 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
404 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
405 FSE_AZ_EV_CODE_DRV_GEN_EV);
407 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
408 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
411 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
412 EFSYS_ASSERT(countp != NULL);
413 EFSYS_ASSERT(eecp != NULL);
417 /* Read up until the end of the batch period */
418 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
419 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
420 for (total = 0; total < batch; ++total) {
421 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
423 if (!EFX_EV_PRESENT(ev[total]))
426 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
427 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
428 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
430 offset += sizeof (efx_qword_t);
433 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
435 * Prefetch the next batch when we get within PREFETCH_PERIOD
436 * of a completed batch. If the batch is smaller, then prefetch
439 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
440 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
441 #endif /* EFSYS_OPT_EV_PREFETCH */
443 /* Process the batch of events */
444 for (index = 0; index < total; ++index) {
445 boolean_t should_abort;
448 #if EFSYS_OPT_EV_PREFETCH
449 /* Prefetch if we've now reached the batch period */
450 if (total == batch &&
451 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
452 offset = (count + batch) & eep->ee_mask;
453 offset *= sizeof (efx_qword_t);
455 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
457 #endif /* EFSYS_OPT_EV_PREFETCH */
459 EFX_EV_QSTAT_INCR(eep, EV_ALL);
461 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
463 case FSE_AZ_EV_CODE_RX_EV:
464 should_abort = eep->ee_rx(eep,
465 &(ev[index]), eecp, arg);
467 case FSE_AZ_EV_CODE_TX_EV:
468 should_abort = eep->ee_tx(eep,
469 &(ev[index]), eecp, arg);
471 case FSE_AZ_EV_CODE_DRIVER_EV:
472 should_abort = eep->ee_driver(eep,
473 &(ev[index]), eecp, arg);
475 case FSE_AZ_EV_CODE_DRV_GEN_EV:
476 should_abort = eep->ee_drv_gen(eep,
477 &(ev[index]), eecp, arg);
480 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
481 should_abort = eep->ee_mcdi(eep,
482 &(ev[index]), eecp, arg);
485 case FSE_AZ_EV_CODE_GLOBAL_EV:
486 if (eep->ee_global) {
487 should_abort = eep->ee_global(eep,
488 &(ev[index]), eecp, arg);
491 /* else fallthrough */
493 EFSYS_PROBE3(bad_event,
494 unsigned int, eep->ee_index,
496 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
498 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
500 EFSYS_ASSERT(eecp->eec_exception != NULL);
501 (void) eecp->eec_exception(arg,
502 EFX_EXCEPTION_EV_ERROR, code);
503 should_abort = B_TRUE;
506 /* Ignore subsequent events */
510 * Poison batch to ensure the outer
511 * loop is broken out of.
513 EFSYS_ASSERT(batch <= EFX_EV_BATCH);
514 batch += (EFX_EV_BATCH << 1);
515 EFSYS_ASSERT(total != batch);
521 * Now that the hardware has most likely moved onto dma'ing
522 * into the next cache line, clear the processed events. Take
523 * care to only clear out events that we've processed
525 EFX_SET_QWORD(ev[0]);
526 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
527 for (index = 0; index < total; ++index) {
528 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
529 offset += sizeof (efx_qword_t);
534 } while (total == batch);
544 efx_nic_t *enp = eep->ee_enp;
545 const efx_ev_ops_t *eevop = enp->en_eevop;
547 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
549 EFSYS_ASSERT(eevop != NULL &&
550 eevop->eevo_qpost != NULL);
552 eevop->eevo_qpost(eep, data);
555 __checkReturn efx_rc_t
556 efx_ev_usecs_to_ticks(
558 __in unsigned int us,
559 __out unsigned int *ticksp)
561 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
564 /* Convert microseconds to a timer tick count */
567 else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
568 ticks = 1; /* Never round down to zero */
570 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
576 __checkReturn efx_rc_t
579 __in unsigned int us)
581 efx_nic_t *enp = eep->ee_enp;
582 const efx_ev_ops_t *eevop = enp->en_eevop;
585 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
587 if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
588 EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
593 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
601 EFSYS_PROBE1(fail1, efx_rc_t, rc);
#if EFSYS_OPT_QSTATS
/* Fold the queue's accumulated event statistics into the caller's array. */
					void
efx_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	efx_nic_t *enp = eep->ee_enp;
	const efx_ev_ops_t *eevop = enp->en_eevop;

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

	eevop->eevo_qstats_update(eep, stat);
}

#endif	/* EFSYS_OPT_QSTATS */
623 static __checkReturn efx_rc_t
630 * Program the event queue for receive and transmit queue
633 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
634 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
635 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
641 static __checkReturn boolean_t
644 __in efx_qword_t *eqp,
647 __inout uint16_t *flagsp)
649 boolean_t ignore = B_FALSE;
651 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
652 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
653 EFSYS_PROBE(tobe_disc);
655 * Assume this is a unicast address mismatch, unless below
656 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
657 * EV_RX_PAUSE_FRM_ERR is set.
659 (*flagsp) |= EFX_ADDR_MISMATCH;
662 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
663 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
664 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
665 (*flagsp) |= EFX_DISCARD;
667 #if EFSYS_OPT_RX_SCATTER
669 * Lookout for payload queue ran dry errors and ignore them.
671 * Sadly for the header/data split cases, the descriptor
672 * pointer in this event refers to the header queue and
673 * therefore cannot be easily detected as duplicate.
674 * So we drop these and rely on the receive processing seeing
675 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
676 * the partially received packet.
678 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
679 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
680 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
682 #endif /* EFSYS_OPT_RX_SCATTER */
685 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
686 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
687 EFSYS_PROBE(crc_err);
688 (*flagsp) &= ~EFX_ADDR_MISMATCH;
689 (*flagsp) |= EFX_DISCARD;
692 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
693 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
694 EFSYS_PROBE(pause_frm_err);
695 (*flagsp) &= ~EFX_ADDR_MISMATCH;
696 (*flagsp) |= EFX_DISCARD;
699 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
700 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
701 EFSYS_PROBE(owner_id_err);
702 (*flagsp) |= EFX_DISCARD;
705 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
706 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
707 EFSYS_PROBE(ipv4_err);
708 (*flagsp) &= ~EFX_CKSUM_IPV4;
711 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
712 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
713 EFSYS_PROBE(udp_chk_err);
714 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
717 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
718 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
721 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
722 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
725 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
731 static __checkReturn boolean_t
734 __in efx_qword_t *eqp,
735 __in const efx_ev_callbacks_t *eecp,
742 #if EFSYS_OPT_RX_SCATTER
744 boolean_t jumbo_cont;
745 #endif /* EFSYS_OPT_RX_SCATTER */
750 boolean_t should_abort;
752 EFX_EV_QSTAT_INCR(eep, EV_RX);
754 /* Basic packet information */
755 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
756 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
757 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
758 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
760 #if EFSYS_OPT_RX_SCATTER
761 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
762 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
763 #endif /* EFSYS_OPT_RX_SCATTER */
765 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
767 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
770 * If packet is marked as OK and packet type is TCP/IP or
771 * UDP/IP or other IP, then we can rely on the hardware checksums.
774 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
775 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
777 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
778 flags |= EFX_PKT_IPV6;
780 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
781 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
785 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
786 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
788 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
789 flags |= EFX_PKT_IPV6;
791 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
792 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
796 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
798 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
799 flags = EFX_PKT_IPV6;
801 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
802 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
806 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
807 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
812 EFSYS_ASSERT(B_FALSE);
817 #if EFSYS_OPT_RX_SCATTER
818 /* Report scatter and header/lookahead split buffer flags */
820 flags |= EFX_PKT_START;
822 flags |= EFX_PKT_CONT;
823 #endif /* EFSYS_OPT_RX_SCATTER */
825 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
827 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
829 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
830 uint32_t, size, uint16_t, flags);
836 /* If we're not discarding the packet then it is ok */
837 if (~flags & EFX_DISCARD)
838 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
840 /* Detect multicast packets that didn't match the filter */
841 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
842 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
844 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
845 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
847 EFSYS_PROBE(mcast_mismatch);
848 flags |= EFX_ADDR_MISMATCH;
851 flags |= EFX_PKT_UNICAST;
855 * The packet parser in Siena can abort parsing packets under
856 * certain error conditions, setting the PKT_NOT_PARSED bit
857 * (which clears PKT_OK). If this is set, then don't trust
858 * the PKT_TYPE field.
863 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
865 flags |= EFX_CHECK_VLAN;
868 if (~flags & EFX_CHECK_VLAN) {
871 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
872 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
873 flags |= EFX_PKT_VLAN_TAGGED;
876 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
877 uint32_t, size, uint16_t, flags);
879 EFSYS_ASSERT(eecp->eec_rx != NULL);
880 should_abort = eecp->eec_rx(arg, label, id, size, flags);
882 return (should_abort);
885 static __checkReturn boolean_t
888 __in efx_qword_t *eqp,
889 __in const efx_ev_callbacks_t *eecp,
894 boolean_t should_abort;
896 EFX_EV_QSTAT_INCR(eep, EV_TX);
898 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
899 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
900 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
901 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
902 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
903 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
905 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
907 EFSYS_ASSERT(eecp->eec_tx != NULL);
908 should_abort = eecp->eec_tx(arg, label, id);
910 return (should_abort);
913 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
914 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
915 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
916 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
918 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
919 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
921 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
922 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
924 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
925 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
927 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
931 static __checkReturn boolean_t
934 __in efx_qword_t *eqp,
935 __in const efx_ev_callbacks_t *eecp,
938 _NOTE(ARGUNUSED(eqp, eecp, arg))
940 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
945 static __checkReturn boolean_t
948 __in efx_qword_t *eqp,
949 __in const efx_ev_callbacks_t *eecp,
952 boolean_t should_abort;
954 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
955 should_abort = B_FALSE;
957 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
958 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
961 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
963 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
965 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
967 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
968 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
972 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
976 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
977 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
979 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
980 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
983 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
985 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
987 should_abort = eecp->eec_rxq_flush_failed(arg,
990 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
992 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
994 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
999 case FSE_AZ_EVQ_INIT_DONE_EV:
1000 EFSYS_ASSERT(eecp->eec_initialized != NULL);
1001 should_abort = eecp->eec_initialized(arg);
1005 case FSE_AZ_EVQ_NOT_EN_EV:
1006 EFSYS_PROBE(evq_not_en);
1009 case FSE_AZ_SRM_UPD_DONE_EV: {
1012 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
1014 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1016 EFSYS_ASSERT(eecp->eec_sram != NULL);
1017 should_abort = eecp->eec_sram(arg, code);
1021 case FSE_AZ_WAKE_UP_EV: {
1024 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1026 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
1027 should_abort = eecp->eec_wake_up(arg, id);
1031 case FSE_AZ_TX_PKT_NON_TCP_UDP:
1032 EFSYS_PROBE(tx_pkt_non_tcp_udp);
1035 case FSE_AZ_TIMER_EV: {
1038 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1040 EFSYS_ASSERT(eecp->eec_timer != NULL);
1041 should_abort = eecp->eec_timer(arg, id);
1045 case FSE_AZ_RX_DSC_ERROR_EV:
1046 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
1048 EFSYS_PROBE(rx_dsc_error);
1050 EFSYS_ASSERT(eecp->eec_exception != NULL);
1051 should_abort = eecp->eec_exception(arg,
1052 EFX_EXCEPTION_RX_DSC_ERROR, 0);
1056 case FSE_AZ_TX_DSC_ERROR_EV:
1057 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
1059 EFSYS_PROBE(tx_dsc_error);
1061 EFSYS_ASSERT(eecp->eec_exception != NULL);
1062 should_abort = eecp->eec_exception(arg,
1063 EFX_EXCEPTION_TX_DSC_ERROR, 0);
1071 return (should_abort);
1074 static __checkReturn boolean_t
1076 __in efx_evq_t *eep,
1077 __in efx_qword_t *eqp,
1078 __in const efx_ev_callbacks_t *eecp,
1082 boolean_t should_abort;
1084 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1086 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
1087 if (data >= ((uint32_t)1 << 16)) {
1088 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1089 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1090 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1094 EFSYS_ASSERT(eecp->eec_software != NULL);
1095 should_abort = eecp->eec_software(arg, (uint16_t)data);
1097 return (should_abort);
#if EFSYS_OPT_MCDI

/*
 * Handle an MCDI response event: firmware assertions/reboots, command
 * completions, link changes, sensor events, MAC stats DMA and firmware
 * alerts are dispatched to the MCDI core or the client callbacks.
 */
static	__checkReturn	boolean_t
siena_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);

	if (enp->en_family != EFX_FAMILY_SIENA)
		goto out;

	EFSYS_ASSERT(eecp->eec_link_change != NULL);
	EFSYS_ASSERT(eecp->eec_exception != NULL);
#if EFSYS_OPT_MON_STATS
	EFSYS_ASSERT(eecp->eec_monitor != NULL);
#endif

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		siena_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}
	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
			should_abort = eecp->eec_monitor(arg, id, value);
		else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
				MCDI_EV_FIELD(eqp, DATA));
		} else
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
#else
		should_abort = B_FALSE;
#endif
		break;
	}
	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_FWALERT_SRAM,
				MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_FWALERT,
				MCDI_EV_FIELD(eqp, DATA));
		break;
	}
	default:
		EFSYS_PROBE1(mc_pcol_error, int, code);
		break;
	}

out:
	return (should_abort);
}

#endif	/* EFSYS_OPT_MCDI */
1207 static __checkReturn efx_rc_t
1209 __in efx_evq_t *eep,
1210 __in unsigned int count)
1212 efx_nic_t *enp = eep->ee_enp;
1216 rptr = count & eep->ee_mask;
1218 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1220 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
1228 __in efx_evq_t *eep,
1231 efx_nic_t *enp = eep->ee_enp;
1235 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1236 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1238 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1239 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1240 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1242 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
1245 static __checkReturn efx_rc_t
1247 __in efx_evq_t *eep,
1248 __in unsigned int us)
1250 efx_nic_t *enp = eep->ee_enp;
1251 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1252 unsigned int locked;
1256 if (us > encp->enc_evq_timer_max_us) {
1261 /* If the value is zero then disable the timer */
1263 EFX_POPULATE_DWORD_2(dword,
1264 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1265 FRF_CZ_TC_TIMER_VAL, 0);
1269 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
1272 EFSYS_ASSERT(ticks > 0);
1273 EFX_POPULATE_DWORD_2(dword,
1274 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
1275 FRF_CZ_TC_TIMER_VAL, ticks - 1);
1278 locked = (eep->ee_index == 0) ? 1 : 0;
1280 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1281 eep->ee_index, &dword, locked);
1288 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1293 static __checkReturn efx_rc_t
1295 __in efx_nic_t *enp,
1296 __in unsigned int index,
1297 __in efsys_mem_t *esmp,
1301 __in uint32_t flags,
1302 __in efx_evq_t *eep)
1304 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1308 boolean_t notify_mode;
1310 _NOTE(ARGUNUSED(esmp))
1312 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
1313 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
1315 if (!ISP2(ndescs) ||
1316 (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
1320 if (index >= encp->enc_evq_limit) {
1324 #if EFSYS_OPT_RX_SCALE
1325 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1326 index >= EFX_MAXRSS_LEGACY) {
1331 for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
1333 if ((1 << size) == (int)(ndescs / EFX_EVQ_MINNEVS))
1335 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1340 /* Set up the handler table */
1341 eep->ee_rx = siena_ev_rx;
1342 eep->ee_tx = siena_ev_tx;
1343 eep->ee_driver = siena_ev_driver;
1344 eep->ee_global = siena_ev_global;
1345 eep->ee_drv_gen = siena_ev_drv_gen;
1347 eep->ee_mcdi = siena_ev_mcdi;
1348 #endif /* EFSYS_OPT_MCDI */
1350 notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
1351 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
1353 /* Set up the new event queue */
1354 EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
1355 FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
1356 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1357 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1359 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1360 FRF_AZ_EVQ_BUF_BASE_ID, id);
1362 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1364 /* Set initial interrupt moderation */
1365 siena_ev_qmoderate(eep, us);
1371 #if EFSYS_OPT_RX_SCALE
1378 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1383 #endif /* EFSYS_OPT_SIENA */
1385 #if EFSYS_OPT_QSTATS
1387 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */
1388 static const char * const __efx_ev_qstat_name[] = {
1395 "rx_buf_owner_id_err",
1396 "rx_ipv4_hdr_chksum_err",
1397 "rx_tcp_udp_chksum_err",
1401 "rx_mcast_hash_match",
1418 "driver_srm_upd_done",
1419 "driver_tx_descq_fls_done",
1420 "driver_rx_descq_fls_done",
1421 "driver_rx_descq_fls_failed",
1422 "driver_rx_dsc_error",
1423 "driver_tx_dsc_error",
1427 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
1431 __in efx_nic_t *enp,
1432 __in unsigned int id)
1434 _NOTE(ARGUNUSED(enp))
1436 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1437 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1439 return (__efx_ev_qstat_name[id]);
1441 #endif /* EFSYS_OPT_NAMES */
1442 #endif /* EFSYS_OPT_QSTATS */
1446 #if EFSYS_OPT_QSTATS
1448 siena_ev_qstats_update(
1449 __in efx_evq_t *eep,
1450 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1454 for (id = 0; id < EV_NQSTATS; id++) {
1455 efsys_stat_t *essp = &stat[id];
1457 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1458 eep->ee_stat[id] = 0;
1461 #endif /* EFSYS_OPT_QSTATS */
1465 __in efx_evq_t *eep)
1467 efx_nic_t *enp = eep->ee_enp;
1470 /* Purge event queue */
1471 EFX_ZERO_OWORD(oword);
1473 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1474 eep->ee_index, &oword, B_TRUE);
1476 EFX_ZERO_OWORD(oword);
1477 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
1482 __in efx_nic_t *enp)
1484 _NOTE(ARGUNUSED(enp))
1487 #endif /* EFSYS_OPT_SIENA */