/* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
38 #if EFSYS_OPT_MON_MCDI
43 #define EFX_EV_QSTAT_INCR(_eep, _stat) \
45 (_eep)->ee_stat[_stat]++; \
46 _NOTE(CONSTANTCONDITION) \
49 #define EFX_EV_QSTAT_INCR(_eep, _stat)
52 #define EFX_EV_PRESENT(_qword) \
53 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
54 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
60 static __checkReturn efx_rc_t
68 static __checkReturn efx_rc_t
71 __in unsigned int index,
72 __in efsys_mem_t *esmp,
83 static __checkReturn efx_rc_t
86 __in unsigned int count);
93 static __checkReturn efx_rc_t
96 __in unsigned int us);
100 siena_ev_qstats_update(
102 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
106 #endif /* EFSYS_OPT_SIENA */
109 static const efx_ev_ops_t __efx_ev_siena_ops = {
110 siena_ev_init, /* eevo_init */
111 siena_ev_fini, /* eevo_fini */
112 siena_ev_qcreate, /* eevo_qcreate */
113 siena_ev_qdestroy, /* eevo_qdestroy */
114 siena_ev_qprime, /* eevo_qprime */
115 siena_ev_qpost, /* eevo_qpost */
116 siena_ev_qmoderate, /* eevo_qmoderate */
118 siena_ev_qstats_update, /* eevo_qstats_update */
121 #endif /* EFSYS_OPT_SIENA */
123 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
124 static const efx_ev_ops_t __efx_ev_ef10_ops = {
125 ef10_ev_init, /* eevo_init */
126 ef10_ev_fini, /* eevo_fini */
127 ef10_ev_qcreate, /* eevo_qcreate */
128 ef10_ev_qdestroy, /* eevo_qdestroy */
129 ef10_ev_qprime, /* eevo_qprime */
130 ef10_ev_qpost, /* eevo_qpost */
131 ef10_ev_qmoderate, /* eevo_qmoderate */
133 ef10_ev_qstats_update, /* eevo_qstats_update */
136 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
139 __checkReturn efx_rc_t
143 const efx_ev_ops_t *eevop;
146 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
147 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
149 if (enp->en_mod_flags & EFX_MOD_EV) {
154 switch (enp->en_family) {
156 case EFX_FAMILY_SIENA:
157 eevop = &__efx_ev_siena_ops;
159 #endif /* EFSYS_OPT_SIENA */
161 #if EFSYS_OPT_HUNTINGTON
162 case EFX_FAMILY_HUNTINGTON:
163 eevop = &__efx_ev_ef10_ops;
165 #endif /* EFSYS_OPT_HUNTINGTON */
167 #if EFSYS_OPT_MEDFORD
168 case EFX_FAMILY_MEDFORD:
169 eevop = &__efx_ev_ef10_ops;
171 #endif /* EFSYS_OPT_MEDFORD */
173 #if EFSYS_OPT_MEDFORD2
174 case EFX_FAMILY_MEDFORD2:
175 eevop = &__efx_ev_ef10_ops;
177 #endif /* EFSYS_OPT_MEDFORD2 */
185 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
187 if ((rc = eevop->eevo_init(enp)) != 0)
190 enp->en_eevop = eevop;
191 enp->en_mod_flags |= EFX_MOD_EV;
198 EFSYS_PROBE1(fail1, efx_rc_t, rc);
200 enp->en_eevop = NULL;
201 enp->en_mod_flags &= ~EFX_MOD_EV;
209 const efx_ev_ops_t *eevop = enp->en_eevop;
211 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
212 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
213 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
214 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
215 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
216 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
218 eevop->eevo_fini(enp);
220 enp->en_eevop = NULL;
221 enp->en_mod_flags &= ~EFX_MOD_EV;
225 __checkReturn efx_rc_t
228 __in unsigned int index,
229 __in efsys_mem_t *esmp,
234 __deref_out efx_evq_t **eepp)
236 const efx_ev_ops_t *eevop = enp->en_eevop;
240 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
241 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
243 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
244 enp->en_nic_cfg.enc_evq_limit);
246 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
247 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
249 case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
260 /* Allocate an EVQ object */
261 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
267 eep->ee_magic = EFX_EVQ_MAGIC;
269 eep->ee_index = index;
270 eep->ee_mask = ndescs - 1;
271 eep->ee_flags = flags;
275 * Set outputs before the queue is created because interrupts may be
276 * raised for events immediately after the queue is created, before the
277 * function call below returns. See bug58606.
279 * The eepp pointer passed in by the client must therefore point to data
280 * shared with the client's event processing context.
285 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
296 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
302 EFSYS_PROBE1(fail1, efx_rc_t, rc);
310 efx_nic_t *enp = eep->ee_enp;
311 const efx_ev_ops_t *eevop = enp->en_eevop;
313 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
315 EFSYS_ASSERT(enp->en_ev_qcount != 0);
318 eevop->eevo_qdestroy(eep);
320 /* Free the EVQ object */
321 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
324 __checkReturn efx_rc_t
327 __in unsigned int count)
329 efx_nic_t *enp = eep->ee_enp;
330 const efx_ev_ops_t *eevop = enp->en_eevop;
333 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
335 if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
340 if ((rc = eevop->eevo_qprime(eep, count)) != 0)
348 EFSYS_PROBE1(fail1, efx_rc_t, rc);
352 __checkReturn boolean_t
355 __in unsigned int count)
360 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
362 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
363 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
365 return (EFX_EV_PRESENT(qword));
368 #if EFSYS_OPT_EV_PREFETCH
373 __in unsigned int count)
377 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
379 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
380 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
383 #endif /* EFSYS_OPT_EV_PREFETCH */
385 #define EFX_EV_BATCH 8
390 __inout unsigned int *countp,
391 __in const efx_ev_callbacks_t *eecp,
394 efx_qword_t ev[EFX_EV_BATCH];
401 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
402 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
403 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
405 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
406 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
407 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
408 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
409 FSE_AZ_EV_CODE_DRV_GEN_EV);
411 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
412 FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
415 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
416 EFSYS_ASSERT(countp != NULL);
417 EFSYS_ASSERT(eecp != NULL);
421 /* Read up until the end of the batch period */
422 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
423 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
424 for (total = 0; total < batch; ++total) {
425 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
427 if (!EFX_EV_PRESENT(ev[total]))
430 EFSYS_PROBE3(event, unsigned int, eep->ee_index,
431 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
432 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
434 offset += sizeof (efx_qword_t);
437 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
439 * Prefetch the next batch when we get within PREFETCH_PERIOD
440 * of a completed batch. If the batch is smaller, then prefetch
443 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
444 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
445 #endif /* EFSYS_OPT_EV_PREFETCH */
447 /* Process the batch of events */
448 for (index = 0; index < total; ++index) {
449 boolean_t should_abort;
452 #if EFSYS_OPT_EV_PREFETCH
453 /* Prefetch if we've now reached the batch period */
454 if (total == batch &&
455 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
456 offset = (count + batch) & eep->ee_mask;
457 offset *= sizeof (efx_qword_t);
459 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
461 #endif /* EFSYS_OPT_EV_PREFETCH */
463 EFX_EV_QSTAT_INCR(eep, EV_ALL);
465 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
467 case FSE_AZ_EV_CODE_RX_EV:
468 should_abort = eep->ee_rx(eep,
469 &(ev[index]), eecp, arg);
471 case FSE_AZ_EV_CODE_TX_EV:
472 should_abort = eep->ee_tx(eep,
473 &(ev[index]), eecp, arg);
475 case FSE_AZ_EV_CODE_DRIVER_EV:
476 should_abort = eep->ee_driver(eep,
477 &(ev[index]), eecp, arg);
479 case FSE_AZ_EV_CODE_DRV_GEN_EV:
480 should_abort = eep->ee_drv_gen(eep,
481 &(ev[index]), eecp, arg);
484 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
485 should_abort = eep->ee_mcdi(eep,
486 &(ev[index]), eecp, arg);
489 case FSE_AZ_EV_CODE_GLOBAL_EV:
490 if (eep->ee_global) {
491 should_abort = eep->ee_global(eep,
492 &(ev[index]), eecp, arg);
495 /* else fallthrough */
497 EFSYS_PROBE3(bad_event,
498 unsigned int, eep->ee_index,
500 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
502 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
504 EFSYS_ASSERT(eecp->eec_exception != NULL);
505 (void) eecp->eec_exception(arg,
506 EFX_EXCEPTION_EV_ERROR, code);
507 should_abort = B_TRUE;
510 /* Ignore subsequent events */
514 * Poison batch to ensure the outer
515 * loop is broken out of.
517 EFSYS_ASSERT(batch <= EFX_EV_BATCH);
518 batch += (EFX_EV_BATCH << 1);
519 EFSYS_ASSERT(total != batch);
525 * Now that the hardware has most likely moved onto dma'ing
526 * into the next cache line, clear the processed events. Take
527 * care to only clear out events that we've processed
529 EFX_SET_QWORD(ev[0]);
530 offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
531 for (index = 0; index < total; ++index) {
532 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
533 offset += sizeof (efx_qword_t);
538 } while (total == batch);
548 efx_nic_t *enp = eep->ee_enp;
549 const efx_ev_ops_t *eevop = enp->en_eevop;
551 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
553 EFSYS_ASSERT(eevop != NULL &&
554 eevop->eevo_qpost != NULL);
556 eevop->eevo_qpost(eep, data);
559 __checkReturn efx_rc_t
560 efx_ev_usecs_to_ticks(
562 __in unsigned int us,
563 __out unsigned int *ticksp)
565 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
568 /* Convert microseconds to a timer tick count */
571 else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
572 ticks = 1; /* Never round down to zero */
574 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
580 __checkReturn efx_rc_t
583 __in unsigned int us)
585 efx_nic_t *enp = eep->ee_enp;
586 const efx_ev_ops_t *eevop = enp->en_eevop;
589 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
591 if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
592 EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
597 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
605 EFSYS_PROBE1(fail1, efx_rc_t, rc);
611 efx_ev_qstats_update(
613 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
615 { efx_nic_t *enp = eep->ee_enp;
616 const efx_ev_ops_t *eevop = enp->en_eevop;
618 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
620 eevop->eevo_qstats_update(eep, stat);
623 #endif /* EFSYS_OPT_QSTATS */
627 static __checkReturn efx_rc_t
634 * Program the event queue for receive and transmit queue
637 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
638 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
639 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
645 static __checkReturn boolean_t
648 __in efx_qword_t *eqp,
651 __inout uint16_t *flagsp)
653 boolean_t ignore = B_FALSE;
655 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
656 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
657 EFSYS_PROBE(tobe_disc);
659 * Assume this is a unicast address mismatch, unless below
660 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
661 * EV_RX_PAUSE_FRM_ERR is set.
663 (*flagsp) |= EFX_ADDR_MISMATCH;
666 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
667 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
668 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
669 (*flagsp) |= EFX_DISCARD;
671 #if EFSYS_OPT_RX_SCATTER
673 * Lookout for payload queue ran dry errors and ignore them.
675 * Sadly for the header/data split cases, the descriptor
676 * pointer in this event refers to the header queue and
677 * therefore cannot be easily detected as duplicate.
678 * So we drop these and rely on the receive processing seeing
679 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
680 * the partially received packet.
682 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
683 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
684 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
686 #endif /* EFSYS_OPT_RX_SCATTER */
689 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
690 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
691 EFSYS_PROBE(crc_err);
692 (*flagsp) &= ~EFX_ADDR_MISMATCH;
693 (*flagsp) |= EFX_DISCARD;
696 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
697 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
698 EFSYS_PROBE(pause_frm_err);
699 (*flagsp) &= ~EFX_ADDR_MISMATCH;
700 (*flagsp) |= EFX_DISCARD;
703 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
704 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
705 EFSYS_PROBE(owner_id_err);
706 (*flagsp) |= EFX_DISCARD;
709 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
710 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
711 EFSYS_PROBE(ipv4_err);
712 (*flagsp) &= ~EFX_CKSUM_IPV4;
715 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
716 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
717 EFSYS_PROBE(udp_chk_err);
718 (*flagsp) &= ~EFX_CKSUM_TCPUDP;
721 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
722 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
725 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
726 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
729 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
735 static __checkReturn boolean_t
738 __in efx_qword_t *eqp,
739 __in const efx_ev_callbacks_t *eecp,
746 #if EFSYS_OPT_RX_SCATTER
748 boolean_t jumbo_cont;
749 #endif /* EFSYS_OPT_RX_SCATTER */
754 boolean_t should_abort;
756 EFX_EV_QSTAT_INCR(eep, EV_RX);
758 /* Basic packet information */
759 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
760 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
761 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
762 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
764 #if EFSYS_OPT_RX_SCATTER
765 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
766 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
767 #endif /* EFSYS_OPT_RX_SCATTER */
769 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
771 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
774 * If packet is marked as OK and packet type is TCP/IP or
775 * UDP/IP or other IP, then we can rely on the hardware checksums.
778 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
779 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
781 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
782 flags |= EFX_PKT_IPV6;
784 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
785 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
789 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
790 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
792 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
793 flags |= EFX_PKT_IPV6;
795 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
796 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
800 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
802 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
803 flags = EFX_PKT_IPV6;
805 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
806 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
810 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
811 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
816 EFSYS_ASSERT(B_FALSE);
821 #if EFSYS_OPT_RX_SCATTER
822 /* Report scatter and header/lookahead split buffer flags */
824 flags |= EFX_PKT_START;
826 flags |= EFX_PKT_CONT;
827 #endif /* EFSYS_OPT_RX_SCATTER */
829 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
831 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
833 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
834 uint32_t, size, uint16_t, flags);
840 /* If we're not discarding the packet then it is ok */
841 if (~flags & EFX_DISCARD)
842 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
844 /* Detect multicast packets that didn't match the filter */
845 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
846 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
848 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
849 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
851 EFSYS_PROBE(mcast_mismatch);
852 flags |= EFX_ADDR_MISMATCH;
855 flags |= EFX_PKT_UNICAST;
859 * The packet parser in Siena can abort parsing packets under
860 * certain error conditions, setting the PKT_NOT_PARSED bit
861 * (which clears PKT_OK). If this is set, then don't trust
862 * the PKT_TYPE field.
867 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
869 flags |= EFX_CHECK_VLAN;
872 if (~flags & EFX_CHECK_VLAN) {
875 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
876 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
877 flags |= EFX_PKT_VLAN_TAGGED;
880 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
881 uint32_t, size, uint16_t, flags);
883 EFSYS_ASSERT(eecp->eec_rx != NULL);
884 should_abort = eecp->eec_rx(arg, label, id, size, flags);
886 return (should_abort);
889 static __checkReturn boolean_t
892 __in efx_qword_t *eqp,
893 __in const efx_ev_callbacks_t *eecp,
898 boolean_t should_abort;
900 EFX_EV_QSTAT_INCR(eep, EV_TX);
902 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
903 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
904 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
905 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
907 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
908 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
910 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
912 EFSYS_ASSERT(eecp->eec_tx != NULL);
913 should_abort = eecp->eec_tx(arg, label, id);
915 return (should_abort);
918 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
919 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
920 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
921 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
923 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
924 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
926 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
927 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
929 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
930 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
932 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
936 static __checkReturn boolean_t
939 __in efx_qword_t *eqp,
940 __in const efx_ev_callbacks_t *eecp,
943 _NOTE(ARGUNUSED(eqp, eecp, arg))
945 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
950 static __checkReturn boolean_t
953 __in efx_qword_t *eqp,
954 __in const efx_ev_callbacks_t *eecp,
957 boolean_t should_abort;
959 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
960 should_abort = B_FALSE;
962 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
963 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
966 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
968 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
970 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
972 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
973 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
977 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
981 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
982 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
984 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
985 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
988 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
990 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
992 should_abort = eecp->eec_rxq_flush_failed(arg,
995 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
997 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
999 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
1004 case FSE_AZ_EVQ_INIT_DONE_EV:
1005 EFSYS_ASSERT(eecp->eec_initialized != NULL);
1006 should_abort = eecp->eec_initialized(arg);
1010 case FSE_AZ_EVQ_NOT_EN_EV:
1011 EFSYS_PROBE(evq_not_en);
1014 case FSE_AZ_SRM_UPD_DONE_EV: {
1017 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
1019 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1021 EFSYS_ASSERT(eecp->eec_sram != NULL);
1022 should_abort = eecp->eec_sram(arg, code);
1026 case FSE_AZ_WAKE_UP_EV: {
1029 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1031 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
1032 should_abort = eecp->eec_wake_up(arg, id);
1036 case FSE_AZ_TX_PKT_NON_TCP_UDP:
1037 EFSYS_PROBE(tx_pkt_non_tcp_udp);
1040 case FSE_AZ_TIMER_EV: {
1043 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
1045 EFSYS_ASSERT(eecp->eec_timer != NULL);
1046 should_abort = eecp->eec_timer(arg, id);
1050 case FSE_AZ_RX_DSC_ERROR_EV:
1051 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
1053 EFSYS_PROBE(rx_dsc_error);
1055 EFSYS_ASSERT(eecp->eec_exception != NULL);
1056 should_abort = eecp->eec_exception(arg,
1057 EFX_EXCEPTION_RX_DSC_ERROR, 0);
1061 case FSE_AZ_TX_DSC_ERROR_EV:
1062 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
1064 EFSYS_PROBE(tx_dsc_error);
1066 EFSYS_ASSERT(eecp->eec_exception != NULL);
1067 should_abort = eecp->eec_exception(arg,
1068 EFX_EXCEPTION_TX_DSC_ERROR, 0);
1076 return (should_abort);
1079 static __checkReturn boolean_t
1081 __in efx_evq_t *eep,
1082 __in efx_qword_t *eqp,
1083 __in const efx_ev_callbacks_t *eecp,
1087 boolean_t should_abort;
1089 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1091 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
1092 if (data >= ((uint32_t)1 << 16)) {
1093 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1094 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1095 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1099 EFSYS_ASSERT(eecp->eec_software != NULL);
1100 should_abort = eecp->eec_software(arg, (uint16_t)data);
1102 return (should_abort);
1107 static __checkReturn boolean_t
1109 __in efx_evq_t *eep,
1110 __in efx_qword_t *eqp,
1111 __in const efx_ev_callbacks_t *eecp,
1114 efx_nic_t *enp = eep->ee_enp;
1116 boolean_t should_abort = B_FALSE;
1118 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
1120 if (enp->en_family != EFX_FAMILY_SIENA)
1123 EFSYS_ASSERT(eecp->eec_link_change != NULL);
1124 EFSYS_ASSERT(eecp->eec_exception != NULL);
1125 #if EFSYS_OPT_MON_STATS
1126 EFSYS_ASSERT(eecp->eec_monitor != NULL);
1129 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
1131 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
1133 case MCDI_EVENT_CODE_BADSSERT:
1134 efx_mcdi_ev_death(enp, EINTR);
1137 case MCDI_EVENT_CODE_CMDDONE:
1138 efx_mcdi_ev_cpl(enp,
1139 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
1140 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
1141 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
1144 case MCDI_EVENT_CODE_LINKCHANGE: {
1145 efx_link_mode_t link_mode;
1147 siena_phy_link_ev(enp, eqp, &link_mode);
1148 should_abort = eecp->eec_link_change(arg, link_mode);
1151 case MCDI_EVENT_CODE_SENSOREVT: {
1152 #if EFSYS_OPT_MON_STATS
1154 efx_mon_stat_value_t value;
1157 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
1158 should_abort = eecp->eec_monitor(arg, id, value);
1159 else if (rc == ENOTSUP) {
1160 should_abort = eecp->eec_exception(arg,
1161 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1162 MCDI_EV_FIELD(eqp, DATA));
1164 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
1166 should_abort = B_FALSE;
1170 case MCDI_EVENT_CODE_SCHEDERR:
1171 /* Informational only */
1174 case MCDI_EVENT_CODE_REBOOT:
1175 efx_mcdi_ev_death(enp, EIO);
1178 case MCDI_EVENT_CODE_MAC_STATS_DMA:
1179 #if EFSYS_OPT_MAC_STATS
1180 if (eecp->eec_mac_stats != NULL) {
1181 eecp->eec_mac_stats(arg,
1182 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1187 case MCDI_EVENT_CODE_FWALERT: {
1188 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1190 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1191 should_abort = eecp->eec_exception(arg,
1192 EFX_EXCEPTION_FWALERT_SRAM,
1193 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1195 should_abort = eecp->eec_exception(arg,
1196 EFX_EXCEPTION_UNKNOWN_FWALERT,
1197 MCDI_EV_FIELD(eqp, DATA));
1202 EFSYS_PROBE1(mc_pcol_error, int, code);
1207 return (should_abort);
1210 #endif /* EFSYS_OPT_MCDI */
1212 static __checkReturn efx_rc_t
1214 __in efx_evq_t *eep,
1215 __in unsigned int count)
1217 efx_nic_t *enp = eep->ee_enp;
1221 rptr = count & eep->ee_mask;
1223 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
1225 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
1233 __in efx_evq_t *eep,
1236 efx_nic_t *enp = eep->ee_enp;
1240 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
1241 FSF_AZ_EV_DATA_DW0, (uint32_t)data);
1243 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
1244 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
1245 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
1247 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
1250 static __checkReturn efx_rc_t
1252 __in efx_evq_t *eep,
1253 __in unsigned int us)
1255 efx_nic_t *enp = eep->ee_enp;
1256 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1257 unsigned int locked;
1261 if (us > encp->enc_evq_timer_max_us) {
1266 /* If the value is zero then disable the timer */
1268 EFX_POPULATE_DWORD_2(dword,
1269 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
1270 FRF_CZ_TC_TIMER_VAL, 0);
1274 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
1277 EFSYS_ASSERT(ticks > 0);
1278 EFX_POPULATE_DWORD_2(dword,
1279 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
1280 FRF_CZ_TC_TIMER_VAL, ticks - 1);
1283 locked = (eep->ee_index == 0) ? 1 : 0;
1285 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
1286 eep->ee_index, &dword, locked);
1293 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1298 static __checkReturn efx_rc_t
1300 __in efx_nic_t *enp,
1301 __in unsigned int index,
1302 __in efsys_mem_t *esmp,
1306 __in uint32_t flags,
1307 __in efx_evq_t *eep)
1309 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1313 boolean_t notify_mode;
1315 _NOTE(ARGUNUSED(esmp))
1317 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
1318 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
1320 if (!ISP2(ndescs) ||
1321 (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
1325 if (index >= encp->enc_evq_limit) {
1329 #if EFSYS_OPT_RX_SCALE
1330 if (enp->en_intr.ei_type == EFX_INTR_LINE &&
1331 index >= EFX_MAXRSS_LEGACY) {
1336 for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
1338 if ((1 << size) == (int)(ndescs / EFX_EVQ_MINNEVS))
1340 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1345 /* Set up the handler table */
1346 eep->ee_rx = siena_ev_rx;
1347 eep->ee_tx = siena_ev_tx;
1348 eep->ee_driver = siena_ev_driver;
1349 eep->ee_global = siena_ev_global;
1350 eep->ee_drv_gen = siena_ev_drv_gen;
1352 eep->ee_mcdi = siena_ev_mcdi;
1353 #endif /* EFSYS_OPT_MCDI */
1355 notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
1356 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
1358 /* Set up the new event queue */
1359 EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
1360 FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
1361 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1362 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
1364 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
1365 FRF_AZ_EVQ_BUF_BASE_ID, id);
1367 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
1369 /* Set initial interrupt moderation */
1370 siena_ev_qmoderate(eep, us);
1376 #if EFSYS_OPT_RX_SCALE
1383 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1388 #endif /* EFSYS_OPT_SIENA */
1390 #if EFSYS_OPT_QSTATS
1392 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */
1393 static const char * const __efx_ev_qstat_name[] = {
1400 "rx_buf_owner_id_err",
1401 "rx_ipv4_hdr_chksum_err",
1402 "rx_tcp_udp_chksum_err",
1406 "rx_mcast_hash_match",
1423 "driver_srm_upd_done",
1424 "driver_tx_descq_fls_done",
1425 "driver_rx_descq_fls_done",
1426 "driver_rx_descq_fls_failed",
1427 "driver_rx_dsc_error",
1428 "driver_tx_dsc_error",
1432 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
1436 __in efx_nic_t *enp,
1437 __in unsigned int id)
1439 _NOTE(ARGUNUSED(enp))
1441 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1442 EFSYS_ASSERT3U(id, <, EV_NQSTATS);
1444 return (__efx_ev_qstat_name[id]);
1446 #endif /* EFSYS_OPT_NAMES */
1447 #endif /* EFSYS_OPT_QSTATS */
1451 #if EFSYS_OPT_QSTATS
1453 siena_ev_qstats_update(
1454 __in efx_evq_t *eep,
1455 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
1459 for (id = 0; id < EV_NQSTATS; id++) {
1460 efsys_stat_t *essp = &stat[id];
1462 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
1463 eep->ee_stat[id] = 0;
1466 #endif /* EFSYS_OPT_QSTATS */
1470 __in efx_evq_t *eep)
1472 efx_nic_t *enp = eep->ee_enp;
1475 /* Purge event queue */
1476 EFX_ZERO_OWORD(oword);
1478 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
1479 eep->ee_index, &oword, B_TRUE);
1481 EFX_ZERO_OWORD(oword);
1482 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
1487 __in efx_nic_t *enp)
1489 _NOTE(ARGUNUSED(enp))
1492 #endif /* EFSYS_OPT_SIENA */