/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_QSTATS
#define EFX_EV_QSTAT_INCR(_eep, _stat)                                  \
    do {                                                                \
        (_eep)->ee_stat[_stat]++;                                       \
    _NOTE(CONSTANTCONDITION)                                            \
    } while (B_FALSE)
#else
#define EFX_EV_QSTAT_INCR(_eep, _stat)
#endif
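
/*
 * Note: the do { ... } while (B_FALSE) wrapper makes the increment behave
 * as a single C statement, so the macro can be used safely in unbraced
 * if/else bodies; when EFSYS_OPT_QSTATS is disabled the macro expands to
 * nothing and the per-queue statistics cost nothing at runtime.
 */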
static __checkReturn boolean_t
ef10_ev_rx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_tx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_driver(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_drv_gen(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_mcdi(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);
static __checkReturn efx_rc_t
efx_mcdi_set_evq_tmr(
    __in efx_nic_t *enp,
    __in uint32_t instance,
    __in uint32_t mode,
    __in uint32_t timer_ns)
{
    efx_mcdi_req_t req;
    uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
                        MC_CMD_SET_EVQ_TMR_OUT_LEN)];
    efx_rc_t rc;

    (void) memset(payload, 0, sizeof (payload));
    req.emr_cmd = MC_CMD_SET_EVQ_TMR;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail1;
    }

    if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
        rc = EMSGSIZE;
        goto fail2;
    }

    return (0);

fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}
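
/*
 * efx_mcdi_set_evq_tmr() programs the event queue moderation timer via
 * firmware rather than via BAR registers; ef10_ev_qmoderate() below uses it
 * when the bug61265 workaround is in effect, passing the timeout in
 * nanoseconds rather than in timer ticks.
 */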
static __checkReturn efx_rc_t
efx_mcdi_init_evq(
    __in efx_nic_t *enp,
    __in unsigned int instance,
    __in efsys_mem_t *esmp,
    __in size_t nevs,
    __in uint32_t irq,
    __in uint32_t us,
    __in uint32_t flags,
    __in boolean_t low_latency)
{
    efx_mcdi_req_t req;
    uint8_t payload[
        MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
            MC_CMD_INIT_EVQ_OUT_LEN)];
    efx_qword_t *dma_addr;
    uint64_t addr;
    int npages;
    int i;
    int ev_cut_through;
    efx_rc_t rc;

    npages = EFX_EVQ_NBUFS(nevs);
    if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
        rc = EINVAL;
        goto fail1;
    }

    (void) memset(payload, 0, sizeof (payload));
    req.emr_cmd = MC_CMD_INIT_EVQ;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

    /*
     * On Huntington, RX and TX event batching can only be requested together
     * (even if the datapath firmware doesn't actually support RX
     * batching). If event cut-through is enabled, no RX batching will occur.
     *
     * So always enable RX and TX event batching, and enable event cut-through
     * if we want low latency operation.
     */
    switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
    case EFX_EVQ_FLAGS_TYPE_AUTO:
        ev_cut_through = low_latency ? 1 : 0;
        break;
    case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
        ev_cut_through = 0;
        break;
    case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
        ev_cut_through = 1;
        break;
    default:
        rc = EINVAL;
        goto fail2;
    }
    MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
        INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
        INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
        INIT_EVQ_IN_FLAG_INT_ARMD, 0,
        INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
        INIT_EVQ_IN_FLAG_RX_MERGE, 1,
        INIT_EVQ_IN_FLAG_TX_MERGE, 1);

    /* If the value is zero then disable the timer */
    if (us == 0) {
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
    } else {
        unsigned int ticks;

        if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
            goto fail3;

        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
    }

    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
        MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

    dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
    addr = EFSYS_MEM_ADDR(esmp);

    for (i = 0; i < npages; i++) {
        EFX_POPULATE_QWORD_2(*dma_addr,
            EFX_DWORD_1, (uint32_t)(addr >> 32),
            EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

        dma_addr++;
        addr += EFX_BUF_SIZE;
    }

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail4;
    }

    if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
        rc = EMSGSIZE;
        goto fail5;
    }

    /* NOTE: ignore the returned IRQ param as firmware does not set it. */

    return (0);

fail5:
    EFSYS_PROBE(fail5);
fail4:
    EFSYS_PROBE(fail4);
fail3:
    EFSYS_PROBE(fail3);
fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}
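
/*
 * INIT_EVQ_V2 (below) extends the request above with an explicit queue type
 * (auto/throughput/low-latency), letting the firmware pick the cut-through
 * and merge settings itself; with the v1 request the driver must choose
 * those flags, as in the switch statement above.
 */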
static __checkReturn efx_rc_t
efx_mcdi_init_evq_v2(
    __in efx_nic_t *enp,
    __in unsigned int instance,
    __in efsys_mem_t *esmp,
    __in size_t nevs,
    __in uint32_t irq,
    __in uint32_t us,
    __in uint32_t flags)
{
    efx_mcdi_req_t req;
    uint8_t payload[
        MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
            MC_CMD_INIT_EVQ_V2_OUT_LEN)];
    unsigned int evq_type;
    efx_qword_t *dma_addr;
    uint64_t addr;
    int npages;
    int i;
    efx_rc_t rc;

    npages = EFX_EVQ_NBUFS(nevs);
    if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
        rc = EINVAL;
        goto fail1;
    }

    (void) memset(payload, 0, sizeof (payload));
    req.emr_cmd = MC_CMD_INIT_EVQ;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

    switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
    case EFX_EVQ_FLAGS_TYPE_AUTO:
        evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
        break;
    case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
        evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
        break;
    case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
        evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
        break;
    default:
        rc = EINVAL;
        goto fail2;
    }
    MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
        INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
        INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
        INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
        INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

    /* If the value is zero then disable the timer */
    if (us == 0) {
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
    } else {
        unsigned int ticks;

        if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
            goto fail3;

        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
    }

    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
        MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

    dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
    addr = EFSYS_MEM_ADDR(esmp);

    for (i = 0; i < npages; i++) {
        EFX_POPULATE_QWORD_2(*dma_addr,
            EFX_DWORD_1, (uint32_t)(addr >> 32),
            EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

        dma_addr++;
        addr += EFX_BUF_SIZE;
    }

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail4;
    }

    if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
        rc = EMSGSIZE;
        goto fail5;
    }

    /* NOTE: ignore the returned IRQ param as firmware does not set it. */

    EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
        MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

    return (0);

fail5:
    EFSYS_PROBE(fail5);
fail4:
    EFSYS_PROBE(fail4);
fail3:
    EFSYS_PROBE(fail3);
fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}
static __checkReturn efx_rc_t
efx_mcdi_fini_evq(
    __in efx_nic_t *enp,
    __in uint32_t instance)
{
    efx_mcdi_req_t req;
    uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
                        MC_CMD_FINI_EVQ_OUT_LEN)];
    efx_rc_t rc;

    (void) memset(payload, 0, sizeof (payload));
    req.emr_cmd = MC_CMD_FINI_EVQ;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

    MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

    efx_mcdi_execute_quiet(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail1;
    }

    return (0);

fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}
    __checkReturn efx_rc_t
ef10_ev_init(
    __in efx_nic_t *enp)
{
    _NOTE(ARGUNUSED(enp))
    return (0);
}

    void
ef10_ev_fini(
    __in efx_nic_t *enp)
{
    _NOTE(ARGUNUSED(enp))
}
    __checkReturn efx_rc_t
ef10_ev_qcreate(
    __in efx_nic_t *enp,
    __in unsigned int index,
    __in efsys_mem_t *esmp,
    __in size_t n,
    __in uint32_t id,
    __in uint32_t us,
    __in uint32_t flags,
    __in efx_evq_t *eep)
{
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    uint32_t irq;
    efx_rc_t rc;

    _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
    EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
    EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

    if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
        rc = EINVAL;
        goto fail1;
    }

    if (index >= encp->enc_evq_limit) {
        rc = EINVAL;
        goto fail2;
    }

    if (us > encp->enc_evq_timer_max_us) {
        rc = EINVAL;
        goto fail3;
    }

    /* Set up the handler table */
    eep->ee_rx      = ef10_ev_rx;
    eep->ee_tx      = ef10_ev_tx;
    eep->ee_driver  = ef10_ev_driver;
    eep->ee_drv_gen = ef10_ev_drv_gen;
    eep->ee_mcdi    = ef10_ev_mcdi;

    /* Set up the event queue */
    irq = index; /* INIT_EVQ expects function-relative vector number */

    /*
     * Interrupts may be raised for events immediately after the queue is
     * created. See bug58606.
     */

    if (encp->enc_init_evq_v2_supported) {
        /*
         * On Medford the low latency license is required to enable RX
         * and event cut-through and to disable RX batching. If the event
         * queue type in flags is auto, we let the firmware decide the
         * settings to use. If the adapter has a low latency license,
         * it will choose the best settings for low latency, otherwise
         * it will choose the best settings for throughput.
         */
        rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
        if (rc != 0)
            goto fail4;
    } else {
        /*
         * On Huntington we need to specify the settings to use.
         * If the event queue type in flags is auto, we favour throughput
         * if the adapter is running virtualization-supporting firmware
         * (i.e. the full-featured firmware variant) and latency otherwise.
         * The Ethernet Virtual Bridging capability is used to make this
         * decision. (Note though that the low latency firmware variant is
         * also best for throughput, and the corresponding type should be
         * specified to choose it.)
         */
        boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
        rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
            low_latency);
        if (rc != 0)
            goto fail5;
    }

    return (0);

fail5:
    EFSYS_PROBE(fail5);
fail4:
    EFSYS_PROBE(fail4);
fail3:
    EFSYS_PROBE(fail3);
fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}
    void
ef10_ev_qdestroy(
    __in efx_evq_t *eep)
{
    efx_nic_t *enp = eep->ee_enp;

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD);

    (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
}
    __checkReturn efx_rc_t
ef10_ev_qprime(
    __in efx_evq_t *eep,
    __in unsigned int count)
{
    efx_nic_t *enp = eep->ee_enp;
    uint32_t rptr;
    efx_dword_t dword;

    rptr = count & eep->ee_mask;

    if (enp->en_nic_cfg.enc_bug35388_workaround) {
        EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
            (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
        EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
            (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

        EFX_POPULATE_DWORD_2(dword,
            ERF_DD_EVQ_IND_RPTR_FLAGS,
            EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
            ERF_DD_EVQ_IND_RPTR,
            (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
        EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
            &dword, B_FALSE);

        EFX_POPULATE_DWORD_2(dword,
            ERF_DD_EVQ_IND_RPTR_FLAGS,
            EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
            ERF_DD_EVQ_IND_RPTR,
            rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
        EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
            &dword, B_FALSE);
    } else {
        EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
        EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
            &dword, B_FALSE);
    }

    return (0);
}
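
/*
 * Illustrative sketch of the bug35388 workaround above (field width
 * symbolic): with an ERF_DD_EVQ_IND_RPTR_WIDTH of W bits, the read pointer
 * is pushed through the indirect register in two writes, first the high
 * half (rptr >> W) tagged EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, then the low half
 * (rptr & ((1 << W) - 1)) tagged ..._FLAGS_LOW. The static asserts above
 * guarantee that any legal queue size fits in the 2*W bits available.
 */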
static __checkReturn efx_rc_t
efx_mcdi_driver_event(
    __in efx_nic_t *enp,
    __in uint32_t evq,
    __in efx_qword_t data)
{
    efx_mcdi_req_t req;
    uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
                        MC_CMD_DRIVER_EVENT_OUT_LEN)];
    efx_rc_t rc;

    req.emr_cmd = MC_CMD_DRIVER_EVENT;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

    MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

    MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
        EFX_QWORD_FIELD(data, EFX_DWORD_0));
    MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
        EFX_QWORD_FIELD(data, EFX_DWORD_1));

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail1;
    }

    return (0);

fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}
    void
ef10_ev_qpost(
    __in efx_evq_t *eep,
    __in uint16_t data)
{
    efx_nic_t *enp = eep->ee_enp;
    efx_qword_t event;

    EFX_POPULATE_QWORD_3(event,
        ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
        ESF_DZ_DRV_SUB_CODE, 0,
        ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

    (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}
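
/*
 * ef10_ev_qpost() is the software-event path: the qword built above travels
 * through the MC (MC_CMD_DRIVER_EVENT) and comes back on this queue as a
 * DRV_GEN event, which ef10_ev_drv_gen() below delivers to the eec_software
 * callback. Only 16 bits of data survive the round trip, which is why
 * ef10_ev_drv_gen() rejects values of 2^16 or more.
 */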
    __checkReturn efx_rc_t
ef10_ev_qmoderate(
    __in efx_evq_t *eep,
    __in unsigned int us)
{
    efx_nic_t *enp = eep->ee_enp;
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    efx_dword_t dword;
    uint32_t mode;
    efx_rc_t rc;

    /* Check that hardware and MCDI use the same timer MODE values */
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

    if (us > encp->enc_evq_timer_max_us) {
        rc = EINVAL;
        goto fail1;
    }

    /* If the value is zero then disable the timer */
    if (us == 0)
        mode = FFE_CZ_TIMER_MODE_DIS;
    else
        mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;

    if (encp->enc_bug61265_workaround) {
        uint32_t ns = us * 1000;

        rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
        if (rc != 0)
            goto fail2;
    } else {
        unsigned int ticks;

        if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
            goto fail3;

        if (encp->enc_bug35388_workaround) {
            EFX_POPULATE_DWORD_3(dword,
                ERF_DD_EVQ_IND_TIMER_FLAGS,
                EFE_DD_EVQ_IND_TIMER_FLAGS,
                ERF_DD_EVQ_IND_TIMER_MODE, mode,
                ERF_DD_EVQ_IND_TIMER_VAL, ticks);
            EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
                eep->ee_index, &dword, 0);
        } else {
            EFX_POPULATE_DWORD_2(dword,
                ERF_DZ_TC_TIMER_MODE, mode,
                ERF_DZ_TC_TIMER_VAL, ticks);
            EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
                eep->ee_index, &dword, 0);
        }
    }

    return (0);

fail3:
    EFSYS_PROBE(fail3);
fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}
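
/*
 * Summary of the three timer-programming paths above:
 *  - bug61265 workaround: program the timer via the SET_EVQ_TMR MCDI
 *    request, in nanoseconds;
 *  - bug35388 workaround: program it through the indirect-access
 *    ER_DD_EVQ_INDIRECT register;
 *  - otherwise: write the ER_DZ_EVQ_TMR_REG BAR register directly.
 * All three use the same FFE_CZ_TIMER_MODE_* values, as the static asserts
 * at the top of the function verify.
 */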
#if EFSYS_OPT_QSTATS
    void
ef10_ev_qstats_update(
    __in efx_evq_t *eep,
    __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
{
    unsigned int id;

    for (id = 0; id < EV_NQSTATS; id++) {
        efsys_stat_t *essp = &stat[id];

        EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
        eep->ee_stat[id] = 0;
    }
}
#endif /* EFSYS_OPT_QSTATS */
static __checkReturn boolean_t
ef10_ev_rx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    efx_nic_t *enp = eep->ee_enp;
    uint32_t size;
    uint32_t label;
    uint32_t mac_class;
    uint32_t eth_tag_class;
    uint32_t l3_class;
    uint32_t l4_class;
    uint32_t next_read_lbits;
    uint16_t flags;
    boolean_t cont;
    boolean_t should_abort;
    efx_evq_rxq_state_t *eersp;
    unsigned int desc_count;
    unsigned int last_used_id;

    EFX_EV_QSTAT_INCR(eep, EV_RX);

    /* Discard events after RXQ/TXQ errors */
    if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
        return (B_FALSE);

    /* Basic packet information */
    size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
    next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
    label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
    eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
    mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
    l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
    l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
    cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
        /* Drop this event */
        return (B_FALSE);
    }
    flags = 0;

    if (cont != 0) {
        /*
         * This may be part of a scattered frame, or it may be a
         * truncated frame if scatter is disabled on this RXQ.
         * Overlength frames can be received if e.g. a VF is configured
         * for 1500 MTU but connected to a port set to 9000 MTU.
         * FIXME: There is not yet any driver that supports scatter on
         * Huntington. Scatter support is required for OSX.
         */
        flags |= EFX_PKT_CONT;
    }

    if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
        flags |= EFX_PKT_UNICAST;

    /* Increment the count of descriptors read */
    eersp = &eep->ee_rxq_state[label];
    desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
        EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
    eersp->eers_rx_read_ptr += desc_count;
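
    /*
     * Illustrative example (field width hypothetical):
     * ESF_DZ_RX_DSC_PTR_LBITS carries only the low-order bits of the RXQ
     * descriptor pointer, so the subtraction above is done modulo the field
     * size. With a 4-bit field, a previous read pointer of 0x1fe (low bits
     * 0xe) and an event value of 0x2 gives
     * desc_count = (0x2 - 0xe) & 0xf = 4 descriptors consumed.
     */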

    /*
     * FIXME: add error checking to make sure this is a batched event.
     * This could also be an aborted scatter, see Bug36629.
     */
    if (desc_count > 1) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
        flags |= EFX_PKT_PREFIX_LEN;
    }

    /* Calculate the index of the last descriptor consumed */
    last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

    /* Check for errors that invalidate checksum and L3/L4 fields */
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
        /* RX frame truncated (error flag is misnamed) */
        EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
        flags |= EFX_DISCARD;
        goto deliver;
    }
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
        /* Bad Ethernet frame CRC */
        EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
        flags |= EFX_DISCARD;
        goto deliver;
    }
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
        /*
         * Hardware parse failed, due to malformed headers
         * or headers that are too long for the parser.
         * Headers and checksums must be validated by the host.
         */
        /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
        goto deliver;
    }

    if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
        (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
        flags |= EFX_PKT_VLAN_TAGGED;
    }

    switch (l3_class) {
    case ESE_DZ_L3_CLASS_IP4:
    case ESE_DZ_L3_CLASS_IP4_FRAG:
        flags |= EFX_PKT_IPV4;
        if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
        } else {
            flags |= EFX_CKSUM_IPV4;
        }

        if (l4_class == ESE_DZ_L4_CLASS_TCP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
            flags |= EFX_PKT_TCP;
        } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
            flags |= EFX_PKT_UDP;
        } else {
            EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
        }
        break;

    case ESE_DZ_L3_CLASS_IP6:
    case ESE_DZ_L3_CLASS_IP6_FRAG:
        flags |= EFX_PKT_IPV6;

        if (l4_class == ESE_DZ_L4_CLASS_TCP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
            flags |= EFX_PKT_TCP;
        } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
            flags |= EFX_PKT_UDP;
        } else {
            EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
        }
        break;

    default:
        EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
        break;
    }

    if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
        if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
        } else {
            flags |= EFX_CKSUM_TCPUDP;
        }
    }

deliver:
    /* If we're not discarding the packet then it is ok */
    if (~flags & EFX_DISCARD)
        EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

    EFSYS_ASSERT(eecp->eec_rx != NULL);
    should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

    return (should_abort);
}
static __checkReturn boolean_t
ef10_ev_tx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    efx_nic_t *enp = eep->ee_enp;
    uint32_t id;
    uint32_t label;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_TX);

    /* Discard events after RXQ/TXQ errors */
    if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
        return (B_FALSE);

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
        /* Drop this event */
        return (B_FALSE);
    }

    /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
    id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
    label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

    EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

    EFSYS_ASSERT(eecp->eec_tx != NULL);
    should_abort = eecp->eec_tx(arg, label, id);

    return (should_abort);
}
static __checkReturn boolean_t
ef10_ev_driver(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    unsigned int code;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
    should_abort = B_FALSE;

    code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
    switch (code) {
    case ESE_DZ_DRV_TIMER_EV: {
        uint32_t id;

        id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

        EFSYS_ASSERT(eecp->eec_timer != NULL);
        should_abort = eecp->eec_timer(arg, id);
        break;
    }

    case ESE_DZ_DRV_WAKE_UP_EV: {
        uint32_t id;

        id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

        EFSYS_ASSERT(eecp->eec_wake_up != NULL);
        should_abort = eecp->eec_wake_up(arg, id);
        break;
    }

    case ESE_DZ_DRV_START_UP_EV:
        EFSYS_ASSERT(eecp->eec_initialized != NULL);
        should_abort = eecp->eec_initialized(arg);
        break;

    default:
        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
        break;
    }

    return (should_abort);
}
static __checkReturn boolean_t
ef10_ev_drv_gen(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    uint32_t data;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
    should_abort = B_FALSE;

    data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
    if (data >= ((uint32_t)1 << 16)) {
        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

        return (B_TRUE);
    }

    EFSYS_ASSERT(eecp->eec_software != NULL);
    should_abort = eecp->eec_software(arg, (uint16_t)data);

    return (should_abort);
}
static __checkReturn boolean_t
ef10_ev_mcdi(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    efx_nic_t *enp = eep->ee_enp;
    unsigned int code;
    boolean_t should_abort = B_FALSE;

    EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

    code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
    switch (code) {
    case MCDI_EVENT_CODE_BADSSERT:
        efx_mcdi_ev_death(enp, EINTR);
        break;

    case MCDI_EVENT_CODE_CMDDONE:
        efx_mcdi_ev_cpl(enp,
            MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
            MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
            MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
        break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
    case MCDI_EVENT_CODE_PROXY_RESPONSE:
        /*
         * This event notifies a function that an authorization request
         * has been processed. If the request was authorized then the
         * function can now re-send the original MCDI request.
         * See SF-113652-SW "SR-IOV Proxied Network Access Control".
         */
        efx_mcdi_ev_proxy_response(enp,
            MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
            MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
        break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

    case MCDI_EVENT_CODE_LINKCHANGE: {
        efx_link_mode_t link_mode;

        ef10_phy_link_ev(enp, eqp, &link_mode);
        should_abort = eecp->eec_link_change(arg, link_mode);
        break;
    }

    case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
        efx_mon_stat_t id;
        efx_mon_stat_value_t value;
        efx_rc_t rc;

        /* Decode monitor stat for MCDI sensor (if supported) */
        if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
            /* Report monitor stat change */
            should_abort = eecp->eec_monitor(arg, id, value);
        } else if (rc == ENOTSUP) {
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_UNKNOWN_SENSOREVT,
                MCDI_EV_FIELD(eqp, DATA));
        } else {
            EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
        }
#endif
        break;
    }

    case MCDI_EVENT_CODE_SCHEDERR:
        /* Informational only */
        break;

    case MCDI_EVENT_CODE_REBOOT:
        /* Falcon/Siena only (should not be seen with Huntington). */
        efx_mcdi_ev_death(enp, EIO);
        break;

    case MCDI_EVENT_CODE_MC_REBOOT:
        /* MC_REBOOT event is used for Huntington (EF10) and later. */
        efx_mcdi_ev_death(enp, EIO);
        break;

    case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
        if (eecp->eec_mac_stats != NULL) {
            eecp->eec_mac_stats(arg,
                MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
        }
#endif
        break;

    case MCDI_EVENT_CODE_FWALERT: {
        uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

        if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_FWALERT_SRAM,
                MCDI_EV_FIELD(eqp, FWALERT_DATA));
        else
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_UNKNOWN_FWALERT,
                MCDI_EV_FIELD(eqp, DATA));
        break;
    }

    case MCDI_EVENT_CODE_TX_ERR: {
        /*
         * After a TXQ error is detected, firmware sends a TX_ERR event.
         * This may be followed by TX completions (which we discard),
         * and then finally by a TX_FLUSH event. Firmware destroys the
         * TXQ automatically after sending the TX_FLUSH event.
         */
        enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

        EFSYS_PROBE2(tx_descq_err,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

        /* Inform the driver that a reset is required. */
        eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
            MCDI_EV_FIELD(eqp, TX_ERR_DATA));
        break;
    }

    case MCDI_EVENT_CODE_TX_FLUSH: {
        uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

        /*
         * EF10 firmware sends two TX_FLUSH events: one to the txq's
         * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
         * We want to wait for all completions, so ignore the events
         * with TX_FLUSH_TO_DRIVER.
         */
        if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
            should_abort = B_FALSE;
            break;
        }

        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

        EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

        EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
        should_abort = eecp->eec_txq_flush_done(arg, txq_index);
        break;
    }

    case MCDI_EVENT_CODE_RX_ERR: {
        /*
         * After an RXQ error is detected, firmware sends an RX_ERR
         * event. This may be followed by RX events (which we discard),
         * and then finally by an RX_FLUSH event. Firmware destroys the
         * RXQ automatically after sending the RX_FLUSH event.
         */
        enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

        EFSYS_PROBE2(rx_descq_err,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

        /* Inform the driver that a reset is required. */
        eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
            MCDI_EV_FIELD(eqp, RX_ERR_DATA));
        break;
    }

    case MCDI_EVENT_CODE_RX_FLUSH: {
        uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

        /*
         * EF10 firmware sends two RX_FLUSH events: one to the rxq's
         * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
         * We want to wait for all completions, so ignore the events
         * with RX_FLUSH_TO_DRIVER.
         */
        if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
            should_abort = B_FALSE;
            break;
        }

        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

        EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

        EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
        should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
        break;
    }

    default:
        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
        break;
    }

    return (should_abort);
}
    void
ef10_ev_rxlabel_init(
    __in efx_evq_t *eep,
    __in efx_rxq_t *erp,
    __in unsigned int label)
{
    efx_evq_rxq_state_t *eersp;

    EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
    eersp = &eep->ee_rxq_state[label];

    EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

    eersp->eers_rx_read_ptr = 0;
    eersp->eers_rx_mask = erp->er_mask;
}
    void
ef10_ev_rxlabel_fini(
    __in efx_evq_t *eep,
    __in unsigned int label)
{
    efx_evq_rxq_state_t *eersp;

    EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
    eersp = &eep->ee_rxq_state[label];

    EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

    eersp->eers_rx_read_ptr = 0;
    eersp->eers_rx_mask = 0;
}

#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */