/*
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#if EFSYS_OPT_MON_STATS

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2

#define EFX_EV_QSTAT_INCR(_eep, _stat)                                  \
        (_eep)->ee_stat[_stat]++;                                       \
    _NOTE(CONSTANTCONDITION)                                            \
#define EFX_EV_QSTAT_INCR(_eep, _stat)
/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
#define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX  (0)
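
/*
 * Illustrative note (not from the original sources): ef10_ev_qcreate() below
 * redirects the wake-up IRQ of any queue created without
 * EFX_EVQ_FLAGS_NOTIFY_INTERRUPT to this index, so, for example, a
 * non-interrupting EVQ 5 still has a valid interrupting queue (EVQ 0) to
 * reference for wake-up events it will never request.
 */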
static __checkReturn boolean_t
ef10_ev_rx(
    __in        efx_evq_t *eep,
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    __in_opt    void *arg);
static __checkReturn boolean_t
ef10_ev_tx(
    __in        efx_evq_t *eep,
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    __in_opt    void *arg);
static __checkReturn boolean_t
ef10_ev_driver(
    __in        efx_evq_t *eep,
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    __in_opt    void *arg);
static __checkReturn boolean_t
ef10_ev_drv_gen(
    __in        efx_evq_t *eep,
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    __in_opt    void *arg);
static __checkReturn boolean_t
ef10_ev_mcdi(
    __in        efx_evq_t *eep,
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    __in_opt    void *arg);
static __checkReturn efx_rc_t
    __in        uint32_t instance,
    __in        uint32_t timer_ns)
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
        MC_CMD_SET_EVQ_TMR_OUT_LEN);

    req.emr_cmd = MC_CMD_SET_EVQ_TMR;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static __checkReturn efx_rc_t
    __in        unsigned int instance,
    __in        efsys_mem_t *esmp,
    __in        boolean_t low_latency)
    EFX_MCDI_DECLARE_BUF(payload,
        MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
        MC_CMD_INIT_EVQ_OUT_LEN);
    efx_qword_t *dma_addr;
    boolean_t interrupting;

    npages = EFX_EVQ_NBUFS(nevs);
    if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {

    req.emr_cmd = MC_CMD_INIT_EVQ;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

    interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
        EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
    /*
     * On Huntington, RX and TX event batching can only be requested together
     * (even if the datapath firmware doesn't actually support RX batching).
     * If event cut-through is enabled, no RX batching will occur.
     *
     * So always enable RX and TX event batching, and enable event cut-through
     * if we want low-latency operation.
     */
    switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
    case EFX_EVQ_FLAGS_TYPE_AUTO:
        ev_cut_through = low_latency ? 1 : 0;
    case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
    case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:

    MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
        INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
        INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
        INIT_EVQ_IN_FLAG_INT_ARMD, 0,
        INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
        INIT_EVQ_IN_FLAG_RX_MERGE, 1,
        INIT_EVQ_IN_FLAG_TX_MERGE, 1);

    /* If the value is zero then disable the timer */
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);

        if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)

        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);

    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
        MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);
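
    /*
     * Illustrative note (not from the original sources): the event queue ring
     * is handed to firmware as a list of 4KiB buffer addresses. For example,
     * a 1024-entry queue of 8-byte events occupies 8KiB, so npages is 2 and
     * two consecutive DMA_ADDR entries are written below, EFX_BUF_SIZE bytes
     * apart.
     */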
    dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
    addr = EFSYS_MEM_ADDR(esmp);

    for (i = 0; i < npages; i++) {
        EFX_POPULATE_QWORD_2(*dma_addr,
            EFX_DWORD_1, (uint32_t)(addr >> 32),
            EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

        addr += EFX_BUF_SIZE;

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {

    /* NOTE: ignore the returned IRQ param as firmware does not set it. */

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static __checkReturn efx_rc_t
efx_mcdi_init_evq_v2(
    __in        unsigned int instance,
    __in        efsys_mem_t *esmp,
    EFX_MCDI_DECLARE_BUF(payload,
        MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
        MC_CMD_INIT_EVQ_V2_OUT_LEN);
    boolean_t interrupting;
    unsigned int evq_type;
    efx_qword_t *dma_addr;

    npages = EFX_EVQ_NBUFS(nevs);
    if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {

    req.emr_cmd = MC_CMD_INIT_EVQ;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

    interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
        EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

    switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
    case EFX_EVQ_FLAGS_TYPE_AUTO:
        evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
    case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
        evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
    case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
        evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;

    MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
        INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
        INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
        INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
        INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);
    /* If the value is zero then disable the timer */
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);

        if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)

        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);

    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
        MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

    dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
    addr = EFSYS_MEM_ADDR(esmp);

    for (i = 0; i < npages; i++) {
        EFX_POPULATE_QWORD_2(*dma_addr,
            EFX_DWORD_1, (uint32_t)(addr >> 32),
            EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

        addr += EFX_BUF_SIZE;

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {

    /* NOTE: ignore the returned IRQ param as firmware does not set it. */

    EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
        MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
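
    /*
     * Illustrative note (not from the original sources): unlike the v1
     * request, INIT_EVQ_V2 reports the flag settings the firmware actually
     * applied, which is how a TYPE_AUTO request can be checked after the
     * fact; here the chosen flags are only exposed through the probe above.
     */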
    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static __checkReturn efx_rc_t
    __in        uint32_t instance)
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
        MC_CMD_FINI_EVQ_OUT_LEN);

    req.emr_cmd = MC_CMD_FINI_EVQ;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

    MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

    efx_mcdi_execute_quiet(enp, &req);

    if (req.emr_rc != 0) {

    /*
     * EALREADY is not an error, but indicates that the MC has rebooted and
     * that the EVQ has already been destroyed.
     */
    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn   efx_rc_t
    _NOTE(ARGUNUSED(enp))

    _NOTE(ARGUNUSED(enp))

    __checkReturn   efx_rc_t
    __in        unsigned int index,
    __in        efsys_mem_t *esmp,
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);

    _NOTE(ARGUNUSED(id))    /* buftbl id managed by MC */
    EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
    EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

        (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {

    if (index >= encp->enc_evq_limit) {

    if (us > encp->enc_evq_timer_max_us) {

    /* Set up the handler table */
    eep->ee_rx = ef10_ev_rx;
    eep->ee_tx = ef10_ev_tx;
    eep->ee_driver = ef10_ev_driver;
    eep->ee_drv_gen = ef10_ev_drv_gen;
    eep->ee_mcdi = ef10_ev_mcdi;

    /* Set up the event queue */
    /* INIT_EVQ expects function-relative vector number */
    if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
        EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
    } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
        flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
            EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
        irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;

    /*
     * Interrupts may be raised for events immediately after the queue is
     * created. See bug58606.
     */
    if (encp->enc_init_evq_v2_supported) {
        /*
         * On Medford, the low latency license is required to enable RX and
         * event cut-through and to disable RX batching. If the event queue
         * type in flags is auto, we let the firmware decide which settings
         * to use. If the adapter has a low latency license, it will choose
         * the best settings for low latency, otherwise it will choose the
         * best settings for throughput.
         */
        rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
         * On Huntington we need to specify the settings to use.
         * If the event queue type in flags is auto, we favour throughput if
         * the adapter is running virtualization-supporting firmware (i.e. the
         * full-featured firmware variant) and latency otherwise. The Ethernet
         * Virtual Bridging capability is used to make this decision. (Note
         * though that the low latency firmware variant is also best for
         * throughput, and the corresponding type should be specified
        boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
        rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    efx_nic_t *enp = eep->ee_enp;

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    (void) efx_mcdi_fini_evq(enp, eep->ee_index);

    __checkReturn   efx_rc_t
    __in        unsigned int count)
    efx_nic_t *enp = eep->ee_enp;

    rptr = count & eep->ee_mask;
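
    /*
     * Illustrative note (not from the original sources): with the bug35388
     * workaround the read pointer is too wide for a single write to the
     * indirect register, so it is split into two writes below: first the bits
     * above ERF_DD_EVQ_IND_RPTR_WIDTH (flagged HIGH), then the low
     * ERF_DD_EVQ_IND_RPTR_WIDTH bits (flagged LOW).
     */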
    if (enp->en_nic_cfg.enc_bug35388_workaround) {
        EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
            (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
        EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
            (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

        EFX_POPULATE_DWORD_2(dword,
            ERF_DD_EVQ_IND_RPTR_FLAGS,
            EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
            (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
        EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,

        EFX_POPULATE_DWORD_2(dword,
            ERF_DD_EVQ_IND_RPTR_FLAGS,
            EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
            rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
        EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,

        EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
        EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
static __checkReturn efx_rc_t
efx_mcdi_driver_event(
    __in        efx_qword_t data)
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
        MC_CMD_DRIVER_EVENT_OUT_LEN);

    req.emr_cmd = MC_CMD_DRIVER_EVENT;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

    MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

    MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
        EFX_QWORD_FIELD(data, EFX_DWORD_0));
    MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
        EFX_QWORD_FIELD(data, EFX_DWORD_1));

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    efx_nic_t *enp = eep->ee_enp;

    EFX_POPULATE_QWORD_3(event,
        ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
        ESF_DZ_DRV_SUB_CODE, 0,
        ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

    (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
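
    /*
     * Illustrative note (not from the original sources): the 'data' value
     * posted here is delivered back through ef10_ev_drv_gen() below, which
     * passes it to the eec_software callback as a uint16_t; values of
     * 0x10000 or more are treated there as a bad event and dropped.
     */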
    __checkReturn   efx_rc_t
    __in        unsigned int us)
    efx_nic_t *enp = eep->ee_enp;
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);

    /* Check that hardware and MCDI use the same timer MODE values */
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
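
    /*
     * Illustrative note (not from the original sources): these asserts let
     * the single 'mode' value computed below be used unchanged whether it
     * ends up in an MC_CMD_SET_EVQ_TMR request (the bug61265 workaround path)
     * or is written directly to the event queue timer register.
     */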
    if (us > encp->enc_evq_timer_max_us) {

    /* If the value is zero then disable the timer */
        mode = FFE_CZ_TIMER_MODE_DIS;
        mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;

    if (encp->enc_bug61265_workaround) {
        uint32_t ns = us * 1000;

        rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);

    if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)

    if (encp->enc_bug35388_workaround) {
        EFX_POPULATE_DWORD_3(dword,
            ERF_DD_EVQ_IND_TIMER_FLAGS,
            EFE_DD_EVQ_IND_TIMER_FLAGS,
            ERF_DD_EVQ_IND_TIMER_MODE, mode,
            ERF_DD_EVQ_IND_TIMER_VAL, ticks);
        EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
            eep->ee_index, &dword, 0);

        /*
         * NOTE: The TMR_REL field introduced in Medford2 is
         * ignored on earlier EF10 controllers. See bug66418
         * comment 9 for details.
         */
        EFX_POPULATE_DWORD_3(dword,
            ERF_DZ_TC_TIMER_MODE, mode,
            ERF_DZ_TC_TIMER_VAL, ticks,
            ERF_FZ_TC_TMR_REL_VAL, ticks);
        EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
            eep->ee_index, &dword, 0);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
ef10_ev_qstats_update(
    __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)

    for (id = 0; id < EV_NQSTATS; id++) {
        efsys_stat_t *essp = &stat[id];

        EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
        eep->ee_stat[id] = 0;

#endif  /* EFSYS_OPT_QSTATS */

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
static __checkReturn boolean_t
ef10_ev_rx_packed_stream(
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    uint32_t pkt_count_lbits;
    boolean_t should_abort;
    efx_evq_rxq_state_t *eersp;
    unsigned int pkt_count;
    unsigned int current_id;
    boolean_t new_buffer;

    pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
    label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
    new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

    eersp = &eep->ee_rxq_state[label];
    /*
     * RX_DSC_PTR_LBITS contains the least significant bits of the global
     * (not per-buffer) packet counter. It is guaranteed that the maximum
     * number of completed packets fits in the lbits mask, so modulo
     * lbits-mask arithmetic is used to calculate the packet counter
     * increment.
     */
    pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
        EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
    eersp->eers_rx_stream_npackets += pkt_count;
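
    /*
     * Illustrative note (not from the original sources): assuming, for the
     * sake of example, an 8-bit LBITS field, if the low bits of the stored
     * counter are 0xfe and the event reports 0x03, the increment is
     * (0x03 - 0xfe) & 0xff = 0x05, i.e. five packets completed across the
     * counter wrap.
     */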
        flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
#if EFSYS_OPT_RX_PACKED_STREAM
        /*
         * If both packed stream and equal stride super-buffer modes are
         * compiled in, in theory credits should be maintained for packed
         * stream only, but right now these modes are not distinguished in
         * the event queue Rx queue state and it is OK to increment the
         * counter regardless (it might even be cheaper than branching,
         * since neighbouring structure members are updated as well).
         */
        eersp->eers_rx_packed_stream_credits++;
        eersp->eers_rx_read_ptr++;

    current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

    /* Check for errors that invalidate checksum and L3/L4 fields */
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
        /* RX frame truncated */
        EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
        flags |= EFX_DISCARD;
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
        /* Bad Ethernet frame CRC */
        EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
        flags |= EFX_DISCARD;

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
        flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
        EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
        EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

    /* If we're not discarding the packet then it is ok */
    if (~flags & EFX_DISCARD)
        EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

    EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
    should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,

    return (should_abort);

#endif  /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
static __checkReturn boolean_t
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    efx_nic_t *enp = eep->ee_enp;
    uint32_t eth_tag_class;
    uint32_t next_read_lbits;
    boolean_t should_abort;
    efx_evq_rxq_state_t *eersp;
    unsigned int desc_count;
    unsigned int last_used_id;

    EFX_EV_QSTAT_INCR(eep, EV_RX);

    /* Discard events after RXQ/TXQ errors, or hardware not available */
    if (enp->en_reset_flags &
        (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))

    /* Basic packet information */
    label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
    eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
    /*
     * Packed stream events are very different,
     * so handle them separately
     */
    if (eersp->eers_rx_packed_stream)
        return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));

    size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
    cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
    next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
    eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
    mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
    l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);

    /*
     * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
     * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
     * and values for all EF10 controllers.
     */
    EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
    EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
    EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
    EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);

    l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
        /* Drop this event */
        /*
         * This may be part of a scattered frame, or it may be a
         * truncated frame if scatter is disabled on this RXQ.
         * Overlength frames can be received if e.g. a VF is configured
         * for 1500 MTU but connected to a port set to 9000 MTU.
         *
         * FIXME: There is not yet any driver that supports scatter on
         * Huntington. Scatter support is required for OSX.
         */
        flags |= EFX_PKT_CONT;
    if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
        flags |= EFX_PKT_UNICAST;

    /* Increment the count of descriptors read */
    desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
        EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
    eersp->eers_rx_read_ptr += desc_count;
    /*
     * FIXME: add error checking to make sure this is a batched event.
     * This could also be an aborted scatter, see Bug36629.
     */
    if (desc_count > 1) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
        flags |= EFX_PKT_PREFIX_LEN;

    /* Calculate the index of the last descriptor consumed */
    last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

    /* Check for errors that invalidate checksum and L3/L4 fields */
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
        /* RX frame truncated */
        EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
        flags |= EFX_DISCARD;
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
        /* Bad Ethernet frame CRC */
        EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
        flags |= EFX_DISCARD;

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
        /*
         * Hardware parse failed, due to malformed headers
         * or headers that are too long for the parser.
         * Headers and checksums must be validated by the host.
         */
        /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */

    if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
        (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
        flags |= EFX_PKT_VLAN_TAGGED;

    case ESE_DZ_L3_CLASS_IP4:
    case ESE_DZ_L3_CLASS_IP4_FRAG:
        flags |= EFX_PKT_IPV4;
        if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
            flags |= EFX_CKSUM_IPV4;

        /*
         * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
         * only 2 bits wide on Medford2. Check it is safe to use the
         * Medford2 field and values for all EF10 controllers.
         */
        EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
            ESF_DE_RX_L4_CLASS_LBN);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
            ESE_DE_L4_CLASS_UNKNOWN);

        if (l4_class == ESE_FZ_L4_CLASS_TCP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
            flags |= EFX_PKT_TCP;
        } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
            flags |= EFX_PKT_UDP;
            EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);

    case ESE_DZ_L3_CLASS_IP6:
    case ESE_DZ_L3_CLASS_IP6_FRAG:
        flags |= EFX_PKT_IPV6;

        /*
         * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
         * only 2 bits wide on Medford2. Check it is safe to use the
         * Medford2 field and values for all EF10 controllers.
         */
        EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
            ESF_DE_RX_L4_CLASS_LBN);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
            ESE_DE_L4_CLASS_UNKNOWN);

        if (l4_class == ESE_FZ_L4_CLASS_TCP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
            flags |= EFX_PKT_TCP;
        } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
            flags |= EFX_PKT_UDP;
            EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);

        EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);

    if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
        if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
            flags |= EFX_CKSUM_TCPUDP;

    /* If we're not discarding the packet then it is ok */
    if (~flags & EFX_DISCARD)
        EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

    EFSYS_ASSERT(eecp->eec_rx != NULL);
    should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

    return (should_abort);
static __checkReturn boolean_t
    __in        efx_evq_t *eep,
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    efx_nic_t *enp = eep->ee_enp;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_TX);

    /* Discard events after RXQ/TXQ errors, or hardware not available */
    if (enp->en_reset_flags &
        (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
        /* Drop this event */

    /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
    id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
    label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

    EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

    EFSYS_ASSERT(eecp->eec_tx != NULL);
    should_abort = eecp->eec_tx(arg, label, id);

    return (should_abort);

static __checkReturn boolean_t
    __in        efx_evq_t *eep,
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
    should_abort = B_FALSE;

    code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
    case ESE_DZ_DRV_TIMER_EV: {
        id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

        EFSYS_ASSERT(eecp->eec_timer != NULL);
        should_abort = eecp->eec_timer(arg, id);

    case ESE_DZ_DRV_WAKE_UP_EV: {
        id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

        EFSYS_ASSERT(eecp->eec_wake_up != NULL);
        should_abort = eecp->eec_wake_up(arg, id);

    case ESE_DZ_DRV_START_UP_EV:
        EFSYS_ASSERT(eecp->eec_initialized != NULL);
        should_abort = eecp->eec_initialized(arg);

        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

    return (should_abort);
static __checkReturn boolean_t
    __in        efx_evq_t *eep,
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
    should_abort = B_FALSE;

    data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
    if (data >= ((uint32_t)1 << 16)) {
        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

    EFSYS_ASSERT(eecp->eec_software != NULL);
    should_abort = eecp->eec_software(arg, (uint16_t)data);

    return (should_abort);
static __checkReturn boolean_t
    __in        efx_evq_t *eep,
    __in        efx_qword_t *eqp,
    __in        const efx_ev_callbacks_t *eecp,
    efx_nic_t *enp = eep->ee_enp;
    boolean_t should_abort = B_FALSE;

    EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

    code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
    case MCDI_EVENT_CODE_BADSSERT:
        efx_mcdi_ev_death(enp, EINTR);

    case MCDI_EVENT_CODE_CMDDONE:
        efx_mcdi_ev_cpl(enp,
            MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
            MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
            MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));

#if EFSYS_OPT_MCDI_PROXY_AUTH
    case MCDI_EVENT_CODE_PROXY_RESPONSE:
        /*
         * This event notifies a function that an authorization request
         * has been processed. If the request was authorized then the
         * function can now re-send the original MCDI request.
         * See SF-113652-SW "SR-IOV Proxied Network Access Control".
         */
        efx_mcdi_ev_proxy_response(enp,
            MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
            MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
#endif  /* EFSYS_OPT_MCDI_PROXY_AUTH */

    case MCDI_EVENT_CODE_LINKCHANGE: {
        efx_link_mode_t link_mode;

        ef10_phy_link_ev(enp, eqp, &link_mode);
        should_abort = eecp->eec_link_change(arg, link_mode);

    case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
        efx_mon_stat_value_t value;

        /* Decode monitor stat for MCDI sensor (if supported) */
        if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
            /* Report monitor stat change */
            should_abort = eecp->eec_monitor(arg, id, value);
        } else if (rc == ENOTSUP) {
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_UNKNOWN_SENSOREVT,
                MCDI_EV_FIELD(eqp, DATA));
            EFSYS_ASSERT(rc == ENODEV);     /* Wrong port */

    case MCDI_EVENT_CODE_SCHEDERR:
        /* Informational only */

    case MCDI_EVENT_CODE_REBOOT:
        /* Falcon/Siena only (should not be seen with Huntington). */
        efx_mcdi_ev_death(enp, EIO);

    case MCDI_EVENT_CODE_MC_REBOOT:
        /* MC_REBOOT event is used for Huntington (EF10) and later. */
        efx_mcdi_ev_death(enp, EIO);

    case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
        if (eecp->eec_mac_stats != NULL) {
            eecp->eec_mac_stats(arg,
                MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));

    case MCDI_EVENT_CODE_FWALERT: {
        uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

        if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_FWALERT_SRAM,
                MCDI_EV_FIELD(eqp, FWALERT_DATA));
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_UNKNOWN_FWALERT,
                MCDI_EV_FIELD(eqp, DATA));

    case MCDI_EVENT_CODE_TX_ERR: {
        /*
         * After a TXQ error is detected, firmware sends a TX_ERR event.
         * This may be followed by TX completions (which we discard),
         * and then finally by a TX_FLUSH event. Firmware destroys the
         * TXQ automatically after sending the TX_FLUSH event.
         */
        enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

        EFSYS_PROBE2(tx_descq_err,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

        /* Inform the driver that a reset is required. */
        eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
            MCDI_EV_FIELD(eqp, TX_ERR_DATA));

    case MCDI_EVENT_CODE_TX_FLUSH: {
        uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

        /*
         * EF10 firmware sends two TX_FLUSH events: one to the txq's
         * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
         * We want to wait for all completions, so ignore the events
         * with TX_FLUSH_TO_DRIVER.
         */
        if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
            should_abort = B_FALSE;

        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

        EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

        EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
        should_abort = eecp->eec_txq_flush_done(arg, txq_index);

    case MCDI_EVENT_CODE_RX_ERR: {
        /*
         * After an RXQ error is detected, firmware sends an RX_ERR
         * event. This may be followed by RX events (which we discard),
         * and then finally by an RX_FLUSH event. Firmware destroys the
         * RXQ automatically after sending the RX_FLUSH event.
         */
        enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

        EFSYS_PROBE2(rx_descq_err,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

        /* Inform the driver that a reset is required. */
        eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
            MCDI_EV_FIELD(eqp, RX_ERR_DATA));

    case MCDI_EVENT_CODE_RX_FLUSH: {
        uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

        /*
         * EF10 firmware sends two RX_FLUSH events: one to the rxq's
         * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
         * We want to wait for all completions, so ignore the events
         * with RX_FLUSH_TO_DRIVER.
         */
        if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
            should_abort = B_FALSE;

        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

        EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

        EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
        should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);

        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

    return (should_abort);

ef10_ev_rxlabel_init(
    __in        efx_evq_t *eep,
    __in        efx_rxq_t *erp,
    __in        unsigned int label,
    __in        efx_rxq_type_t type)
    efx_evq_rxq_state_t *eersp;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
    boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
    boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);

    _NOTE(ARGUNUSED(type))
    EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
    eersp = &eep->ee_rxq_state[label];

    EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
    /*
     * For packed stream modes, the very first event will have the new buffer
     * flag set, so the read pointer will be incremented, yielding the correct
     * pointer. That results in simpler code than trying to detect a
     * start-of-the-world condition in the event handler.
     */
    eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
    eersp->eers_rx_read_ptr = 0;
    eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
    eersp->eers_rx_stream_npackets = 0;
    eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
#if EFSYS_OPT_RX_PACKED_STREAM
    if (packed_stream) {
        eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
            EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
            EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
        EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
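
        /*
         * Illustrative note (not from the original sources): the computation
         * above grants one credit for every
         * EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
         * EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE) event queue entries. For
         * example (hypothetical numbers), a 4096-entry queue with a ratio of
         * 256 would start with 16 credits.
         */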
        /*
         * A single credit is allocated to the queue when it is started. It is
         * immediately spent by the first packet, which has the NEW BUFFER
         * flag set, but we must still take it into account so as not to wrap
         * around the maximum number of credits.
         */
        eersp->eers_rx_packed_stream_credits--;
        EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
            EFX_RX_PACKED_STREAM_MAX_CREDITS);
ef10_ev_rxlabel_fini(
    __in        efx_evq_t *eep,
    __in        unsigned int label)
    efx_evq_rxq_state_t *eersp;

    EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
    eersp = &eep->ee_rxq_state[label];

    EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

    eersp->eers_rx_read_ptr = 0;
    eersp->eers_rx_mask = 0;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
    eersp->eers_rx_stream_npackets = 0;
    eersp->eers_rx_packed_stream = B_FALSE;
#if EFSYS_OPT_RX_PACKED_STREAM
    eersp->eers_rx_packed_stream_credits = 0;

#endif  /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */