2 * Copyright (c) 2012-2016 Solarflare Communications Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * The views and conclusions contained in the software and documentation are
27 * those of the authors and should not be interpreted as representing official
28 * policies, either expressed or implied, of the FreeBSD Project.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
36 #if EFSYS_OPT_MON_STATS
40 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
43 #define EFX_EV_QSTAT_INCR(_eep, _stat) \
45 (_eep)->ee_stat[_stat]++; \
46 _NOTE(CONSTANTCONDITION) \
49 #define EFX_EV_QSTAT_INCR(_eep, _stat)
53 * A non-interrupting event queue requires an interrupting event queue to
54 * refer to for wake-up events, even if wake-ups are never used.
55 * It may even be a non-allocated event queue.
57 #define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
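/*
 * Event queue 0 is therefore always created as an interrupting queue
 * (see the queue creation code below), so that non-interrupting queues
 * always have a valid wake-up target to refer to.
 */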
59 static __checkReturn boolean_t
62 __in efx_qword_t *eqp,
63 __in const efx_ev_callbacks_t *eecp,
66 static __checkReturn boolean_t
69 __in efx_qword_t *eqp,
70 __in const efx_ev_callbacks_t *eecp,
73 static __checkReturn boolean_t
76 __in efx_qword_t *eqp,
77 __in const efx_ev_callbacks_t *eecp,
80 static __checkReturn boolean_t
83 __in efx_qword_t *eqp,
84 __in const efx_ev_callbacks_t *eecp,
87 static __checkReturn boolean_t
90 __in efx_qword_t *eqp,
91 __in const efx_ev_callbacks_t *eecp,
95 static __checkReturn efx_rc_t
98 __in uint32_t instance,
100 __in uint32_t timer_ns)
103 uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
104 MC_CMD_SET_EVQ_TMR_OUT_LEN)];
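/*
 * Standard MCDI request pattern used throughout this file: a single
 * payload buffer sized for the larger of the request and response is
 * populated via the MCDI_IN_* macros, the request is executed
 * synchronously, and the result is checked for an MCDI error and a
 * complete response before any MCDI_OUT_* fields are read.
 */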
107 (void) memset(payload, 0, sizeof (payload));
108 req.emr_cmd = MC_CMD_SET_EVQ_TMR;
109 req.emr_in_buf = payload;
110 req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
111 req.emr_out_buf = payload;
112 req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
114 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
115 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
116 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
117 MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
119 efx_mcdi_execute(enp, &req);
121 if (req.emr_rc != 0) {
126 if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
136 EFSYS_PROBE1(fail1, efx_rc_t, rc);
141 static __checkReturn efx_rc_t
144 __in unsigned int instance,
145 __in efsys_mem_t *esmp,
150 __in boolean_t low_latency)
154 MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
155 MC_CMD_INIT_EVQ_OUT_LEN)];
156 efx_qword_t *dma_addr;
160 boolean_t interrupting;
164 npages = EFX_EVQ_NBUFS(nevs);
165 if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
170 (void) memset(payload, 0, sizeof (payload));
171 req.emr_cmd = MC_CMD_INIT_EVQ;
172 req.emr_in_buf = payload;
173 req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
174 req.emr_out_buf = payload;
175 req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;
177 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
178 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
179 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);
181 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
182 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
185 * On Huntington, RX and TX event batching can only be requested together
186 * (even if the datapath firmware doesn't actually support RX
187 * batching). If event cut through is enabled, no RX batching will occur.
189 * So always enable RX and TX event batching, and enable event cut
190 * through if we want low-latency operation.
192 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
193 case EFX_EVQ_FLAGS_TYPE_AUTO:
194 ev_cut_through = low_latency ? 1 : 0;
196 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
199 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
206 MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
207 INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
208 INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
209 INIT_EVQ_IN_FLAG_INT_ARMD, 0,
210 INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
211 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
212 INIT_EVQ_IN_FLAG_TX_MERGE, 1);
214 /* If the value is zero then disable the timer */
216 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
217 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
218 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
219 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
223 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
226 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
227 MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
228 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
229 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
232 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
233 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
234 MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);
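/*
 * The event ring is described to firmware as a table of 64-bit DMA
 * addresses, one per EFX_BUF_SIZE buffer, each split into low and high
 * 32-bit words (EFX_DWORD_0/EFX_DWORD_1) in the request payload.
 */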
236 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
237 addr = EFSYS_MEM_ADDR(esmp);
239 for (i = 0; i < npages; i++) {
240 EFX_POPULATE_QWORD_2(*dma_addr,
241 EFX_DWORD_1, (uint32_t)(addr >> 32),
242 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
245 addr += EFX_BUF_SIZE;
248 efx_mcdi_execute(enp, &req);
250 if (req.emr_rc != 0) {
255 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
260 /* NOTE: ignore the returned IRQ param as firmware does not set it. */
273 EFSYS_PROBE1(fail1, efx_rc_t, rc);
279 static __checkReturn efx_rc_t
280 efx_mcdi_init_evq_v2(
282 __in unsigned int instance,
283 __in efsys_mem_t *esmp,
291 MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
292 MC_CMD_INIT_EVQ_V2_OUT_LEN)];
293 boolean_t interrupting;
294 unsigned int evq_type;
295 efx_qword_t *dma_addr;
301 npages = EFX_EVQ_NBUFS(nevs);
302 if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
307 (void) memset(payload, 0, sizeof (payload));
308 req.emr_cmd = MC_CMD_INIT_EVQ;
309 req.emr_in_buf = payload;
310 req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
311 req.emr_out_buf = payload;
312 req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;
314 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
315 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
316 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);
318 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
319 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
321 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
322 case EFX_EVQ_FLAGS_TYPE_AUTO:
323 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
325 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
326 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
328 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
329 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
335 MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
336 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
337 INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
338 INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
339 INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);
341 /* If the value is zero then disable the timer */
343 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
344 MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
345 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
346 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
350 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
353 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
354 MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
355 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
356 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
359 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
360 MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
361 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);
363 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
364 addr = EFSYS_MEM_ADDR(esmp);
366 for (i = 0; i < npages; i++) {
367 EFX_POPULATE_QWORD_2(*dma_addr,
368 EFX_DWORD_1, (uint32_t)(addr >> 32),
369 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
372 addr += EFX_BUF_SIZE;
375 efx_mcdi_execute(enp, &req);
377 if (req.emr_rc != 0) {
382 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
387 /* NOTE: ignore the returned IRQ param as firmware does not set it. */
389 EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
390 MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
403 EFSYS_PROBE1(fail1, efx_rc_t, rc);
408 static __checkReturn efx_rc_t
411 __in uint32_t instance)
414 uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
415 MC_CMD_FINI_EVQ_OUT_LEN)];
418 (void) memset(payload, 0, sizeof (payload));
419 req.emr_cmd = MC_CMD_FINI_EVQ;
420 req.emr_in_buf = payload;
421 req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
422 req.emr_out_buf = payload;
423 req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
425 MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
427 efx_mcdi_execute_quiet(enp, &req);
429 if (req.emr_rc != 0) {
437 EFSYS_PROBE1(fail1, efx_rc_t, rc);
444 __checkReturn efx_rc_t
448 _NOTE(ARGUNUSED(enp))
456 _NOTE(ARGUNUSED(enp))
459 __checkReturn efx_rc_t
462 __in unsigned int index,
463 __in efsys_mem_t *esmp,
470 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
474 _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
475 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
476 EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
478 if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
483 if (index >= encp->enc_evq_limit) {
488 if (us > encp->enc_evq_timer_max_us) {
493 /* Set up the handler table */
494 eep->ee_rx = ef10_ev_rx;
495 eep->ee_tx = ef10_ev_tx;
496 eep->ee_driver = ef10_ev_driver;
497 eep->ee_drv_gen = ef10_ev_drv_gen;
498 eep->ee_mcdi = ef10_ev_mcdi;
500 /* Set up the event queue */
501 /* INIT_EVQ expects function-relative vector number */
502 if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
503 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
505 } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
507 flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
508 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
510 irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
514 * Interrupts may be raised for events immediately after the queue is
515 * created. See bug58606.
518 if (encp->enc_init_evq_v2_supported) {
520 * On Medford the low latency license is required to enable RX cut
521 * through and event cut through, and to disable RX batching. If the
522 * event queue type in flags is auto, we let the firmware decide the
523 * settings to use. If the adapter has a low latency license,
524 * it will choose the best settings for low latency, otherwise
525 * it will choose the best settings for throughput.
527 rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
532 * On Huntington we need to specify the settings to use.
533 * If the event queue type in flags is auto, we favour throughput
534 * if the adapter is running virtualization-supporting firmware
535 * (i.e. the full-featured firmware variant)
536 * and latency otherwise. The Ethernet Virtual Bridging
537 * capability is used to make this decision. (Note though that
538 * the low latency firmware variant is also best for
539 * throughput, and the corresponding type should be specified to choose it.)
542 boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
543 rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
560 EFSYS_PROBE1(fail1, efx_rc_t, rc);
569 efx_nic_t *enp = eep->ee_enp;
571 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
572 enp->en_family == EFX_FAMILY_MEDFORD);
574 (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
577 __checkReturn efx_rc_t
580 __in unsigned int count)
582 efx_nic_t *enp = eep->ee_enp;
586 rptr = count & eep->ee_mask;
588 if (enp->en_nic_cfg.enc_bug35388_workaround) {
589 EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
590 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
591 EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
592 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
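/*
 * The read pointer is wider than the indirect register field, so it is
 * written in two accesses: the upper ERF_DD_EVQ_IND_RPTR_WIDTH bits
 * first (FLAGS_HIGH), then the remaining low bits (FLAGS_LOW).
 */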
594 EFX_POPULATE_DWORD_2(dword,
595 ERF_DD_EVQ_IND_RPTR_FLAGS,
596 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
598 (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
599 EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
602 EFX_POPULATE_DWORD_2(dword,
603 ERF_DD_EVQ_IND_RPTR_FLAGS,
604 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
606 rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
607 EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
610 EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
611 EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
618 static __checkReturn efx_rc_t
619 efx_mcdi_driver_event(
622 __in efx_qword_t data)
625 uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
626 MC_CMD_DRIVER_EVENT_OUT_LEN)];
629 req.emr_cmd = MC_CMD_DRIVER_EVENT;
630 req.emr_in_buf = payload;
631 req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
632 req.emr_out_buf = payload;
633 req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
635 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
637 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
638 EFX_QWORD_FIELD(data, EFX_DWORD_0));
639 MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
640 EFX_QWORD_FIELD(data, EFX_DWORD_1));
642 efx_mcdi_execute(enp, &req);
644 if (req.emr_rc != 0) {
652 EFSYS_PROBE1(fail1, efx_rc_t, rc);
662 efx_nic_t *enp = eep->ee_enp;
665 EFX_POPULATE_QWORD_3(event,
666 ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
667 ESF_DZ_DRV_SUB_CODE, 0,
668 ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
670 (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
673 __checkReturn efx_rc_t
676 __in unsigned int us)
678 efx_nic_t *enp = eep->ee_enp;
679 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
684 /* Check that hardware and MCDI use the same timer MODE values */
685 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
686 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
687 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
688 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
689 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
690 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
691 EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
692 MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
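/*
 * These asserts matter because the same mode value is either passed to
 * the MCDI SET_EVQ_TMR command (bug61265 workaround path) or written
 * directly to the timer register, so the two encodings must agree.
 */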
694 if (us > encp->enc_evq_timer_max_us) {
699 /* If the value is zero then disable the timer */
701 mode = FFE_CZ_TIMER_MODE_DIS;
703 mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
706 if (encp->enc_bug61265_workaround) {
707 uint32_t ns = us * 1000;
709 rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
715 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
718 if (encp->enc_bug35388_workaround) {
719 EFX_POPULATE_DWORD_3(dword,
720 ERF_DD_EVQ_IND_TIMER_FLAGS,
721 EFE_DD_EVQ_IND_TIMER_FLAGS,
722 ERF_DD_EVQ_IND_TIMER_MODE, mode,
723 ERF_DD_EVQ_IND_TIMER_VAL, ticks);
724 EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
725 eep->ee_index, &dword, 0);
727 EFX_POPULATE_DWORD_2(dword,
728 ERF_DZ_TC_TIMER_MODE, mode,
729 ERF_DZ_TC_TIMER_VAL, ticks);
730 EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
731 eep->ee_index, &dword, 0);
742 EFSYS_PROBE1(fail1, efx_rc_t, rc);
750 ef10_ev_qstats_update(
752 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
756 for (id = 0; id < EV_NQSTATS; id++) {
757 efsys_stat_t *essp = &stat[id];
759 EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
760 eep->ee_stat[id] = 0;
763 #endif /* EFSYS_OPT_QSTATS */
766 static __checkReturn boolean_t
769 __in efx_qword_t *eqp,
770 __in const efx_ev_callbacks_t *eecp,
773 efx_nic_t *enp = eep->ee_enp;
777 uint32_t eth_tag_class;
780 uint32_t next_read_lbits;
783 boolean_t should_abort;
784 efx_evq_rxq_state_t *eersp;
785 unsigned int desc_count;
786 unsigned int last_used_id;
788 EFX_EV_QSTAT_INCR(eep, EV_RX);
790 /* Discard events after RXQ/TXQ errors */
791 if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
794 /* Basic packet information */
795 size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
796 next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
797 label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
798 eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
799 mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
800 l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
801 l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
802 cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
804 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
805 /* Drop this event */
812 * This may be part of a scattered frame, or it may be a
813 * truncated frame if scatter is disabled on this RXQ.
814 * Overlength frames can be received if e.g. a VF is configured
815 * for 1500 MTU but connected to a port set to 9000 MTU.
817 * FIXME: There is not yet any driver that supports scatter on
818 * Huntington. Scatter support is required for OSX.
820 flags |= EFX_PKT_CONT;
823 if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
824 flags |= EFX_PKT_UNICAST;
826 /* Increment the count of descriptors read */
827 eersp = &eep->ee_rxq_state[label];
828 desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
829 EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
830 eersp->eers_rx_read_ptr += desc_count;
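/*
 * The masked subtraction above counts descriptors modulo the width of
 * the pointer field, so it remains correct across wrap-around. For
 * illustration, assuming a 4-bit LBITS field: read_ptr 0xe and
 * lbits 0x2 give (0x2 - 0xe) & 0xf = 4 descriptors consumed.
 */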
833 * FIXME: add error checking to make sure this is a batched event.
834 * This could also be an aborted scatter, see Bug36629.
836 if (desc_count > 1) {
837 EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
838 flags |= EFX_PKT_PREFIX_LEN;
841 /* Calculate the index of the last descriptor consumed */
842 last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
844 /* Check for errors that invalidate checksum and L3/L4 fields */
845 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
846 /* RX frame truncated (error flag is misnamed) */
847 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
848 flags |= EFX_DISCARD;
851 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
852 /* Bad Ethernet frame CRC */
853 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
854 flags |= EFX_DISCARD;
857 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
859 * Hardware parse failed due to malformed headers
860 * or headers that are too long for the parser.
861 * Headers and checksums must be validated by the host.
863 /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
867 if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
868 (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
869 flags |= EFX_PKT_VLAN_TAGGED;
873 case ESE_DZ_L3_CLASS_IP4:
874 case ESE_DZ_L3_CLASS_IP4_FRAG:
875 flags |= EFX_PKT_IPV4;
876 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
877 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
879 flags |= EFX_CKSUM_IPV4;
882 if (l4_class == ESE_DZ_L4_CLASS_TCP) {
883 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
884 flags |= EFX_PKT_TCP;
885 } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
886 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
887 flags |= EFX_PKT_UDP;
889 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
893 case ESE_DZ_L3_CLASS_IP6:
894 case ESE_DZ_L3_CLASS_IP6_FRAG:
895 flags |= EFX_PKT_IPV6;
897 if (l4_class == ESE_DZ_L4_CLASS_TCP) {
898 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
899 flags |= EFX_PKT_TCP;
900 } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
901 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
902 flags |= EFX_PKT_UDP;
904 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
909 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
913 if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
914 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
915 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
917 flags |= EFX_CKSUM_TCPUDP;
922 /* If we're not discarding the packet then it is ok */
923 if (~flags & EFX_DISCARD)
924 EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
926 EFSYS_ASSERT(eecp->eec_rx != NULL);
927 should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
929 return (should_abort);
932 static __checkReturn boolean_t
935 __in efx_qword_t *eqp,
936 __in const efx_ev_callbacks_t *eecp,
939 efx_nic_t *enp = eep->ee_enp;
942 boolean_t should_abort;
944 EFX_EV_QSTAT_INCR(eep, EV_TX);
946 /* Discard events after RXQ/TXQ errors */
947 if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
950 if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
951 /* Drop this event */
955 /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
956 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
957 label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
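/*
 * The event reports the index of the most recently completed
 * descriptor for the labelled TXQ; the client's eec_tx callback can
 * reap all pending descriptors up to that index.
 */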
959 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
961 EFSYS_ASSERT(eecp->eec_tx != NULL);
962 should_abort = eecp->eec_tx(arg, label, id);
964 return (should_abort);
967 static __checkReturn boolean_t
970 __in efx_qword_t *eqp,
971 __in const efx_ev_callbacks_t *eecp,
975 boolean_t should_abort;
977 EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
978 should_abort = B_FALSE;
980 code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
982 case ESE_DZ_DRV_TIMER_EV: {
985 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
987 EFSYS_ASSERT(eecp->eec_timer != NULL);
988 should_abort = eecp->eec_timer(arg, id);
992 case ESE_DZ_DRV_WAKE_UP_EV: {
995 id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
997 EFSYS_ASSERT(eecp->eec_wake_up != NULL);
998 should_abort = eecp->eec_wake_up(arg, id);
1002 case ESE_DZ_DRV_START_UP_EV:
1003 EFSYS_ASSERT(eecp->eec_initialized != NULL);
1004 should_abort = eecp->eec_initialized(arg);
1008 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1009 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1010 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1014 return (should_abort);
1017 static __checkReturn boolean_t
1019 __in efx_evq_t *eep,
1020 __in efx_qword_t *eqp,
1021 __in const efx_ev_callbacks_t *eecp,
1025 boolean_t should_abort;
1027 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1028 should_abort = B_FALSE;
1030 data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
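/*
 * Driver-generated (software) events carry a 16-bit value supplied
 * when the event was posted; anything larger indicates a malformed
 * event and is treated as a bad event.
 */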
1031 if (data >= ((uint32_t)1 << 16)) {
1032 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1033 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1034 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1039 EFSYS_ASSERT(eecp->eec_software != NULL);
1040 should_abort = eecp->eec_software(arg, (uint16_t)data);
1042 return (should_abort);
1045 static __checkReturn boolean_t
1047 __in efx_evq_t *eep,
1048 __in efx_qword_t *eqp,
1049 __in const efx_ev_callbacks_t *eecp,
1052 efx_nic_t *enp = eep->ee_enp;
1054 boolean_t should_abort = B_FALSE;
1056 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
1058 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
1060 case MCDI_EVENT_CODE_BADSSERT:
1061 efx_mcdi_ev_death(enp, EINTR);
1064 case MCDI_EVENT_CODE_CMDDONE:
1065 efx_mcdi_ev_cpl(enp,
1066 MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
1067 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
1068 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
1071 #if EFSYS_OPT_MCDI_PROXY_AUTH
1072 case MCDI_EVENT_CODE_PROXY_RESPONSE:
1074 * This event notifies a function that an authorization request
1075 * has been processed. If the request was authorized then the
1076 * function can now re-send the original MCDI request.
1077 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
1079 efx_mcdi_ev_proxy_response(enp,
1080 MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
1081 MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
1083 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
1085 case MCDI_EVENT_CODE_LINKCHANGE: {
1086 efx_link_mode_t link_mode;
1088 ef10_phy_link_ev(enp, eqp, &link_mode);
1089 should_abort = eecp->eec_link_change(arg, link_mode);
1093 case MCDI_EVENT_CODE_SENSOREVT: {
1094 #if EFSYS_OPT_MON_STATS
1096 efx_mon_stat_value_t value;
1099 /* Decode monitor stat for MCDI sensor (if supported) */
1100 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
1101 /* Report monitor stat change */
1102 should_abort = eecp->eec_monitor(arg, id, value);
1103 } else if (rc == ENOTSUP) {
1104 should_abort = eecp->eec_exception(arg,
1105 EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1106 MCDI_EV_FIELD(eqp, DATA));
1108 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
1114 case MCDI_EVENT_CODE_SCHEDERR:
1115 /* Informational only */
1118 case MCDI_EVENT_CODE_REBOOT:
1119 /* Falcon/Siena only (should not be seen with Huntington). */
1120 efx_mcdi_ev_death(enp, EIO);
1123 case MCDI_EVENT_CODE_MC_REBOOT:
1124 /* MC_REBOOT event is used for Huntington (EF10) and later. */
1125 efx_mcdi_ev_death(enp, EIO);
1128 case MCDI_EVENT_CODE_MAC_STATS_DMA:
1129 #if EFSYS_OPT_MAC_STATS
1130 if (eecp->eec_mac_stats != NULL) {
1131 eecp->eec_mac_stats(arg,
1132 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1137 case MCDI_EVENT_CODE_FWALERT: {
1138 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1140 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1141 should_abort = eecp->eec_exception(arg,
1142 EFX_EXCEPTION_FWALERT_SRAM,
1143 MCDI_EV_FIELD(eqp, FWALERT_DATA));
1145 should_abort = eecp->eec_exception(arg,
1146 EFX_EXCEPTION_UNKNOWN_FWALERT,
1147 MCDI_EV_FIELD(eqp, DATA));
1151 case MCDI_EVENT_CODE_TX_ERR: {
1153 * After a TXQ error is detected, firmware sends a TX_ERR event.
1154 * This may be followed by TX completions (which we discard),
1155 * and then finally by a TX_FLUSH event. Firmware destroys the
1156 * TXQ automatically after sending the TX_FLUSH event.
1158 enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
1160 EFSYS_PROBE2(tx_descq_err,
1161 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1162 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1164 /* Inform the driver that a reset is required. */
1165 eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
1166 MCDI_EV_FIELD(eqp, TX_ERR_DATA));
1170 case MCDI_EVENT_CODE_TX_FLUSH: {
1171 uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
1174 * EF10 firmware sends two TX_FLUSH events: one to the txq's
1175 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
1176 * We want to wait for all completions, so ignore the events
1177 * with TX_FLUSH_TO_DRIVER.
1179 if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
1180 should_abort = B_FALSE;
1184 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
1186 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
1188 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
1189 should_abort = eecp->eec_txq_flush_done(arg, txq_index);
1193 case MCDI_EVENT_CODE_RX_ERR: {
1195 * After an RXQ error is detected, firmware sends an RX_ERR
1196 * event. This may be followed by RX events (which we discard),
1197 * and then finally by an RX_FLUSH event. Firmware destroys the
1198 * RXQ automatically after sending the RX_FLUSH event.
1200 enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
1202 EFSYS_PROBE2(rx_descq_err,
1203 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1204 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1206 /* Inform the driver that a reset is required. */
1207 eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
1208 MCDI_EV_FIELD(eqp, RX_ERR_DATA));
1212 case MCDI_EVENT_CODE_RX_FLUSH: {
1213 uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
1216 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
1217 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
1218 * We want to wait for all completions, so ignore the events
1219 * with RX_FLUSH_TO_DRIVER.
1221 if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
1222 should_abort = B_FALSE;
1226 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
1228 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
1230 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
1231 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
1236 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1237 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1238 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1242 return (should_abort);
1246 ef10_ev_rxlabel_init(
1247 __in efx_evq_t *eep,
1248 __in efx_rxq_t *erp,
1249 __in unsigned int label)
1251 efx_evq_rxq_state_t *eersp;
1253 EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1254 eersp = &eep->ee_rxq_state[label];
1256 EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
1258 eersp->eers_rx_read_ptr = 0;
1259 eersp->eers_rx_mask = erp->er_mask;
1263 ef10_ev_rxlabel_fini(
1264 __in efx_evq_t *eep,
1265 __in unsigned int label)
1267 efx_evq_rxq_state_t *eersp;
1269 EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1270 eersp = &eep->ee_rxq_state[label];
1272 EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
1274 eersp->eers_rx_read_ptr = 0;
1275 eersp->eers_rx_mask = 0;
1278 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */