2 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
5 * This software was developed in part by Philip Paeps under contract for
6 * Solarflare Communications, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/param.h>
37 #include <sys/queue.h>
38 #include <sys/systm.h>
39 #include <sys/taskqueue.h>
41 #include "common/efx.h"
/*
 * Drain deferred completion work for an event queue: walk the TX queues
 * that accumulated completions during event processing, then complete any
 * pending RX descriptors.  Caller must hold the evq lock (asserted below).
 * NOTE(review): this listing is sampled -- lines between the numbered
 * statements (e.g. the do-loop header and rxq lookup) are not visible.
 */
46 sfxge_ev_qcomplete(struct sfxge_evq *evq, boolean_t eop)
48 struct sfxge_softc *sc;
50 struct sfxge_rxq *rxq;
51 struct sfxge_txq *txq;
53 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
/* Reset the evq's TX completion list head before walking it. */
59 if ((txq = evq->txq) != NULL) {
61 evq->txqs = &(evq->txq);
64 struct sfxge_txq *next;
/* Every txq on this list must belong to this event queue. */
69 KASSERT(txq->evq_index == index,
70 ("txq->evq_index != index"));
72 if (txq->pending != txq->completed)
73 sfxge_tx_qcomplete(txq, evq);
76 } while (txq != NULL);
/* Complete outstanding RX descriptors; eop marks end of poll batch. */
79 if (rxq->pending != rxq->completed)
80 sfxge_rx_qcomplete(rxq, eop);
/*
 * eec_rx callback: handle a single RX completion event.
 * Verifies the completion arrives in ring order; on a mismatch, latches
 * evq->exception and schedules a full NIC reset.  Otherwise stores the
 * hardware flags and size in the software descriptor and batches the
 * completion work.  Returns nonzero once enough events have been handled
 * for this poll (rx_done >= SFXGE_EV_BATCH), telling the poller to stop.
 * NOTE(review): sampled listing -- the early-return paths and rx_done
 * increment are among the lines not visible here.
 */
84 sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
87 struct sfxge_evq *evq;
88 struct sfxge_softc *sc;
89 struct sfxge_rxq *rxq;
90 unsigned int expected;
91 struct sfxge_rx_sw_desc *rx_desc;
94 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
/* The event's label selects the RX queue; it must match this evq. */
101 rxq = sc->rxq[label];
102 KASSERT(rxq != NULL, ("rxq == NULL"));
103 KASSERT(evq->index == rxq->index,
104 ("evq->index != rxq->index"));
/* Ignore completions for a queue that is not (or no longer) started. */
106 if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
/* Completions must arrive in ring order; pending is advanced here. */
109 expected = rxq->pending++ & rxq->ptr_mask;
110 if (id != expected) {
111 evq->exception = B_TRUE;
113 device_printf(sc->dev, "RX completion out of order"
114 " (id=%#x expected=%#x flags=%#x); resetting\n",
115 id, expected, flags);
116 sfxge_schedule_reset(sc);
121 rx_desc = &rxq->queue[id];
/* Descriptor must still be in its freshly-posted (EFX_DISCARD) state. */
123 KASSERT(rx_desc->flags == EFX_DISCARD,
124 ("rx_desc->flags != EFX_DISCARD"));
125 rx_desc->flags = flags;
/* size is stored in a uint16_t below, hence the < 2^16 assertion. */
127 KASSERT(size < (1 << 16), ("size > (1 << 16)"));
128 rx_desc->size = (uint16_t)size;
/* Warm the cache for the mbuf we are about to hand up the stack. */
129 prefetch_read_many(rx_desc->mbuf);
/* Batch completion work rather than completing one packet at a time. */
133 if (rxq->pending - rxq->completed >= SFXGE_RX_BATCH)
134 sfxge_ev_qcomplete(evq, B_FALSE);
137 return (evq->rx_done >= SFXGE_EV_BATCH);
/*
 * eec_exception callback: the hardware reported an exception event.
 * Always latch evq->exception; schedule a NIC reset for every code
 * except the benign unknown-sensor event.
 */
141 sfxge_ev_exception(void *arg, uint32_t code, uint32_t data)
143 struct sfxge_evq *evq;
144 struct sfxge_softc *sc;
146 evq = (struct sfxge_evq *)arg;
147 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
151 evq->exception = B_TRUE;
/* Unknown-sensor events are harmless; anything else forces a reset. */
153 if (code != EFX_EXCEPTION_UNKNOWN_SENSOREVT) {
154 device_printf(sc->dev,
155 "hardware exception (code=%u); resetting\n",
157 sfxge_schedule_reset(sc);
/*
 * eec_rxq_flush_done callback: a hardware RX queue flush has completed.
 * The notification can be delivered on any event queue, so re-post it as
 * a software ("magic") event on the event queue that owns the RX queue.
 * Runs with the delivering evq's lock held.
 * NOTE(review): sampled listing -- the derivation of 'index'/'label' and
 * the 'magic' declaration are among the lines not visible here.
 * Fix: the KASSERT message said "!= level"; corrected to "!= label" to
 * match the condition and the identical assertions in the flush-failed
 * and TX flush-done handlers.
 */
164 sfxge_ev_rxq_flush_done(void *arg, uint32_t rxq_index)
166 struct sfxge_evq *evq;
167 struct sfxge_softc *sc;
168 struct sfxge_rxq *rxq;
173 evq = (struct sfxge_evq *)arg;
174 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
177 rxq = sc->rxq[rxq_index];
179 KASSERT(rxq != NULL, ("rxq == NULL"));
181 /* Resend a software event on the correct queue */
183 evq = sc->evq[index];
/* The label must fit in the magic event's DMA-queue label field. */
186 KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
187 ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
188 magic = SFXGE_MAGIC_RX_QFLUSH_DONE | label;
190 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
191 ("evq not started"));
192 efx_ev_qpost(evq->common, magic);
/*
 * eec_rxq_flush_failed callback: a hardware RX queue flush failed.
 * As with flush-done, the notification may arrive on any event queue, so
 * re-post it as a software ("magic") event on the evq owning the RX queue.
 * NOTE(review): sampled listing -- 'index'/'label'/'magic' setup lines
 * are not visible here.
 */
198 sfxge_ev_rxq_flush_failed(void *arg, uint32_t rxq_index)
200 struct sfxge_evq *evq;
201 struct sfxge_softc *sc;
202 struct sfxge_rxq *rxq;
207 evq = (struct sfxge_evq *)arg;
208 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
211 rxq = sc->rxq[rxq_index];
213 KASSERT(rxq != NULL, ("rxq == NULL"));
215 /* Resend a software event on the correct queue */
217 evq = sc->evq[index];
/* The label must fit in the magic event's DMA-queue label field. */
220 KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
221 ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
222 magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label;
224 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
225 ("evq not started"));
226 efx_ev_qpost(evq->common, magic);
/*
 * Map a TX event label to the corresponding softc TX queue.
 * Event queue 0 carries one TX queue per checksum type (label is the
 * type); every other event queue carries a single IP+TCP/UDP-checksum
 * queue, located after the SFXGE_TXQ_NTYPES queues of evq 0.
 */
231 static struct sfxge_txq *
232 sfxge_get_txq_by_label(struct sfxge_evq *evq, enum sfxge_txq_type label)
236 KASSERT((evq->index == 0 && label < SFXGE_TXQ_NTYPES) ||
237 (label == SFXGE_TXQ_IP_TCP_UDP_CKSUM), ("unexpected txq label"));
238 index = (evq->index == 0) ? label : (evq->index - 1 + SFXGE_TXQ_NTYPES);
239 return (evq->sc->txq[index]);
/*
 * eec_tx callback: handle a TX completion event for descriptor 'id'.
 * Computes how many descriptors completed (modulo the ring size), adds
 * the txq to the evq's pending-completion list if not already on it, and
 * completes in batches.  Returns nonzero once enough events have been
 * handled this poll (tx_done >= SFXGE_EV_BATCH).
 * NOTE(review): sampled listing -- declarations of 'stop'/'delta' and the
 * tx_done increment are among the lines not visible here.
 */
243 sfxge_ev_tx(void *arg, uint32_t label, uint32_t id)
245 struct sfxge_evq *evq;
246 struct sfxge_txq *txq;
250 evq = (struct sfxge_evq *)arg;
251 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
253 txq = sfxge_get_txq_by_label(evq, label);
255 KASSERT(txq != NULL, ("txq == NULL"));
256 KASSERT(evq->index == txq->evq_index,
257 ("evq->index != txq->evq_index"));
/* Ignore completions for a queue that is not (or no longer) started. */
259 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED))
/* 'id' is the last completed descriptor; advance pending past it. */
262 stop = (id + 1) & txq->ptr_mask;
263 id = txq->pending & txq->ptr_mask;
/* Count completed descriptors, handling ring wrap-around. */
265 delta = (stop >= id) ? (stop - id) : (txq->entries - id + stop);
266 txq->pending += delta;
/* Append to the evq completion list only if not already linked. */
270 if (txq->next == NULL &&
271 evq->txqs != &(txq->next)) {
273 evq->txqs = &(txq->next);
/* Batch completion work rather than completing per event. */
276 if (txq->pending - txq->completed >= SFXGE_TX_BATCH)
277 sfxge_tx_qcomplete(txq, evq);
280 return (evq->tx_done >= SFXGE_EV_BATCH);
/*
 * eec_txq_flush_done callback: a hardware TX queue flush has completed.
 * Re-post the notification as a software ("magic") event on the event
 * queue that owns the TX queue, which may differ from the delivering one.
 * NOTE(review): sampled listing -- 'label'/'magic' setup lines are not
 * visible here.
 */
284 sfxge_ev_txq_flush_done(void *arg, uint32_t txq_index)
286 struct sfxge_evq *evq;
287 struct sfxge_softc *sc;
288 struct sfxge_txq *txq;
292 evq = (struct sfxge_evq *)arg;
293 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
296 txq = sc->txq[txq_index];
298 KASSERT(txq != NULL, ("txq == NULL"));
/* Flush-done is only expected while the queue is being torn down. */
299 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
300 ("txq not initialized"));
302 /* Resend a software event on the correct queue */
303 evq = sc->evq[txq->evq_index];
306 KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
307 ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
308 magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label;
310 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
311 ("evq not started"));
312 efx_ev_qpost(evq->common, magic);
/*
 * eec_software callback: dispatch driver-generated "magic" events.
 * The low bits carry a DMA-queue label; the remaining bits select the
 * operation (RX flush done/failed, RX refill, TX flush done), which is
 * forwarded to the matching rxq/txq handler on this event queue.
 * NOTE(review): sampled listing -- the switch statement header and
 * break/default lines are not visible here.
 */
318 sfxge_ev_software(void *arg, uint16_t magic)
320 struct sfxge_evq *evq;
321 struct sfxge_softc *sc;
324 evq = (struct sfxge_evq *)arg;
325 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
/* Split the event into its queue label and operation code. */
329 label = magic & SFXGE_MAGIC_DMAQ_LABEL_MASK;
330 magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK;
333 case SFXGE_MAGIC_RX_QFLUSH_DONE: {
334 struct sfxge_rxq *rxq = sc->rxq[label];
336 KASSERT(rxq != NULL, ("rxq == NULL"));
337 KASSERT(evq->index == rxq->index,
338 ("evq->index != rxq->index"));
340 sfxge_rx_qflush_done(rxq);
343 case SFXGE_MAGIC_RX_QFLUSH_FAILED: {
344 struct sfxge_rxq *rxq = sc->rxq[label];
346 KASSERT(rxq != NULL, ("rxq == NULL"));
347 KASSERT(evq->index == rxq->index,
348 ("evq->index != rxq->index"));
350 sfxge_rx_qflush_failed(rxq);
353 case SFXGE_MAGIC_RX_QREFILL: {
354 struct sfxge_rxq *rxq = sc->rxq[label];
356 KASSERT(rxq != NULL, ("rxq == NULL"));
357 KASSERT(evq->index == rxq->index,
358 ("evq->index != rxq->index"));
360 sfxge_rx_qrefill(rxq);
363 case SFXGE_MAGIC_TX_QFLUSH_DONE: {
364 struct sfxge_txq *txq = sfxge_get_txq_by_label(evq, label);
366 KASSERT(txq != NULL, ("txq == NULL"));
367 KASSERT(evq->index == txq->evq_index,
368 ("evq->index != txq->evq_index"));
370 sfxge_tx_qflush_done(txq);
/*
 * eec_sram callback: record SRAM events via EFSYS_PROBE tracepoints.
 * Update and clear events are expected; an illegal clear (or any other
 * code) indicates a driver/hardware inconsistency.
 * NOTE(review): sampled listing -- switch header and breaks not visible.
 */
381 sfxge_ev_sram(void *arg, uint32_t code)
387 case EFX_SRAM_UPDATE:
388 EFSYS_PROBE(sram_update);
392 EFSYS_PROBE(sram_clear);
395 case EFX_SRAM_ILLEGAL_CLEAR:
396 EFSYS_PROBE(sram_illegal_clear);
/* Any other SRAM event code should be unreachable. */
400 KASSERT(B_FALSE, ("Impossible SRAM event"));
/*
 * eec_timer callback.  Body not visible in this sampled listing --
 * presumably ignores or probes the timer event; verify against full file.
 */
408 sfxge_ev_timer(void *arg, uint32_t index)
/*
 * eec_wake_up callback.  Body not visible in this sampled listing --
 * presumably ignores or probes the wake-up event; verify against full file.
 */
417 sfxge_ev_wake_up(void *arg, uint32_t index)
/*
 * Refresh the cached event-queue statistics, rate-limited to at most
 * once per second (hz ticks).  Takes the adapter lock, bails out if the
 * first evq is not started, then folds each evq's common-code stats into
 * sc->ev_stats under that evq's lock.
 * NOTE(review): sampled listing -- 'now' assignment and the per-evq
 * SFXGE_EVQ_LOCK() line are not visible here.
 */
428 sfxge_ev_stat_update(struct sfxge_softc *sc)
430 struct sfxge_evq *evq;
434 SFXGE_ADAPTER_LOCK(sc);
436 if (__predict_false(sc->evq[0]->init_state != SFXGE_EVQ_STARTED))
/* Rate-limit: skip if updated within the last second. */
440 if (now - sc->ev_stats_update_time < hz)
443 sc->ev_stats_update_time = now;
445 /* Add event counts from each event queue in turn */
446 for (index = 0; index < sc->evq_count; index++) {
447 evq = sc->evq[index];
449 efx_ev_qstats_update(evq->common, sc->ev_stats);
450 SFXGE_EVQ_UNLOCK(evq);
453 SFXGE_ADAPTER_UNLOCK(sc);
/*
 * Sysctl handler for one event statistic: refresh the cached stats, then
 * copy out the requested entry (arg2 selects the stat id).
 */
457 sfxge_ev_stat_handler(SYSCTL_HANDLER_ARGS)
459 struct sfxge_softc *sc = arg1;
460 unsigned int id = arg2;
462 sfxge_ev_stat_update(sc);
464 return (SYSCTL_OUT(req, &sc->ev_stats[id], sizeof(sc->ev_stats[id])));
/*
 * Register a read-only sysctl node ("ev_<name>") under the device stats
 * tree for each of the EV_NQSTATS event statistics, all backed by
 * sfxge_ev_stat_handler.  Compiled only with EFSYS_OPT_QSTATS.
 * NOTE(review): sampled listing -- 'name' buffer declaration and the
 * SYSCTL_ADD_PROC opening line are not visible here.
 */
468 sfxge_ev_stat_init(struct sfxge_softc *sc)
470 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
471 struct sysctl_oid_list *stat_list;
475 stat_list = SYSCTL_CHILDREN(sc->stats_node);
477 for (id = 0; id < EV_NQSTATS; id++) {
478 snprintf(name, sizeof(name), "ev_%s",
479 efx_ev_qstat_name(sc->enp, id));
482 OID_AUTO, name, CTLTYPE_U64|CTLFLAG_RD,
483 sc, id, sfxge_ev_stat_handler, "Q",
488 #endif /* EFSYS_OPT_QSTATS */
/*
 * Apply an interrupt-moderation setting of 'us' microseconds to event
 * queue 'idx'.  The queue must already be started.
 * NOTE(review): sampled listing -- the evq lookup and 'eep' assignment
 * are not visible here.
 */
491 sfxge_ev_qmoderate(struct sfxge_softc *sc, unsigned int idx, unsigned int us)
493 struct sfxge_evq *evq;
499 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
500 ("evq->init_state != SFXGE_EVQ_STARTED"));
/* Return value deliberately ignored; range was validated by callers. */
502 (void)efx_ev_qmoderate(eep, us);
/*
 * Sysctl handler for "int_mod" (interrupt moderation, microseconds).
 * On write: validate the new value against the NIC's maximum, store it,
 * and if interrupts are running apply it to every event queue.
 * On read: copy out the current setting.  Serialized by the adapter lock.
 * NOTE(review): sampled listing -- error-path lines and the range-check
 * comparison's opening line are not visible here.
 */
506 sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS)
508 struct sfxge_softc *sc = arg1;
509 struct sfxge_intr *intr = &sc->intr;
510 unsigned int moderation;
514 SFXGE_ADAPTER_LOCK(sc);
/* newptr != NULL means this is a write (set) request. */
516 if (req->newptr != NULL) {
517 if ((error = SYSCTL_IN(req, &moderation, sizeof(moderation)))
521 /* We may not be calling efx_ev_qmoderate() now,
522 * so we have to range-check the value ourselves.
525 efx_nic_cfg_get(sc->enp)->enc_evq_timer_max_us) {
530 sc->ev_moderation = moderation;
531 if (intr->state == SFXGE_INTR_STARTED) {
532 for (index = 0; index < sc->evq_count; index++)
533 sfxge_ev_qmoderate(sc, index, moderation);
536 error = SYSCTL_OUT(req, &sc->ev_moderation,
537 sizeof(sc->ev_moderation));
541 SFXGE_ADAPTER_UNLOCK(sc);
/*
 * eec_initialized callback: the common code delivered the queue's
 * initialization event.  Transition the evq from STARTING to STARTED;
 * sfxge_ev_qstart() polls for this state change.
 */
547 sfxge_ev_initialized(void *arg)
549 struct sfxge_evq *evq;
551 evq = (struct sfxge_evq *)arg;
552 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
554 KASSERT(evq->init_state == SFXGE_EVQ_STARTING,
555 ("evq not starting"));
557 evq->init_state = SFXGE_EVQ_STARTED;
/*
 * eec_link_change callback: forward the new link mode to the MAC layer.
 */
563 sfxge_ev_link_change(void *arg, efx_link_mode_t link_mode)
565 struct sfxge_evq *evq;
566 struct sfxge_softc *sc;
568 evq = (struct sfxge_evq *)arg;
569 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
573 sfxge_mac_link_update(sc, link_mode);
/*
 * Callback table handed to efx_ev_qpoll(): routes each decoded hardware
 * event to the handlers defined above.
 */
578 static const efx_ev_callbacks_t sfxge_ev_callbacks = {
579 .eec_initialized = sfxge_ev_initialized,
580 .eec_rx = sfxge_ev_rx,
581 .eec_tx = sfxge_ev_tx,
582 .eec_exception = sfxge_ev_exception,
583 .eec_rxq_flush_done = sfxge_ev_rxq_flush_done,
584 .eec_rxq_flush_failed = sfxge_ev_rxq_flush_failed,
585 .eec_txq_flush_done = sfxge_ev_txq_flush_done,
586 .eec_software = sfxge_ev_software,
587 .eec_sram = sfxge_ev_sram,
588 .eec_wake_up = sfxge_ev_wake_up,
589 .eec_timer = sfxge_ev_timer,
590 .eec_link_change = sfxge_ev_link_change,
/*
 * Poll an event queue: sync the DMA ring for reading, hand events to the
 * common code (which fans out via sfxge_ev_callbacks), flush any batched
 * completion work, then re-prime the queue for the next interrupt.
 * Holds the evq lock across the poll.
 * NOTE(review): sampled listing -- the lock acquisition, rx_done/tx_done
 * reset, return value, and 'rc' declaration are not visible here.
 */
595 sfxge_ev_qpoll(struct sfxge_evq *evq)
/* Tolerate STARTING: the init event itself arrives via this path. */
601 if (__predict_false(evq->init_state != SFXGE_EVQ_STARTING &&
602 evq->init_state != SFXGE_EVQ_STARTED)) {
607 /* Synchronize the DMA memory for reading */
608 bus_dmamap_sync(evq->mem.esm_tag, evq->mem.esm_map,
609 BUS_DMASYNC_POSTREAD);
/* Per-poll counters and the TX completion list must start empty. */
611 KASSERT(evq->rx_done == 0, ("evq->rx_done != 0"));
612 KASSERT(evq->tx_done == 0, ("evq->tx_done != 0"));
613 KASSERT(evq->txq == NULL, ("evq->txq != NULL"));
614 KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));
617 efx_ev_qpoll(evq->common, &evq->read_ptr, &sfxge_ev_callbacks, evq);
622 /* Perform any pending completion processing */
623 sfxge_ev_qcomplete(evq, B_TRUE);
625 /* Re-prime the event queue for interrupts */
626 if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
629 SFXGE_EVQ_UNLOCK(evq);
634 SFXGE_EVQ_UNLOCK(evq);
/*
 * Stop event queue 'index': mark it back to INITIALIZED, clear the
 * exception flag, fold final statistics into sc->ev_stats, destroy the
 * common-code queue and release its buffer-table entries.
 * NOTE(review): sampled listing -- the SFXGE_EVQ_LOCK() acquisition and
 * read_ptr reset are among the lines not visible here.
 */
639 sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
641 struct sfxge_evq *evq;
643 evq = sc->evq[index];
645 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
646 ("evq->init_state != SFXGE_EVQ_STARTED"));
649 evq->init_state = SFXGE_EVQ_INITIALIZED;
651 evq->exception = B_FALSE;
654 /* Add event counts before discarding the common evq state */
655 efx_ev_qstats_update(evq->common, sc->ev_stats);
658 efx_ev_qdestroy(evq->common);
659 efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
660 EFX_EVQ_NBUFS(evq->entries));
661 SFXGE_EVQ_UNLOCK(evq);
/*
 * Start event queue 'index': clear the event ring memory, program the
 * buffer table, create the common-code queue, set default moderation,
 * prime for interrupts, then wait (polling every 100 ms, up to ~2 s) for
 * the hardware initialization event to flip the state to STARTED.  On
 * timeout or error, tear the queue back down.
 * NOTE(review): sampled listing -- 'esmp'/'count'/'rc' setup, the fail
 * labels, and the success return are not visible here.
 */
665 sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
667 struct sfxge_evq *evq;
672 evq = sc->evq[index];
675 KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
676 ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
678 /* Clear all events. */
679 (void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
681 /* Program the buffer table. */
682 if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
683 EFX_EVQ_NBUFS(evq->entries))) != 0)
686 /* Create the common code event queue. */
687 if ((rc = efx_ev_qcreate(sc->enp, index, esmp, evq->entries,
688 evq->buf_base_id, &evq->common)) != 0)
693 /* Set the default moderation */
694 (void)efx_ev_qmoderate(evq->common, sc->ev_moderation);
696 /* Prime the event queue for interrupts */
697 if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
700 evq->init_state = SFXGE_EVQ_STARTING;
702 SFXGE_EVQ_UNLOCK(evq);
704 /* Wait for the initialization event */
707 /* Pause for 100 ms */
708 pause("sfxge evq init", hz / 10);
710 /* Check to see if the test event has been processed */
711 if (evq->init_state == SFXGE_EVQ_STARTED)
/* Up to 20 iterations x 100 ms = ~2 s total wait. */
714 } while (++count < 20);
/* Failure path: revert state and undo queue creation. */
724 evq->init_state = SFXGE_EVQ_INITIALIZED;
726 SFXGE_EVQ_UNLOCK(evq);
727 efx_ev_qdestroy(evq->common);
729 efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
730 EFX_EVQ_NBUFS(evq->entries));
/*
 * Stop all event queues (in reverse index order) and tear down the
 * common-code event module.  Interrupts must still be started.
 * NOTE(review): sampled listing -- the loop construct around
 * sfxge_ev_qstop() and the efx_ev_fini() call are not visible here.
 */
736 sfxge_ev_stop(struct sfxge_softc *sc)
738 struct sfxge_intr *intr;
745 KASSERT(intr->state == SFXGE_INTR_STARTED,
746 ("Interrupts not started"));
748 /* Stop the event queue(s) */
749 index = sc->evq_count;
751 sfxge_ev_qstop(sc, index);
753 /* Tear down the event module */
/*
 * Start the event subsystem: initialize the common-code event module,
 * then start each event queue in order.  On a queue-start failure, stop
 * the queues already started and tear the module back down.
 * NOTE(review): sampled listing -- the success return, fail labels, and
 * the unwind loop construct are not visible here.
 */
758 sfxge_ev_start(struct sfxge_softc *sc)
760 struct sfxge_intr *intr;
766 KASSERT(intr->state == SFXGE_INTR_STARTED,
767 ("intr->state != SFXGE_INTR_STARTED"));
769 /* Initialize the event module */
770 if ((rc = efx_ev_init(sc->enp)) != 0)
773 /* Start the event queues */
774 for (index = 0; index < sc->evq_count; index++) {
775 if ((rc = sfxge_ev_qstart(sc, index)) != 0)
782 /* Stop the event queue(s) */
784 sfxge_ev_qstop(sc, index);
786 /* Tear down the event module */
787 efx_ev_fini(sc->enp);
/*
 * Free event queue 'index': release its DMA memory, clear the softc
 * slot, destroy its lock, and free the structure.  The queue must be in
 * the INITIALIZED state with an empty TX completion list.
 * NOTE(review): sampled listing -- the final free() of the evq structure
 * is not visible here.
 */
793 sfxge_ev_qfini(struct sfxge_softc *sc, unsigned int index)
795 struct sfxge_evq *evq;
797 evq = sc->evq[index];
799 KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
800 ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
801 KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));
803 sfxge_dma_free(&evq->mem);
805 sc->evq[index] = NULL;
807 SFXGE_EVQ_LOCK_DESTROY(evq);
/*
 * Allocate and initialize event queue 'index': size the ring for one
 * event per RX and TX buffer (evq 0 serves three TX queues, the others
 * one) rounded up to a power of two, allocate DMA memory and buffer
 * table entries, initialize the lock, and mark the queue INITIALIZED.
 * NOTE(review): sampled listing -- 'esmp' setup, the entries assignment
 * targets, the error path, and the return are not visible here.
 */
813 sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
815 struct sfxge_evq *evq;
819 KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX"));
821 evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK);
824 sc->evq[index] = evq;
827 /* Build an event queue with room for one event per tx and rx buffer,
828 * plus some extra for link state events and MCDI completions.
829 * There are three tx queues in the first event queue and one in
/* evq 0: rx + 3*tx entries; other evqs (below): rx + 1*tx entries. */
834 ROUNDUP_POW_OF_TWO(sc->rxq_entries +
835 3 * sc->txq_entries +
839 ROUNDUP_POW_OF_TWO(sc->rxq_entries +
843 /* Initialise TX completion list */
844 evq->txqs = &evq->txq;
846 /* Allocate DMA space. */
847 if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(evq->entries), esmp)) != 0)
850 /* Allocate buffer table entries. */
851 sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries),
854 SFXGE_EVQ_LOCK_INIT(evq, device_get_nameunit(sc->dev), index);
856 evq->init_state = SFXGE_EVQ_INITIALIZED;
/*
 * Tear down the event subsystem: reset the moderation setting and free
 * every event queue (in reverse index order).  Interrupts must be back
 * in the INITIALIZED state.
 * NOTE(review): sampled listing -- the loop construct around
 * sfxge_ev_qfini() and the evq_count reset are not visible here.
 */
862 sfxge_ev_fini(struct sfxge_softc *sc)
864 struct sfxge_intr *intr;
869 KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
870 ("intr->state != SFXGE_INTR_INITIALIZED"));
872 sc->ev_moderation = 0;
874 /* Tear down the event queue(s). */
875 index = sc->evq_count;
877 sfxge_ev_qfini(sc, index);
/*
 * Initialize the event subsystem: one event queue per allocated
 * interrupt.  Sets the default moderation, registers the "int_mod"
 * sysctl, initializes each queue, and registers per-stat sysctls.
 * On a queue-init failure, unwinds the queues already created.
 * NOTE(review): sampled listing -- the success return, fail label, and
 * unwind loop construct are not visible here.
 */
883 sfxge_ev_init(struct sfxge_softc *sc)
885 struct sysctl_ctx_list *sysctl_ctx = device_get_sysctl_ctx(sc->dev);
886 struct sysctl_oid *sysctl_tree = device_get_sysctl_tree(sc->dev);
887 struct sfxge_intr *intr;
893 sc->evq_count = intr->n_alloc;
895 KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
896 ("intr->state != SFXGE_INTR_INITIALIZED"));
898 /* Set default interrupt moderation; add a sysctl to
899 * read and change it.
901 sc->ev_moderation = SFXGE_MODERATION;
902 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
903 OID_AUTO, "int_mod", CTLTYPE_UINT|CTLFLAG_RW,
904 sc, 0, sfxge_int_mod_handler, "IU",
905 "sfxge interrupt moderation (us)");
908 * Initialize the event queue(s) - one per interrupt.
910 for (index = 0; index < sc->evq_count; index++) {
911 if ((rc = sfxge_ev_qinit(sc, index)) != 0)
916 sfxge_ev_stat_init(sc);
/* Error unwind: free queues created before the failure. */
923 sfxge_ev_qfini(sc, index);