2 * Copyright (c) 2010-2015 Solarflare Communications Inc.
5 * This software was developed in part by Philip Paeps under contract for
6 * Solarflare Communications, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * The views and conclusions contained in the software and documentation are
30 * those of the authors and should not be interpreted as representing official
31 * policies, either expressed or implied, of the FreeBSD Project.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/param.h>
41 #include <sys/queue.h>
42 #include <sys/systm.h>
43 #include <sys/taskqueue.h>
45 #include "common/efx.h"
/*
 * Drain deferred completion work for one event queue: walk the list of
 * TX queues that accumulated pending completions during event processing,
 * then finish any pending RX completions.  'eop' (end-of-poll) is passed
 * through to sfxge_rx_qcomplete().  Caller must hold the event queue lock.
 *
 * NOTE(review): some interior lines (braces, list-walk statements) are
 * elided in this extract; comments describe only what is visible.
 */
50 sfxge_ev_qcomplete(struct sfxge_evq *evq, boolean_t eop)
52 struct sfxge_softc *sc;
54 struct sfxge_rxq *rxq;
55 struct sfxge_txq *txq;
57 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
/* If any TX queues were linked onto this evq, complete each in turn. */
63 if ((txq = evq->txq) != NULL) {
/* Reset the completion-list tail pointer before walking the list. */
65 evq->txqs = &(evq->txq);
68 struct sfxge_txq *next;
73 KASSERT(txq->evq_index == index,
74 ("txq->evq_index != index"));
/* Only invoke the TX completion handler if work is outstanding. */
76 if (txq->pending != txq->completed)
77 sfxge_tx_qcomplete(txq, evq);
80 } while (txq != NULL);
/* Likewise complete the RX queue owned by this event queue. */
83 if (rxq->pending != rxq->completed)
84 sfxge_rx_qcomplete(rxq, eop);
/*
 * Common-code callback for an RX completion event.
 *
 * Advances the RX queue's 'pending' index by the number of descriptors
 * completed by this event (handling ring-pointer wrap), stamps the new
 * descriptors with the event's flags/size, and batches completion work.
 * Returns nonzero once enough events have been handled that the poll
 * loop should yield (evq->rx_done >= SFXGE_EV_BATCH).
 *
 * NOTE(review): some interior lines are elided in this extract.
 */
88 sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
91 struct sfxge_evq *evq;
92 struct sfxge_softc *sc;
93 struct sfxge_rxq *rxq;
96 struct sfxge_rx_sw_desc *rx_desc;
99 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
106 rxq = sc->rxq[label];
107 KASSERT(rxq != NULL, ("rxq == NULL"));
108 KASSERT(evq->index == rxq->index,
109 ("evq->index != rxq->index"));
/* Ignore events for a queue that is no longer started (e.g. mid-reset). */
111 if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
/* Compute how many descriptors this event completes, modulo ring size. */
114 stop = (id + 1) & rxq->ptr_mask;
115 id = rxq->pending & rxq->ptr_mask;
116 delta = (stop >= id) ? (stop - id) : (rxq->entries - id + stop);
117 rxq->pending += delta;
/*
 * Without RX batching each event must complete exactly one descriptor;
 * with batching, delta may not exceed the NIC's batch maximum.  Anything
 * else means completions arrived out of order: flag the exception and
 * schedule a full device reset.
 */
120 if ((!efx_nic_cfg_get(sc->enp)->enc_rx_batching_enabled) ||
122 (delta > efx_nic_cfg_get(sc->enp)->enc_rx_batch_max)) {
123 evq->exception = B_TRUE;
125 device_printf(sc->dev, "RX completion out of order"
126 " (id=%#x delta=%u flags=%#x); resetting\n",
128 sfxge_schedule_reset(sc);
134 rx_desc = &rxq->queue[id];
/* Warm the cache line holding the first completed mbuf. */
136 prefetch_read_many(rx_desc->mbuf);
/* Stamp every descriptor covered by this event with its flags/size. */
138 for (; id != stop; id = (id + 1) & rxq->ptr_mask) {
139 rx_desc = &rxq->queue[id];
140 KASSERT(rx_desc->flags == EFX_DISCARD,
141 ("rx_desc->flags != EFX_DISCARD"));
142 rx_desc->flags = flags;
/* rx_desc->size is 16 bits; the hardware size must fit. */
144 KASSERT(size < (1 << 16), ("size > (1 << 16)"));
145 rx_desc->size = (uint16_t)size;
/* Batch completion work to amortize per-packet overhead. */
150 if (rxq->pending - rxq->completed >= SFXGE_RX_BATCH)
151 sfxge_ev_qcomplete(evq, B_FALSE);
/* Nonzero return asks efx_ev_qpoll() to stop after this batch. */
154 return (evq->rx_done >= SFXGE_EV_BATCH);
/*
 * Common-code callback for a hardware exception event.
 *
 * Logs a human-readable name for the exception code, marks the event
 * queue as having seen an exception, and schedules a device reset for
 * every code except the benign unknown-sensor event.
 */
158 sfxge_ev_exception(void *arg, uint32_t code, uint32_t data)
160 struct sfxge_evq *evq;
161 struct sfxge_softc *sc;
163 evq = (struct sfxge_evq *)arg;
164 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
/* Map the exception code to a name for the debug log. */
168 DBGPRINT(sc->dev, "[%d] %s", evq->index,
169 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
170 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
171 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
172 (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT) ? "UNKNOWN_SENSOREVT" :
173 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
174 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
175 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
176 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
177 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
178 evq->exception = B_TRUE;
/* Unknown sensor events are informational; everything else resets. */
182 if (code != EFX_EXCEPTION_UNKNOWN_SENSOREVT) {
183 device_printf(sc->dev,
184 "hardware exception (code=%u); resetting\n",
186 sfxge_schedule_reset(sc);
193 sfxge_ev_rxq_flush_done(void *arg, uint32_t rxq_index)
195 struct sfxge_evq *evq;
196 struct sfxge_softc *sc;
197 struct sfxge_rxq *rxq;
202 evq = (struct sfxge_evq *)arg;
203 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
206 rxq = sc->rxq[rxq_index];
208 KASSERT(rxq != NULL, ("rxq == NULL"));
210 /* Resend a software event on the correct queue */
212 if (index == evq->index) {
213 sfxge_rx_qflush_done(rxq);
217 evq = sc->evq[index];
220 KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
221 ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != level"));
222 magic = SFXGE_MAGIC_RX_QFLUSH_DONE | label;
224 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
225 ("evq not started"));
226 efx_ev_qpost(evq->common, magic);
/*
 * Common-code callback for an RX queue flush-failed event.
 *
 * Mirrors sfxge_ev_rxq_flush_done(): the failure notification is re-posted
 * as a software ("magic") event on the event queue that owns the RX queue,
 * so the failure is processed under the owner's lock.
 *
 * NOTE(review): some interior lines (declarations, 'index'/'label'
 * assignments) are elided in this extract.
 */
232 sfxge_ev_rxq_flush_failed(void *arg, uint32_t rxq_index)
234 struct sfxge_evq *evq;
235 struct sfxge_softc *sc;
236 struct sfxge_rxq *rxq;
241 evq = (struct sfxge_evq *)arg;
242 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
245 rxq = sc->rxq[rxq_index];
247 KASSERT(rxq != NULL, ("rxq == NULL"));
249 /* Resend a software event on the correct queue */
251 evq = sc->evq[index];
/* The queue label must fit in the magic event's label field. */
254 KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
255 ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
256 magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label;
258 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
259 ("evq not started"));
260 efx_ev_qpost(evq->common, magic);
/*
 * Map an (event queue, TX queue label) pair to the softc's TX queue.
 *
 * Event queue 0 hosts one TX queue per sfxge_txq_type, addressed directly
 * by label; every other event queue hosts a single checksum-offload queue
 * located after the first SFXGE_TXQ_NTYPES entries in sc->txq[].
 */
265 static struct sfxge_txq *
266 sfxge_get_txq_by_label(struct sfxge_evq *evq, enum sfxge_txq_type label)
270 KASSERT((evq->index == 0 && label < SFXGE_TXQ_NTYPES) ||
271 (label == SFXGE_TXQ_IP_TCP_UDP_CKSUM), ("unexpected txq label"));
272 index = (evq->index == 0) ? label : (evq->index - 1 + SFXGE_TXQ_NTYPES);
273 return (evq->sc->txq[index]);
/*
 * Common-code callback for a TX completion event.
 *
 * Advances the TX queue's 'pending' index by the number of descriptors
 * completed (handling ring wrap), links the queue onto the event queue's
 * deferred-completion list if not already present, and completes
 * immediately once a batch threshold is reached.  Returns nonzero when
 * the event budget is exhausted (evq->tx_done >= SFXGE_EV_BATCH).
 *
 * NOTE(review): some interior lines are elided in this extract.
 */
277 sfxge_ev_tx(void *arg, uint32_t label, uint32_t id)
279 struct sfxge_evq *evq;
280 struct sfxge_txq *txq;
284 evq = (struct sfxge_evq *)arg;
285 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
287 txq = sfxge_get_txq_by_label(evq, label);
289 KASSERT(txq != NULL, ("txq == NULL"));
290 KASSERT(evq->index == txq->evq_index,
291 ("evq->index != txq->evq_index"));
/* Ignore stale events for a queue that is not started. */
293 if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED))
/* Compute how many descriptors this event completes, modulo ring size. */
296 stop = (id + 1) & txq->ptr_mask;
297 id = txq->pending & txq->ptr_mask;
299 delta = (stop >= id) ? (stop - id) : (txq->entries - id + stop);
300 txq->pending += delta;
/* Append to the evq's deferred-completion list if not already on it. */
304 if (txq->next == NULL &&
305 evq->txqs != &(txq->next)) {
307 evq->txqs = &(txq->next);
/* Complete eagerly once enough descriptors have accumulated. */
310 if (txq->pending - txq->completed >= SFXGE_TX_BATCH)
311 sfxge_tx_qcomplete(txq, evq);
/* Nonzero return asks efx_ev_qpoll() to stop after this batch. */
314 return (evq->tx_done >= SFXGE_EV_BATCH);
/*
 * Common-code callback for a TX queue flush-done event.
 *
 * If this event queue owns the TX queue, complete the flush directly;
 * otherwise re-post the notification as a software ("magic") event on the
 * owning event queue.  Caller must hold the event queue lock.
 *
 * NOTE(review): some interior lines (declarations, 'label' assignment)
 * are elided in this extract.
 */
318 sfxge_ev_txq_flush_done(void *arg, uint32_t txq_index)
320 struct sfxge_evq *evq;
321 struct sfxge_softc *sc;
322 struct sfxge_txq *txq;
326 evq = (struct sfxge_evq *)arg;
327 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
330 txq = sc->txq[txq_index];
332 KASSERT(txq != NULL, ("txq == NULL"));
/* Flush-done implies the queue has already been torn back to INITIALIZED. */
333 KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
334 ("txq not initialized"));
336 if (txq->evq_index == evq->index) {
337 sfxge_tx_qflush_done(txq);
341 /* Resend a software event on the correct queue */
342 evq = sc->evq[txq->evq_index];
/* The queue label must fit in the magic event's label field. */
345 KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
346 ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
347 magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label;
349 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
350 ("evq not started"));
351 efx_ev_qpost(evq->common, magic);
/*
 * Common-code callback for a software ("magic") event.
 *
 * The low bits of 'magic' carry a DMA queue label and the high bits the
 * operation; split them apart and dispatch to the matching RX/TX flush or
 * refill handler.  These events are posted by the flush-done/failed
 * callbacks above to migrate work onto the owning event queue.
 */
357 sfxge_ev_software(void *arg, uint16_t magic)
359 struct sfxge_evq *evq;
360 struct sfxge_softc *sc;
363 evq = (struct sfxge_evq *)arg;
364 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
/* Separate the queue label from the operation code. */
368 label = magic & SFXGE_MAGIC_DMAQ_LABEL_MASK;
369 magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK;
372 case SFXGE_MAGIC_RX_QFLUSH_DONE: {
373 struct sfxge_rxq *rxq = sc->rxq[label];
375 KASSERT(rxq != NULL, ("rxq == NULL"));
376 KASSERT(evq->index == rxq->index,
377 ("evq->index != rxq->index"));
379 sfxge_rx_qflush_done(rxq);
382 case SFXGE_MAGIC_RX_QFLUSH_FAILED: {
383 struct sfxge_rxq *rxq = sc->rxq[label];
385 KASSERT(rxq != NULL, ("rxq == NULL"));
386 KASSERT(evq->index == rxq->index,
387 ("evq->index != rxq->index"));
389 sfxge_rx_qflush_failed(rxq);
392 case SFXGE_MAGIC_RX_QREFILL: {
393 struct sfxge_rxq *rxq = sc->rxq[label];
395 KASSERT(rxq != NULL, ("rxq == NULL"));
396 KASSERT(evq->index == rxq->index,
397 ("evq->index != rxq->index"));
399 sfxge_rx_qrefill(rxq);
402 case SFXGE_MAGIC_TX_QFLUSH_DONE: {
403 struct sfxge_txq *txq = sfxge_get_txq_by_label(evq, label);
405 KASSERT(txq != NULL, ("txq == NULL"));
406 KASSERT(evq->index == txq->evq_index,
407 ("evq->index != txq->evq_index"));
409 sfxge_tx_qflush_done(txq);
/*
 * Common-code callback for an SRAM event.  Each known code just fires a
 * DTrace-style probe; any other code is impossible by contract, hence the
 * unconditional KASSERT in the default arm.
 */
420 sfxge_ev_sram(void *arg, uint32_t code)
426 case EFX_SRAM_UPDATE:
427 EFSYS_PROBE(sram_update);
431 EFSYS_PROBE(sram_clear);
434 case EFX_SRAM_ILLEGAL_CLEAR:
435 EFSYS_PROBE(sram_illegal_clear);
/* B_FALSE always fails: reaching here means an unknown SRAM code. */
439 KASSERT(B_FALSE, ("Impossible SRAM event"));
/*
 * Common-code callbacks for timer and wake-up events.  Bodies are elided
 * in this extract; both appear to be no-op acknowledgements — TODO confirm
 * against the full source.
 */
447 sfxge_ev_timer(void *arg, uint32_t index)
456 sfxge_ev_wake_up(void *arg, uint32_t index)
/*
 * Refresh the cached event-queue statistics from the common code,
 * rate-limited to at most once per second (hz ticks).  Skips entirely if
 * the first event queue is not started (statistics DMA area would be
 * invalid).  Takes the adapter lock; each evq is locked around its
 * qstats update.
 */
467 sfxge_ev_stat_update(struct sfxge_softc *sc)
469 struct sfxge_evq *evq;
473 SFXGE_ADAPTER_LOCK(sc);
475 if (__predict_false(sc->evq[0]->init_state != SFXGE_EVQ_STARTED))
/* Rate-limit: at most one update per second. */
479 if (now - sc->ev_stats_update_time < hz)
482 sc->ev_stats_update_time = now;
484 /* Add event counts from each event queue in turn */
485 for (index = 0; index < sc->evq_count; index++) {
486 evq = sc->evq[index];
488 efx_ev_qstats_update(evq->common, sc->ev_stats);
489 SFXGE_EVQ_UNLOCK(evq);
492 SFXGE_ADAPTER_UNLOCK(sc);
/*
 * Sysctl handler for a single event-queue statistic.  arg1 is the softc,
 * arg2 the statistic id.  Refreshes the stats cache (rate-limited inside
 * sfxge_ev_stat_update) before copying the requested counter out.
 */
496 sfxge_ev_stat_handler(SYSCTL_HANDLER_ARGS)
498 struct sfxge_softc *sc = arg1;
499 unsigned int id = arg2;
501 sfxge_ev_stat_update(sc);
503 return (SYSCTL_OUT(req, &sc->ev_stats[id], sizeof(sc->ev_stats[id])));
/*
 * Register one read-only "ev_<name>" sysctl per event-queue statistic
 * under the device's stats node.  Compiled only with EFSYS_OPT_QSTATS
 * (see the #endif below).
 */
507 sfxge_ev_stat_init(struct sfxge_softc *sc)
509 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
510 struct sysctl_oid_list *stat_list;
514 stat_list = SYSCTL_CHILDREN(sc->stats_node);
516 for (id = 0; id < EV_NQSTATS; id++) {
/* Statistic names come from the common code, prefixed with "ev_". */
517 snprintf(name, sizeof(name), "ev_%s",
518 efx_ev_qstat_name(sc->enp, id));
521 OID_AUTO, name, CTLTYPE_U64|CTLFLAG_RD,
522 sc, id, sfxge_ev_stat_handler, "Q",
527 #endif /* EFSYS_OPT_QSTATS */
/*
 * Set the interrupt moderation (in microseconds) for one started event
 * queue via the common code.  The efx_ev_qmoderate() return value is
 * deliberately ignored: the range was validated by the caller.
 */
530 sfxge_ev_qmoderate(struct sfxge_softc *sc, unsigned int idx, unsigned int us)
532 struct sfxge_evq *evq;
538 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
539 ("evq->init_state != SFXGE_EVQ_STARTED"));
541 (void)efx_ev_qmoderate(eep, us);
/*
 * Sysctl handler for the "int_mod" interrupt-moderation knob.
 *
 * On write: range-checks the new value against the NIC's maximum, stores
 * it in the softc, and (if interrupts are running) applies it to every
 * event queue.  On read: copies out the current setting.  All under the
 * adapter lock.
 */
545 sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS)
547 struct sfxge_softc *sc = arg1;
548 struct sfxge_intr *intr = &sc->intr;
549 unsigned int moderation;
553 SFXGE_ADAPTER_LOCK(sc);
/* newptr != NULL means this is a write (set) request. */
555 if (req->newptr != NULL) {
556 if ((error = SYSCTL_IN(req, &moderation, sizeof(moderation)))
560 /* We may not be calling efx_ev_qmoderate() now,
561 * so we have to range-check the value ourselves.
564 efx_nic_cfg_get(sc->enp)->enc_evq_timer_max_us) {
569 sc->ev_moderation = moderation;
/* Apply immediately only if the queues are actually running. */
570 if (intr->state == SFXGE_INTR_STARTED) {
571 for (index = 0; index < sc->evq_count; index++)
572 sfxge_ev_qmoderate(sc, index, moderation);
575 error = SYSCTL_OUT(req, &sc->ev_moderation,
576 sizeof(sc->ev_moderation));
580 SFXGE_ADAPTER_UNLOCK(sc);
/*
 * Common-code callback for the event queue initialisation-done event.
 * Transitions the queue from STARTING to STARTED; sfxge_ev_qstart() polls
 * for this state change.
 */
586 sfxge_ev_initialized(void *arg)
588 struct sfxge_evq *evq;
590 evq = (struct sfxge_evq *)arg;
591 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
593 /* Init done events may be duplicated on 7xxx */
594 KASSERT(evq->init_state == SFXGE_EVQ_STARTING ||
595 evq->init_state == SFXGE_EVQ_STARTED,
596 ("evq not starting"));
598 evq->init_state = SFXGE_EVQ_STARTED;
/*
 * Common-code callback for a link state change event; forwards the new
 * link mode to the MAC layer.
 */
604 sfxge_ev_link_change(void *arg, efx_link_mode_t link_mode)
606 struct sfxge_evq *evq;
607 struct sfxge_softc *sc;
609 evq = (struct sfxge_evq *)arg;
610 SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);
614 sfxge_mac_link_update(sc, link_mode);
/*
 * Table of driver callbacks handed to efx_ev_qpoll(); one entry per
 * common-code event type handled above.
 */
619 static const efx_ev_callbacks_t sfxge_ev_callbacks = {
620 .eec_initialized = sfxge_ev_initialized,
621 .eec_rx = sfxge_ev_rx,
622 .eec_tx = sfxge_ev_tx,
623 .eec_exception = sfxge_ev_exception,
624 .eec_rxq_flush_done = sfxge_ev_rxq_flush_done,
625 .eec_rxq_flush_failed = sfxge_ev_rxq_flush_failed,
626 .eec_txq_flush_done = sfxge_ev_txq_flush_done,
627 .eec_software = sfxge_ev_software,
628 .eec_sram = sfxge_ev_sram,
629 .eec_wake_up = sfxge_ev_wake_up,
630 .eec_timer = sfxge_ev_timer,
631 .eec_link_change = sfxge_ev_link_change,
/*
 * Poll one event queue: sync the DMA ring for reading, hand events to the
 * callback table, flush any deferred completion work, then re-prime the
 * queue so the hardware will raise the next interrupt.  Bails out early
 * if the queue is neither starting nor started.
 *
 * NOTE(review): some interior lines (locking entry, error path) are
 * elided in this extract.
 */
636 sfxge_ev_qpoll(struct sfxge_evq *evq)
642 if (__predict_false(evq->init_state != SFXGE_EVQ_STARTING &&
643 evq->init_state != SFXGE_EVQ_STARTED)) {
648 /* Synchronize the DMA memory for reading */
649 bus_dmamap_sync(evq->mem.esm_tag, evq->mem.esm_map,
650 BUS_DMASYNC_POSTREAD);
/* Per-poll accounting must start from a clean slate. */
652 KASSERT(evq->rx_done == 0, ("evq->rx_done != 0"));
653 KASSERT(evq->tx_done == 0, ("evq->tx_done != 0"));
654 KASSERT(evq->txq == NULL, ("evq->txq != NULL"));
655 KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));
/* Dispatch events to the sfxge_ev_callbacks table. */
658 efx_ev_qpoll(evq->common, &evq->read_ptr, &sfxge_ev_callbacks, evq);
663 /* Perform any pending completion processing */
664 sfxge_ev_qcomplete(evq, B_TRUE);
666 /* Re-prime the event queue for interrupts */
667 if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
670 SFXGE_EVQ_UNLOCK(evq);
675 SFXGE_EVQ_UNLOCK(evq);
/*
 * Stop one started event queue: drop it back to INITIALIZED, clear the
 * exception flag, fold its statistics into the softc before the common
 * queue state is destroyed, then destroy the common queue and release
 * its buffer-table entries.
 */
680 sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
682 struct sfxge_evq *evq;
684 evq = sc->evq[index];
686 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
687 ("evq->init_state != SFXGE_EVQ_STARTED"));
690 evq->init_state = SFXGE_EVQ_INITIALIZED;
692 evq->exception = B_FALSE;
695 /* Add event counts before discarding the common evq state */
696 efx_ev_qstats_update(evq->common, sc->ev_stats);
699 efx_ev_qdestroy(evq->common);
700 efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
701 EFX_EVQ_NBUFS(evq->entries));
702 SFXGE_EVQ_UNLOCK(evq);
/*
 * Start one initialized event queue.
 *
 * Fills the event ring with the "empty" pattern (0xff), programs the
 * SRAM buffer table, creates the common-code queue, applies the default
 * interrupt moderation, primes the queue, then polls (100 ms pause per
 * iteration, up to 20 iterations ≈ 2 s) for the initialisation-done event
 * that flips init_state to STARTED.  The trailing lines are the failure
 * path that unwinds the queue; some interior lines (timeout handling,
 * error labels) are elided in this extract.
 */
706 sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
708 struct sfxge_evq *evq;
713 evq = sc->evq[index];
716 KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
717 ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
719 /* Clear all events. */
720 (void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
722 /* Program the buffer table. */
723 if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
724 EFX_EVQ_NBUFS(evq->entries))) != 0)
727 /* Create the common code event queue. */
728 if ((rc = efx_ev_qcreate(sc->enp, index, esmp, evq->entries,
729 evq->buf_base_id, &evq->common)) != 0)
734 /* Set the default moderation */
735 (void)efx_ev_qmoderate(evq->common, sc->ev_moderation);
737 /* Prime the event queue for interrupts */
738 if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
741 evq->init_state = SFXGE_EVQ_STARTING;
743 SFXGE_EVQ_UNLOCK(evq);
745 /* Wait for the initialization event */
748 /* Pause for 100 ms */
749 pause("sfxge evq init", hz / 10);
751 /* Check to see if the test event has been processed */
752 if (evq->init_state == SFXGE_EVQ_STARTED)
755 } while (++count < 20);
/* Failure path: unwind queue creation and buffer-table allocation. */
765 evq->init_state = SFXGE_EVQ_INITIALIZED;
767 SFXGE_EVQ_UNLOCK(evq);
768 efx_ev_qdestroy(evq->common);
770 efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
771 EFX_EVQ_NBUFS(evq->entries));
/*
 * Stop all event queues (in reverse index order, per the countdown from
 * sc->evq_count) and tear down the common-code event module.  Requires
 * interrupts to still be started.
 */
777 sfxge_ev_stop(struct sfxge_softc *sc)
779 struct sfxge_intr *intr;
786 KASSERT(intr->state == SFXGE_INTR_STARTED,
787 ("Interrupts not started"));
789 /* Stop the event queue(s) */
790 index = sc->evq_count;
792 sfxge_ev_qstop(sc, index);
794 /* Tear down the event module */
/*
 * Initialise the common-code event module and start every event queue.
 * On any queue failure, the trailing lines unwind: stop the queues
 * already started and finalise the event module.
 */
799 sfxge_ev_start(struct sfxge_softc *sc)
801 struct sfxge_intr *intr;
807 KASSERT(intr->state == SFXGE_INTR_STARTED,
808 ("intr->state != SFXGE_INTR_STARTED"));
810 /* Initialize the event module */
811 if ((rc = efx_ev_init(sc->enp)) != 0)
814 /* Start the event queues */
815 for (index = 0; index < sc->evq_count; index++) {
816 if ((rc = sfxge_ev_qstart(sc, index)) != 0)
/* Failure path: unwind queues started so far. */
823 /* Stop the event queue(s) */
825 sfxge_ev_qstop(sc, index);
827 /* Tear down the event module */
828 efx_ev_fini(sc->enp);
/*
 * Free one event queue's resources: its DMA memory, the softc slot, and
 * its lock.  The queue must be INITIALIZED (i.e. stopped) and must have
 * no TX queues left on its completion list.
 */
834 sfxge_ev_qfini(struct sfxge_softc *sc, unsigned int index)
836 struct sfxge_evq *evq;
838 evq = sc->evq[index];
840 KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
841 ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
842 KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));
844 sfxge_dma_free(&evq->mem);
846 sc->evq[index] = NULL;
848 SFXGE_EVQ_LOCK_DESTROY(evq);
/*
 * Allocate and initialise one event queue: size the ring for one event
 * per RX/TX buffer plus slack, allocate its DMA memory and SRAM
 * buffer-table entries, and initialise its lock and TX completion list.
 *
 * NOTE(review): some interior lines (evq field assignments, error paths)
 * are elided in this extract.
 */
854 sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
856 struct sfxge_evq *evq;
860 KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX"));
862 evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK);
865 sc->evq[index] = evq;
868 /* Build an event queue with room for one event per tx and rx buffer,
869 * plus some extra for link state events and MCDI completions.
870 * There are three tx queues in the first event queue and one in
/* Event queue 0 hosts three TX queues; the others host one each. */
875 ROUNDUP_POW_OF_TWO(sc->rxq_entries +
876 3 * sc->txq_entries +
880 ROUNDUP_POW_OF_TWO(sc->rxq_entries +
884 /* Initialise TX completion list */
885 evq->txqs = &evq->txq;
887 /* Allocate DMA space. */
888 if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(evq->entries), esmp)) != 0)
891 /* Allocate buffer table entries. */
892 sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries),
895 SFXGE_EVQ_LOCK_INIT(evq, device_get_nameunit(sc->dev), index);
897 evq->init_state = SFXGE_EVQ_INITIALIZED;
/*
 * Tear down all event queues (reverse index order) and reset the
 * moderation setting.  May only run once interrupts are back in the
 * INITIALIZED state.
 */
903 sfxge_ev_fini(struct sfxge_softc *sc)
905 struct sfxge_intr *intr;
910 KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
911 ("intr->state != SFXGE_INTR_INITIALIZED"));
913 sc->ev_moderation = 0;
915 /* Tear down the event queue(s). */
916 index = sc->evq_count;
918 sfxge_ev_qfini(sc, index);
/*
 * Driver-attach-time event subsystem setup: one event queue per allocated
 * interrupt, a default moderation value with an "int_mod" sysctl to read
 * and change it, and (with EFSYS_OPT_QSTATS) the per-statistic sysctls.
 * The trailing line is the failure path unwinding queues already
 * initialised.
 */
924 sfxge_ev_init(struct sfxge_softc *sc)
926 struct sysctl_ctx_list *sysctl_ctx = device_get_sysctl_ctx(sc->dev);
927 struct sysctl_oid *sysctl_tree = device_get_sysctl_tree(sc->dev);
928 struct sfxge_intr *intr;
/* One event queue per interrupt vector. */
934 sc->evq_count = intr->n_alloc;
936 KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
937 ("intr->state != SFXGE_INTR_INITIALIZED"));
939 /* Set default interrupt moderation; add a sysctl to
940 * read and change it.
942 sc->ev_moderation = SFXGE_MODERATION;
943 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
944 OID_AUTO, "int_mod", CTLTYPE_UINT|CTLFLAG_RW,
945 sc, 0, sfxge_int_mod_handler, "IU",
946 "sfxge interrupt moderation (us)");
949 * Initialize the event queue(s) - one per interrupt.
951 for (index = 0; index < sc->evq_count; index++) {
952 if ((rc = sfxge_ev_qinit(sc, index)) != 0)
957 sfxge_ev_stat_init(sc);
/* Failure path: free queues initialised so far. */
964 sfxge_ev_qfini(sc, index);