2 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
5 * This software was developed in part by Philip Paeps under contract for
6 * Solarflare Communications, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/param.h>
37 #include <sys/queue.h>
38 #include <sys/systm.h>
39 #include <sys/taskqueue.h>
41 #include "common/efx.h"
/*
 * Drain deferred completion work for an event queue: walk the TX queue
 * completion list rooted at evq->txq, calling sfxge_tx_qcomplete() for
 * each queue with pending != completed, then complete RX work via
 * sfxge_rx_qcomplete().  `eop` is forwarded to the RX completion path.
 * NOTE(review): this is a sampled view of the file; some body lines
 * (loop head, list unlinking, rxq lookup) are elided here.
 */
sfxge_ev_qcomplete(struct sfxge_evq *evq, boolean_t eop)
	struct sfxge_softc *sc;
	struct sfxge_rxq *rxq;
	struct sfxge_txq *txq;
	/* Detach and walk the TX completion list, if any. */
	if ((txq = evq->txq) != NULL) {
		/* Reset the list tail pointer before re-processing. */
		evq->txqs = &(evq->txq);
		struct sfxge_txq *next;
		/* Every txq on this list must belong to this evq. */
		KASSERT(txq->evq_index == index,
		    ("txq->evq_index != index"));
		/* Only call into the TX path when there is work to do. */
		if (txq->pending != txq->completed)
			sfxge_tx_qcomplete(txq);
	} while (txq != NULL);
	/* Complete any outstanding RX descriptors for this evq's rxq. */
	if (rxq->pending != rxq->completed)
		sfxge_rx_qcomplete(rxq, eop);
82 sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
85 struct sfxge_evq *evq;
86 struct sfxge_softc *sc;
87 struct sfxge_rxq *rxq;
88 unsigned int expected;
89 struct sfxge_rx_sw_desc *rx_desc;
98 KASSERT(rxq != NULL, ("rxq == NULL"));
99 KASSERT(evq->index == rxq->index,
100 ("evq->index != rxq->index"));
102 if (rxq->init_state != SFXGE_RXQ_STARTED)
105 expected = rxq->pending++ & rxq->ptr_mask;
106 if (id != expected) {
107 evq->exception = B_TRUE;
109 device_printf(sc->dev, "RX completion out of order"
110 " (id=%#x expected=%#x flags=%#x); resetting\n",
111 id, expected, flags);
112 sfxge_schedule_reset(sc);
117 rx_desc = &rxq->queue[id];
119 KASSERT(rx_desc->flags == EFX_DISCARD,
120 ("rx_desc->flags != EFX_DISCARD"));
121 rx_desc->flags = flags;
123 KASSERT(size < (1 << 16), ("size > (1 << 16)"));
124 rx_desc->size = (uint16_t)size;
125 prefetch_read_many(rx_desc->mbuf);
129 if (rxq->pending - rxq->completed >= SFXGE_RX_BATCH)
130 sfxge_ev_qcomplete(evq, B_FALSE);
133 return (evq->rx_done >= SFXGE_EV_BATCH);
/*
 * eec_exception callback: record a hardware exception event.  Any code
 * other than an unknown-sensor event is treated as fatal and triggers
 * a scheduled reset of the device.
 */
sfxge_ev_exception(void *arg, uint32_t code, uint32_t data)
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	evq = (struct sfxge_evq *)arg;
	/* Mark the queue so qpoll/stop paths know state is suspect. */
	evq->exception = B_TRUE;
	/* Unknown sensor events are benign; everything else resets. */
	if (code != EFX_EXCEPTION_UNKNOWN_SENSOREVT) {
		device_printf(sc->dev,
		    "hardware exception (code=%u); resetting\n",
		sfxge_schedule_reset(sc);
158 sfxge_ev_rxq_flush_done(void *arg, uint32_t rxq_index)
160 struct sfxge_evq *evq;
161 struct sfxge_softc *sc;
162 struct sfxge_rxq *rxq;
167 evq = (struct sfxge_evq *)arg;
169 rxq = sc->rxq[rxq_index];
171 KASSERT(rxq != NULL, ("rxq == NULL"));
173 /* Resend a software event on the correct queue */
175 evq = sc->evq[index];
178 KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
179 ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != level"));
180 magic = SFXGE_MAGIC_RX_QFLUSH_DONE | label;
182 KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
183 ("evq not started"));
184 efx_ev_qpost(evq->common, magic);
/*
 * eec_rxq_flush_failed callback: an RX queue flush failed.  Mirrors
 * sfxge_ev_rxq_flush_done(): re-post the notification as a software
 * "magic" event on the event queue that owns the rxq, with the DMA
 * queue label encoded in the low bits.
 * NOTE(review): sampled view — some body lines are elided.
 */
sfxge_ev_rxq_flush_failed(void *arg, uint32_t rxq_index)
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	struct sfxge_rxq *rxq;
	evq = (struct sfxge_evq *)arg;
	rxq = sc->rxq[rxq_index];
	KASSERT(rxq != NULL, ("rxq == NULL"));
	/* Resend a software event on the correct queue */
	evq = sc->evq[index];
	/* The label must fit inside the magic-event label field. */
	KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
	    ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
	magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label;
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq not started"));
	efx_ev_qpost(evq->common, magic);
/*
 * Map a TX event label to the softc's txq array.  Event queue 0 hosts
 * one TX queue per checksum-offload type (SFXGE_TXQ_NTYPES of them);
 * every other event queue hosts exactly one TCP/UDP-checksum TX queue
 * at index (evq->index - 1 + SFXGE_TXQ_NTYPES).
 */
static struct sfxge_txq *
sfxge_get_txq_by_label(struct sfxge_evq *evq, enum sfxge_txq_type label)
	KASSERT((evq->index == 0 && label < SFXGE_TXQ_NTYPES) ||
	    (label == SFXGE_TXQ_IP_TCP_UDP_CKSUM), ("unexpected txq label"));
	index = (evq->index == 0) ? label : (evq->index - 1 + SFXGE_TXQ_NTYPES);
	return (evq->sc->txq[index]);
/*
 * eec_tx callback: handle a TX completion event.  `id` is the last
 * completed descriptor; compute how many descriptors that covers
 * (handling ring wrap via txq->entries), advance txq->pending, link
 * the txq onto the evq's deferred-completion list, and batch calls to
 * sfxge_tx_qcomplete().  Returns B_TRUE once evq->tx_done reaches
 * SFXGE_EV_BATCH so the poller yields.
 * NOTE(review): sampled view — some body lines are elided.
 */
sfxge_ev_tx(void *arg, uint32_t label, uint32_t id)
	struct sfxge_evq *evq;
	struct sfxge_txq *txq;
	evq = (struct sfxge_evq *)arg;
	txq = sfxge_get_txq_by_label(evq, label);
	KASSERT(txq != NULL, ("txq == NULL"));
	KASSERT(evq->index == txq->evq_index,
	    ("evq->index != txq->evq_index"));
	/* Drop events that race with queue stop. */
	if (txq->init_state != SFXGE_TXQ_STARTED)
	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;
	/* Number of newly completed descriptors, accounting for wrap. */
	delta = (stop >= id) ? (stop - id) : (txq->entries - id + stop);
	txq->pending += delta;
	/* Append to the evq's TX completion list if not already on it. */
	if (txq->next == NULL &&
	    evq->txqs != &(txq->next)) {
		evq->txqs = &(txq->next);
	/* Batch completion work to amortize overhead. */
	if (txq->pending - txq->completed >= SFXGE_TX_BATCH)
		sfxge_tx_qcomplete(txq);
	return (evq->tx_done >= SFXGE_EV_BATCH);
/*
 * eec_txq_flush_done callback: a TX queue flush completed.  As with
 * the RX flush handlers, re-post the notification as a software
 * "magic" event on the event queue that owns the txq.
 * NOTE(review): sampled view — some body lines are elided.
 */
sfxge_ev_txq_flush_done(void *arg, uint32_t txq_index)
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	evq = (struct sfxge_evq *)arg;
	txq = sc->txq[txq_index];
	KASSERT(txq != NULL, ("txq == NULL"));
	/* Flush-done is only expected while the txq is quiesced. */
	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
	    ("txq not initialized"));
	/* Resend a software event on the correct queue */
	evq = sc->evq[txq->evq_index];
	/* The label must fit inside the magic-event label field. */
	KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
	    ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
	magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label;
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq not started"));
	efx_ev_qpost(evq->common, magic);
/*
 * eec_software callback: dispatch driver-generated "magic" events.
 * The low bits of `magic` carry a DMA queue label (masked off into
 * `label`); the remaining bits select the operation.  These events are
 * posted by the flush-done/failed handlers above so that each queue's
 * teardown notifications are processed on its owning event queue.
 * NOTE(review): sampled view — switch head and break statements elided.
 */
sfxge_ev_software(void *arg, uint16_t magic)
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	evq = (struct sfxge_evq *)arg;
	/* Split the event into label (queue index) and operation code. */
	label = magic & SFXGE_MAGIC_DMAQ_LABEL_MASK;
	magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK;
	case SFXGE_MAGIC_RX_QFLUSH_DONE: {
		struct sfxge_rxq *rxq = sc->rxq[label];
		KASSERT(rxq != NULL, ("rxq == NULL"));
		KASSERT(evq->index == rxq->index,
		    ("evq->index != rxq->index"));
		sfxge_rx_qflush_done(rxq);
	case SFXGE_MAGIC_RX_QFLUSH_FAILED: {
		struct sfxge_rxq *rxq = sc->rxq[label];
		KASSERT(rxq != NULL, ("rxq == NULL"));
		KASSERT(evq->index == rxq->index,
		    ("evq->index != rxq->index"));
		sfxge_rx_qflush_failed(rxq);
	case SFXGE_MAGIC_RX_QREFILL: {
		struct sfxge_rxq *rxq = sc->rxq[label];
		KASSERT(rxq != NULL, ("rxq == NULL"));
		KASSERT(evq->index == rxq->index,
		    ("evq->index != rxq->index"));
		sfxge_rx_qrefill(rxq);
	case SFXGE_MAGIC_TX_QFLUSH_DONE: {
		struct sfxge_txq *txq = sfxge_get_txq_by_label(evq, label);
		KASSERT(txq != NULL, ("txq == NULL"));
		KASSERT(evq->index == txq->evq_index,
		    ("evq->index != txq->evq_index"));
		sfxge_tx_qflush_done(txq);
/*
 * eec_sram callback: trace SRAM events via EFSYS_PROBE.  No driver
 * state changes here; an impossible code triggers a KASSERT.
 */
sfxge_ev_sram(void *arg, uint32_t code)
	case EFX_SRAM_UPDATE:
		EFSYS_PROBE(sram_update);
		EFSYS_PROBE(sram_clear);
	case EFX_SRAM_ILLEGAL_CLEAR:
		EFSYS_PROBE(sram_illegal_clear);
	/* default: any other code is a programming error. */
	KASSERT(B_FALSE, ("Impossible SRAM event"));
/* eec_timer callback (registered in sfxge_ev_callbacks). */
sfxge_ev_timer(void *arg, uint32_t index)
/* eec_wake_up callback (registered in sfxge_ev_callbacks). */
sfxge_ev_wake_up(void *arg, uint32_t index)
/*
 * Refresh sc->ev_stats by summing per-queue counters from the common
 * code.  Serialized by the softc lock; rate-limited to at most once
 * per second (hz ticks) and skipped entirely unless evq[0] is started.
 */
sfxge_ev_stat_update(struct sfxge_softc *sc)
	struct sfxge_evq *evq;
	sx_xlock(&sc->softc_lock);
	/* Stats are only meaningful while the event queues are running. */
	if (sc->evq[0]->init_state != SFXGE_EVQ_STARTED)
	/* Rate-limit updates to once per second. */
	if (now - sc->ev_stats_update_time < hz)
	sc->ev_stats_update_time = now;
	/* Add event counts from each event queue in turn */
	for (index = 0; index < sc->intr.n_alloc; index++) {
		evq = sc->evq[index];
		mtx_lock(&evq->lock);
		efx_ev_qstats_update(evq->common, sc->ev_stats);
		mtx_unlock(&evq->lock);
	sx_xunlock(&sc->softc_lock);
/*
 * Sysctl handler for a single event-queue statistic: arg1 is the
 * softc, arg2 the statistic id.  Refreshes the aggregate stats then
 * copies the requested counter out to userland.
 */
sfxge_ev_stat_handler(SYSCTL_HANDLER_ARGS)
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	sfxge_ev_stat_update(sc);
	return (SYSCTL_OUT(req, &sc->ev_stats[id], sizeof(sc->ev_stats[id])));
/*
 * Register one read-only "ev_<name>" sysctl node per event-queue
 * statistic under the device's stats tree, each backed by
 * sfxge_ev_stat_handler with the stat id as arg2.
 */
sfxge_ev_stat_init(struct sfxge_softc *sc)
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;
	stat_list = SYSCTL_CHILDREN(sc->stats_node);
	for (id = 0; id < EV_NQSTATS; id++) {
		/* Name comes from the common code, prefixed with "ev_". */
		snprintf(name, sizeof(name), "ev_%s",
		    efx_ev_qstat_name(sc->enp, id));
		    OID_AUTO, name, CTLTYPE_U64|CTLFLAG_RD,
		    sc, id, sfxge_ev_stat_handler, "Q",
/*
 * Set the interrupt moderation (in microseconds) on one started event
 * queue via the common code.  The return value of efx_ev_qmoderate()
 * is deliberately ignored; the caller range-checks `us` beforehand
 * (see sfxge_int_mod_handler).
 */
sfxge_ev_qmoderate(struct sfxge_softc *sc, unsigned int idx, unsigned int us)
	struct sfxge_evq *evq;
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq->init_state != SFXGE_EVQ_STARTED"));
	(void)efx_ev_qmoderate(eep, us);
/*
 * Sysctl handler for "int_mod" (interrupt moderation, microseconds).
 * On write: validate the new value against the NIC's maximum, store it
 * in sc->ev_moderation, and if interrupts are running push it to every
 * event queue.  On read: copy the current value out.  Serialized by
 * the softc lock.
 * NOTE(review): sampled view — some body lines are elided.
 */
sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS)
	struct sfxge_softc *sc = arg1;
	struct sfxge_intr *intr = &sc->intr;
	unsigned int moderation;
	sx_xlock(&sc->softc_lock);
	if (req->newptr != NULL) {
		if ((error = SYSCTL_IN(req, &moderation, sizeof(moderation)))
		/* We may not be calling efx_ev_qmoderate() now,
		 * so we have to range-check the value ourselves.
		    efx_nic_cfg_get(sc->enp)->enc_evq_moderation_max) {
		sc->ev_moderation = moderation;
		/* Apply immediately only if interrupts are running. */
		if (intr->state == SFXGE_INTR_STARTED) {
			for (index = 0; index < intr->n_alloc; index++)
				sfxge_ev_qmoderate(sc, index, moderation);
		error = SYSCTL_OUT(req, &sc->ev_moderation,
		    sizeof(sc->ev_moderation));
	sx_xunlock(&sc->softc_lock);
/*
 * eec_initialized callback: the hardware signalled that the event
 * queue is up.  Transition STARTING -> STARTED; sfxge_ev_qstart()
 * polls init_state waiting for this.
 */
sfxge_ev_initialized(void *arg)
	struct sfxge_evq *evq;
	evq = (struct sfxge_evq *)arg;
	KASSERT(evq->init_state == SFXGE_EVQ_STARTING,
	    ("evq not starting"));
	evq->init_state = SFXGE_EVQ_STARTED;
/*
 * eec_link_change callback: forward the new link mode to the MAC
 * layer via sfxge_mac_link_update().
 */
sfxge_ev_link_change(void *arg, efx_link_mode_t link_mode)
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	evq = (struct sfxge_evq *)arg;
	sfxge_mac_link_update(sc, link_mode);
/*
 * Common-code event dispatch table, passed to efx_ev_qpoll() in
 * sfxge_ev_qpoll().  One handler per event type defined above.
 */
static const efx_ev_callbacks_t sfxge_ev_callbacks = {
	.eec_initialized	= sfxge_ev_initialized,
	.eec_rx			= sfxge_ev_rx,
	.eec_tx			= sfxge_ev_tx,
	.eec_exception		= sfxge_ev_exception,
	.eec_rxq_flush_done	= sfxge_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfxge_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfxge_ev_txq_flush_done,
	.eec_software		= sfxge_ev_software,
	.eec_sram		= sfxge_ev_sram,
	.eec_wake_up		= sfxge_ev_wake_up,
	.eec_timer		= sfxge_ev_timer,
	.eec_link_change	= sfxge_ev_link_change,
/*
 * Poll one event queue: under evq->lock, sync the DMA ring for
 * reading, hand events to the common code (which calls back into the
 * sfxge_ev_* handlers above), drain any deferred completion work, and
 * re-prime the queue for interrupts.  Bails out early (second unlock
 * path) if the queue is neither starting nor started.
 * NOTE(review): sampled view — the early-return and error-path lines
 * between the two unlocks are elided.
 */
sfxge_ev_qpoll(struct sfxge_evq *evq)
	mtx_lock(&evq->lock);
	/* Nothing to do unless the queue is (being) started. */
	if (evq->init_state != SFXGE_EVQ_STARTING &&
	    evq->init_state != SFXGE_EVQ_STARTED) {
	/* Synchronize the DMA memory for reading */
	bus_dmamap_sync(evq->mem.esm_tag, evq->mem.esm_map,
	    BUS_DMASYNC_POSTREAD);
	/* Per-poll state must start clean. */
	KASSERT(evq->rx_done == 0, ("evq->rx_done != 0"));
	KASSERT(evq->tx_done == 0, ("evq->tx_done != 0"));
	KASSERT(evq->txq == NULL, ("evq->txq != NULL"));
	KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));
	efx_ev_qpoll(evq->common, &evq->read_ptr, &sfxge_ev_callbacks, evq);
	/* Perform any pending completion processing */
	sfxge_ev_qcomplete(evq, B_TRUE);
	/* Re-prime the event queue for interrupts */
	if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
	mtx_unlock(&evq->lock);
	mtx_unlock(&(evq->lock));
/*
 * Stop one event queue: flip STARTED -> INITIALIZED under evq->lock,
 * clear the exception flag, fold the queue's final statistics into
 * sc->ev_stats, then destroy the common-code queue and release its
 * SRAM buffer-table entries.
 */
sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_evq *evq;
	evq = sc->evq[index];
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq->init_state != SFXGE_EVQ_STARTED"));
	mtx_lock(&evq->lock);
	evq->init_state = SFXGE_EVQ_INITIALIZED;
	evq->exception = B_FALSE;
	/* Add event counts before discarding the common evq state */
	efx_ev_qstats_update(evq->common, sc->ev_stats);
	efx_ev_qdestroy(evq->common);
	efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
	    EFX_EVQ_NBUFS(evq->entries));
	mtx_unlock(&evq->lock);
/*
 * Start one event queue: clear the DMA ring to all-ones (the "empty"
 * pattern), program the SRAM buffer table, create the common-code
 * queue, set default moderation, prime for interrupts, then wait (up
 * to 20 x 100ms) for the hardware's initialized event to flip
 * init_state to STARTED.  On timeout/failure the tail of the function
 * unwinds: state back to INITIALIZED, queue destroyed, buffer table
 * cleared.  Returns 0 on success, errno on failure.
 * NOTE(review): sampled view — goto labels and some error-path lines
 * are elided.
 */
sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_evq *evq;
	evq = sc->evq[index];
	KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
	    ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
	/* Clear all events. */
	(void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
	    EFX_EVQ_NBUFS(evq->entries))) != 0)
	/* Create the common code event queue. */
	if ((rc = efx_ev_qcreate(sc->enp, index, esmp, evq->entries,
	    evq->buf_base_id, &evq->common)) != 0)
	mtx_lock(&evq->lock);
	/* Set the default moderation */
	(void)efx_ev_qmoderate(evq->common, sc->ev_moderation);
	/* Prime the event queue for interrupts */
	if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
	evq->init_state = SFXGE_EVQ_STARTING;
	mtx_unlock(&evq->lock);
	/* Wait for the initialization event */
		/* Pause for 100 ms */
		pause("sfxge evq init", hz / 10);
		/* Check to see if the test event has been processed */
		if (evq->init_state == SFXGE_EVQ_STARTED)
	} while (++count < 20);
	/* Failure unwind: revert state and tear down the queue. */
	mtx_lock(&evq->lock);
	evq->init_state = SFXGE_EVQ_INITIALIZED;
	mtx_unlock(&evq->lock);
	efx_ev_qdestroy(evq->common);
	efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
	    EFX_EVQ_NBUFS(evq->entries));
/*
 * Stop all event queues (iterating down from intr->n_alloc) and then
 * tear down the common-code event module.  Interrupts must still be
 * started when this is called.
 */
sfxge_ev_stop(struct sfxge_softc *sc)
	struct sfxge_intr *intr;
	KASSERT(intr->state == SFXGE_INTR_STARTED,
	    ("Interrupts not started"));
	/* Stop the event queue(s) */
	index = intr->n_alloc;
		sfxge_ev_qstop(sc, index);
	/* Tear down the event module */
/*
 * Start the event subsystem: initialize the common-code event module,
 * then start every event queue.  On any queue-start failure, unwind by
 * stopping the queues already started and tearing down the module.
 * Returns 0 on success, errno on failure.
 * NOTE(review): sampled view — success return and goto labels elided.
 */
sfxge_ev_start(struct sfxge_softc *sc)
	struct sfxge_intr *intr;
	KASSERT(intr->state == SFXGE_INTR_STARTED,
	    ("intr->state != SFXGE_INTR_STARTED"));
	/* Initialize the event module */
	if ((rc = efx_ev_init(sc->enp)) != 0)
	/* Start the event queues */
	for (index = 0; index < intr->n_alloc; index++) {
		if ((rc = sfxge_ev_qstart(sc, index)) != 0)
	/* Stop the event queue(s) */
		sfxge_ev_qstop(sc, index);
	/* Tear down the event module */
	efx_ev_fini(sc->enp);
/*
 * Free one event queue: release its DMA memory, clear the softc slot,
 * destroy its mutex, and (in elided lines) free the evq itself.  The
 * queue must be in the INITIALIZED state with an empty TX completion
 * list.
 */
sfxge_ev_qfini(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_evq *evq;
	evq = sc->evq[index];
	KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
	    ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
	KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));
	sfxge_dma_free(&evq->mem);
	sc->evq[index] = NULL;
	mtx_destroy(&evq->lock);
/*
 * Allocate and initialize one event queue: size the ring (rounded up
 * to a power of two) from the RX and TX queue entry counts — the first
 * event queue serves three TX queues, subsequent ones serve one —
 * then allocate DMA memory and SRAM buffer-table entries, create the
 * evq mutex, and mark the queue INITIALIZED.
 * NOTE(review): sampled view — error handling and the evq->sc/index
 * assignments are elided.
 */
sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_evq *evq;
	KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX"));
	evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK);
	sc->evq[index] = evq;
	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 * There are three tx queues in the first event queue and one in
	    ROUNDUP_POW_OF_TWO(sc->rxq_entries +
			       3 * sc->txq_entries +
	    ROUNDUP_POW_OF_TWO(sc->rxq_entries +
	/* Initialise TX completion list */
	evq->txqs = &evq->txq;
	/* Allocate DMA space. */
	if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(evq->entries), esmp)) != 0)
	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries),
	mtx_init(&evq->lock, "evq", NULL, MTX_DEF);
	evq->init_state = SFXGE_EVQ_INITIALIZED;
/*
 * Tear down the event subsystem: reset the moderation setting and
 * free every event queue (iterating down from intr->n_alloc).
 * Interrupts must be back in the INITIALIZED state.
 */
sfxge_ev_fini(struct sfxge_softc *sc)
	struct sfxge_intr *intr;
	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));
	sc->ev_moderation = 0;
	/* Tear down the event queue(s). */
	index = intr->n_alloc;
		sfxge_ev_qfini(sc, index);
856 sfxge_ev_init(struct sfxge_softc *sc)
858 struct sysctl_ctx_list *sysctl_ctx = device_get_sysctl_ctx(sc->dev);
859 struct sysctl_oid *sysctl_tree = device_get_sysctl_tree(sc->dev);
860 struct sfxge_intr *intr;
866 KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
867 ("intr->state != SFXGE_INTR_INITIALIZED"));
869 /* Set default interrupt moderation; add a sysctl to
870 * read and change it.
872 sc->ev_moderation = 30;
873 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
874 OID_AUTO, "int_mod", CTLTYPE_UINT|CTLFLAG_RW,
875 sc, 0, sfxge_int_mod_handler, "IU",
876 "sfxge interrupt moderation (us)");
879 * Initialize the event queue(s) - one per interrupt.
881 for (index = 0; index < intr->n_alloc; index++) {
882 if ((rc = sfxge_ev_qinit(sc, index)) != 0)
886 sfxge_ev_stat_init(sc);
892 sfxge_ev_qfini(sc, index);