1 /**************************************************************************
3 Copyright (c) 2007-2009, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include "opt_inet6.h"
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/bus_dma.h>
46 #include <sys/queue.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
52 #include <sys/sched.h>
54 #include <sys/systm.h>
55 #include <sys/syslog.h>
56 #include <sys/socket.h>
57 #include <sys/sglist.h>
60 #include <net/ethernet.h>
62 #include <net/if_vlan_var.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in.h>
66 #include <netinet/ip.h>
67 #include <netinet/ip6.h>
68 #include <netinet/tcp.h>
70 #include <dev/pci/pcireg.h>
71 #include <dev/pci/pcivar.h>
76 #include <cxgb_include.h>
80 int multiq_tx_enable = 1;
83 CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
86 extern struct sysctl_oid_list sysctl__hw_cxgb_children;
87 int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
88 TUNABLE_INT("hw.cxgb.txq_mr_size", &cxgb_txq_buf_ring_size);
89 SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
90 "size of per-queue mbuf ring");
92 static int cxgb_tx_coalesce_force = 0;
93 TUNABLE_INT("hw.cxgb.tx_coalesce_force", &cxgb_tx_coalesce_force);
94 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RW,
95 &cxgb_tx_coalesce_force, 0,
96 "coalesce small packets into a single work request regardless of ring state");
98 #define COALESCE_START_DEFAULT (TX_ETH_Q_SIZE>>1)
99 #define COALESCE_START_MAX (TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
100 #define COALESCE_STOP_DEFAULT (TX_ETH_Q_SIZE>>2)
101 #define COALESCE_STOP_MIN (TX_ETH_Q_SIZE>>5)
102 #define TX_RECLAIM_DEFAULT (TX_ETH_Q_SIZE>>5)
103 #define TX_RECLAIM_MAX (TX_ETH_Q_SIZE>>2)
104 #define TX_RECLAIM_MIN (TX_ETH_Q_SIZE>>6)
107 static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
108 TUNABLE_INT("hw.cxgb.tx_coalesce_enable_start",
109 &cxgb_tx_coalesce_enable_start);
110 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RW,
111 &cxgb_tx_coalesce_enable_start, 0,
112 "coalesce enable threshold");
113 static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
114 TUNABLE_INT("hw.cxgb.tx_coalesce_enable_stop", &cxgb_tx_coalesce_enable_stop);
115 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RW,
116 &cxgb_tx_coalesce_enable_stop, 0,
117 "coalesce disable threshold");
118 static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
119 TUNABLE_INT("hw.cxgb.tx_reclaim_threshold", &cxgb_tx_reclaim_threshold);
120 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RW,
121 &cxgb_tx_reclaim_threshold, 0,
122 "tx cleaning minimum threshold");
125 * XXX don't re-enable this until TOE stops assuming
128 static int recycle_enable = 0;
130 extern int cxgb_use_16k_clusters;
131 extern int nmbjumbop;
132 extern int nmbjumbo9;
133 extern int nmbjumbo16;
137 #define SGE_RX_SM_BUF_SIZE 1536
138 #define SGE_RX_DROP_THRES 16
139 #define SGE_RX_COPY_THRES 128
142 * Period of the Tx buffer reclaim timer. This timer does not need to run
143 * frequently as Tx buffers are usually reclaimed by new Tx packets.
145 #define TX_RECLAIM_PERIOD (hz >> 1)
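/*
 * With hz ticks per second, (hz >> 1) is roughly half a second, so idle
 * Tx queues are swept for completed descriptors about twice per second.
 */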
148 * Values for sge_txq.flags
151 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
152 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
156 uint64_t flit[TX_DESC_FLITS];
166 struct rsp_desc { /* response queue descriptor */
167 struct rss_header rss_hdr;
170 uint8_t imm_data[47];
174 #define RX_SW_DESC_MAP_CREATED (1 << 0)
175 #define TX_SW_DESC_MAP_CREATED (1 << 1)
176 #define RX_SW_DESC_INUSE (1 << 3)
177 #define TX_SW_DESC_MAPPED (1 << 4)
179 #define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0)
180 #define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP)
181 #define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP)
182 #define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
184 struct tx_sw_desc { /* SW state per Tx descriptor */
190 struct rx_sw_desc { /* SW state per Rx descriptor */
203 struct refill_fl_cb_arg {
205 bus_dma_segment_t seg;
211 * Maps a number of flits to the number of Tx descriptors that can hold them.
214 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
216 * HW allows up to 4 descriptors to be combined into a WR.
218 static uint8_t flit_desc_map[] = {
220 #if SGE_NUM_GENBITS == 1
221 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
222 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
223 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
224 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
225 #elif SGE_NUM_GENBITS == 2
226 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
227 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
228 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
229 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
231 # error "SGE_NUM_GENBITS must be 1 or 2"
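/*
 * Reading the table above as shown (SGE_NUM_GENBITS == 2): a WR of up to
 * 14 flits fits in a single descriptor, 15-28 flits need two, 29-42 need
 * three, and 43-56 need four -- the 4-descriptor HW limit noted above.
 */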
235 #define TXQ_LOCK_ASSERT(qs) mtx_assert(&(qs)->lock, MA_OWNED)
236 #define TXQ_TRYLOCK(qs) mtx_trylock(&(qs)->lock)
237 #define TXQ_LOCK(qs) mtx_lock(&(qs)->lock)
238 #define TXQ_UNLOCK(qs) mtx_unlock(&(qs)->lock)
239 #define TXQ_RING_EMPTY(qs) drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
240 #define TXQ_RING_NEEDS_ENQUEUE(qs) \
241 drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
242 #define TXQ_RING_FLUSH(qs) drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
243 #define TXQ_RING_DEQUEUE_COND(qs, func, arg) \
244 drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
245 #define TXQ_RING_DEQUEUE(qs) \
246 drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
250 static void sge_timer_cb(void *arg);
251 static void sge_timer_reclaim(void *arg, int ncount);
252 static void sge_txq_reclaim_handler(void *arg, int ncount);
253 static void cxgb_start_locked(struct sge_qset *qs);
256 * XXX need to cope with bursty scheduling by looking at a wider
257 * window than we are now for determining the need for coalescing
260 static __inline uint64_t
261 check_pkt_coalesce(struct sge_qset *qs)
267 if (__predict_false(cxgb_tx_coalesce_force))
269 txq = &qs->txq[TXQ_ETH];
270 sc = qs->port->adapter;
271 fill = &sc->tunq_fill[qs->idx];
273 if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
274 cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
275 if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
276 cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
278 * if the hardware transmit queue has reached the coalesce-enable
279 * threshold we mark it as coalescing - we drop back from coalescing
280 * once it falls below the disable threshold and there are no packets
281 * enqueued, which provides us with some degree of hysteresis
283 if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
284 TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
286 else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
289 return (sc->tunq_coalesce);
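/*
 * Sketch of the default behaviour: with the tunables above left at their
 * defaults, coalescing turns on once the hardware queue has at least
 * TX_ETH_Q_SIZE/2 descriptors in use and turns back off when it drops to
 * TX_ETH_Q_SIZE/4 with nothing left in the software ring.
 */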
294 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
297 #if _BYTE_ORDER == _LITTLE_ENDIAN
299 wr_hilo |= (((uint64_t)wr_lo)<<32);
302 wr_hilo |= (((uint64_t)wr_hi)<<32);
304 wrp->wrh_hilo = wr_hilo;
308 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
317 struct coalesce_info {
323 coalesce_check(struct mbuf *m, void *arg)
325 struct coalesce_info *ci = arg;
326 int *count = &ci->count;
327 int *nbytes = &ci->nbytes;
329 if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
330 (*count < 7) && (m->m_next == NULL))) {
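/*
 * Limits above (assumed rationale): each coalesced packet must be a
 * single contiguous mbuf and contributes two flits plus a shared header
 * to the batch WR built in t3_encap(), so at most seven packets are
 * combined; 10500 bytes corresponds to seven 1500-byte frames.
 */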
339 cxgb_dequeue(struct sge_qset *qs)
341 struct mbuf *m, *m_head, *m_tail;
342 struct coalesce_info ci;
345 if (check_pkt_coalesce(qs) == 0)
346 return TXQ_RING_DEQUEUE(qs);
348 m_head = m_tail = NULL;
349 ci.count = ci.nbytes = 0;
351 m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
352 if (m_head == NULL) {
354 } else if (m != NULL) {
355 m_tail->m_nextpkt = m;
360 panic("trying to coalesce %d packets in to one WR", ci.count);
365 * reclaim_completed_tx - reclaims completed Tx descriptors
366 * @adapter: the adapter
367 * @q: the Tx queue to reclaim completed descriptors from
369 * Reclaims Tx descriptors that the SGE has indicated it has processed,
370 * and frees the associated buffers if possible. Called with the Tx
374 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
376 struct sge_txq *q = &qs->txq[queue];
377 int reclaim = desc_reclaimable(q);
379 if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
380 (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
381 cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
383 if (reclaim < reclaim_min)
386 mtx_assert(&qs->lock, MA_OWNED);
388 t3_free_tx_desc(qs, reclaim, queue);
389 q->cleaned += reclaim;
390 q->in_use -= reclaim;
392 if (isset(&qs->txq_stopped, TXQ_ETH))
393 clrbit(&qs->txq_stopped, TXQ_ETH);
399 * should_restart_tx - are there enough resources to restart a Tx queue?
402 * Checks if there are enough descriptors to restart a suspended Tx queue.
405 should_restart_tx(const struct sge_txq *q)
407 unsigned int r = q->processed - q->cleaned;
409 return q->in_use - r < (q->size >> 1);
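/*
 * q->processed - q->cleaned counts descriptors the hardware has finished
 * with but that have not been reclaimed yet, so the test above restarts
 * the queue once the truly outstanding descriptors drop below half the
 * ring.
 */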
413 * t3_sge_init - initialize SGE
415 * @p: the SGE parameters
417 * Performs SGE initialization needed every time after a chip reset.
418 * We do not initialize any of the queue sets here, instead the driver
419 * top-level must request those individually. We also do not enable DMA
420 * here, that should be done after the queues have been set up.
423 t3_sge_init(adapter_t *adap, struct sge_params *p)
427 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
429 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
430 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
431 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
432 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
433 #if SGE_NUM_GENBITS == 1
434 ctrl |= F_EGRGENCTRL;
436 if (adap->params.rev > 0) {
437 if (!(adap->flags & (USING_MSIX | USING_MSI)))
438 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
440 t3_write_reg(adap, A_SG_CONTROL, ctrl);
441 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
442 V_LORCQDRBTHRSH(512));
443 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
444 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
445 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
446 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
447 adap->params.rev < T3_REV_C ? 1000 : 500);
448 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
449 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
450 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
451 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
452 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
457 * sgl_len - calculates the size of an SGL of the given capacity
458 * @n: the number of SGL entries
460 * Calculates the number of flits needed for a scatter/gather list that
461 * can hold the given number of entries.
463 static __inline unsigned int
464 sgl_len(unsigned int n)
466 return ((3 * n) / 2 + (n & 1));
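/*
 * Each sg_ent packs two address/length pairs into 3 flits, so for example
 * sgl_len(1) == 2, sgl_len(2) == 3, and sgl_len(3) == 5.
 */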
470 * get_imm_packet - return the next ingress packet buffer from a response
471 * @resp: the response descriptor containing the packet data
473 * Return a packet containing the immediate data of the given response.
476 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
479 if (resp->rss_hdr.opcode == CPL_RX_DATA) {
480 const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
481 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
482 } else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
483 const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
484 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
486 m->m_len = IMMED_PKT_SIZE;
487 m->m_ext.ext_buf = NULL;
488 m->m_ext.ext_type = 0;
489 memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
493 static __inline u_int
494 flits_to_desc(u_int n)
496 return (flit_desc_map[n]);
499 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
500 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
501 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
502 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
504 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
505 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
509 * t3_sge_err_intr_handler - SGE async event interrupt handler
510 * @adapter: the adapter
512 * Interrupt handler for SGE asynchronous (non-data) events.
515 t3_sge_err_intr_handler(adapter_t *adapter)
517 unsigned int v, status;
519 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
520 if (status & SGE_PARERR)
521 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
522 status & SGE_PARERR);
523 if (status & SGE_FRAMINGERR)
524 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
525 status & SGE_FRAMINGERR);
526 if (status & F_RSPQCREDITOVERFOW)
527 CH_ALERT(adapter, "SGE response queue credit overflow\n");
529 if (status & F_RSPQDISABLED) {
530 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
533 "packet delivered to disabled response queue (0x%x)\n",
534 (v >> S_RSPQ0DISABLED) & 0xff);
537 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
538 if (status & SGE_FATALERR)
539 t3_fatal_err(adapter);
543 t3_sge_prep(adapter_t *adap, struct sge_params *p)
545 int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
547 nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
548 nqsets *= adap->params.nports;
550 fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
552 while (!powerof2(fl_q_size))
555 use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
558 #if __FreeBSD_version >= 700111
560 jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
561 jumbo_buf_size = MJUM16BYTES;
563 jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
564 jumbo_buf_size = MJUM9BYTES;
567 jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
568 jumbo_buf_size = MJUMPAGESIZE;
570 while (!powerof2(jumbo_q_size))
573 if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
574 device_printf(adap->dev,
575 "Insufficient clusters and/or jumbo buffers.\n");
577 p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
579 for (i = 0; i < SGE_QSETS; ++i) {
580 struct qset_params *q = p->qset + i;
582 if (adap->params.nports > 2) {
583 q->coalesce_usecs = 50;
586 q->coalesce_usecs = 10;
588 q->coalesce_usecs = 5;
592 q->rspq_size = RSPQ_Q_SIZE;
593 q->fl_size = fl_q_size;
594 q->jumbo_size = jumbo_q_size;
595 q->jumbo_buf_size = jumbo_buf_size;
596 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
597 q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
598 q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
604 t3_sge_alloc(adapter_t *sc)
607 /* The parent tag. */
608 if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
609 1, 0, /* algnmnt, boundary */
610 BUS_SPACE_MAXADDR, /* lowaddr */
611 BUS_SPACE_MAXADDR, /* highaddr */
612 NULL, NULL, /* filter, filterarg */
613 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
614 BUS_SPACE_UNRESTRICTED, /* nsegments */
615 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
617 NULL, NULL, /* lock, lockarg */
619 device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
624 * DMA tag for normal sized RX frames
626 if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
627 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
628 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
629 device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
634 * DMA tag for jumbo sized RX frames.
636 if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
637 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
638 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
639 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
644 * DMA tag for TX frames.
646 if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
647 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
648 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
649 NULL, NULL, &sc->tx_dmat)) {
650 device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
658 t3_sge_free(struct adapter * sc)
661 if (sc->tx_dmat != NULL)
662 bus_dma_tag_destroy(sc->tx_dmat);
664 if (sc->rx_jumbo_dmat != NULL)
665 bus_dma_tag_destroy(sc->rx_jumbo_dmat);
667 if (sc->rx_dmat != NULL)
668 bus_dma_tag_destroy(sc->rx_dmat);
670 if (sc->parent_dmat != NULL)
671 bus_dma_tag_destroy(sc->parent_dmat);
677 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
680 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
681 qs->rspq.polling = 0 /* p->polling */;
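/*
 * The holdoff value above is in ~0.1us units: A_SG_TIMER_TICK is
 * programmed to core_ticks_per_usec()/10 in t3_sge_init(), hence the
 * factor of 10 applied to the microsecond setting.
 */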
684 #if !defined(__i386__) && !defined(__amd64__)
686 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
688 struct refill_fl_cb_arg *cb_arg = arg;
690 cb_arg->error = error;
691 cb_arg->seg = segs[0];
697 * refill_fl - refill an SGE free-buffer list
698 * @sc: the controller softc
699 * @q: the free-list to refill
700 * @n: the number of new buffers to allocate
702 * (Re)populate an SGE free-buffer list with up to @n new packet buffers.
703 * The caller must assure that @n does not exceed the queue's capacity.
706 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
708 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
709 struct rx_desc *d = &q->desc[q->pidx];
710 struct refill_fl_cb_arg cb_arg;
718 * We allocate an uninitialized mbuf + cluster, mbuf is
719 * initialized after rx.
721 if (q->zone == zone_pack) {
722 if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
724 cl = m->m_ext.ext_buf;
726 if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
728 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
729 uma_zfree(q->zone, cl);
733 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
734 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
735 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
736 uma_zfree(q->zone, cl);
739 sd->flags |= RX_SW_DESC_MAP_CREATED;
741 #if !defined(__i386__) && !defined(__amd64__)
742 err = bus_dmamap_load(q->entry_tag, sd->map,
743 cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
745 if (err != 0 || cb_arg.error) {
746 if (q->zone == zone_pack)
747 uma_zfree(q->zone, cl);
752 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
754 sd->flags |= RX_SW_DESC_INUSE;
757 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
758 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
759 d->len_gen = htobe32(V_FLD_GEN1(q->gen));
760 d->gen2 = htobe32(V_FLD_GEN2(q->gen));
765 if (++q->pidx == q->size) {
776 if (q->db_pending >= 32) {
778 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
784 * free_rx_bufs - free the Rx buffers on an SGE free list
785 * @sc: the controller softc
786 * @q: the SGE free list to clean up
788 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
789 * this queue should be stopped before calling this function.
792 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
794 u_int cidx = q->cidx;
796 while (q->credits--) {
797 struct rx_sw_desc *d = &q->sdesc[cidx];
799 if (d->flags & RX_SW_DESC_INUSE) {
800 bus_dmamap_unload(q->entry_tag, d->map);
801 bus_dmamap_destroy(q->entry_tag, d->map);
802 if (q->zone == zone_pack) {
803 m_init(d->m, zone_pack, MCLBYTES,
804 M_NOWAIT, MT_DATA, M_EXT);
805 uma_zfree(zone_pack, d->m);
807 m_init(d->m, zone_mbuf, MLEN,
808 M_NOWAIT, MT_DATA, 0);
809 uma_zfree(zone_mbuf, d->m);
810 uma_zfree(q->zone, d->rxsd_cl);
816 if (++cidx == q->size)
822 __refill_fl(adapter_t *adap, struct sge_fl *fl)
824 refill_fl(adap, fl, min(16U, fl->size - fl->credits));
828 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
830 uint32_t reclaimable = fl->size - fl->credits;
833 refill_fl(adap, fl, min(max, reclaimable));
837 * recycle_rx_buf - recycle a receive buffer
838 * @adapter: the adapter
839 * @q: the SGE free list
840 * @idx: index of buffer to recycle
842 * Recycles the specified buffer on the given free list by adding it at
843 * the next available slot on the list.
846 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
848 struct rx_desc *from = &q->desc[idx];
849 struct rx_desc *to = &q->desc[q->pidx];
851 q->sdesc[q->pidx] = q->sdesc[idx];
852 to->addr_lo = from->addr_lo; // already big endian
853 to->addr_hi = from->addr_hi; // likewise
854 wmb(); /* necessary ? */
855 to->len_gen = htobe32(V_FLD_GEN1(q->gen));
856 to->gen2 = htobe32(V_FLD_GEN2(q->gen));
859 if (++q->pidx == q->size) {
863 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
867 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
872 *addr = segs[0].ds_addr;
876 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
877 bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
878 bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
880 size_t len = nelem * elem_size;
885 if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
886 BUS_SPACE_MAXADDR_32BIT,
887 BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
888 len, 0, NULL, NULL, tag)) != 0) {
889 device_printf(sc->dev, "Cannot allocate descriptor tag\n");
893 if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
895 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
899 bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
904 len = nelem * sw_size;
905 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
908 if (parent_entry_tag == NULL)
911 if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
912 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
913 NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
914 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
915 NULL, NULL, entry_tag)) != 0) {
916 device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
923 sge_slow_intr_handler(void *arg, int ncount)
927 t3_slow_intr_handler(sc);
928 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
929 (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
933 * sge_timer_cb - perform periodic maintenance of an SGE qset
934 * @data: the SGE queue set to maintain
936 * Runs periodically from a timer to perform maintenance of an SGE queue
937 * set. It performs the following tasks:
939 * a) Cleans up any completed Tx descriptors that may still be pending.
940 * Normal descriptor cleanup happens when new packets are added to a Tx
941 * queue so this timer is relatively infrequent and does any cleanup only
942 * if the Tx queue has not seen any new packets in a while. We make a
943 * best effort attempt to reclaim descriptors, in that we don't wait
944 * around if we cannot get a queue's lock (which most likely is because
945 * someone else is queueing new packets and so will also handle the clean
946 * up). Since control queues use immediate data exclusively we don't
947 * bother cleaning them up here.
949 * b) Replenishes Rx queues that have run out due to memory shortage.
950 * Normally new Rx buffers are added when existing ones are consumed but
951 * when out of memory a queue can become empty. We try to add only a few
952 * buffers here, the queue will be replenished fully as these new buffers
953 * are used up if memory shortage has subsided.
955 * c) Return coalesced response queue credits in case a response queue is
958 * d) Ring doorbells for T304 tunnel queues since we have seen doorbell
959 * fifo overflows and the FW doesn't implement any recovery scheme yet.
962 sge_timer_cb(void *arg)
965 if ((sc->flags & USING_MSIX) == 0) {
967 struct port_info *pi;
971 int reclaim_ofl, refill_rx;
973 if (sc->open_device_map == 0)
976 for (i = 0; i < sc->params.nports; i++) {
978 for (j = 0; j < pi->nqsets; j++) {
979 qs = &sc->sge.qs[pi->first_qset + j];
981 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
982 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
983 (qs->fl[1].credits < qs->fl[1].size));
984 if (reclaim_ofl || refill_rx) {
985 taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
992 if (sc->params.nports > 2) {
995 for_each_port(sc, i) {
996 struct port_info *pi = &sc->port[i];
998 t3_write_reg(sc, A_SG_KDOORBELL,
1000 (FW_TUNNEL_SGEEC_START + pi->first_qset));
1003 if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
1004 sc->open_device_map != 0)
1005 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1009 * This is meant to be a catch-all function to keep sge state private
1014 t3_sge_init_adapter(adapter_t *sc)
1016 callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
1017 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1018 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
1023 t3_sge_reset_adapter(adapter_t *sc)
1025 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1030 t3_sge_init_port(struct port_info *pi)
1032 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
1037 * refill_rspq - replenish an SGE response queue
1038 * @adapter: the adapter
1039 * @q: the response queue to replenish
1040 * @credits: how many new responses to make available
1042 * Replenishes a response queue by making the supplied number of responses
1045 static __inline void
1046 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
1049 /* mbufs are allocated on demand when a rspq entry is processed. */
1050 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
1051 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
1055 sge_txq_reclaim_handler(void *arg, int ncount)
1057 struct sge_qset *qs = arg;
1060 for (i = 0; i < 3; i++)
1061 reclaim_completed_tx(qs, 16, i);
1065 sge_timer_reclaim(void *arg, int ncount)
1067 struct port_info *pi = arg;
1068 int i, nqsets = pi->nqsets;
1069 adapter_t *sc = pi->adapter;
1070 struct sge_qset *qs;
1073 KASSERT((sc->flags & USING_MSIX) == 0,
1074 ("can't call timer reclaim for msi-x"));
1076 for (i = 0; i < nqsets; i++) {
1077 qs = &sc->sge.qs[pi->first_qset + i];
1079 reclaim_completed_tx(qs, 16, TXQ_OFLD);
1080 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1081 &sc->sge.qs[0].rspq.lock;
1083 if (mtx_trylock(lock)) {
1084 /* XXX currently assume that we are *NOT* polling */
1085 uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
1087 if (qs->fl[0].credits < qs->fl[0].size - 16)
1088 __refill_fl(sc, &qs->fl[0]);
1089 if (qs->fl[1].credits < qs->fl[1].size - 16)
1090 __refill_fl(sc, &qs->fl[1]);
1092 if (status & (1 << qs->rspq.cntxt_id)) {
1093 if (qs->rspq.credits) {
1094 refill_rspq(sc, &qs->rspq, 1);
1096 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
1097 1 << qs->rspq.cntxt_id);
1106 * init_qset_cntxt - initialize an SGE queue set context info
1107 * @qs: the queue set
1108 * @id: the queue set id
1110 * Initializes the TIDs and context ids for the queues of a queue set.
1113 init_qset_cntxt(struct sge_qset *qs, u_int id)
1116 qs->rspq.cntxt_id = id;
1117 qs->fl[0].cntxt_id = 2 * id;
1118 qs->fl[1].cntxt_id = 2 * id + 1;
1119 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1120 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1121 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1122 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1123 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1125 mbufq_init(&qs->txq[TXQ_ETH].sendq);
1126 mbufq_init(&qs->txq[TXQ_OFLD].sendq);
1127 mbufq_init(&qs->txq[TXQ_CTRL].sendq);
1132 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
1134 txq->in_use += ndesc;
1136 * XXX we don't handle stopping of queue
1137 * presumably start handles this when we bump against the end
1139 txqs->gen = txq->gen;
1140 txq->unacked += ndesc;
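/*
 * The line below keys F_WR_COMPL off bit 5 of the running unacked count,
 * requesting SGE completions (credit updates) periodically without
 * needing an extra branch; S_WR_COMPL - 5 moves that bit into position.
 */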
1141 txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
1143 txqs->pidx = txq->pidx;
1146 if (((txqs->pidx > txq->cidx) &&
1147 (txq->pidx < txqs->pidx) &&
1148 (txq->pidx >= txq->cidx)) ||
1149 ((txqs->pidx < txq->cidx) &&
1150 (txq->pidx >= txq->cidx)) ||
1151 ((txqs->pidx < txq->cidx) &&
1152 (txq->cidx < txqs->pidx)))
1153 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1154 txqs->pidx, txq->pidx, txq->cidx);
1156 if (txq->pidx >= txq->size) {
1157 txq->pidx -= txq->size;
1164 * calc_tx_descs - calculate the number of Tx descriptors for a packet
1165 * @m: the packet mbufs
1166 * @nsegs: the number of segments
1168 * Returns the number of Tx descriptors needed for the given Ethernet
1169 * packet. Ethernet packets require addition of WR and CPL headers.
1171 static __inline unsigned int
1172 calc_tx_descs(const struct mbuf *m, int nsegs)
1176 if (m->m_pkthdr.len <= PIO_LEN)
1179 flits = sgl_len(nsegs) + 2;
1180 if (m->m_pkthdr.csum_flags & CSUM_TSO)
1183 return flits_to_desc(flits);
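/*
 * Example: a non-TSO packet larger than PIO_LEN with 4 DMA segments needs
 * sgl_len(4) + 2 = 8 flits, which flit_desc_map maps to one descriptor.
 */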
1187 * make_sgl - populate a scatter/gather list for a packet
1188 * @sgp: the SGL to populate
1189 * @segs: the packet dma segments
1190 * @nsegs: the number of segments
1192 * Generates a scatter/gather list for the buffers that make up a packet
1193 * and returns the SGL size in 8-byte words. The caller must size the SGL
1196 static __inline void
1197 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1201 for (idx = 0, i = 0; i < nsegs; i++) {
1203 * firmware doesn't like empty segments
1205 if (segs[i].ds_len == 0)
1210 sgp->len[idx] = htobe32(segs[i].ds_len);
1211 sgp->addr[idx] = htobe64(segs[i].ds_addr);
1222 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1223 * @adap: the adapter
1226 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
1227 * where the HW is going to sleep just after we checked, however,
1228 * then the interrupt handler will detect the outstanding TX packet
1229 * and ring the doorbell for us.
1231 * When GTS is disabled we ring the doorbell ourselves, batching writes unless @mustring forces an immediate ring.
1233 static __inline void
1234 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1237 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1238 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1239 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1241 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1244 t3_write_reg(adap, A_SG_KDOORBELL,
1245 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1248 if (mustring || ++q->db_pending >= 32) {
1249 wmb(); /* write descriptors before telling HW */
1250 t3_write_reg(adap, A_SG_KDOORBELL,
1251 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1257 static __inline void
1258 wr_gen2(struct tx_desc *d, unsigned int gen)
1260 #if SGE_NUM_GENBITS == 2
1261 d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1266 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1267 * @ndesc: number of Tx descriptors spanned by the SGL
1268 * @txd: first Tx descriptor to be written
1269 * @txqs: txq state (generation and producer index)
1270 * @txq: the SGE Tx queue
1272 * @flits: number of flits to the start of the SGL in the first descriptor
1273 * @sgl_flits: the SGL size in flits
1274 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1275 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1277 * Write a work request header and an associated SGL. If the SGL is
1278 * small enough to fit into one Tx descriptor it has already been written
1279 * and we just need to write the WR header. Otherwise we distribute the
1280 * SGL across the number of descriptors it spans.
1283 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1284 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1285 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1288 struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1289 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1291 if (__predict_true(ndesc == 1)) {
1292 set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1293 V_WR_SGLSFLT(flits)) | wr_hi,
1294 htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1297 wr_gen2(txd, txqs->gen);
1300 unsigned int ogen = txqs->gen;
1301 const uint64_t *fp = (const uint64_t *)sgl;
1302 struct work_request_hdr *wp = wrp;
1304 wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1305 V_WR_SGLSFLT(flits)) | wr_hi;
1308 unsigned int avail = WR_FLITS - flits;
1310 if (avail > sgl_flits)
1312 memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1321 if (++txqs->pidx == txq->size) {
1329 * when the head of the mbuf chain
1330 * is freed all clusters will be freed
1333 wrp = (struct work_request_hdr *)txd;
1334 wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1335 V_WR_SGLSFLT(1)) | wr_hi;
1336 wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1338 V_WR_GEN(txqs->gen)) | wr_lo;
1339 wr_gen2(txd, txqs->gen);
1342 wrp->wrh_hi |= htonl(F_WR_EOP);
1344 wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1345 wr_gen2((struct tx_desc *)wp, ogen);
1349 /* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1350 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
1352 #define GET_VTAG(cntrl, m) \
1354 if ((m)->m_flags & M_VLANTAG) \
1355 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1359 t3_encap(struct sge_qset *qs, struct mbuf **m)
1363 struct sge_txq *txq;
1364 struct txq_state txqs;
1365 struct port_info *pi;
1366 unsigned int ndesc, flits, cntrl, mlen;
1367 int err, nsegs, tso_info = 0;
1369 struct work_request_hdr *wrp;
1370 struct tx_sw_desc *txsd;
1371 struct sg_ent *sgp, *sgl;
1372 uint32_t wr_hi, wr_lo, sgl_flits;
1373 bus_dma_segment_t segs[TX_MAX_SEGS];
1375 struct tx_desc *txd;
1379 txq = &qs->txq[TXQ_ETH];
1380 txd = &txq->desc[txq->pidx];
1381 txsd = &txq->sdesc[txq->pidx];
1387 mtx_assert(&qs->lock, MA_OWNED);
1388 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1389 KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));
1391 if (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1392 m0->m_pkthdr.csum_flags & (CSUM_TSO))
1393 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1395 if (m0->m_nextpkt != NULL) {
1396 busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1400 if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1401 &m0, segs, &nsegs))) {
1403 printf("failed ... err=%d\n", err);
1406 mlen = m0->m_pkthdr.len;
1407 ndesc = calc_tx_descs(m0, nsegs);
1409 txq_prod(txq, ndesc, &txqs);
1411 KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1414 if (m0->m_nextpkt != NULL) {
1415 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1419 panic("trying to coalesce %d packets in to one WR", nsegs);
1420 txq->txq_coalesced += nsegs;
1421 wrp = (struct work_request_hdr *)txd;
1422 flits = nsegs*2 + 1;
1424 for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1425 struct cpl_tx_pkt_batch_entry *cbe;
1427 uint32_t *hflit = (uint32_t *)&flit;
1428 int cflags = m0->m_pkthdr.csum_flags;
1430 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1431 GET_VTAG(cntrl, m0);
1432 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1433 if (__predict_false(!(cflags & CSUM_IP)))
1434 cntrl |= F_TXPKT_IPCSUM_DIS;
1435 if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP))))
1436 cntrl |= F_TXPKT_L4CSUM_DIS;
1438 hflit[0] = htonl(cntrl);
1439 hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1440 flit |= htobe64(1 << 24);
1441 cbe = &cpl_batch->pkt_entry[i];
1442 cbe->cntrl = hflit[0];
1443 cbe->len = hflit[1];
1444 cbe->addr = htobe64(segs[i].ds_addr);
1447 wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1448 V_WR_SGLSFLT(flits)) |
1449 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1450 wr_lo = htonl(V_WR_LEN(flits) |
1451 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1452 set_wr_hdr(wrp, wr_hi, wr_lo);
1454 ETHER_BPF_MTAP(pi->ifp, m0);
1455 wr_gen2(txd, txqs.gen);
1456 check_ring_tx_db(sc, txq, 0);
1458 } else if (tso_info) {
1460 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1461 struct ether_header *eh;
1466 GET_VTAG(cntrl, m0);
1467 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1468 hdr->cntrl = htonl(cntrl);
1469 hdr->len = htonl(mlen | 0x80000000);
1471 if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1472 printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%#x,flags=%#x",
1473 m0, mlen, m0->m_pkthdr.tso_segsz,
1474 m0->m_pkthdr.csum_flags, m0->m_flags);
1475 panic("tx tso packet too small");
1478 /* Make sure that ether, ip, tcp headers are all in m0 */
1479 if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1480 m0 = m_pullup(m0, TCPPKTHDRSIZE);
1481 if (__predict_false(m0 == NULL)) {
1482 /* XXX panic probably an overreaction */
1483 panic("couldn't fit header into mbuf");
1487 eh = mtod(m0, struct ether_header *);
1488 eth_type = eh->ether_type;
1489 if (eth_type == htons(ETHERTYPE_VLAN)) {
1490 struct ether_vlan_header *evh = (void *)eh;
1492 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1494 eth_type = evh->evl_proto;
1496 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1500 if (eth_type == htons(ETHERTYPE_IP)) {
1501 struct ip *ip = l3hdr;
1503 tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1504 tcp = (struct tcphdr *)(ip + 1);
1505 } else if (eth_type == htons(ETHERTYPE_IPV6)) {
1506 struct ip6_hdr *ip6 = l3hdr;
1508 KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1509 ("%s: CSUM_TSO with ip6_nxt %d",
1510 __func__, ip6->ip6_nxt));
1512 tso_info |= F_LSO_IPV6;
1513 tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1514 tcp = (struct tcphdr *)(ip6 + 1);
1516 panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1518 tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1519 hdr->lso_info = htonl(tso_info);
1521 if (__predict_false(mlen <= PIO_LEN)) {
1523 * pkt not undersized but fits in PIO_LEN
1524 * Indicates a TSO bug at the higher levels.
1527 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1528 flits = (mlen + 7) / 8 + 3;
1529 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1530 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1531 F_WR_SOP | F_WR_EOP | txqs.compl);
1532 wr_lo = htonl(V_WR_LEN(flits) |
1533 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1534 set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1536 ETHER_BPF_MTAP(pi->ifp, m0);
1537 wr_gen2(txd, txqs.gen);
1538 check_ring_tx_db(sc, txq, 0);
1544 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1546 GET_VTAG(cntrl, m0);
1547 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1548 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1549 cntrl |= F_TXPKT_IPCSUM_DIS;
1550 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))))
1551 cntrl |= F_TXPKT_L4CSUM_DIS;
1552 cpl->cntrl = htonl(cntrl);
1553 cpl->len = htonl(mlen | 0x80000000);
1555 if (mlen <= PIO_LEN) {
1557 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1558 flits = (mlen + 7) / 8 + 2;
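/*
 * Immediate-data (PIO) path: the frame is copied into the descriptor
 * after the two CPL header flits and sent as immediate data rather than
 * pointed to by an SGL; flits counts the header plus the payload rounded
 * up to 8-byte flits.
 */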
1560 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1561 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1562 F_WR_SOP | F_WR_EOP | txqs.compl);
1563 wr_lo = htonl(V_WR_LEN(flits) |
1564 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1565 set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1567 ETHER_BPF_MTAP(pi->ifp, m0);
1568 wr_gen2(txd, txqs.gen);
1569 check_ring_tx_db(sc, txq, 0);
1575 wrp = (struct work_request_hdr *)txd;
1576 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1577 make_sgl(sgp, segs, nsegs);
1579 sgl_flits = sgl_len(nsegs);
1581 ETHER_BPF_MTAP(pi->ifp, m0);
1583 KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1584 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1585 wr_lo = htonl(V_WR_TID(txq->token));
1586 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1587 sgl_flits, wr_hi, wr_lo);
1588 check_ring_tx_db(sc, txq, 0);
1594 cxgb_tx_watchdog(void *arg)
1596 struct sge_qset *qs = arg;
1597 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1599 if (qs->coalescing != 0 &&
1600 (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1603 else if (qs->coalescing == 0 &&
1604 (txq->in_use >= cxgb_tx_coalesce_enable_start))
1606 if (TXQ_TRYLOCK(qs)) {
1607 qs->qs_flags |= QS_FLUSHING;
1608 cxgb_start_locked(qs);
1609 qs->qs_flags &= ~QS_FLUSHING;
1612 if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1613 callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1614 qs, txq->txq_watchdog.c_cpu);
1618 cxgb_tx_timeout(void *arg)
1620 struct sge_qset *qs = arg;
1621 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1623 if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1625 if (TXQ_TRYLOCK(qs)) {
1626 qs->qs_flags |= QS_TIMEOUT;
1627 cxgb_start_locked(qs);
1628 qs->qs_flags &= ~QS_TIMEOUT;
1634 cxgb_start_locked(struct sge_qset *qs)
1636 struct mbuf *m_head = NULL;
1637 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1638 struct port_info *pi = qs->port;
1639 struct ifnet *ifp = pi->ifp;
1641 if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1642 reclaim_completed_tx(qs, 0, TXQ_ETH);
1644 if (!pi->link_config.link_ok) {
1648 TXQ_LOCK_ASSERT(qs);
1649 while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1650 pi->link_config.link_ok) {
1651 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1653 if (txq->size - txq->in_use <= TX_MAX_DESC)
1656 if ((m_head = cxgb_dequeue(qs)) == NULL)
1659 * Encapsulation can modify our pointer, and/or make it
1660 * NULL on failure. In that event, we can't requeue.
1662 if (t3_encap(qs, &m_head) || m_head == NULL)
1668 if (txq->db_pending)
1669 check_ring_tx_db(pi->adapter, txq, 1);
1671 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1672 pi->link_config.link_ok)
1673 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1674 qs, txq->txq_timer.c_cpu);
1680 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1682 struct port_info *pi = qs->port;
1683 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1684 struct buf_ring *br = txq->txq_mr;
1687 avail = txq->size - txq->in_use;
1688 TXQ_LOCK_ASSERT(qs);
1691 * We can only do a direct transmit if the following are true:
1692 * - we aren't coalescing (ring < 3/4 full)
1693 * - the link is up -- checked in caller
1694 * - there are no packets enqueued already
1695 * - there is space in hardware transmit queue
1697 if (check_pkt_coalesce(qs) == 0 &&
1698 !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1699 if (t3_encap(qs, &m)) {
1701 (error = drbr_enqueue(ifp, br, m)) != 0)
1704 if (txq->db_pending)
1705 check_ring_tx_db(pi->adapter, txq, 1);
1708 * We've bypassed the buf ring so we need to update
1709 * the stats directly
1711 txq->txq_direct_packets++;
1712 txq->txq_direct_bytes += m->m_pkthdr.len;
1714 } else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1717 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1718 if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1719 (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1720 cxgb_start_locked(qs);
1721 else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1722 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1723 qs, txq->txq_timer.c_cpu);
1728 cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1730 struct sge_qset *qs;
1731 struct port_info *pi = ifp->if_softc;
1732 int error, qidx = pi->first_qset;
1734 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
1735 || (!pi->link_config.link_ok)) {
1740 if (m->m_flags & M_FLOWID)
1741 qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1743 qs = &pi->adapter->sge.qs[qidx];
1745 if (TXQ_TRYLOCK(qs)) {
1747 error = cxgb_transmit_locked(ifp, qs, m);
1750 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1755 cxgb_qflush(struct ifnet *ifp)
1758 * flush any enqueued mbufs in the buf_rings
1759 * and in the transmit queues
1766 * write_imm - write a packet into a Tx descriptor as immediate data
1767 * @d: the Tx descriptor to write
1769 * @len: the length of packet data to write as immediate data
1770 * @gen: the generation bit value to write
1772 * Writes a packet as immediate data into a Tx descriptor. The packet
1773 * contains a work request at its beginning. We must write the packet
1774 * carefully so the SGE doesn't read accidentally before it's written in
1777 static __inline void
1778 write_imm(struct tx_desc *d, caddr_t src,
1779 unsigned int len, unsigned int gen)
1781 struct work_request_hdr *from = (struct work_request_hdr *)src;
1782 struct work_request_hdr *to = (struct work_request_hdr *)d;
1783 uint32_t wr_hi, wr_lo;
1785 KASSERT(len <= WR_LEN && len >= sizeof(*from),
1786 ("%s: invalid len %d", __func__, len));
1788 memcpy(&to[1], &from[1], len - sizeof(*from));
1789 wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1790 V_WR_BCNTLFLT(len & 7));
1791 wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
1792 set_wr_hdr(to, wr_hi, wr_lo);
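/*
 * Above, V_WR_BCNTLFLT(len & 7) records the residual bytes in the last
 * flit and V_WR_LEN((len + 7) / 8) the total length in flits, so the SGE
 * knows exactly how many bytes of immediate data follow the WR header.
 */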
1798 * check_desc_avail - check descriptor availability on a send queue
1799 * @adap: the adapter
1801 * @m: the packet needing the descriptors
1802 * @ndesc: the number of Tx descriptors needed
1803 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1805 * Checks if the requested number of Tx descriptors is available on an
1806 * SGE send queue. If the queue is already suspended or not enough
1807 * descriptors are available the packet is queued for later transmission.
1808 * Must be called with the Tx queue locked.
1810 * Returns 0 if enough descriptors are available, 1 if there aren't
1811 * enough descriptors and the packet has been queued, and 2 if the caller
1812 * needs to retry because there weren't enough descriptors at the
1813 * beginning of the call but some freed up in the mean time.
1816 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1817 struct mbuf *m, unsigned int ndesc,
1821 * XXX We currently only use this for checking the control queue
1822 * the control queue is only used for binding qsets which happens
1823 * at init time so we are guaranteed enough descriptors
1825 if (__predict_false(!mbufq_empty(&q->sendq))) {
1826 addq_exit: mbufq_tail(&q->sendq, m);
1829 if (__predict_false(q->size - q->in_use < ndesc)) {
1831 struct sge_qset *qs = txq_to_qset(q, qid);
1833 setbit(&qs->txq_stopped, qid);
1834 if (should_restart_tx(q) &&
1835 test_and_clear_bit(qid, &qs->txq_stopped))
1846 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1847 * @q: the SGE control Tx queue
1849 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1850 * that send only immediate data (presently just the control queues) and
1851 * thus do not have any mbufs
1853 static __inline void
1854 reclaim_completed_tx_imm(struct sge_txq *q)
1856 unsigned int reclaim = q->processed - q->cleaned;
1858 q->in_use -= reclaim;
1859 q->cleaned += reclaim;
1863 * ctrl_xmit - send a packet through an SGE control Tx queue
1864 * @adap: the adapter
1865 * @q: the control queue
1868 * Send a packet through an SGE control Tx queue. Packets sent through
1869 * a control queue must fit entirely as immediate data in a single Tx
1870 * descriptor and have no page fragments.
1873 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1876 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1877 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1879 KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1881 wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1882 wrp->wrh_lo = htonl(V_WR_TID(q->token));
1885 again: reclaim_completed_tx_imm(q);
1887 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1888 if (__predict_false(ret)) {
1895 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1898 if (++q->pidx >= q->size) {
1904 t3_write_reg(adap, A_SG_KDOORBELL,
1905 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1913 * restart_ctrlq - restart a suspended control queue
1914 * @qs: the queue set containing the control queue
1916 * Resumes transmission on a suspended Tx control queue.
1919 restart_ctrlq(void *data, int npending)
1922 struct sge_qset *qs = (struct sge_qset *)data;
1923 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1924 adapter_t *adap = qs->port->adapter;
1927 again: reclaim_completed_tx_imm(q);
1929 while (q->in_use < q->size &&
1930 (m = mbufq_dequeue(&q->sendq)) != NULL) {
1932 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1935 if (++q->pidx >= q->size) {
1941 if (!mbufq_empty(&q->sendq)) {
1942 setbit(&qs->txq_stopped, TXQ_CTRL);
1944 if (should_restart_tx(q) &&
1945 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1950 t3_write_reg(adap, A_SG_KDOORBELL,
1951 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1956 * Send a management message through control queue 0
1959 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1961 return ctrl_xmit(adap, &adap->sge.qs[0], m);
1965 * free_qset - free the resources of an SGE queue set
1966 * @sc: the controller owning the queue set
1969 * Release the HW and SW resources associated with an SGE queue set, such
1970 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
1971 * queue set must be quiesced prior to calling this.
1974 t3_free_qset(adapter_t *sc, struct sge_qset *q)
1978 reclaim_completed_tx(q, 0, TXQ_ETH);
1979 if (q->txq[TXQ_ETH].txq_mr != NULL)
1980 buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
1981 if (q->txq[TXQ_ETH].txq_ifq != NULL) {
1982 ifq_delete(q->txq[TXQ_ETH].txq_ifq);
1983 free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
1986 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1987 if (q->fl[i].desc) {
1988 mtx_lock_spin(&sc->sge.reg_lock);
1989 t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1990 mtx_unlock_spin(&sc->sge.reg_lock);
1991 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1992 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1994 bus_dma_tag_destroy(q->fl[i].desc_tag);
1995 bus_dma_tag_destroy(q->fl[i].entry_tag);
1997 if (q->fl[i].sdesc) {
1998 free_rx_bufs(sc, &q->fl[i]);
1999 free(q->fl[i].sdesc, M_DEVBUF);
2003 mtx_unlock(&q->lock);
2004 MTX_DESTROY(&q->lock);
2005 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2006 if (q->txq[i].desc) {
2007 mtx_lock_spin(&sc->sge.reg_lock);
2008 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2009 mtx_unlock_spin(&sc->sge.reg_lock);
2010 bus_dmamap_unload(q->txq[i].desc_tag,
2011 q->txq[i].desc_map);
2012 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2013 q->txq[i].desc_map);
2014 bus_dma_tag_destroy(q->txq[i].desc_tag);
2015 bus_dma_tag_destroy(q->txq[i].entry_tag);
2017 if (q->txq[i].sdesc) {
2018 free(q->txq[i].sdesc, M_DEVBUF);
2023 mtx_lock_spin(&sc->sge.reg_lock);
2024 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2025 mtx_unlock_spin(&sc->sge.reg_lock);
2027 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2028 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2030 bus_dma_tag_destroy(q->rspq.desc_tag);
2031 MTX_DESTROY(&q->rspq.lock);
2034 #if defined(INET6) || defined(INET)
2035 tcp_lro_free(&q->lro.ctrl);
2038 bzero(q, sizeof(*q));
2042 * t3_free_sge_resources - free SGE resources
2043 * @sc: the adapter softc
2045 * Frees resources used by the SGE queue sets.
2048 t3_free_sge_resources(adapter_t *sc, int nqsets)
2052 for (i = 0; i < nqsets; ++i) {
2053 TXQ_LOCK(&sc->sge.qs[i]);
2054 t3_free_qset(sc, &sc->sge.qs[i]);
2059 * t3_sge_start - enable SGE
2060 * @sc: the controller softc
2062 * Enables the SGE for DMAs. This is the last step in starting packet
2066 t3_sge_start(adapter_t *sc)
2068 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2072 * t3_sge_stop - disable SGE operation
2075 * Disables the DMA engine. This can be called in emergencies (e.g.,
2076 * from error interrupts) or from normal process context. In the latter
2077 * case it also disables any pending queue restart tasklets. Note that
2078 * if it is called in interrupt context it cannot disable the restart
2079 * tasklets as it cannot wait, however the tasklets will have no effect
2080 * since the doorbells are disabled and the driver will call this again
2081 * later from process context, at which time the tasklets will be stopped
2082 * if they are still running.
2085 t3_sge_stop(adapter_t *sc)
2089 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2094 for (nqsets = i = 0; i < (sc)->params.nports; i++)
2095 nqsets += sc->port[i].nqsets;
2101 for (i = 0; i < nqsets; ++i) {
2102 struct sge_qset *qs = &sc->sge.qs[i];
2104 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2105 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2111 * t3_free_tx_desc - reclaims Tx descriptors and their buffers
2112 * @adapter: the adapter
2113 * @q: the Tx queue to reclaim descriptors from
2114 * @reclaimable: the number of descriptors to reclaim
2115 * @m_vec_size: maximum number of buffers to reclaim
2116 * @desc_reclaimed: returns the number of descriptors reclaimed
2118 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
2119 * Tx buffers. Called with the Tx queue lock held.
2121 * Returns the number of buffers reclaimed
2124 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2126 struct tx_sw_desc *txsd;
2127 unsigned int cidx, mask;
2128 struct sge_txq *q = &qs->txq[queue];
2131 T3_TRACE2(sc->tb[q->cntxt_id & 7],
2132 "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2136 txsd = &q->sdesc[cidx];
2138 mtx_assert(&qs->lock, MA_OWNED);
2139 while (reclaimable--) {
2140 prefetch(q->sdesc[(cidx + 1) & mask].m);
2141 prefetch(q->sdesc[(cidx + 2) & mask].m);
2143 if (txsd->m != NULL) {
2144 if (txsd->flags & TX_SW_DESC_MAPPED) {
2145 bus_dmamap_unload(q->entry_tag, txsd->map);
2146 txsd->flags &= ~TX_SW_DESC_MAPPED;
2148 m_freem_list(txsd->m);
2154 if (++cidx == q->size) {
2164 * is_new_response - check if a response is newly written
2165 * @r: the response descriptor
2166 * @q: the response queue
2168 * Returns true if a response descriptor contains a yet unprocessed
2172 is_new_response(const struct rsp_desc *r,
2173 const struct sge_rspq *q)
2175 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
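/*
 * The SGE flips the generation bit it writes each time the response ring
 * wraps, and q->gen tracks the value expected for the current pass, so a
 * matching F_RSPD_GEN2 means the descriptor was written since the driver
 * last visited this slot.
 */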
2178 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2179 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2180 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2181 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2182 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2184 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2185 #define NOMEM_INTR_DELAY 2500
2189 * write_ofld_wr - write an offload work request
2190 * @adap: the adapter
2191 * @m: the packet to send
2193 * @pidx: index of the first Tx descriptor to write
2194 * @gen: the generation value to use
2195 * @ndesc: number of descriptors the packet will occupy
2197 * Write an offload work request to send the supplied packet. The packet
2198 * data already carry the work request with most fields populated.
2201 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2202 unsigned int pidx, unsigned int gen, unsigned int ndesc)
2204 unsigned int sgl_flits, flits;
2205 int i, idx, nsegs, wrlen;
2206 struct work_request_hdr *from;
2207 struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2208 struct tx_desc *d = &q->desc[pidx];
2209 struct txq_state txqs;
2210 struct sglist_seg *segs;
2211 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2214 from = (void *)(oh + 1); /* Start of WR within mbuf */
2215 wrlen = m->m_len - sizeof(*oh);
2217 if (!(oh->flags & F_HDR_SGL)) {
2218 write_imm(d, (caddr_t)from, wrlen, gen);
2221 * mbuf with "real" immediate tx data will be enqueue_wr'd by
2222 * t3_push_frames and freed in wr_ack. Others, like those sent
2223 * down by close_conn, t3_send_reset, etc. should be freed here.
2225 if (!(oh->flags & F_HDR_DF))
2230 memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2234 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2236 nsegs = sgl->sg_nseg;
2237 segs = sgl->sg_segs;
2238 for (idx = 0, i = 0; i < nsegs; i++) {
2239 KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2242 sgp->len[idx] = htobe32(segs[i].ss_len);
2243 sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2251 sgl_flits = sgl_len(nsegs);
2256 write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2257 from->wrh_hi, from->wrh_lo);
2261 * ofld_xmit - send a packet through an offload queue
2262 * @adap: the adapter
2263 * @q: the Tx offload queue
2266 * Send an offload packet through an SGE offload queue.
2269 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2273 unsigned int pidx, gen;
2274 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2275 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2277 ndesc = G_HDR_NDESC(oh->flags);
2280 again: reclaim_completed_tx(qs, 16, TXQ_OFLD);
2281 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2282 if (__predict_false(ret)) {
2294 if (q->pidx >= q->size) {
2299 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2300 check_ring_tx_db(adap, q, 1);
2307 * restart_offloadq - restart a suspended offload queue
2308 @qs: the queue set containing the offload queue
2310 * Resumes transmission on a suspended Tx offload queue.
2313 restart_offloadq(void *data, int npending)
2316 struct sge_qset *qs = data;
2317 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2318 adapter_t *adap = qs->port->adapter;
2322 again: cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2324 while ((m = mbufq_peek(&q->sendq)) != NULL) {
2325 unsigned int gen, pidx;
2326 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2327 unsigned int ndesc = G_HDR_NDESC(oh->flags);
2329 if (__predict_false(q->size - q->in_use < ndesc)) {
2330 setbit(&qs->txq_stopped, TXQ_OFLD);
2331 if (should_restart_tx(q) &&
2332 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2342 if (q->pidx >= q->size) {
2347 (void)mbufq_dequeue(&q->sendq);
2349 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2353 set_bit(TXQ_RUNNING, &q->flags);
2354 set_bit(TXQ_LAST_PKT_DB, &q->flags);
2358 t3_write_reg(adap, A_SG_KDOORBELL,
2359 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2363 * t3_offload_tx - send an offload packet
2366 Sends an offload packet. The ofld_hdr at the front of the mbuf selects
2367 the destination: F_HDR_CTRL routes the packet to the control queue, and
2368 the header's queue-set field (G_HDR_QSET) selects the queue set.
2371 t3_offload_tx(struct adapter *sc, struct mbuf *m)
2373 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2374 struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2376 if (oh->flags & F_HDR_CTRL) {
2377 m_adj(m, sizeof (*oh)); /* trim ofld_hdr off */
2378 return (ctrl_xmit(sc, qs, m));
2380 return (ofld_xmit(sc, qs, m));
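/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the offload code builds the work request in an mbuf, prepends a struct
 * ofld_hdr whose flags carry the descriptor count (see G_HDR_NDESC), the
 * destination queue set (see G_HDR_QSET) and, for control traffic, the
 * F_HDR_CTRL bit, and then simply hands the mbuf over:
 *
 *	error = t3_offload_tx(sc, m);
 */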
2385 restart_tx(struct sge_qset *qs)
2387 struct adapter *sc = qs->port->adapter;
2389 if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2390 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2391 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2392 qs->txq[TXQ_OFLD].restarts++;
2393 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2396 if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2397 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2398 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2399 qs->txq[TXQ_CTRL].restarts++;
2400 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2405 * t3_sge_alloc_qset - initialize an SGE queue set
2406 * @sc: the controller softc
2407 * @id: the queue set id
2408 * @nports: how many Ethernet ports will be using this queue set
2409 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2410 * @p: configuration parameters for this queue set
2411 * @ntxq: number of Tx queues for the queue set
2412 * @pi: port info for queue set
2414 * Allocate resources and initialize an SGE queue set. A queue set
2415 * comprises a response queue, two Rx free-buffer queues, and up to 3
2416 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2417 * queue, offload queue, and control queue.
2420 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2421 const struct qset_params *p, int ntxq, struct port_info *pi)
2423 struct sge_qset *q = &sc->sge.qs[id];
2426 MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2430 if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2431 M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2432 device_printf(sc->dev, "failed to allocate mbuf ring\n");
2435 if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2436 M_NOWAIT | M_ZERO)) == NULL) {
2437 device_printf(sc->dev, "failed to allocate ifq\n");
2440 ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2441 callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2442 callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2443 q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2444 q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2446 init_qset_cntxt(q, id);
2448 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2449 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2450 &q->fl[0].desc, &q->fl[0].sdesc,
2451 &q->fl[0].desc_tag, &q->fl[0].desc_map,
2452 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2453 printf("error %d from alloc ring fl0\n", ret);
2457 if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2458 sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2459 &q->fl[1].desc, &q->fl[1].sdesc,
2460 &q->fl[1].desc_tag, &q->fl[1].desc_map,
2461 sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2462 printf("error %d from alloc ring fl1\n", ret);
2466 if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2467 &q->rspq.phys_addr, &q->rspq.desc, NULL,
2468 &q->rspq.desc_tag, &q->rspq.desc_map,
2469 NULL, NULL)) != 0) {
2470 printf("error %d from alloc ring rspq\n", ret);
2474 for (i = 0; i < ntxq; ++i) {
2475 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2477 if ((ret = alloc_ring(sc, p->txq_size[i],
2478 sizeof(struct tx_desc), sz,
2479 &q->txq[i].phys_addr, &q->txq[i].desc,
2480 &q->txq[i].sdesc, &q->txq[i].desc_tag,
2481 &q->txq[i].desc_map,
2482 sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2483 printf("error %d from alloc ring tx %i\n", ret, i);
2486 mbufq_init(&q->txq[i].sendq);
2488 q->txq[i].size = p->txq_size[i];
2492 TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2494 TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2495 TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2496 TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2498 q->fl[0].gen = q->fl[1].gen = 1;
2499 q->fl[0].size = p->fl_size;
2500 q->fl[1].size = p->jumbo_size;
2504 q->rspq.size = p->rspq_size;
2506 q->txq[TXQ_ETH].stop_thres = nports *
2507 flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
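/*
 * Descriptive note: stop_thres is the fill level at which the Ethernet Tx
 * queue is treated as too full to accept more work; it reserves enough
 * descriptors for one worst-case (maximally fragmented) packet from every
 * port sharing this queue set.
 */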
2509 q->fl[0].buf_size = MCLBYTES;
2510 q->fl[0].zone = zone_pack;
2511 q->fl[0].type = EXT_PACKET;
2513 if (p->jumbo_buf_size == MJUM16BYTES) {
2514 q->fl[1].zone = zone_jumbo16;
2515 q->fl[1].type = EXT_JUMBO16;
2516 } else if (p->jumbo_buf_size == MJUM9BYTES) {
2517 q->fl[1].zone = zone_jumbo9;
2518 q->fl[1].type = EXT_JUMBO9;
2519 } else if (p->jumbo_buf_size == MJUMPAGESIZE) {
2520 q->fl[1].zone = zone_jumbop;
2521 q->fl[1].type = EXT_JUMBOP;
2523 KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2527 q->fl[1].buf_size = p->jumbo_buf_size;
2529 /* Allocate and setup the lro_ctrl structure */
2530 q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2531 #if defined(INET6) || defined(INET)
2532 ret = tcp_lro_init(&q->lro.ctrl);
2534 printf("error %d from tcp_lro_init\n", ret);
2538 q->lro.ctrl.ifp = pi->ifp;
2540 mtx_lock_spin(&sc->sge.reg_lock);
2541 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2542 q->rspq.phys_addr, q->rspq.size,
2543 q->fl[0].buf_size, 1, 0);
2545 printf("error %d from t3_sge_init_rspcntxt\n", ret);
2549 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2550 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2551 q->fl[i].phys_addr, q->fl[i].size,
2552 q->fl[i].buf_size, p->cong_thres, 1,
2555 printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2560 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2561 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2562 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2565 printf("error %d from t3_sge_init_ecntxt\n", ret);
2570 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2571 USE_GTS, SGE_CNTXT_OFLD, id,
2572 q->txq[TXQ_OFLD].phys_addr,
2573 q->txq[TXQ_OFLD].size, 0, 1, 0);
2575 printf("error %d from t3_sge_init_ecntxt\n", ret);
2581 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2583 q->txq[TXQ_CTRL].phys_addr,
2584 q->txq[TXQ_CTRL].size,
2585 q->txq[TXQ_CTRL].token, 1, 0);
2587 printf("error %d from t3_sge_init_ecntxt\n", ret);
2592 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2593 device_get_unit(sc->dev), irq_vec_idx);
2594 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2596 mtx_unlock_spin(&sc->sge.reg_lock);
2597 t3_update_qset_coalesce(q, p);
2599 refill_fl(sc, &q->fl[0], q->fl[0].size);
2600 refill_fl(sc, &q->fl[1], q->fl[1].size);
2601 refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2603 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2604 V_NEWTIMER(q->rspq.holdoff_tmr));
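/*
 * Descriptive note: this GTS write arms the response queue's interrupt
 * holdoff timer, so the newly initialized queue set starts generating
 * interrupts with the configured coalescing delay.
 */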
2609 mtx_unlock_spin(&sc->sge.reg_lock);
2612 t3_free_qset(sc, q);
2618 * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
2619 * ethernet data. Hardware assistance with various checksums and any vlan tag
2620 * will also be taken into account here.
2623 t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad)
2625 struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2626 struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2627 struct ifnet *ifp = pi->ifp;
2629 if ((ifp->if_capenable & IFCAP_RXCSUM) && !cpl->fragment &&
2630 cpl->csum_valid && cpl->csum == 0xffff) {
2631 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID);
2632 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2633 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID|CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
2634 m->m_pkthdr.csum_data = 0xffff;
2637 if (cpl->vlan_valid) {
2638 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2639 m->m_flags |= M_VLANTAG;
2642 m->m_pkthdr.rcvif = ifp;
2643 m->m_pkthdr.header = mtod(m, uint8_t *) + sizeof(*cpl) + ethpad;
2645 * adjust after conversion to mbuf chain
2647 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2648 m->m_len -= (sizeof(*cpl) + ethpad);
2649 m->m_data += (sizeof(*cpl) + ethpad);
2653 * get_packet - return the next ingress packet buffer from a free list
2654 * @adap: the adapter that received the packet
2655 * @drop_thres: # of remaining buffers before we start dropping packets
2656 * @qs: the qset that the SGE free list holding the packet belongs to
2657 * @mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
2658 * @r: response descriptor
2660 * Get the next packet from a free list and complete setup of the
2661 mbuf. If the packet is small we make a copy and recycle the
2662 * original buffer, otherwise we use the original buffer itself. If a
2663 * positive drop threshold is supplied packets are dropped and their
2664 * buffers recycled if (a) the number of remaining buffers is under the
2665 * threshold and the packet is too big to copy, or (b) the packet should
2666 * be copied but there is no memory for the copy.
2669 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2670 struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2673 unsigned int len_cq = ntohl(r->len_cq);
2674 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2675 int mask, cidx = fl->cidx;
2676 struct rx_sw_desc *sd = &fl->sdesc[cidx];
2677 uint32_t len = G_RSPD_LEN(len_cq);
2678 uint32_t flags = M_EXT;
2679 uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
2684 mask = fl->size - 1;
2685 prefetch(fl->sdesc[(cidx + 1) & mask].m);
2686 prefetch(fl->sdesc[(cidx + 2) & mask].m);
2687 prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
2688 prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);
2691 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2693 if (recycle_enable && len <= SGE_RX_COPY_THRES &&
2694 sopeop == RSPQ_SOP_EOP) {
2695 if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
2697 cl = mtod(m, void *);
2698 memcpy(cl, sd->rxsd_cl, len);
2699 recycle_rx_buf(adap, fl, fl->cidx);
2700 m->m_pkthdr.len = m->m_len = len;
2702 mh->mh_head = mh->mh_tail = m;
2707 bus_dmamap_unload(fl->entry_tag, sd->map);
2711 if ((sopeop == RSPQ_SOP_EOP) ||
2712 (sopeop == RSPQ_SOP))
2714 m_init(m, fl->zone, fl->buf_size, M_NOWAIT, MT_DATA, flags);
2715 if (fl->zone == zone_pack) {
2717 * restore clobbered data pointer
2719 m->m_data = m->m_ext.ext_buf;
2721 m_cljset(m, cl, fl->type);
2730 mh->mh_head = mh->mh_tail = m;
2731 m->m_pkthdr.len = len;
2736 case RSPQ_NSOP_NEOP:
2737 if (mh->mh_tail == NULL) {
2738 log(LOG_ERR, "discarding intermediate descriptor entry\n");
2742 mh->mh_tail->m_next = m;
2744 mh->mh_head->m_pkthdr.len += len;
2748 printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2750 if (++fl->cidx == fl->size)
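/*
 * Descriptive note: a frame can span several free-list buffers.  get_packet()
 * reports end-of-packet only when the descriptor's SOP/EOP bits say the frame
 * is complete; intermediate buffers are linked onto the chain through
 * mh->mh_tail and accumulated into mh->mh_head->m_pkthdr.len, and an
 * intermediate descriptor that arrives without an open chain is logged and
 * discarded.
 */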
2757 * handle_rsp_cntrl_info - handles control information in a response
2758 * @qs: the queue set corresponding to the response
2759 * @flags: the response control flags
2761 * Handles the control information of an SGE response, such as GTS
2762 * indications and completion credits for the queue set's Tx queues.
2763 HW coalesces credits; we don't do any extra SW coalescing.
2765 static __inline void
2766 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2768 unsigned int credits;
2771 if (flags & F_RSPD_TXQ0_GTS)
2772 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2774 credits = G_RSPD_TXQ0_CR(flags);
2776 qs->txq[TXQ_ETH].processed += credits;
2778 credits = G_RSPD_TXQ2_CR(flags);
2780 qs->txq[TXQ_CTRL].processed += credits;
2783 if (flags & F_RSPD_TXQ1_GTS)
2784 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2786 credits = G_RSPD_TXQ1_CR(flags);
2788 qs->txq[TXQ_OFLD].processed += credits;
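/*
 * Descriptive note: the credit fields decoded here map TXQ0 to the Ethernet
 * queue, TXQ1 to the offload queue and TXQ2 to the control queue.  The counts
 * accumulated in ->processed are what the descriptor-reclaim path later uses
 * to decide how many completed Tx descriptors (and their mbufs) can be freed.
 */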
2793 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2794 unsigned int sleeping)
2800 * process_responses - process responses from an SGE response queue
2801 * @adap: the adapter
2802 * @qs: the queue set to which the response queue belongs
2803 * @budget: how many responses can be processed in this round
2805 * Process responses from an SGE response queue up to the supplied budget.
2806 * Responses include received packets as well as credits and other events
2807 * for the queues that belong to the response queue's queue set.
2808 * A negative budget is effectively unlimited.
2810 Additionally, choose the interrupt holdoff time for the next interrupt
2811 on this queue. If the system is under memory shortage, use a fairly
2812 long delay to help recovery.
2815 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2817 struct sge_rspq *rspq = &qs->rspq;
2818 struct rsp_desc *r = &rspq->desc[rspq->cidx];
2819 int budget_left = budget;
2820 unsigned int sleeping = 0;
2821 #if defined(INET6) || defined(INET)
2822 int lro_enabled = qs->lro.enabled;
2824 struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2826 struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
2828 static int last_holdoff = 0;
2829 if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2830 printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2831 last_holdoff = rspq->holdoff_tmr;
2834 rspq->next_holdoff = rspq->holdoff_tmr;
2836 while (__predict_true(budget_left && is_new_response(r, rspq))) {
2837 int eth, eop = 0, ethpad = 0;
2838 uint32_t flags = ntohl(r->flags);
2839 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2840 uint8_t opcode = r->rss_hdr.opcode;
2842 eth = (opcode == CPL_RX_PKT);
2844 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2848 printf("async notification\n");
2850 if (mh->mh_head == NULL) {
2851 mh->mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
2854 m = m_gethdr(M_DONTWAIT, MT_DATA);
2859 memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2860 m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2861 *mtod(m, char *) = CPL_ASYNC_NOTIF;
2862 opcode = CPL_ASYNC_NOTIF;
2864 rspq->async_notif++;
2866 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2867 struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
2871 rspq->next_holdoff = NOMEM_INTR_DELAY;
2875 if (mh->mh_head == NULL)
2878 mh->mh_tail->m_next = m;
2881 get_imm_packet(adap, r, m);
2882 mh->mh_head->m_pkthdr.len += m->m_len;
2885 } else if (r->len_cq) {
2886 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2888 eop = get_packet(adap, drop_thresh, qs, mh, r);
2890 if (r->rss_hdr.hash_type && !adap->timestamp)
2891 mh->mh_head->m_flags |= M_FLOWID;
2892 mh->mh_head->m_pkthdr.flowid = rss_hash;
2900 if (flags & RSPD_CTRL_MASK) {
2901 sleeping |= flags & RSPD_GTS_MASK;
2902 handle_rsp_cntrl_info(qs, flags);
2906 rspq->offload_pkts++;
2908 adap->cpl_handler[opcode](qs, r, mh->mh_head);
2910 m_freem(mh->mh_head);
2913 } else if (eth && eop) {
2914 struct mbuf *m = mh->mh_head;
2916 t3_rx_eth(adap, rspq, m, ethpad);
2919 * The T304 sends incoming packets on any qset. If LRO
2920 is also enabled, we could end up passing the packet to
2921 lro_ctrl->ifp's input routine. That is incorrect.
2923 * The mbuf's rcvif was derived from the cpl header and
2924 * is accurate. Skip LRO and just use that.
2926 #if defined(INET6) || defined(INET)
2927 skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2929 if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
2930 && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
2932 /* successfully queued for LRO */
2937 * LRO not enabled, packet unsuitable for LRO,
2938 * or unable to queue. Pass it up right now in
2941 struct ifnet *ifp = m->m_pkthdr.rcvif;
2942 (*ifp->if_input)(ifp, m);
2949 if (__predict_false(++rspq->cidx == rspq->size)) {
2955 if (++rspq->credits >= 64) {
2956 refill_rspq(adap, rspq, rspq->credits);
2959 __refill_fl_lt(adap, &qs->fl[0], 32);
2960 __refill_fl_lt(adap, &qs->fl[1], 32);
2964 #if defined(INET6) || defined(INET)
2966 while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
2967 struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
2968 SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
2969 tcp_lro_flush(lro_ctrl, queued);
2974 check_ring_db(adap, qs, sleeping);
2976 mb(); /* commit Tx queue processed updates */
2977 if (__predict_false(qs->txq_stopped > 1))
2980 __refill_fl_lt(adap, &qs->fl[0], 512);
2981 __refill_fl_lt(adap, &qs->fl[1], 512);
2982 budget -= budget_left;
2987 * A helper function that processes responses and issues GTS.
2990 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
2993 static int last_holdoff = 0;
2995 work = process_responses(adap, rspq_to_qset(rq), -1);
2997 if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
2998 printf("next_holdoff=%d\n", rq->next_holdoff);
2999 last_holdoff = rq->next_holdoff;
3001 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3002 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
3009 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3010 * Handles data events from SGE response queues as well as error and other
3011 * async events as they all use the same interrupt pin. We use one SGE
3012 response queue per port in this mode and protect all response queues with queue 0's lock.
3016 t3b_intr(void *data)
3019 adapter_t *adap = data;
3020 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3022 t3_write_reg(adap, A_PL_CLI, 0);
3023 map = t3_read_reg(adap, A_SG_DATA_INTR);
3028 if (__predict_false(map & F_ERRINTR)) {
3029 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3030 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3031 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3034 mtx_lock(&q0->lock);
3035 for_each_port(adap, i)
3037 process_responses_gts(adap, &adap->sge.qs[i].rspq);
3038 mtx_unlock(&q0->lock);
3042 * The MSI interrupt handler. This needs to handle data events from SGE
3043 * response queues as well as error and other async events as they all use
3044 * the same MSI vector. We use one SGE response queue per port in this mode
3045 * and protect all response queues with queue 0's lock.
3048 t3_intr_msi(void *data)
3050 adapter_t *adap = data;
3051 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3052 int i, new_packets = 0;
3054 mtx_lock(&q0->lock);
3056 for_each_port(adap, i)
3057 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3059 mtx_unlock(&q0->lock);
3060 if (new_packets == 0) {
3061 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3062 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3063 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3068 t3_intr_msix(void *data)
3070 struct sge_qset *qs = data;
3071 adapter_t *adap = qs->port->adapter;
3072 struct sge_rspq *rspq = &qs->rspq;
3074 if (process_responses_gts(adap, rspq) == 0)
3075 rspq->unhandled_irqs++;
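/*
 * Summary of the three interrupt models above: legacy INTx (t3b_intr) and
 * MSI (t3_intr_msi) share a single vector, so they take queue 0's response
 * queue lock, poll every port's response queue, and defer error/async events
 * to the slow interrupt task.  MSI-X (t3_intr_msix) dedicates a vector to
 * each queue set and services only that set's response queue.
 */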
3078 #define QDUMP_SBUF_SIZE 32 * 400
3080 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
3082 struct sge_rspq *rspq;
3083 struct sge_qset *qs;
3084 int i, err, dump_end, idx;
3086 struct rsp_desc *rspd;
3090 qs = rspq_to_qset(rspq);
3091 if (rspq->rspq_dump_count == 0)
3093 if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3095 "dump count is too large %d\n", rspq->rspq_dump_count);
3096 rspq->rspq_dump_count = 0;
3099 if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3101 "dump start of %d is greater than queue size\n",
3102 rspq->rspq_dump_start);
3103 rspq->rspq_dump_start = 0;
3106 err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3109 err = sysctl_wire_old_buffer(req, 0);
3112 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3114 sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3115 (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3116 ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3117 sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3118 ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3120 sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3121 (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3123 dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3124 for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3125 idx = i & (RSPQ_Q_SIZE-1);
3127 rspd = &rspq->desc[idx];
3128 sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3129 idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3130 rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3131 sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3132 rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3133 be32toh(rspd->len_cq), rspd->intr_gen);
3136 err = sbuf_finish(sb);
3137 /* Output a trailing NUL. */
3139 err = SYSCTL_OUT(req, "", 1);
3145 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3147 struct sge_txq *txq;
3148 struct sge_qset *qs;
3149 int i, j, err, dump_end;
3151 struct tx_desc *txd;
3152 uint32_t *WR, wr_hi, wr_lo, gen;
3156 qs = txq_to_qset(txq, TXQ_ETH);
3157 if (txq->txq_dump_count == 0) {
3160 if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3162 "dump count is too large %d\n", txq->txq_dump_count);
3163 txq->txq_dump_count = 1;
3166 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3168 "dump start of %d is greater than queue size\n",
3169 txq->txq_dump_start);
3170 txq->txq_dump_start = 0;
3173 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3176 err = sysctl_wire_old_buffer(req, 0);
3179 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3181 sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3182 (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3183 (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3184 sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
3185 ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3186 ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3187 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3188 txq->txq_dump_start,
3189 (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3191 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3192 for (i = txq->txq_dump_start; i < dump_end; i++) {
3193 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3194 WR = (uint32_t *)txd->flit;
3195 wr_hi = ntohl(WR[0]);
3196 wr_lo = ntohl(WR[1]);
3197 gen = G_WR_GEN(wr_lo);
3199 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3201 for (j = 2; j < 30; j += 4)
3202 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3203 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3206 err = sbuf_finish(sb);
3207 /* Output a trailing NUL. */
3209 err = SYSCTL_OUT(req, "", 1);
3215 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3217 struct sge_txq *txq;
3218 struct sge_qset *qs;
3219 int i, j, err, dump_end;
3221 struct tx_desc *txd;
3222 uint32_t *WR, wr_hi, wr_lo, gen;
3225 qs = txq_to_qset(txq, TXQ_CTRL);
3226 if (txq->txq_dump_count == 0) {
3229 if (txq->txq_dump_count > 256) {
3231 "dump count is too large %d\n", txq->txq_dump_count);
3232 txq->txq_dump_count = 1;
3235 if (txq->txq_dump_start > 255) {
3237 "dump start of %d is greater than queue size\n",
3238 txq->txq_dump_start);
3239 txq->txq_dump_start = 0;
3243 err = sysctl_wire_old_buffer(req, 0);
3246 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3247 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3248 txq->txq_dump_start,
3249 (txq->txq_dump_start + txq->txq_dump_count) & 255);
3251 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3252 for (i = txq->txq_dump_start; i < dump_end; i++) {
3253 txd = &txq->desc[i & (255)];
3254 WR = (uint32_t *)txd->flit;
3255 wr_hi = ntohl(WR[0]);
3256 wr_lo = ntohl(WR[1]);
3257 gen = G_WR_GEN(wr_lo);
3259 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3261 for (j = 2; j < 30; j += 4)
3262 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3263 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3266 err = sbuf_finish(sb);
3267 /* Output a trailing NUL. */
3269 err = SYSCTL_OUT(req, "", 1);
3275 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3277 adapter_t *sc = arg1;
3278 struct qset_params *qsp = &sc->params.sge.qset[0];
3280 struct sge_qset *qs;
3281 int i, j, err, nqsets = 0;
3284 if ((sc->flags & FULL_INIT_DONE) == 0)
3287 coalesce_usecs = qsp->coalesce_usecs;
3288 err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3293 if (coalesce_usecs == qsp->coalesce_usecs)
3296 for (i = 0; i < sc->params.nports; i++)
3297 for (j = 0; j < sc->port[i].nqsets; j++)
3300 coalesce_usecs = max(1, coalesce_usecs);
3302 for (i = 0; i < nqsets; i++) {
3303 qs = &sc->sge.qs[i];
3304 qsp = &sc->params.sge.qset[i];
3305 qsp->coalesce_usecs = coalesce_usecs;
3307 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3308 &sc->sge.qs[0].rspq.lock;
3311 t3_update_qset_coalesce(qs, qsp);
3312 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3313 V_NEWTIMER(qs->rspq.holdoff_tmr));
3321 t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
3323 adapter_t *sc = arg1;
3326 if ((sc->flags & FULL_INIT_DONE) == 0)
3329 timestamp = sc->timestamp;
3330 rc = sysctl_handle_int(oidp, &timestamp, arg2, req);
3335 if (timestamp != sc->timestamp) {
3336 t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
3337 timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
3338 sc->timestamp = timestamp;
3345 t3_add_attach_sysctls(adapter_t *sc)
3347 struct sysctl_ctx_list *ctx;
3348 struct sysctl_oid_list *children;
3350 ctx = device_get_sysctl_ctx(sc->dev);
3351 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3353 /* random information */
3354 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3356 CTLFLAG_RD, &sc->fw_version,
3357 0, "firmware version");
3358 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3360 CTLFLAG_RD, &sc->params.rev,
3362 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3364 CTLFLAG_RD, &sc->port_types,
3365 0, "type of ports");
3366 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3368 CTLFLAG_RW, &cxgb_debug,
3369 0, "enable verbose debugging output");
3370 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
3371 CTLFLAG_RD, &sc->tunq_coalesce,
3372 "#tunneled packets freed");
3373 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3375 CTLFLAG_RD, &txq_fills,
3376 0, "#times txq overrun");
3377 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3379 CTLFLAG_RD, &sc->params.vpd.cclk,
3380 0, "core clock frequency (in kHz)");
3384 static const char *rspq_name = "rspq";
3385 static const char *txq_names[] =
3393 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3395 struct port_info *p = arg1;
3401 parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3403 t3_mac_update_stats(&p->mac);
3406 return (sysctl_handle_64(oidp, parg, 0, req));
3410 t3_add_configured_sysctls(adapter_t *sc)
3412 struct sysctl_ctx_list *ctx;
3413 struct sysctl_oid_list *children;
3416 ctx = device_get_sysctl_ctx(sc->dev);
3417 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3419 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3421 CTLTYPE_INT|CTLFLAG_RW, sc,
3422 0, t3_set_coalesce_usecs,
3423 "I", "interrupt coalescing timer (us)");
3425 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3427 CTLTYPE_INT | CTLFLAG_RW, sc,
3428 0, t3_pkt_timestamp,
3429 "I", "provide packet timestamp instead of connection hash");
3431 for (i = 0; i < sc->params.nports; i++) {
3432 struct port_info *pi = &sc->port[i];
3433 struct sysctl_oid *poid;
3434 struct sysctl_oid_list *poidlist;
3435 struct mac_stats *mstats = &pi->mac.stats;
3437 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3438 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3439 pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
3440 poidlist = SYSCTL_CHILDREN(poid);
3441 SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3442 "nqsets", CTLFLAG_RD, &pi->nqsets,
3445 for (j = 0; j < pi->nqsets; j++) {
3446 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3447 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3448 *ctrlqpoid, *lropoid;
3449 struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3450 *txqpoidlist, *ctrlqpoidlist,
3452 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3454 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3456 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3457 qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
3458 qspoidlist = SYSCTL_CHILDREN(qspoid);
3460 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3461 CTLFLAG_RD, &qs->fl[0].empty, 0,
3462 "freelist #0 empty");
3463 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3464 CTLFLAG_RD, &qs->fl[1].empty, 0,
3465 "freelist #1 empty");
3467 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3468 rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
3469 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3471 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3472 txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3473 txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3475 ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3476 txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
3477 ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3479 lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3480 "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
3481 lropoidlist = SYSCTL_CHILDREN(lropoid);
3483 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3484 CTLFLAG_RD, &qs->rspq.size,
3485 0, "#entries in response queue");
3486 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3487 CTLFLAG_RD, &qs->rspq.cidx,
3488 0, "consumer index");
3489 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3490 CTLFLAG_RD, &qs->rspq.credits,
3492 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
3493 CTLFLAG_RD, &qs->rspq.starved,
3494 0, "#times starved");
3495 SYSCTL_ADD_ULONG(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3496 CTLFLAG_RD, &qs->rspq.phys_addr,
3497 "physical_address_of the queue");
3498 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3499 CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3500 0, "start rspq dump entry");
3501 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3502 CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3503 0, "#rspq entries to dump");
3504 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3505 CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
3506 0, t3_dump_rspq, "A", "dump of the response queue");
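/*
 * Usage sketch (hypothetical node names; the actual path follows this
 * device's sysctl tree, e.g. a controller attached as cxgbc0):
 *
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.dump_count=32
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.qdump
 *
 * dump_start/dump_count select the slice of the response ring that the
 * qdump handler formats.
 */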
3508 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
3509 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3510 "#tunneled packets dropped");
3511 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3512 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
3513 0, "#tunneled packets waiting to be sent");
3515 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3516 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_prod,
3517 0, "#tunneled packets queue producer index");
3518 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3519 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_cons,
3520 0, "#tunneled packets queue consumer index");
3522 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
3523 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3524 0, "#tunneled packets processed by the card");
3525 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3526 CTLFLAG_RD, &txq->cleaned,
3527 0, "#tunneled packets cleaned");
3528 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3529 CTLFLAG_RD, &txq->in_use,
3530 0, "#tunneled packet slots in use");
3531 SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "frees",
3532 CTLFLAG_RD, &txq->txq_frees,
3533 "#tunneled packets freed");
3534 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3535 CTLFLAG_RD, &txq->txq_skipped,
3536 0, "#tunneled packet descriptors skipped");
3537 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
3538 CTLFLAG_RD, &txq->txq_coalesced,
3539 "#tunneled packets coalesced");
3540 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3541 CTLFLAG_RD, &txq->txq_enqueued,
3542 0, "#tunneled packets enqueued to hardware");
3543 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3544 CTLFLAG_RD, &qs->txq_stopped,
3545 0, "tx queues stopped");
3546 SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3547 CTLFLAG_RD, &txq->phys_addr,
3548 "physical_address_of the queue");
3549 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3550 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3551 0, "txq generation");
3552 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3553 CTLFLAG_RD, &txq->cidx,
3554 0, "hardware queue cidx");
3555 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3556 CTLFLAG_RD, &txq->pidx,
3557 0, "hardware queue pidx");
3558 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3559 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3560 0, "txq start idx for dump");
3561 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3562 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3563 0, "txq #entries to dump");
3564 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3565 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3566 0, t3_dump_txq_eth, "A", "dump of the transmit queue");
3568 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3569 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3570 0, "ctrlq start idx for dump");
3571 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3572 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3573 0, "ctrl #entries to dump");
3574 SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3575 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
3576 0, t3_dump_txq_ctrl, "A", "dump of the control queue");
3578 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_queued",
3579 CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
3580 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3581 CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
3582 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3583 CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
3584 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3585 CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
3588 /* Now add a node for mac stats. */
3589 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3590 CTLFLAG_RD, NULL, "MAC statistics");
3591 poidlist = SYSCTL_CHILDREN(poid);
3594 * We (ab)use the length argument (arg2) to pass on the offset
3595 * of the data that we are interested in. This is only required
3596 * for the quad counters that are updated from the hardware (we
3597 * make sure that we return the latest value).
3598 * sysctl_handle_macstat first updates *all* the counters from
3599 * the hardware, and then returns the latest value of the
3600 * requested counter. Best would be to update only the
3601 * requested counter from hardware, but t3_mac_update_stats()
3602 hides all the register details and we don't want to dive into all that here.
3605 #define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3606 (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
3607 sysctl_handle_macstat, "QU", 0)
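/*
 * For example, CXGB_SYSCTL_ADD_QUAD(tx_octets) registers a read-only 64-bit
 * node named "tx_octets" whose arg2 is offsetof(struct mac_stats, tx_octets);
 * sysctl_handle_macstat() adds that offset to &p->mac.stats, refreshes the
 * MAC counters via t3_mac_update_stats(), and returns the current value.
 */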
3608 CXGB_SYSCTL_ADD_QUAD(tx_octets);
3609 CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3610 CXGB_SYSCTL_ADD_QUAD(tx_frames);
3611 CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3612 CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3613 CXGB_SYSCTL_ADD_QUAD(tx_pause);
3614 CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3615 CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3616 CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3617 CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3618 CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3619 CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3620 CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3621 CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3622 CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3623 CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3624 CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3625 CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3626 CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3627 CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3628 CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3629 CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3630 CXGB_SYSCTL_ADD_QUAD(rx_octets);
3631 CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3632 CXGB_SYSCTL_ADD_QUAD(rx_frames);
3633 CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3634 CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3635 CXGB_SYSCTL_ADD_QUAD(rx_pause);
3636 CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3637 CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3638 CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3639 CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3640 CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3641 CXGB_SYSCTL_ADD_QUAD(rx_runt);
3642 CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3643 CXGB_SYSCTL_ADD_QUAD(rx_short);
3644 CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3645 CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3646 CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3647 CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3648 CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3649 CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3650 CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3651 CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3652 CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3653 CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3654 #undef CXGB_SYSCTL_ADD_QUAD
3656 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3657 CTLFLAG_RD, &mstats->a, 0)
3658 CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3659 CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3660 CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3661 CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3662 CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3663 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3664 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3665 CXGB_SYSCTL_ADD_ULONG(num_toggled);
3666 CXGB_SYSCTL_ADD_ULONG(num_resets);
3667 CXGB_SYSCTL_ADD_ULONG(link_faults);
3668 #undef CXGB_SYSCTL_ADD_ULONG
3673 * t3_get_desc - dump an SGE descriptor for debugging purposes
3674 * @qs: the queue set
3675 @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
3676 * @idx: the descriptor index in the queue
3677 * @data: where to dump the descriptor contents
3679 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3680 * size of the descriptor.
3683 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3684 unsigned char *data)
3690 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3692 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3693 return sizeof(struct tx_desc);
3697 if (!qs->rspq.desc || idx >= qs->rspq.size)
3699 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3700 return sizeof(struct rsp_desc);
3704 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3706 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3707 return sizeof(struct rx_desc);