1 /**************************************************************************
3 Copyright (c) 2007-2009, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include "opt_inet6.h"
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/bus_dma.h>
46 #include <sys/queue.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
52 #include <sys/sched.h>
54 #include <sys/systm.h>
55 #include <sys/syslog.h>
56 #include <sys/socket.h>
57 #include <sys/sglist.h>
60 #include <net/if_var.h>
62 #include <net/ethernet.h>
63 #include <net/if_vlan_var.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip6.h>
69 #include <netinet/tcp.h>
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
77 #include <cxgb_include.h>
81 int multiq_tx_enable = 1;
84 CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
87 extern struct sysctl_oid_list sysctl__hw_cxgb_children;
88 int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
89 SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
90 "size of per-queue mbuf ring");
92 static int cxgb_tx_coalesce_force = 0;
93 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
94 &cxgb_tx_coalesce_force, 0,
95 "coalesce small packets into a single work request regardless of ring state");
97 #define COALESCE_START_DEFAULT TX_ETH_Q_SIZE>>1
98 #define COALESCE_START_MAX (TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
99 #define COALESCE_STOP_DEFAULT TX_ETH_Q_SIZE>>2
100 #define COALESCE_STOP_MIN TX_ETH_Q_SIZE>>5
101 #define TX_RECLAIM_DEFAULT TX_ETH_Q_SIZE>>5
102 #define TX_RECLAIM_MAX TX_ETH_Q_SIZE>>2
103 #define TX_RECLAIM_MIN TX_ETH_Q_SIZE>>6
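/*
 * With the stock TX_ETH_Q_SIZE of 1024 (an assumption; the value lives in
 * cxgb_adapter.h) these shifts work out to: start coalescing at 512
 * descriptors in use (capped at 7/8 of the ring = 896), stop again at 256,
 * and reclaim in batches of at least 32, clamped between 16 and 256.
 */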
106 static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
107 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN,
108 &cxgb_tx_coalesce_enable_start, 0,
109 "coalesce enable threshold");
110 static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
111 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN,
112 &cxgb_tx_coalesce_enable_stop, 0,
113 "coalesce disable threshold");
114 static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
115 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN,
116 &cxgb_tx_reclaim_threshold, 0,
117 "tx cleaning minimum threshold");
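/*
 * All of the above are runtime-tunable, e.g. (illustrative values):
 *   sysctl hw.cxgb.tx_coalesce_enable_start=768
 *   sysctl hw.cxgb.tx_reclaim_threshold=64
 */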
120 * XXX don't re-enable this until TOE stops assuming
123 static int recycle_enable = 0;
125 extern int cxgb_use_16k_clusters;
126 extern int nmbjumbop;
127 extern int nmbjumbo9;
128 extern int nmbjumbo16;
132 #define SGE_RX_SM_BUF_SIZE 1536
133 #define SGE_RX_DROP_THRES 16
134 #define SGE_RX_COPY_THRES 128
137 * Period of the Tx buffer reclaim timer. This timer does not need to run
138 * frequently as Tx buffers are usually reclaimed by new Tx packets.
140 #define TX_RECLAIM_PERIOD (hz >> 1)
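/* (hz >> 1) ticks, i.e. the reclaim timer fires roughly every 500 ms. */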
143 * Values for sge_txq.flags
146 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
147 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
151 uint64_t flit[TX_DESC_FLITS];
161 struct rsp_desc { /* response queue descriptor */
162 struct rss_header rss_hdr;
165 uint8_t imm_data[47];
169 #define RX_SW_DESC_MAP_CREATED (1 << 0)
170 #define TX_SW_DESC_MAP_CREATED (1 << 1)
171 #define RX_SW_DESC_INUSE (1 << 3)
172 #define TX_SW_DESC_MAPPED (1 << 4)
174 #define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0)
175 #define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP)
176 #define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP)
177 #define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
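/*
 * The four SOP/EOP combinations of a response descriptor: neither flag,
 * EOP only, SOP only, or both (a packet fully described by one entry).
 */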
179 struct tx_sw_desc { /* SW state per Tx descriptor */
185 struct rx_sw_desc { /* SW state per Rx descriptor */
198 struct refill_fl_cb_arg {
200 bus_dma_segment_t seg;
206 * Maps a number of flits to the number of Tx descriptors that can hold them.
209 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
211 * HW allows up to 4 descriptors to be combined into a WR.
213 static uint8_t flit_desc_map[] = {
215 #if SGE_NUM_GENBITS == 1
216 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
217 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
218 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
219 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
220 #elif SGE_NUM_GENBITS == 2
221 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
222 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
223 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
224 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
226 # error "SGE_NUM_GENBITS must be 1 or 2"
230 #define TXQ_LOCK_ASSERT(qs) mtx_assert(&(qs)->lock, MA_OWNED)
231 #define TXQ_TRYLOCK(qs) mtx_trylock(&(qs)->lock)
232 #define TXQ_LOCK(qs) mtx_lock(&(qs)->lock)
233 #define TXQ_UNLOCK(qs) mtx_unlock(&(qs)->lock)
234 #define TXQ_RING_EMPTY(qs) drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
235 #define TXQ_RING_NEEDS_ENQUEUE(qs) \
236 drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
237 #define TXQ_RING_FLUSH(qs) drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
238 #define TXQ_RING_DEQUEUE_COND(qs, func, arg) \
239 drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
240 #define TXQ_RING_DEQUEUE(qs) \
241 drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
245 static void sge_timer_cb(void *arg);
246 static void sge_timer_reclaim(void *arg, int ncount);
247 static void sge_txq_reclaim_handler(void *arg, int ncount);
248 static void cxgb_start_locked(struct sge_qset *qs);
251  * XXX need to cope with bursty scheduling by looking at a wider
252  * window than we do now when determining the need for coalescing
255 static __inline uint64_t
256 check_pkt_coalesce(struct sge_qset *qs)
262 if (__predict_false(cxgb_tx_coalesce_force))
264 txq = &qs->txq[TXQ_ETH];
265 sc = qs->port->adapter;
266 fill = &sc->tunq_fill[qs->idx];
268 if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
269 cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
270 if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
271 		cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
273 	 * If the hardware transmit queue is more than 1/8 full
274 	 * we mark it as coalescing; we drop back from coalescing
275 	 * when we go below 1/32 full and there are no packets enqueued.
276 	 * This provides us with some degree of hysteresis.
278 if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
279 TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
281 else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
284 return (sc->tunq_coalesce);
289 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
292 #if _BYTE_ORDER == _LITTLE_ENDIAN
294 wr_hilo |= (((uint64_t)wr_lo)<<32);
297 wr_hilo |= (((uint64_t)wr_hi)<<32);
299 wrp->wrh_hilo = wr_hilo;
303 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
312 struct coalesce_info {
318 coalesce_check(struct mbuf *m, void *arg)
320 struct coalesce_info *ci = arg;
321 int *count = &ci->count;
322 int *nbytes = &ci->nbytes;
324 if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
325 (*count < 7) && (m->m_next == NULL))) {
334 cxgb_dequeue(struct sge_qset *qs)
336 struct mbuf *m, *m_head, *m_tail;
337 struct coalesce_info ci;
340 if (check_pkt_coalesce(qs) == 0)
341 return TXQ_RING_DEQUEUE(qs);
343 m_head = m_tail = NULL;
344 ci.count = ci.nbytes = 0;
346 m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
347 if (m_head == NULL) {
349 } else if (m != NULL) {
350 m_tail->m_nextpkt = m;
355 		panic("trying to coalesce %d packets into one WR", ci.count);
360 * reclaim_completed_tx - reclaims completed Tx descriptors
361 * @adapter: the adapter
362 * @q: the Tx queue to reclaim completed descriptors from
364 * Reclaims Tx descriptors that the SGE has indicated it has processed,
365 * and frees the associated buffers if possible. Called with the Tx
369 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
371 struct sge_txq *q = &qs->txq[queue];
372 int reclaim = desc_reclaimable(q);
374 if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
375 (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
376 cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
378 if (reclaim < reclaim_min)
381 mtx_assert(&qs->lock, MA_OWNED);
383 t3_free_tx_desc(qs, reclaim, queue);
384 q->cleaned += reclaim;
385 q->in_use -= reclaim;
387 if (isset(&qs->txq_stopped, TXQ_ETH))
388 clrbit(&qs->txq_stopped, TXQ_ETH);
394 * should_restart_tx - are there enough resources to restart a Tx queue?
397 * Checks if there are enough descriptors to restart a suspended Tx queue.
400 should_restart_tx(const struct sge_txq *q)
402 unsigned int r = q->processed - q->cleaned;
404 return q->in_use - r < (q->size >> 1);
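/*
 * That is, restart only once no more than half the ring is genuinely
 * outstanding (in_use minus descriptors processed but not yet cleaned).
 */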
408 * t3_sge_init - initialize SGE
410 * @p: the SGE parameters
412 * Performs SGE initialization needed every time after a chip reset.
413 * We do not initialize any of the queue sets here, instead the driver
414 * top-level must request those individually. We also do not enable DMA
415 * here, that should be done after the queues have been set up.
418 t3_sge_init(adapter_t *adap, struct sge_params *p)
422 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
424 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
425 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
426 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
427 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
428 #if SGE_NUM_GENBITS == 1
429 ctrl |= F_EGRGENCTRL;
431 if (adap->params.rev > 0) {
432 if (!(adap->flags & (USING_MSIX | USING_MSI)))
433 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
435 t3_write_reg(adap, A_SG_CONTROL, ctrl);
436 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
437 V_LORCQDRBTHRSH(512));
438 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
439 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
440 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
441 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
442 adap->params.rev < T3_REV_C ? 1000 : 500);
443 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
444 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
445 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
446 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
447 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
452 * sgl_len - calculates the size of an SGL of the given capacity
453 * @n: the number of SGL entries
455 * Calculates the number of flits needed for a scatter/gather list that
456 * can hold the given number of entries.
458 static __inline unsigned int
459 sgl_len(unsigned int n)
461 return ((3 * n) / 2 + (n & 1));
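/*
 * Each struct sg_ent packs two 8-byte addresses and two 4-byte lengths
 * into 3 flits, hence 3n/2 flits for n entries plus one extra flit when
 * n is odd; e.g. sgl_len(3) == 5 and sgl_len(4) == 6.
 */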
465 * get_imm_packet - return the next ingress packet buffer from a response
466 * @resp: the response descriptor containing the packet data
468 * Return a packet containing the immediate data of the given response.
471 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
474 if (resp->rss_hdr.opcode == CPL_RX_DATA) {
475 const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
476 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
477 } else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
478 const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
479 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
481 m->m_len = IMMED_PKT_SIZE;
482 m->m_ext.ext_buf = NULL;
483 m->m_ext.ext_type = 0;
484 memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
488 static __inline u_int
489 flits_to_desc(u_int n)
491 return (flit_desc_map[n]);
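/*
 * Plain table lookup of the "desc = 1 + (flits - 2) / (WR_FLITS - 1)"
 * relation documented above flit_desc_map; callers must keep @n within
 * the table, i.e. at most 4 descriptors' worth of flits.
 */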
494 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
495 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
496 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
497 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
499 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
500 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
504 * t3_sge_err_intr_handler - SGE async event interrupt handler
505 * @adapter: the adapter
507 * Interrupt handler for SGE asynchronous (non-data) events.
510 t3_sge_err_intr_handler(adapter_t *adapter)
512 unsigned int v, status;
514 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
515 if (status & SGE_PARERR)
516 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
517 status & SGE_PARERR);
518 if (status & SGE_FRAMINGERR)
519 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
520 status & SGE_FRAMINGERR);
521 if (status & F_RSPQCREDITOVERFOW)
522 CH_ALERT(adapter, "SGE response queue credit overflow\n");
524 if (status & F_RSPQDISABLED) {
525 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
528 "packet delivered to disabled response queue (0x%x)\n",
529 (v >> S_RSPQ0DISABLED) & 0xff);
532 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
533 if (status & SGE_FATALERR)
534 t3_fatal_err(adapter);
538 t3_sge_prep(adapter_t *adap, struct sge_params *p)
540 int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
542 nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
543 nqsets *= adap->params.nports;
545 fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
547 while (!powerof2(fl_q_size))
550 use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
553 #if __FreeBSD_version >= 700111
555 jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
556 jumbo_buf_size = MJUM16BYTES;
558 jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
559 jumbo_buf_size = MJUM9BYTES;
562 jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
563 jumbo_buf_size = MJUMPAGESIZE;
565 while (!powerof2(jumbo_q_size))
568 if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
569 device_printf(adap->dev,
570 "Insufficient clusters and/or jumbo buffers.\n");
572 p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
574 for (i = 0; i < SGE_QSETS; ++i) {
575 struct qset_params *q = p->qset + i;
577 if (adap->params.nports > 2) {
578 q->coalesce_usecs = 50;
581 q->coalesce_usecs = 10;
583 q->coalesce_usecs = 5;
587 q->rspq_size = RSPQ_Q_SIZE;
588 q->fl_size = fl_q_size;
589 q->jumbo_size = jumbo_q_size;
590 q->jumbo_buf_size = jumbo_buf_size;
591 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
592 q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
593 q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
599 t3_sge_alloc(adapter_t *sc)
602 /* The parent tag. */
603 if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
604 1, 0, /* algnmnt, boundary */
605 BUS_SPACE_MAXADDR, /* lowaddr */
606 BUS_SPACE_MAXADDR, /* highaddr */
607 NULL, NULL, /* filter, filterarg */
608 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
609 BUS_SPACE_UNRESTRICTED, /* nsegments */
610 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
612 NULL, NULL, /* lock, lockarg */
614 device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
619 * DMA tag for normal sized RX frames
621 if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
622 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
623 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
624 device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
629 * DMA tag for jumbo sized RX frames.
631 if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
632 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
633 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
634 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
639 * DMA tag for TX frames.
641 if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
642 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
643 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
644 NULL, NULL, &sc->tx_dmat)) {
645 device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
653 t3_sge_free(struct adapter * sc)
656 if (sc->tx_dmat != NULL)
657 bus_dma_tag_destroy(sc->tx_dmat);
659 if (sc->rx_jumbo_dmat != NULL)
660 bus_dma_tag_destroy(sc->rx_jumbo_dmat);
662 if (sc->rx_dmat != NULL)
663 bus_dma_tag_destroy(sc->rx_dmat);
665 if (sc->parent_dmat != NULL)
666 bus_dma_tag_destroy(sc->parent_dmat);
672 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
675 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
676 qs->rspq.polling = 0 /* p->polling */;
679 #if !defined(__i386__) && !defined(__amd64__)
681 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
683 struct refill_fl_cb_arg *cb_arg = arg;
685 cb_arg->error = error;
686 cb_arg->seg = segs[0];
692 * refill_fl - refill an SGE free-buffer list
693 * @sc: the controller softc
694 * @q: the free-list to refill
695 * @n: the number of new buffers to allocate
697 * (Re)populate an SGE free-buffer list with up to @n new packet buffers.
698  *	The caller must ensure that @n does not exceed the queue's capacity.
701 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
703 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
704 struct rx_desc *d = &q->desc[q->pidx];
705 struct refill_fl_cb_arg cb_arg;
713 * We allocate an uninitialized mbuf + cluster, mbuf is
714 * initialized after rx.
716 if (q->zone == zone_pack) {
717 if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
719 cl = m->m_ext.ext_buf;
721 if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
723 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
724 uma_zfree(q->zone, cl);
728 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
729 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
730 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
731 uma_zfree(q->zone, cl);
734 sd->flags |= RX_SW_DESC_MAP_CREATED;
736 #if !defined(__i386__) && !defined(__amd64__)
737 err = bus_dmamap_load(q->entry_tag, sd->map,
738 cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
740 if (err != 0 || cb_arg.error) {
741 if (q->zone != zone_pack)
742 uma_zfree(q->zone, cl);
747 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
749 sd->flags |= RX_SW_DESC_INUSE;
752 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
753 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
754 d->len_gen = htobe32(V_FLD_GEN1(q->gen));
755 d->gen2 = htobe32(V_FLD_GEN2(q->gen));
760 if (++q->pidx == q->size) {
771 if (q->db_pending >= 32) {
773 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
779 * free_rx_bufs - free the Rx buffers on an SGE free list
780  *	@sc: the controller softc
781 * @q: the SGE free list to clean up
783 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
784 * this queue should be stopped before calling this function.
787 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
789 u_int cidx = q->cidx;
791 while (q->credits--) {
792 struct rx_sw_desc *d = &q->sdesc[cidx];
794 if (d->flags & RX_SW_DESC_INUSE) {
795 bus_dmamap_unload(q->entry_tag, d->map);
796 bus_dmamap_destroy(q->entry_tag, d->map);
797 if (q->zone == zone_pack) {
798 m_init(d->m, M_NOWAIT, MT_DATA, M_EXT);
799 uma_zfree(zone_pack, d->m);
801 m_init(d->m, M_NOWAIT, MT_DATA, 0);
802 uma_zfree(zone_mbuf, d->m);
803 uma_zfree(q->zone, d->rxsd_cl);
809 if (++cidx == q->size)
815 __refill_fl(adapter_t *adap, struct sge_fl *fl)
817 refill_fl(adap, fl, min(16U, fl->size - fl->credits));
821 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
823 uint32_t reclaimable = fl->size - fl->credits;
826 refill_fl(adap, fl, min(max, reclaimable));
830 * recycle_rx_buf - recycle a receive buffer
831 * @adapter: the adapter
832 * @q: the SGE free list
833 * @idx: index of buffer to recycle
835 * Recycles the specified buffer on the given free list by adding it at
836 * the next available slot on the list.
839 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
841 struct rx_desc *from = &q->desc[idx];
842 struct rx_desc *to = &q->desc[q->pidx];
844 q->sdesc[q->pidx] = q->sdesc[idx];
845 to->addr_lo = from->addr_lo; // already big endian
846 to->addr_hi = from->addr_hi; // likewise
847 wmb(); /* necessary ? */
848 to->len_gen = htobe32(V_FLD_GEN1(q->gen));
849 to->gen2 = htobe32(V_FLD_GEN2(q->gen));
852 if (++q->pidx == q->size) {
856 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
860 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
865 *addr = segs[0].ds_addr;
869 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
870 bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
871 bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
873 size_t len = nelem * elem_size;
878 if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
879 BUS_SPACE_MAXADDR_32BIT,
880 BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
881 len, 0, NULL, NULL, tag)) != 0) {
882 device_printf(sc->dev, "Cannot allocate descriptor tag\n");
886 if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
888 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
892 bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
897 len = nelem * sw_size;
898 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
901 if (parent_entry_tag == NULL)
904 if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
905 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
906 NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
907 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
908 NULL, NULL, entry_tag)) != 0) {
909 device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
916 sge_slow_intr_handler(void *arg, int ncount)
920 t3_slow_intr_handler(sc);
921 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
922 (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
926 * sge_timer_cb - perform periodic maintenance of an SGE qset
927 * @data: the SGE queue set to maintain
929 * Runs periodically from a timer to perform maintenance of an SGE queue
930  *	set. It performs the following tasks:
932 * a) Cleans up any completed Tx descriptors that may still be pending.
933 * Normal descriptor cleanup happens when new packets are added to a Tx
934 * queue so this timer is relatively infrequent and does any cleanup only
935 * if the Tx queue has not seen any new packets in a while. We make a
936 * best effort attempt to reclaim descriptors, in that we don't wait
937 * around if we cannot get a queue's lock (which most likely is because
938 * someone else is queueing new packets and so will also handle the clean
939 * up). Since control queues use immediate data exclusively we don't
940 * bother cleaning them up here.
942 * b) Replenishes Rx queues that have run out due to memory shortage.
943 * Normally new Rx buffers are added when existing ones are consumed but
944 * when out of memory a queue can become empty. We try to add only a few
945  *	buffers here; the queue will be replenished fully as these new buffers
946 * are used up if memory shortage has subsided.
948 * c) Return coalesced response queue credits in case a response queue is
951 * d) Ring doorbells for T304 tunnel queues since we have seen doorbell
952 * fifo overflows and the FW doesn't implement any recovery scheme yet.
955 sge_timer_cb(void *arg)
958 if ((sc->flags & USING_MSIX) == 0) {
960 struct port_info *pi;
964 int reclaim_ofl, refill_rx;
966 if (sc->open_device_map == 0)
969 for (i = 0; i < sc->params.nports; i++) {
971 for (j = 0; j < pi->nqsets; j++) {
972 qs = &sc->sge.qs[pi->first_qset + j];
974 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
975 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
976 (qs->fl[1].credits < qs->fl[1].size));
977 if (reclaim_ofl || refill_rx) {
978 taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
985 if (sc->params.nports > 2) {
988 for_each_port(sc, i) {
989 struct port_info *pi = &sc->port[i];
991 t3_write_reg(sc, A_SG_KDOORBELL,
993 (FW_TUNNEL_SGEEC_START + pi->first_qset));
996 if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
997 sc->open_device_map != 0)
998 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1002 * This is meant to be a catch-all function to keep sge state private
1007 t3_sge_init_adapter(adapter_t *sc)
1009 callout_init(&sc->sge_timer_ch, 1);
1010 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1011 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
1016 t3_sge_reset_adapter(adapter_t *sc)
1018 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1023 t3_sge_init_port(struct port_info *pi)
1025 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
1030 * refill_rspq - replenish an SGE response queue
1031 * @adapter: the adapter
1032 * @q: the response queue to replenish
1033 * @credits: how many new responses to make available
1035 * Replenishes a response queue by making the supplied number of responses
1038 static __inline void
1039 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
1042 /* mbufs are allocated on demand when a rspq entry is processed. */
1043 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
1044 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
1048 sge_txq_reclaim_handler(void *arg, int ncount)
1050 struct sge_qset *qs = arg;
1053 for (i = 0; i < 3; i++)
1054 reclaim_completed_tx(qs, 16, i);
1058 sge_timer_reclaim(void *arg, int ncount)
1060 struct port_info *pi = arg;
1061 int i, nqsets = pi->nqsets;
1062 adapter_t *sc = pi->adapter;
1063 struct sge_qset *qs;
1066 KASSERT((sc->flags & USING_MSIX) == 0,
1067 ("can't call timer reclaim for msi-x"));
1069 for (i = 0; i < nqsets; i++) {
1070 qs = &sc->sge.qs[pi->first_qset + i];
1072 reclaim_completed_tx(qs, 16, TXQ_OFLD);
1073 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1074 &sc->sge.qs[0].rspq.lock;
1076 if (mtx_trylock(lock)) {
1077 /* XXX currently assume that we are *NOT* polling */
1078 uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
1080 if (qs->fl[0].credits < qs->fl[0].size - 16)
1081 __refill_fl(sc, &qs->fl[0]);
1082 if (qs->fl[1].credits < qs->fl[1].size - 16)
1083 __refill_fl(sc, &qs->fl[1]);
1085 if (status & (1 << qs->rspq.cntxt_id)) {
1086 if (qs->rspq.credits) {
1087 refill_rspq(sc, &qs->rspq, 1);
1089 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
1090 1 << qs->rspq.cntxt_id);
1099 * init_qset_cntxt - initialize an SGE queue set context info
1100 * @qs: the queue set
1101 * @id: the queue set id
1103 * Initializes the TIDs and context ids for the queues of a queue set.
1106 init_qset_cntxt(struct sge_qset *qs, u_int id)
1109 qs->rspq.cntxt_id = id;
1110 qs->fl[0].cntxt_id = 2 * id;
1111 qs->fl[1].cntxt_id = 2 * id + 1;
1112 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1113 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1114 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1115 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1116 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1118 /* XXX: a sane limit is needed instead of INT_MAX */
1119 mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
1120 mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
1121 mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
1126 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
1128 txq->in_use += ndesc;
1130 * XXX we don't handle stopping of queue
1131 * presumably start handles this when we bump against the end
1133 txqs->gen = txq->gen;
1134 txq->unacked += ndesc;
1135 txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
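	/*
	 * Ask the SGE for a completion roughly every 32 work requests: bit 5
	 * of the unacked counter is shifted into the WR_COMPL bit of the
	 * header, so it is set for one block of 32 WRs and clear for the next.
	 */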
1137 txqs->pidx = txq->pidx;
1140 if (((txqs->pidx > txq->cidx) &&
1141 (txq->pidx < txqs->pidx) &&
1142 (txq->pidx >= txq->cidx)) ||
1143 ((txqs->pidx < txq->cidx) &&
1144 (txq->pidx >= txq-> cidx)) ||
1145 ((txqs->pidx < txq->cidx) &&
1146 (txq->cidx < txqs->pidx)))
1147 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1148 txqs->pidx, txq->pidx, txq->cidx);
1150 if (txq->pidx >= txq->size) {
1151 txq->pidx -= txq->size;
1158 * calc_tx_descs - calculate the number of Tx descriptors for a packet
1159 * @m: the packet mbufs
1160 * @nsegs: the number of segments
1162 * Returns the number of Tx descriptors needed for the given Ethernet
1163 * packet. Ethernet packets require addition of WR and CPL headers.
1165 static __inline unsigned int
1166 calc_tx_descs(const struct mbuf *m, int nsegs)
1170 if (m->m_pkthdr.len <= PIO_LEN)
1173 flits = sgl_len(nsegs) + 2;
1174 if (m->m_pkthdr.csum_flags & CSUM_TSO)
1177 return flits_to_desc(flits);
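/*
 * Example: a non-TSO packet mapped to 4 DMA segments needs
 * sgl_len(4) + 2 = 8 flits and thus a single descriptor; packets small
 * enough for PIO (<= PIO_LEN) are written as immediate data and are
 * assumed here to take exactly one descriptor.
 */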
1181 * make_sgl - populate a scatter/gather list for a packet
1182 * @sgp: the SGL to populate
1183 * @segs: the packet dma segments
1184 * @nsegs: the number of segments
1186 * Generates a scatter/gather list for the buffers that make up a packet
1187 * and returns the SGL size in 8-byte words. The caller must size the SGL
1190 static __inline void
1191 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1195 for (idx = 0, i = 0; i < nsegs; i++) {
1197 * firmware doesn't like empty segments
1199 if (segs[i].ds_len == 0)
1204 sgp->len[idx] = htobe32(segs[i].ds_len);
1205 sgp->addr[idx] = htobe64(segs[i].ds_addr);
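		/*
		 * Each sg_ent holds two length/address pairs, so consecutive
		 * segments alternate between slot 0 and slot 1 before
		 * advancing to the next entry.
		 */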
1216 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1217 * @adap: the adapter
1220  *	Ring the doorbell if a Tx queue is asleep. There is a natural race
1221  *	where the HW may go to sleep just after we checked; in that case
1222  *	the interrupt handler will detect the outstanding TX packet
1223  *	and ring the doorbell for us.
1225 * When GTS is disabled we unconditionally ring the doorbell.
1227 static __inline void
1228 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1231 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1232 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1233 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1235 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1238 t3_write_reg(adap, A_SG_KDOORBELL,
1239 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1242 if (mustring || ++q->db_pending >= 32) {
1243 wmb(); /* write descriptors before telling HW */
1244 t3_write_reg(adap, A_SG_KDOORBELL,
1245 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
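	/*
	 * Without GTS the doorbell writes are batched: a ring is issued only
	 * when the caller insists (mustring) or once 32 descriptors have
	 * accumulated since the last ring; any remainder is flushed via
	 * db_pending by the transmit paths.
	 */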
1251 static __inline void
1252 wr_gen2(struct tx_desc *d, unsigned int gen)
1254 #if SGE_NUM_GENBITS == 2
1255 d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1260 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1261 * @ndesc: number of Tx descriptors spanned by the SGL
1262 * @txd: first Tx descriptor to be written
1263 * @txqs: txq state (generation and producer index)
1264 * @txq: the SGE Tx queue
1266 * @flits: number of flits to the start of the SGL in the first descriptor
1267 * @sgl_flits: the SGL size in flits
1268 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1269 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1271 * Write a work request header and an associated SGL. If the SGL is
1272 * small enough to fit into one Tx descriptor it has already been written
1273 * and we just need to write the WR header. Otherwise we distribute the
1274 * SGL across the number of descriptors it spans.
1277 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1278 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1279 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1282 struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1283 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1285 if (__predict_true(ndesc == 1)) {
1286 set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1287 V_WR_SGLSFLT(flits)) | wr_hi,
1288 htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1291 wr_gen2(txd, txqs->gen);
1294 unsigned int ogen = txqs->gen;
1295 const uint64_t *fp = (const uint64_t *)sgl;
1296 struct work_request_hdr *wp = wrp;
1298 wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1299 V_WR_SGLSFLT(flits)) | wr_hi;
1302 unsigned int avail = WR_FLITS - flits;
1304 if (avail > sgl_flits)
1306 memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1315 if (++txqs->pidx == txq->size) {
1323 * when the head of the mbuf chain
1324 * is freed all clusters will be freed
1327 wrp = (struct work_request_hdr *)txd;
1328 wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1329 V_WR_SGLSFLT(1)) | wr_hi;
1330 wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1332 V_WR_GEN(txqs->gen)) | wr_lo;
1333 wr_gen2(txd, txqs->gen);
1336 wrp->wrh_hi |= htonl(F_WR_EOP);
1338 wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1339 wr_gen2((struct tx_desc *)wp, ogen);
1343 /* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1344 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
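/* 14-byte Ethernet header plus minimal 20-byte IP and TCP headers, 54 bytes. */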
1346 #define GET_VTAG(cntrl, m) \
1348 if ((m)->m_flags & M_VLANTAG) \
1349 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1353 t3_encap(struct sge_qset *qs, struct mbuf **m)
1357 struct sge_txq *txq;
1358 struct txq_state txqs;
1359 struct port_info *pi;
1360 unsigned int ndesc, flits, cntrl, mlen;
1361 int err, nsegs, tso_info = 0;
1363 struct work_request_hdr *wrp;
1364 struct tx_sw_desc *txsd;
1365 struct sg_ent *sgp, *sgl;
1366 uint32_t wr_hi, wr_lo, sgl_flits;
1367 bus_dma_segment_t segs[TX_MAX_SEGS];
1369 struct tx_desc *txd;
1373 txq = &qs->txq[TXQ_ETH];
1374 txd = &txq->desc[txq->pidx];
1375 txsd = &txq->sdesc[txq->pidx];
1381 mtx_assert(&qs->lock, MA_OWNED);
1382 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1383 KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));
1385 if (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1386 m0->m_pkthdr.csum_flags & (CSUM_TSO))
1387 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1389 if (m0->m_nextpkt != NULL) {
1390 busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1394 if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1395 &m0, segs, &nsegs))) {
1397 printf("failed ... err=%d\n", err);
1400 mlen = m0->m_pkthdr.len;
1401 ndesc = calc_tx_descs(m0, nsegs);
1403 txq_prod(txq, ndesc, &txqs);
1405 KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1408 if (m0->m_nextpkt != NULL) {
1409 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1413 			panic("trying to coalesce %d packets into one WR", nsegs);
1414 txq->txq_coalesced += nsegs;
1415 wrp = (struct work_request_hdr *)txd;
1416 flits = nsegs*2 + 1;
1418 for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1419 struct cpl_tx_pkt_batch_entry *cbe;
1421 uint32_t *hflit = (uint32_t *)&flit;
1422 int cflags = m0->m_pkthdr.csum_flags;
1424 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1425 GET_VTAG(cntrl, m0);
1426 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1427 if (__predict_false(!(cflags & CSUM_IP)))
1428 cntrl |= F_TXPKT_IPCSUM_DIS;
1429 if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
1430 CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1431 cntrl |= F_TXPKT_L4CSUM_DIS;
1433 hflit[0] = htonl(cntrl);
1434 hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1435 flit |= htobe64(1 << 24);
1436 cbe = &cpl_batch->pkt_entry[i];
1437 cbe->cntrl = hflit[0];
1438 cbe->len = hflit[1];
1439 cbe->addr = htobe64(segs[i].ds_addr);
1442 wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1443 V_WR_SGLSFLT(flits)) |
1444 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1445 wr_lo = htonl(V_WR_LEN(flits) |
1446 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1447 set_wr_hdr(wrp, wr_hi, wr_lo);
1449 ETHER_BPF_MTAP(pi->ifp, m0);
1450 wr_gen2(txd, txqs.gen);
1451 check_ring_tx_db(sc, txq, 0);
1453 } else if (tso_info) {
1455 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1456 struct ether_header *eh;
1461 GET_VTAG(cntrl, m0);
1462 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1463 hdr->cntrl = htonl(cntrl);
1464 hdr->len = htonl(mlen | 0x80000000);
1466 if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1467 printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
1468 m0, mlen, m0->m_pkthdr.tso_segsz,
1469 (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
1470 panic("tx tso packet too small");
1473 /* Make sure that ether, ip, tcp headers are all in m0 */
1474 if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1475 m0 = m_pullup(m0, TCPPKTHDRSIZE);
1476 if (__predict_false(m0 == NULL)) {
1477 /* XXX panic probably an overreaction */
1478 panic("couldn't fit header into mbuf");
1482 eh = mtod(m0, struct ether_header *);
1483 eth_type = eh->ether_type;
1484 if (eth_type == htons(ETHERTYPE_VLAN)) {
1485 struct ether_vlan_header *evh = (void *)eh;
1487 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1489 eth_type = evh->evl_proto;
1491 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1495 if (eth_type == htons(ETHERTYPE_IP)) {
1496 struct ip *ip = l3hdr;
1498 tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1499 tcp = (struct tcphdr *)(ip + 1);
1500 } else if (eth_type == htons(ETHERTYPE_IPV6)) {
1501 struct ip6_hdr *ip6 = l3hdr;
1503 KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1504 ("%s: CSUM_TSO with ip6_nxt %d",
1505 __func__, ip6->ip6_nxt));
1507 tso_info |= F_LSO_IPV6;
1508 tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1509 tcp = (struct tcphdr *)(ip6 + 1);
1511 panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1513 tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1514 hdr->lso_info = htonl(tso_info);
1516 if (__predict_false(mlen <= PIO_LEN)) {
1518 * pkt not undersized but fits in PIO_LEN
1519 * Indicates a TSO bug at the higher levels.
1522 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1523 flits = (mlen + 7) / 8 + 3;
1524 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1525 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1526 F_WR_SOP | F_WR_EOP | txqs.compl);
1527 wr_lo = htonl(V_WR_LEN(flits) |
1528 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1529 set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1531 ETHER_BPF_MTAP(pi->ifp, m0);
1532 wr_gen2(txd, txqs.gen);
1533 check_ring_tx_db(sc, txq, 0);
1539 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1541 GET_VTAG(cntrl, m0);
1542 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1543 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1544 cntrl |= F_TXPKT_IPCSUM_DIS;
1545 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
1546 CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1547 cntrl |= F_TXPKT_L4CSUM_DIS;
1548 cpl->cntrl = htonl(cntrl);
1549 cpl->len = htonl(mlen | 0x80000000);
1551 if (mlen <= PIO_LEN) {
1553 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1554 flits = (mlen + 7) / 8 + 2;
1556 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1557 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1558 F_WR_SOP | F_WR_EOP | txqs.compl);
1559 wr_lo = htonl(V_WR_LEN(flits) |
1560 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1561 set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1563 ETHER_BPF_MTAP(pi->ifp, m0);
1564 wr_gen2(txd, txqs.gen);
1565 check_ring_tx_db(sc, txq, 0);
1571 wrp = (struct work_request_hdr *)txd;
1572 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1573 make_sgl(sgp, segs, nsegs);
1575 sgl_flits = sgl_len(nsegs);
1577 ETHER_BPF_MTAP(pi->ifp, m0);
1579 KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1580 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1581 wr_lo = htonl(V_WR_TID(txq->token));
1582 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1583 sgl_flits, wr_hi, wr_lo);
1584 check_ring_tx_db(sc, txq, 0);
1590 cxgb_tx_watchdog(void *arg)
1592 struct sge_qset *qs = arg;
1593 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1595 if (qs->coalescing != 0 &&
1596 (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1599 else if (qs->coalescing == 0 &&
1600 (txq->in_use >= cxgb_tx_coalesce_enable_start))
1602 if (TXQ_TRYLOCK(qs)) {
1603 qs->qs_flags |= QS_FLUSHING;
1604 cxgb_start_locked(qs);
1605 qs->qs_flags &= ~QS_FLUSHING;
1608 if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1609 callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1610 qs, txq->txq_watchdog.c_cpu);
1614 cxgb_tx_timeout(void *arg)
1616 struct sge_qset *qs = arg;
1617 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1619 if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1621 if (TXQ_TRYLOCK(qs)) {
1622 qs->qs_flags |= QS_TIMEOUT;
1623 cxgb_start_locked(qs);
1624 qs->qs_flags &= ~QS_TIMEOUT;
1630 cxgb_start_locked(struct sge_qset *qs)
1632 struct mbuf *m_head = NULL;
1633 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1634 struct port_info *pi = qs->port;
1635 struct ifnet *ifp = pi->ifp;
1637 if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1638 reclaim_completed_tx(qs, 0, TXQ_ETH);
1640 if (!pi->link_config.link_ok) {
1644 TXQ_LOCK_ASSERT(qs);
1645 while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1646 pi->link_config.link_ok) {
1647 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1649 if (txq->size - txq->in_use <= TX_MAX_DESC)
1652 if ((m_head = cxgb_dequeue(qs)) == NULL)
1655 		 *  Encapsulation can modify our pointer, and/or make it
1656 		 *  NULL on failure.  In that event, we can't requeue.
1658 if (t3_encap(qs, &m_head) || m_head == NULL)
1664 if (txq->db_pending)
1665 check_ring_tx_db(pi->adapter, txq, 1);
1667 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1668 pi->link_config.link_ok)
1669 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1670 qs, txq->txq_timer.c_cpu);
1676 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1678 struct port_info *pi = qs->port;
1679 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1680 struct buf_ring *br = txq->txq_mr;
1683 avail = txq->size - txq->in_use;
1684 TXQ_LOCK_ASSERT(qs);
1687 * We can only do a direct transmit if the following are true:
1688 * - we aren't coalescing (ring < 3/4 full)
1689 * - the link is up -- checked in caller
1690 * - there are no packets enqueued already
1691 * - there is space in hardware transmit queue
1693 if (check_pkt_coalesce(qs) == 0 &&
1694 !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1695 if (t3_encap(qs, &m)) {
1697 (error = drbr_enqueue(ifp, br, m)) != 0)
1700 if (txq->db_pending)
1701 check_ring_tx_db(pi->adapter, txq, 1);
1704 * We've bypassed the buf ring so we need to update
1705 * the stats directly
1707 txq->txq_direct_packets++;
1708 txq->txq_direct_bytes += m->m_pkthdr.len;
1710 } else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1713 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1714 if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1715 (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1716 cxgb_start_locked(qs);
1717 else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1718 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1719 qs, txq->txq_timer.c_cpu);
1724 cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1726 struct sge_qset *qs;
1727 struct port_info *pi = ifp->if_softc;
1728 int error, qidx = pi->first_qset;
1730 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
1731 ||(!pi->link_config.link_ok)) {
1736 /* check if flowid is set */
1737 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1738 qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1740 qs = &pi->adapter->sge.qs[qidx];
1742 if (TXQ_TRYLOCK(qs)) {
1744 error = cxgb_transmit_locked(ifp, qs, m);
1747 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1752 cxgb_qflush(struct ifnet *ifp)
1755 * flush any enqueued mbufs in the buf_rings
1756 * and in the transmit queues
1763 * write_imm - write a packet into a Tx descriptor as immediate data
1764 * @d: the Tx descriptor to write
1766 * @len: the length of packet data to write as immediate data
1767 * @gen: the generation bit value to write
1769 * Writes a packet as immediate data into a Tx descriptor. The packet
1770 * contains a work request at its beginning. We must write the packet
1771  *	carefully so the SGE doesn't read it accidentally before it's written in
1774 static __inline void
1775 write_imm(struct tx_desc *d, caddr_t src,
1776 unsigned int len, unsigned int gen)
1778 struct work_request_hdr *from = (struct work_request_hdr *)src;
1779 struct work_request_hdr *to = (struct work_request_hdr *)d;
1780 uint32_t wr_hi, wr_lo;
1782 KASSERT(len <= WR_LEN && len >= sizeof(*from),
1783 ("%s: invalid len %d", __func__, len));
1785 memcpy(&to[1], &from[1], len - sizeof(*from));
1786 wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1787 V_WR_BCNTLFLT(len & 7));
1788 wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
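	/*
	 * The WR length is expressed in flits (len rounded up to 8 bytes)
	 * while V_WR_BCNTLFLT(len & 7) records the number of valid bytes in
	 * the last flit, so arbitrary immediate lengths up to WR_LEN fit.
	 */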
1789 set_wr_hdr(to, wr_hi, wr_lo);
1795 * check_desc_avail - check descriptor availability on a send queue
1796 * @adap: the adapter
1798 * @m: the packet needing the descriptors
1799 * @ndesc: the number of Tx descriptors needed
1800 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1802 * Checks if the requested number of Tx descriptors is available on an
1803 * SGE send queue. If the queue is already suspended or not enough
1804 * descriptors are available the packet is queued for later transmission.
1805 * Must be called with the Tx queue locked.
1807 * Returns 0 if enough descriptors are available, 1 if there aren't
1808 * enough descriptors and the packet has been queued, and 2 if the caller
1809 * needs to retry because there weren't enough descriptors at the
1810  *	beginning of the call but some freed up in the meantime.
1813 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1814 struct mbuf *m, unsigned int ndesc,
1818 * XXX We currently only use this for checking the control queue
1819 * the control queue is only used for binding qsets which happens
1820 * at init time so we are guaranteed enough descriptors
1822 if (__predict_false(mbufq_len(&q->sendq))) {
1823 addq_exit: (void )mbufq_enqueue(&q->sendq, m);
1826 if (__predict_false(q->size - q->in_use < ndesc)) {
1828 struct sge_qset *qs = txq_to_qset(q, qid);
1830 setbit(&qs->txq_stopped, qid);
1831 if (should_restart_tx(q) &&
1832 test_and_clear_bit(qid, &qs->txq_stopped))
1843 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1844 * @q: the SGE control Tx queue
1846 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1847 * that send only immediate data (presently just the control queues) and
1848 * thus do not have any mbufs
1850 static __inline void
1851 reclaim_completed_tx_imm(struct sge_txq *q)
1853 unsigned int reclaim = q->processed - q->cleaned;
1855 q->in_use -= reclaim;
1856 q->cleaned += reclaim;
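	/*
	 * Immediate-data queues have no DMA maps or mbufs attached to their
	 * descriptors, so "reclaiming" here is purely counter bookkeeping.
	 */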
1860 * ctrl_xmit - send a packet through an SGE control Tx queue
1861 * @adap: the adapter
1862 * @q: the control queue
1865 * Send a packet through an SGE control Tx queue. Packets sent through
1866 * a control queue must fit entirely as immediate data in a single Tx
1867 * descriptor and have no page fragments.
1870 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1873 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1874 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1876 KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1878 wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1879 wrp->wrh_lo = htonl(V_WR_TID(q->token));
1882 again: reclaim_completed_tx_imm(q);
1884 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1885 if (__predict_false(ret)) {
1892 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1895 if (++q->pidx >= q->size) {
1901 t3_write_reg(adap, A_SG_KDOORBELL,
1902 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1910 * restart_ctrlq - restart a suspended control queue
1911  *	@qs: the queue set containing the control queue
1913 * Resumes transmission on a suspended Tx control queue.
1916 restart_ctrlq(void *data, int npending)
1919 struct sge_qset *qs = (struct sge_qset *)data;
1920 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1921 adapter_t *adap = qs->port->adapter;
1924 again: reclaim_completed_tx_imm(q);
1926 while (q->in_use < q->size &&
1927 (m = mbufq_dequeue(&q->sendq)) != NULL) {
1929 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1932 if (++q->pidx >= q->size) {
1938 if (mbufq_len(&q->sendq)) {
1939 setbit(&qs->txq_stopped, TXQ_CTRL);
1941 if (should_restart_tx(q) &&
1942 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1947 t3_write_reg(adap, A_SG_KDOORBELL,
1948 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1953 * Send a management message through control queue 0
1956 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1958 return ctrl_xmit(adap, &adap->sge.qs[0], m);
1962 * free_qset - free the resources of an SGE queue set
1963 * @sc: the controller owning the queue set
1966 * Release the HW and SW resources associated with an SGE queue set, such
1967 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
1968 * queue set must be quiesced prior to calling this.
1971 t3_free_qset(adapter_t *sc, struct sge_qset *q)
1975 reclaim_completed_tx(q, 0, TXQ_ETH);
1976 if (q->txq[TXQ_ETH].txq_mr != NULL)
1977 buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
1978 if (q->txq[TXQ_ETH].txq_ifq != NULL) {
1979 ifq_delete(q->txq[TXQ_ETH].txq_ifq);
1980 free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
1983 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1984 if (q->fl[i].desc) {
1985 mtx_lock_spin(&sc->sge.reg_lock);
1986 t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1987 mtx_unlock_spin(&sc->sge.reg_lock);
1988 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1989 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1991 bus_dma_tag_destroy(q->fl[i].desc_tag);
1992 bus_dma_tag_destroy(q->fl[i].entry_tag);
1994 if (q->fl[i].sdesc) {
1995 free_rx_bufs(sc, &q->fl[i]);
1996 free(q->fl[i].sdesc, M_DEVBUF);
2000 mtx_unlock(&q->lock);
2001 MTX_DESTROY(&q->lock);
2002 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2003 if (q->txq[i].desc) {
2004 mtx_lock_spin(&sc->sge.reg_lock);
2005 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2006 mtx_unlock_spin(&sc->sge.reg_lock);
2007 bus_dmamap_unload(q->txq[i].desc_tag,
2008 q->txq[i].desc_map);
2009 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2010 q->txq[i].desc_map);
2011 bus_dma_tag_destroy(q->txq[i].desc_tag);
2012 bus_dma_tag_destroy(q->txq[i].entry_tag);
2014 if (q->txq[i].sdesc) {
2015 free(q->txq[i].sdesc, M_DEVBUF);
2020 mtx_lock_spin(&sc->sge.reg_lock);
2021 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2022 mtx_unlock_spin(&sc->sge.reg_lock);
2024 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2025 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2027 bus_dma_tag_destroy(q->rspq.desc_tag);
2028 MTX_DESTROY(&q->rspq.lock);
2031 #if defined(INET6) || defined(INET)
2032 tcp_lro_free(&q->lro.ctrl);
2035 bzero(q, sizeof(*q));
2039 * t3_free_sge_resources - free SGE resources
2040 * @sc: the adapter softc
2042 * Frees resources used by the SGE queue sets.
2045 t3_free_sge_resources(adapter_t *sc, int nqsets)
2049 for (i = 0; i < nqsets; ++i) {
2050 TXQ_LOCK(&sc->sge.qs[i]);
2051 t3_free_qset(sc, &sc->sge.qs[i]);
2056 * t3_sge_start - enable SGE
2057 * @sc: the controller softc
2059 * Enables the SGE for DMAs. This is the last step in starting packet
2063 t3_sge_start(adapter_t *sc)
2065 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2069 * t3_sge_stop - disable SGE operation
2072  *	Disables the DMA engine.  This can be called in emergencies (e.g.,
2073 * from error interrupts) or from normal process context. In the latter
2074 * case it also disables any pending queue restart tasklets. Note that
2075 * if it is called in interrupt context it cannot disable the restart
2076 * tasklets as it cannot wait, however the tasklets will have no effect
2077 * since the doorbells are disabled and the driver will call this again
2078 * later from process context, at which time the tasklets will be stopped
2079 * if they are still running.
2082 t3_sge_stop(adapter_t *sc)
2086 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2091 for (nqsets = i = 0; i < (sc)->params.nports; i++)
2092 nqsets += sc->port[i].nqsets;
2098 for (i = 0; i < nqsets; ++i) {
2099 struct sge_qset *qs = &sc->sge.qs[i];
2101 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2102 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2108 * t3_free_tx_desc - reclaims Tx descriptors and their buffers
2109 * @adapter: the adapter
2110 * @q: the Tx queue to reclaim descriptors from
2111 * @reclaimable: the number of descriptors to reclaim
2112 * @m_vec_size: maximum number of buffers to reclaim
2113 * @desc_reclaimed: returns the number of descriptors reclaimed
2115 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
2116 * Tx buffers. Called with the Tx queue lock held.
2118  *	Returns the number of buffers reclaimed.
2121 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2123 struct tx_sw_desc *txsd;
2124 unsigned int cidx, mask;
2125 struct sge_txq *q = &qs->txq[queue];
2128 T3_TRACE2(sc->tb[q->cntxt_id & 7],
2129 "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2133 txsd = &q->sdesc[cidx];
2135 mtx_assert(&qs->lock, MA_OWNED);
2136 while (reclaimable--) {
2137 prefetch(q->sdesc[(cidx + 1) & mask].m);
2138 prefetch(q->sdesc[(cidx + 2) & mask].m);
2140 if (txsd->m != NULL) {
2141 if (txsd->flags & TX_SW_DESC_MAPPED) {
2142 bus_dmamap_unload(q->entry_tag, txsd->map);
2143 txsd->flags &= ~TX_SW_DESC_MAPPED;
2145 m_freem_list(txsd->m);
2151 if (++cidx == q->size) {
2161 * is_new_response - check if a response is newly written
2162 * @r: the response descriptor
2163 * @q: the response queue
2165 * Returns true if a response descriptor contains a yet unprocessed
2169 is_new_response(const struct rsp_desc *r,
2170 const struct sge_rspq *q)
2172 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
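/*
 * The SGE flips F_RSPD_GEN2 on every pass around the response ring, so a
 * descriptor whose generation bit matches the queue's expected value was
 * written by the hardware since we last visited this slot.
 */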
2175 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2176 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2177 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2178 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2179 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2181 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2182 #define NOMEM_INTR_DELAY 2500
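/* i.e. 2500 * 0.1us = 250us before the next interrupt. */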
2186 * write_ofld_wr - write an offload work request
2187 * @adap: the adapter
2188 * @m: the packet to send
2190 * @pidx: index of the first Tx descriptor to write
2191 * @gen: the generation value to use
2192 * @ndesc: number of descriptors the packet will occupy
2194 * Write an offload work request to send the supplied packet. The packet
2195 * data already carry the work request with most fields populated.
2198 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2199 unsigned int pidx, unsigned int gen, unsigned int ndesc)
2201 unsigned int sgl_flits, flits;
2202 int i, idx, nsegs, wrlen;
2203 struct work_request_hdr *from;
2204 struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2205 struct tx_desc *d = &q->desc[pidx];
2206 struct txq_state txqs;
2207 struct sglist_seg *segs;
2208 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2211 from = (void *)(oh + 1); /* Start of WR within mbuf */
2212 wrlen = m->m_len - sizeof(*oh);
2214 if (!(oh->flags & F_HDR_SGL)) {
2215 write_imm(d, (caddr_t)from, wrlen, gen);
2218 * mbuf with "real" immediate tx data will be enqueue_wr'd by
2219 * t3_push_frames and freed in wr_ack. Others, like those sent
2220 * down by close_conn, t3_send_reset, etc. should be freed here.
2222 if (!(oh->flags & F_HDR_DF))
2227 memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2231 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2233 nsegs = sgl->sg_nseg;
2234 segs = sgl->sg_segs;
2235 for (idx = 0, i = 0; i < nsegs; i++) {
2236 KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2239 sgp->len[idx] = htobe32(segs[i].ss_len);
2240 sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2248 sgl_flits = sgl_len(nsegs);
2253 write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2254 from->wrh_hi, from->wrh_lo);
2258 * ofld_xmit - send a packet through an offload queue
2259 * @adap: the adapter
2260 * @q: the Tx offload queue
2263 * Send an offload packet through an SGE offload queue.
2266 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2270 unsigned int pidx, gen;
2271 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2272 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2274 ndesc = G_HDR_NDESC(oh->flags);
2277 again: reclaim_completed_tx(qs, 16, TXQ_OFLD);
2278 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2279 if (__predict_false(ret)) {
2291 if (q->pidx >= q->size) {
2296 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2297 check_ring_tx_db(adap, q, 1);
2304 * restart_offloadq - restart a suspended offload queue
2305 * @qs: the queue set containing the offload queue
2307 * Resumes transmission on a suspended Tx offload queue.
2310 restart_offloadq(void *data, int npending)
2313 struct sge_qset *qs = data;
2314 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2315 adapter_t *adap = qs->port->adapter;
2319 again: cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2321 while ((m = mbufq_first(&q->sendq)) != NULL) {
2322 unsigned int gen, pidx;
2323 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2324 unsigned int ndesc = G_HDR_NDESC(oh->flags);
2326 if (__predict_false(q->size - q->in_use < ndesc)) {
2327 setbit(&qs->txq_stopped, TXQ_OFLD);
2328 if (should_restart_tx(q) &&
2329 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2339 if (q->pidx >= q->size) {
2344 (void)mbufq_dequeue(&q->sendq);
2346 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2350 set_bit(TXQ_RUNNING, &q->flags);
2351 set_bit(TXQ_LAST_PKT_DB, &q->flags);
2355 t3_write_reg(adap, A_SG_KDOORBELL,
2356 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2360 * t3_offload_tx - send an offload packet
2363 * Sends an offload packet. The ofld_hdr at the start of the mbuf selects
2364 * the destination: G_HDR_QSET picks the queue set, and F_HDR_CTRL decides
2365 * whether the packet goes to the control queue or the offload queue.
2368 t3_offload_tx(struct adapter *sc, struct mbuf *m)
2370 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2371 struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2373 if (oh->flags & F_HDR_CTRL) {
2374 m_adj(m, sizeof (*oh)); /* trim ofld_hdr off */
2375 return (ctrl_xmit(sc, qs, m));
2377 return (ofld_xmit(sc, qs, m));
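/*
 * Added note: a packet with F_HDR_CTRL set has its ofld_hdr trimmed and is
 * sent on the control queue; all other packets keep the header (it carries
 * the descriptor count and SGL flag) and go through ofld_xmit() on the
 * offload queue of the selected qset.
 */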
2382 restart_tx(struct sge_qset *qs)
2384 struct adapter *sc = qs->port->adapter;
2386 if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2387 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2388 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2389 qs->txq[TXQ_OFLD].restarts++;
2390 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2393 if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2394 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2395 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2396 qs->txq[TXQ_CTRL].restarts++;
2397 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2402 * t3_sge_alloc_qset - initialize an SGE queue set
2403 * @sc: the controller softc
2404 * @id: the queue set id
2405 * @nports: how many Ethernet ports will be using this queue set
2406 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2407 * @p: configuration parameters for this queue set
2408 * @ntxq: number of Tx queues for the queue set
2409 * @pi: port info for queue set
2411 * Allocate resources and initialize an SGE queue set. A queue set
2412 * comprises a response queue, two Rx free-buffer queues, and up to 3
2413 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2414 * queue, offload queue, and control queue.
2417 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2418 const struct qset_params *p, int ntxq, struct port_info *pi)
2420 struct sge_qset *q = &sc->sge.qs[id];
2423 MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2427 if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2428 M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2429 device_printf(sc->dev, "failed to allocate mbuf ring\n");
2432 if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2433 M_NOWAIT | M_ZERO)) == NULL) {
2434 device_printf(sc->dev, "failed to allocate ifq\n");
2437 ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2438 callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2439 callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2440 q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2441 q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2443 init_qset_cntxt(q, id);
2445 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2446 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2447 &q->fl[0].desc, &q->fl[0].sdesc,
2448 &q->fl[0].desc_tag, &q->fl[0].desc_map,
2449 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2450 printf("error %d from alloc ring fl0\n", ret);
2454 if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2455 sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2456 &q->fl[1].desc, &q->fl[1].sdesc,
2457 &q->fl[1].desc_tag, &q->fl[1].desc_map,
2458 sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2459 printf("error %d from alloc ring fl1\n", ret);
2463 if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2464 &q->rspq.phys_addr, &q->rspq.desc, NULL,
2465 &q->rspq.desc_tag, &q->rspq.desc_map,
2466 NULL, NULL)) != 0) {
2467 printf("error %d from alloc ring rspq\n", ret);
2471 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2472 device_get_unit(sc->dev), irq_vec_idx);
2473 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2475 for (i = 0; i < ntxq; ++i) {
2476 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2478 if ((ret = alloc_ring(sc, p->txq_size[i],
2479 sizeof(struct tx_desc), sz,
2480 &q->txq[i].phys_addr, &q->txq[i].desc,
2481 &q->txq[i].sdesc, &q->txq[i].desc_tag,
2482 &q->txq[i].desc_map,
2483 sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2484 printf("error %d from alloc ring tx %i\n", ret, i);
2487 mbufq_init(&q->txq[i].sendq, INT_MAX);
2489 q->txq[i].size = p->txq_size[i];
2493 TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2495 TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2496 TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2497 TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2499 q->fl[0].gen = q->fl[1].gen = 1;
2500 q->fl[0].size = p->fl_size;
2501 q->fl[1].size = p->jumbo_size;
2505 q->rspq.size = p->rspq_size;
2507 q->txq[TXQ_ETH].stop_thres = nports *
2508 flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
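	/*
	 * Added note: the threshold above reserves, per port, enough
	 * descriptors for one maximally fragmented packet (TX_MAX_SEGS
	 * segments plus the WR header flits), so transmission is suspended
	 * before the ring can no longer hold a worst-case frame.
	 */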
2510 q->fl[0].buf_size = MCLBYTES;
2511 q->fl[0].zone = zone_pack;
2512 q->fl[0].type = EXT_PACKET;
2514 if (p->jumbo_buf_size == MJUM16BYTES) {
2515 q->fl[1].zone = zone_jumbo16;
2516 q->fl[1].type = EXT_JUMBO16;
2517 } else if (p->jumbo_buf_size == MJUM9BYTES) {
2518 q->fl[1].zone = zone_jumbo9;
2519 q->fl[1].type = EXT_JUMBO9;
2520 } else if (p->jumbo_buf_size == MJUMPAGESIZE) {
2521 q->fl[1].zone = zone_jumbop;
2522 q->fl[1].type = EXT_JUMBOP;
2524 KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2528 q->fl[1].buf_size = p->jumbo_buf_size;
2530 /* Allocate and setup the lro_ctrl structure */
2531 q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2532 #if defined(INET6) || defined(INET)
2533 ret = tcp_lro_init(&q->lro.ctrl);
2535 printf("error %d from tcp_lro_init\n", ret);
2539 q->lro.ctrl.ifp = pi->ifp;
2541 mtx_lock_spin(&sc->sge.reg_lock);
2542 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2543 q->rspq.phys_addr, q->rspq.size,
2544 q->fl[0].buf_size, 1, 0);
2546 printf("error %d from t3_sge_init_rspcntxt\n", ret);
2550 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2551 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2552 q->fl[i].phys_addr, q->fl[i].size,
2553 q->fl[i].buf_size, p->cong_thres, 1,
2556 printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2561 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2562 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2563 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2566 printf("error %d from t3_sge_init_ecntxt\n", ret);
2571 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2572 USE_GTS, SGE_CNTXT_OFLD, id,
2573 q->txq[TXQ_OFLD].phys_addr,
2574 q->txq[TXQ_OFLD].size, 0, 1, 0);
2576 printf("error %d from t3_sge_init_ecntxt\n", ret);
2582 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2584 q->txq[TXQ_CTRL].phys_addr,
2585 q->txq[TXQ_CTRL].size,
2586 q->txq[TXQ_CTRL].token, 1, 0);
2588 printf("error %d from t3_sge_init_ecntxt\n", ret);
2593 mtx_unlock_spin(&sc->sge.reg_lock);
2594 t3_update_qset_coalesce(q, p);
2596 refill_fl(sc, &q->fl[0], q->fl[0].size);
2597 refill_fl(sc, &q->fl[1], q->fl[1].size);
2598 refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2600 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2601 V_NEWTIMER(q->rspq.holdoff_tmr));
2606 mtx_unlock_spin(&sc->sge.reg_lock);
2609 t3_free_qset(sc, q);
2615 * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
2616 ethernet data. Checksums verified by the hardware and any VLAN tag
2617 are recorded in the mbuf here.
2620 t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
2622 struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2623 struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2624 struct ifnet *ifp = pi->ifp;
2626 if (cpl->vlan_valid) {
2627 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2628 m->m_flags |= M_VLANTAG;
2631 m->m_pkthdr.rcvif = ifp;
2633 * adjust after conversion to mbuf chain
2635 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2636 m->m_len -= (sizeof(*cpl) + ethpad);
2637 m->m_data += (sizeof(*cpl) + ethpad);
2639 if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
2640 struct ether_header *eh = mtod(m, void *);
2643 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2644 struct ether_vlan_header *evh = mtod(m, void *);
2646 eh_type = evh->evl_proto;
2648 eh_type = eh->ether_type;
2650 if (ifp->if_capenable & IFCAP_RXCSUM &&
2651 eh_type == htons(ETHERTYPE_IP)) {
2652 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
2653 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2654 m->m_pkthdr.csum_data = 0xffff;
2655 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2656 eh_type == htons(ETHERTYPE_IPV6)) {
2657 m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | CSUM_PSEUDO_HDR);
2659 m->m_pkthdr.csum_data = 0xffff;
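/*
 * Added note: csum_data = 0xffff combined with CSUM_DATA_VALID and
 * CSUM_PSEUDO_HDR tells the stack that the hardware verified the L4
 * checksum, so no software checksum pass is needed.
 */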
2665 * get_packet - return the next ingress packet buffer from a free list
2666 * @adap: the adapter that received the packet
2667 * @drop_thres: # of remaining buffers before we start dropping packets
2668 * @qs: the qset that the SGE free list holding the packet belongs to
2669 * @mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
2670 * @r: response descriptor
2672 * Get the next packet from a free list and complete setup of the
2673 * mbuf. If the packet is small we make a copy and recycle the
2674 * original buffer, otherwise we use the original buffer itself. If a
2675 * positive drop threshold is supplied packets are dropped and their
2676 * buffers recycled if (a) the number of remaining buffers is under the
2677 * threshold and the packet is too big to copy, or (b) the packet should
2678 * be copied but there is no memory for the copy.
2681 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2682 struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2685 unsigned int len_cq = ntohl(r->len_cq);
2686 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2687 int mask, cidx = fl->cidx;
2688 struct rx_sw_desc *sd = &fl->sdesc[cidx];
2689 uint32_t len = G_RSPD_LEN(len_cq);
2690 uint32_t flags = M_EXT;
2691 uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
2696 mask = fl->size - 1;
2697 prefetch(fl->sdesc[(cidx + 1) & mask].m);
2698 prefetch(fl->sdesc[(cidx + 2) & mask].m);
2699 prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
2700 prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);
2703 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2705 if (recycle_enable && len <= SGE_RX_COPY_THRES &&
2706 sopeop == RSPQ_SOP_EOP) {
2707 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
2709 cl = mtod(m, void *);
2710 memcpy(cl, sd->rxsd_cl, len);
2711 recycle_rx_buf(adap, fl, fl->cidx);
2712 m->m_pkthdr.len = m->m_len = len;
2714 mh->mh_head = mh->mh_tail = m;
2719 bus_dmamap_unload(fl->entry_tag, sd->map);
2723 if ((sopeop == RSPQ_SOP_EOP) ||
2724 (sopeop == RSPQ_SOP))
flags |= M_PKTHDR;
2726 m_init(m, M_NOWAIT, MT_DATA, flags);
2727 if (fl->zone == zone_pack) {
2729 * restore clobbered data pointer
2731 m->m_data = m->m_ext.ext_buf;
2733 m_cljset(m, cl, fl->type);
2742 mh->mh_head = mh->mh_tail = m;
2743 m->m_pkthdr.len = len;
2748 case RSPQ_NSOP_NEOP:
2749 if (mh->mh_tail == NULL) {
2750 log(LOG_ERR, "discarding intermediate descriptor entry\n");
2754 mh->mh_tail->m_next = m;
2756 mh->mh_head->m_pkthdr.len += len;
2760 printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2762 if (++fl->cidx == fl->size)
2769 * handle_rsp_cntrl_info - handles control information in a response
2770 * @qs: the queue set corresponding to the response
2771 * @flags: the response control flags
2773 * Handles the control information of an SGE response, such as GTS
2774 * indications and completion credits for the queue set's Tx queues.
2775 * HW coalesces credits; we don't do any extra SW coalescing.
2777 static __inline void
2778 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2780 unsigned int credits;
2783 if (flags & F_RSPD_TXQ0_GTS)
2784 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2786 credits = G_RSPD_TXQ0_CR(flags);
2788 qs->txq[TXQ_ETH].processed += credits;
2790 credits = G_RSPD_TXQ2_CR(flags);
2792 qs->txq[TXQ_CTRL].processed += credits;
2795 if (flags & F_RSPD_TXQ1_GTS)
2796 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2798 credits = G_RSPD_TXQ1_CR(flags);
2800 qs->txq[TXQ_OFLD].processed += credits;
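/*
 * Added note: the response flags use hardware queue numbering, which this
 * function translates: TXQ0 credits/GTS belong to the Ethernet queue,
 * TXQ1 to the offload queue, and TXQ2 to the control queue.
 */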
2805 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2806 unsigned int sleeping)
2812 * process_responses - process responses from an SGE response queue
2813 * @adap: the adapter
2814 * @qs: the queue set to which the response queue belongs
2815 * @budget: how many responses can be processed in this round
2817 * Process responses from an SGE response queue up to the supplied budget.
2818 * Responses include received packets as well as credits and other events
2819 * for the queues that belong to the response queue's queue set.
2820 * A negative budget is effectively unlimited.
2822 * Additionally choose the interrupt holdoff time for the next interrupt
2823 * on this queue. If the system is under memory shortage use a fairly
2824 * long delay to help recovery.
2827 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2829 struct sge_rspq *rspq = &qs->rspq;
2830 struct rsp_desc *r = &rspq->desc[rspq->cidx];
2831 int budget_left = budget;
2832 unsigned int sleeping = 0;
2833 #if defined(INET6) || defined(INET)
2834 int lro_enabled = qs->lro.enabled;
2836 struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2838 struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
2840 static int last_holdoff = 0;
2841 if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2842 printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2843 last_holdoff = rspq->holdoff_tmr;
2846 rspq->next_holdoff = rspq->holdoff_tmr;
2848 while (__predict_true(budget_left && is_new_response(r, rspq))) {
2849 int eth, eop = 0, ethpad = 0;
2850 uint32_t flags = ntohl(r->flags);
2851 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2852 uint8_t opcode = r->rss_hdr.opcode;
2854 eth = (opcode == CPL_RX_PKT);
2856 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2860 printf("async notification\n");
2862 if (mh->mh_head == NULL) {
2863 mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
2866 m = m_gethdr(M_NOWAIT, MT_DATA);
2871 memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2872 m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2873 *mtod(m, char *) = CPL_ASYNC_NOTIF;
2874 opcode = CPL_ASYNC_NOTIF;
2876 rspq->async_notif++;
2878 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2879 struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
2883 rspq->next_holdoff = NOMEM_INTR_DELAY;
2887 if (mh->mh_head == NULL)
2890 mh->mh_tail->m_next = m;
2893 get_imm_packet(adap, r, m);
2894 mh->mh_head->m_pkthdr.len += m->m_len;
2897 } else if (r->len_cq) {
2898 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2900 eop = get_packet(adap, drop_thresh, qs, mh, r);
2902 if (r->rss_hdr.hash_type && !adap->timestamp) {
2903 M_HASHTYPE_SET(mh->mh_head, M_HASHTYPE_OPAQUE);
2904 mh->mh_head->m_pkthdr.flowid = rss_hash;
2913 if (flags & RSPD_CTRL_MASK) {
2914 sleeping |= flags & RSPD_GTS_MASK;
2915 handle_rsp_cntrl_info(qs, flags);
2919 rspq->offload_pkts++;
2921 adap->cpl_handler[opcode](qs, r, mh->mh_head);
2923 m_freem(mh->mh_head);
2926 } else if (eth && eop) {
2927 struct mbuf *m = mh->mh_head;
2929 t3_rx_eth(adap, m, ethpad);
2932 * The T304 sends incoming packets on any qset. If LRO
2933 * is also enabled, we could end up sending the packet up
2934 * lro_ctrl->ifp's input. That is incorrect.
2936 * The mbuf's rcvif was derived from the cpl header and
2937 * is accurate. Skip LRO and just use that.
2939 #if defined(INET6) || defined(INET)
2940 skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2942 if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
2943 && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
2945 /* successfully queued for LRO */
2950 * LRO not enabled, packet unsuitable for LRO,
2951 * or unable to queue. Pass it up right now in this thread.
2954 struct ifnet *ifp = m->m_pkthdr.rcvif;
2955 (*ifp->if_input)(ifp, m);
2962 if (__predict_false(++rspq->cidx == rspq->size)) {
2968 if (++rspq->credits >= 64) {
2969 refill_rspq(adap, rspq, rspq->credits);
2972 __refill_fl_lt(adap, &qs->fl[0], 32);
2973 __refill_fl_lt(adap, &qs->fl[1], 32);
2977 #if defined(INET6) || defined(INET)
2979 tcp_lro_flush_all(lro_ctrl);
2983 check_ring_db(adap, qs, sleeping);
2985 mb(); /* commit Tx queue processed updates */
2986 if (__predict_false(qs->txq_stopped > 1))
2989 __refill_fl_lt(adap, &qs->fl[0], 512);
2990 __refill_fl_lt(adap, &qs->fl[1], 512);
2991 budget -= budget_left;
2996 * A helper function that processes responses and issues GTS.
2999 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
3002 static int last_holdoff = 0;
3004 work = process_responses(adap, rspq_to_qset(rq), -1);
3006 if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
3007 printf("next_holdoff=%d\n", rq->next_holdoff);
3008 last_holdoff = rq->next_holdoff;
3010 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3011 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
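/*
 * Added note: the single A_SG_GTS write above both returns the new
 * consumer index to the SGE and arms the holdoff timer that paces the
 * next interrupt for this response queue.
 */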
3018 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3019 * Handles data events from SGE response queues as well as error and other
3020 * async events as they all use the same interrupt pin. We use one SGE
3021 * response queue per port in this mode and protect all response queues with queue 0's lock.
3025 t3b_intr(void *data)
3028 adapter_t *adap = data;
3029 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3031 t3_write_reg(adap, A_PL_CLI, 0);
3032 map = t3_read_reg(adap, A_SG_DATA_INTR);
3037 if (__predict_false(map & F_ERRINTR)) {
3038 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3039 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3040 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3043 mtx_lock(&q0->lock);
3044 for_each_port(adap, i)
3046 process_responses_gts(adap, &adap->sge.qs[i].rspq);
3047 mtx_unlock(&q0->lock);
3051 * The MSI interrupt handler. This needs to handle data events from SGE
3052 * response queues as well as error and other async events as they all use
3053 * the same MSI vector. We use one SGE response queue per port in this mode
3054 * and protect all response queues with queue 0's lock.
3057 t3_intr_msi(void *data)
3059 adapter_t *adap = data;
3060 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3061 int i, new_packets = 0;
3063 mtx_lock(&q0->lock);
3065 for_each_port(adap, i)
3066 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3068 mtx_unlock(&q0->lock);
3069 if (new_packets == 0) {
3070 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3071 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3072 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3077 t3_intr_msix(void *data)
3079 struct sge_qset *qs = data;
3080 adapter_t *adap = qs->port->adapter;
3081 struct sge_rspq *rspq = &qs->rspq;
3083 if (process_responses_gts(adap, rspq) == 0)
3084 rspq->unhandled_irqs++;
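/*
 * Added note: the three handlers differ only in fan-out.  INTx (t3b_intr)
 * and MSI (t3_intr_msi) share one vector, so they poll every port's
 * response queue under queue 0's lock; MSI-X gives each queue set its own
 * vector, so t3_intr_msix services just that qset with no shared lock.
 */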
3087 #define QDUMP_SBUF_SIZE (32 * 400)
3089 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
3091 struct sge_rspq *rspq;
3092 struct sge_qset *qs;
3093 int i, err, dump_end, idx;
3095 struct rsp_desc *rspd;
3099 qs = rspq_to_qset(rspq);
3100 if (rspq->rspq_dump_count == 0)
3102 if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3104 "dump count is too large %d\n", rspq->rspq_dump_count);
3105 rspq->rspq_dump_count = 0;
3108 if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3110 "dump start of %d is greater than queue size\n",
3111 rspq->rspq_dump_start);
3112 rspq->rspq_dump_start = 0;
3115 err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3118 err = sysctl_wire_old_buffer(req, 0);
3121 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3123 sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3124 (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3125 ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3126 sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3127 ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3129 sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3130 (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3132 dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3133 for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3134 idx = i & (RSPQ_Q_SIZE-1);
3136 rspd = &rspq->desc[idx];
3137 sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3138 idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3139 rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3140 sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3141 rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3142 be32toh(rspd->len_cq), rspd->intr_gen);
3145 err = sbuf_finish(sb);
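/*
 * Usage sketch (added; the exact sysctl prefix depends on the device name
 * and unit and is illustrative only): select a window with
 * dump_start/dump_count, then read qdump, e.g.
 *
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.dump_start=0
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.dump_count=32
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.qdump
 */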
3151 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3153 struct sge_txq *txq;
3154 struct sge_qset *qs;
3155 int i, j, err, dump_end;
3157 struct tx_desc *txd;
3158 uint32_t *WR, wr_hi, wr_lo, gen;
3162 qs = txq_to_qset(txq, TXQ_ETH);
3163 if (txq->txq_dump_count == 0) {
3166 if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3168 "dump count is too large %d\n", txq->txq_dump_count);
3169 txq->txq_dump_count = 1;
3172 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3174 "dump start of %d is greater than queue size\n",
3175 txq->txq_dump_start);
3176 txq->txq_dump_start = 0;
3179 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3182 err = sysctl_wire_old_buffer(req, 0);
3185 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3187 sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3188 (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3189 (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3190 sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
3191 ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3192 ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3193 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3194 txq->txq_dump_start,
3195 (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3197 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3198 for (i = txq->txq_dump_start; i < dump_end; i++) {
3199 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3200 WR = (uint32_t *)txd->flit;
3201 wr_hi = ntohl(WR[0]);
3202 wr_lo = ntohl(WR[1]);
3203 gen = G_WR_GEN(wr_lo);
3205 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3207 for (j = 2; j < 30; j += 4)
3208 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3209 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3212 err = sbuf_finish(sb);
3218 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3220 struct sge_txq *txq;
3221 struct sge_qset *qs;
3222 int i, j, err, dump_end;
3224 struct tx_desc *txd;
3225 uint32_t *WR, wr_hi, wr_lo, gen;
3228 qs = txq_to_qset(txq, TXQ_CTRL);
3229 if (txq->txq_dump_count == 0) {
3232 if (txq->txq_dump_count > 256) {
3234 "dump count is too large %d\n", txq->txq_dump_count);
3235 txq->txq_dump_count = 1;
3238 if (txq->txq_dump_start > 255) {
3240 "dump start of %d is greater than queue size\n",
3241 txq->txq_dump_start);
3242 txq->txq_dump_start = 0;
3246 err = sysctl_wire_old_buffer(req, 0);
3249 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3250 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3251 txq->txq_dump_start,
3252 (txq->txq_dump_start + txq->txq_dump_count) & 255);
3254 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3255 for (i = txq->txq_dump_start; i < dump_end; i++) {
3256 txd = &txq->desc[i & (255)];
3257 WR = (uint32_t *)txd->flit;
3258 wr_hi = ntohl(WR[0]);
3259 wr_lo = ntohl(WR[1]);
3260 gen = G_WR_GEN(wr_lo);
3262 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3264 for (j = 2; j < 30; j += 4)
3265 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3266 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3269 err = sbuf_finish(sb);
3275 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3277 adapter_t *sc = arg1;
3278 struct qset_params *qsp = &sc->params.sge.qset[0];
3280 struct sge_qset *qs;
3281 int i, j, err, nqsets = 0;
3284 if ((sc->flags & FULL_INIT_DONE) == 0)
3287 coalesce_usecs = qsp->coalesce_usecs;
3288 err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3293 if (coalesce_usecs == qsp->coalesce_usecs)
3296 for (i = 0; i < sc->params.nports; i++)
3297 for (j = 0; j < sc->port[i].nqsets; j++)
3300 coalesce_usecs = max(1, coalesce_usecs);
3302 for (i = 0; i < nqsets; i++) {
3303 qs = &sc->sge.qs[i];
3304 qsp = &sc->params.sge.qset[i];
3305 qsp->coalesce_usecs = coalesce_usecs;
3307 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3308 &sc->sge.qs[0].rspq.lock;
3311 t3_update_qset_coalesce(qs, qsp);
3312 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3313 V_NEWTIMER(qs->rspq.holdoff_tmr));
3321 t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
3323 adapter_t *sc = arg1;
3326 if ((sc->flags & FULL_INIT_DONE) == 0)
3329 timestamp = sc->timestamp;
3330 rc = sysctl_handle_int(oidp, &timestamp, arg2, req);
3335 if (timestamp != sc->timestamp) {
3336 t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
3337 timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
3338 sc->timestamp = timestamp;
3345 t3_add_attach_sysctls(adapter_t *sc)
3347 struct sysctl_ctx_list *ctx;
3348 struct sysctl_oid_list *children;
3350 ctx = device_get_sysctl_ctx(sc->dev);
3351 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3353 /* random information */
3354 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3356 CTLFLAG_RD, sc->fw_version,
3357 0, "firmware version");
3358 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3360 CTLFLAG_RD, &sc->params.rev,
3362 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3364 CTLFLAG_RD, sc->port_types,
3365 0, "type of ports");
3366 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3368 CTLFLAG_RW, &cxgb_debug,
3369 0, "enable verbose debugging output");
3370 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
3371 CTLFLAG_RD, &sc->tunq_coalesce,
3372 "#tunneled packets freed");
3373 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3375 CTLFLAG_RD, &txq_fills,
3376 0, "#times txq overrun");
3377 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3379 CTLFLAG_RD, &sc->params.vpd.cclk,
3380 0, "core clock frequency (in KHz)");
3384 static const char *rspq_name = "rspq";
3385 static const char *txq_names[] =
3393 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3395 struct port_info *p = arg1;
3401 cxgb_refresh_stats(p);
3402 parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3404 return (sysctl_handle_64(oidp, parg, 0, req));
3408 t3_add_configured_sysctls(adapter_t *sc)
3410 struct sysctl_ctx_list *ctx;
3411 struct sysctl_oid_list *children;
3414 ctx = device_get_sysctl_ctx(sc->dev);
3415 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3417 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3419 CTLTYPE_INT|CTLFLAG_RW, sc,
3420 0, t3_set_coalesce_usecs,
3421 "I", "interrupt coalescing timer (us)");
3423 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3425 CTLTYPE_INT | CTLFLAG_RW, sc,
3426 0, t3_pkt_timestamp,
3427 "I", "provide packet timestamp instead of connection hash");
3429 for (i = 0; i < sc->params.nports; i++) {
3430 struct port_info *pi = &sc->port[i];
3431 struct sysctl_oid *poid;
3432 struct sysctl_oid_list *poidlist;
3433 struct mac_stats *mstats = &pi->mac.stats;
3435 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3436 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3437 pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
3438 poidlist = SYSCTL_CHILDREN(poid);
3439 SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3440 "nqsets", CTLFLAG_RD, &pi->nqsets,
3443 for (j = 0; j < pi->nqsets; j++) {
3444 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3445 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3446 *ctrlqpoid, *lropoid;
3447 struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3448 *txqpoidlist, *ctrlqpoidlist,
3450 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3452 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3454 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3455 qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
3456 qspoidlist = SYSCTL_CHILDREN(qspoid);
3458 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3459 CTLFLAG_RD, &qs->fl[0].empty, 0,
3460 "freelist #0 empty");
3461 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3462 CTLFLAG_RD, &qs->fl[1].empty, 0,
3463 "freelist #1 empty");
3465 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3466 rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
3467 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3469 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3470 txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3471 txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3473 ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3474 txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
3475 ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3477 lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3478 "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
3479 lropoidlist = SYSCTL_CHILDREN(lropoid);
3481 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3482 CTLFLAG_RD, &qs->rspq.size,
3483 0, "#entries in response queue");
3484 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3485 CTLFLAG_RD, &qs->rspq.cidx,
3486 0, "consumer index");
3487 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3488 CTLFLAG_RD, &qs->rspq.credits,
3490 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
3491 CTLFLAG_RD, &qs->rspq.starved,
3492 0, "#times starved");
3493 SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3494 CTLFLAG_RD, &qs->rspq.phys_addr,
3495 "physical_address_of the queue");
3496 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3497 CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3498 0, "start rspq dump entry");
3499 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3500 CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3501 0, "#rspq entries to dump");
3502 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3503 CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
3504 0, t3_dump_rspq, "A", "dump of the response queue");
3506 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
3507 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3508 "#tunneled packets dropped");
3509 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3510 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
3511 0, "#tunneled packets waiting to be sent");
3513 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3514 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_prod,
3515 0, "#tunneled packets queue producer index");
3516 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3517 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_cons,
3518 0, "#tunneled packets queue consumer index");
3520 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
3521 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3522 0, "#tunneled packets processed by the card");
3523 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3524 CTLFLAG_RD, &txq->cleaned,
3525 0, "#tunneled packets cleaned");
3526 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3527 CTLFLAG_RD, &txq->in_use,
3528 0, "#tunneled packet slots in use");
3529 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "frees",
3530 CTLFLAG_RD, &txq->txq_frees,
3531 "#tunneled packets freed");
3532 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3533 CTLFLAG_RD, &txq->txq_skipped,
3534 0, "#tunneled packet descriptors skipped");
3535 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
3536 CTLFLAG_RD, &txq->txq_coalesced,
3537 "#tunneled packets coalesced");
3538 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3539 CTLFLAG_RD, &txq->txq_enqueued,
3540 0, "#tunneled packets enqueued to hardware");
3541 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3542 CTLFLAG_RD, &qs->txq_stopped,
3543 0, "tx queues stopped");
3544 SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3545 CTLFLAG_RD, &txq->phys_addr,
3546 "physical_address_of the queue");
3547 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3548 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3549 0, "txq generation");
3550 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3551 CTLFLAG_RD, &txq->cidx,
3552 0, "hardware queue cidx");
3553 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3554 CTLFLAG_RD, &txq->pidx,
3555 0, "hardware queue pidx");
3556 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3557 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3558 0, "txq start idx for dump");
3559 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3560 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3561 0, "txq #entries to dump");
3562 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3563 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3564 0, t3_dump_txq_eth, "A", "dump of the transmit queue");
3566 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3567 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3568 0, "ctrlq start idx for dump");
3569 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3570 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3571 0, "ctrl #entries to dump");
3572 SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3573 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
3574 0, t3_dump_txq_ctrl, "A", "dump of the transmit queue");
3576 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_queued",
3577 CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
3578 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3579 CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
3580 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3581 CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
3582 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3583 CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
3586 /* Now add a node for mac stats. */
3587 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3588 CTLFLAG_RD, NULL, "MAC statistics");
3589 poidlist = SYSCTL_CHILDREN(poid);
3592 * We (ab)use the length argument (arg2) to pass on the offset
3593 * of the data that we are interested in. This is only required
3594 * for the quad counters that are updated from the hardware (we
3595 * make sure that we return the latest value).
3596 * sysctl_handle_macstat first updates *all* the counters from
3597 * the hardware, and then returns the latest value of the
3598 * requested counter. Best would be to update only the
3599 * requested counter from hardware, but t3_mac_update_stats()
3600 * hides all the register details and we don't want to dive into that here.
3603 #define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3604 (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
3605 sysctl_handle_macstat, "QU", 0)
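/*
 * Added example: CXGB_SYSCTL_ADD_QUAD(tx_octets) registers a "tx_octets"
 * node whose arg2 is offsetof(struct mac_stats, tx_octets);
 * sysctl_handle_macstat() adds that offset to &p->mac.stats to locate the
 * counter after refreshing all the stats from hardware.
 */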
3606 CXGB_SYSCTL_ADD_QUAD(tx_octets);
3607 CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3608 CXGB_SYSCTL_ADD_QUAD(tx_frames);
3609 CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3610 CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3611 CXGB_SYSCTL_ADD_QUAD(tx_pause);
3612 CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3613 CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3614 CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3615 CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3616 CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3617 CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3618 CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3619 CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3620 CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3621 CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3622 CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3623 CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3624 CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3625 CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3626 CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3627 CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3628 CXGB_SYSCTL_ADD_QUAD(rx_octets);
3629 CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3630 CXGB_SYSCTL_ADD_QUAD(rx_frames);
3631 CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3632 CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3633 CXGB_SYSCTL_ADD_QUAD(rx_pause);
3634 CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3635 CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3636 CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3637 CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3638 CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3639 CXGB_SYSCTL_ADD_QUAD(rx_runt);
3640 CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3641 CXGB_SYSCTL_ADD_QUAD(rx_short);
3642 CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3643 CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3644 CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3645 CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3646 CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3647 CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3648 CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3649 CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3650 CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3651 CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3652 #undef CXGB_SYSCTL_ADD_QUAD
3654 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3655 CTLFLAG_RD, &mstats->a, 0)
3656 CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3657 CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3658 CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3659 CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3660 CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3661 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3662 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3663 CXGB_SYSCTL_ADD_ULONG(num_toggled);
3664 CXGB_SYSCTL_ADD_ULONG(num_resets);
3665 CXGB_SYSCTL_ADD_ULONG(link_faults);
3666 #undef CXGB_SYSCTL_ADD_ULONG
3671 * t3_get_desc - dump an SGE descriptor for debugging purposes
3672 * @qs: the queue set
3673 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
3674 * @idx: the descriptor index in the queue
3675 * @data: where to dump the descriptor contents
3677 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3678 * size of the descriptor.
3681 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3682 unsigned char *data)
3688 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3690 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3691 return sizeof(struct tx_desc);
3695 if (!qs->rspq.desc || idx >= qs->rspq.size)
3697 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3698 return sizeof(struct rsp_desc);
3702 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3704 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3705 return sizeof(struct rx_desc);
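/*
 * Added usage sketch (illustrative only; "sc" and "buf" are hypothetical
 * locals):
 *
 *	unsigned char buf[sizeof(struct tx_desc)];
 *	int len = t3_get_desc(&sc->sge.qs[0], 0, 0, buf);
 *
 * qnum 0 selects the Ethernet Tx ring and idx 0 its first descriptor; len
 * comes back as sizeof(struct tx_desc).
 */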