/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>

#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <sys/sched.h>

#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/sglist.h>

#include <net/if_var.h>

#include <net/ethernet.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cxgb_include.h>
int multiq_tx_enable = 1;

CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);

extern struct sysctl_oid_list sysctl__hw_cxgb_children;
int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
    "size of per-queue mbuf ring");

static int cxgb_tx_coalesce_force = 0;
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
    &cxgb_tx_coalesce_force, 0,
    "coalesce small packets into a single work request regardless of ring state");
#define	COALESCE_START_DEFAULT	(TX_ETH_Q_SIZE >> 1)
#define	COALESCE_START_MAX	(TX_ETH_Q_SIZE - (TX_ETH_Q_SIZE >> 3))
#define	COALESCE_STOP_DEFAULT	(TX_ETH_Q_SIZE >> 2)
#define	COALESCE_STOP_MIN	(TX_ETH_Q_SIZE >> 5)
#define	TX_RECLAIM_DEFAULT	(TX_ETH_Q_SIZE >> 5)
#define	TX_RECLAIM_MAX		(TX_ETH_Q_SIZE >> 2)
#define	TX_RECLAIM_MIN		(TX_ETH_Q_SIZE >> 6)
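
/*
 * Worked example of the thresholds above, assuming TX_ETH_Q_SIZE is 1024
 * (defined elsewhere): coalescing starts at 512 in-use descriptors (1/2)
 * and may be tuned no higher than 896 (1024 - 128); it stops below 256
 * (1/4) with a floor of 32 (1/32).  The reclaim threshold defaults to 32
 * and is clamped to the range [16, 256].
 */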
static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN,
    &cxgb_tx_coalesce_enable_start, 0,
    "coalesce enable threshold");
static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN,
    &cxgb_tx_coalesce_enable_stop, 0,
    "coalesce disable threshold");
static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN,
    &cxgb_tx_reclaim_threshold, 0,
    "tx cleaning minimum threshold");
/*
 * XXX don't re-enable this until TOE stops assuming
 * we have an m_ext
 */
static int recycle_enable = 0;

extern int cxgb_use_16k_clusters;
extern int nmbjumbop;
extern int nmbjumbo9;
extern int nmbjumbo16;

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_RX_DROP_THRES	16
#define SGE_RX_COPY_THRES	128

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD       (hz >> 1)
/*
 * Values for sge_txq.flags
 */
enum {
	TXQ_RUNNING	= 1 << 0,  /* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
};

struct tx_desc {
	uint64_t	flit[TX_DESC_FLITS];
} __packed;
struct rsp_desc {               /* response queue descriptor */
	struct rss_header	rss_hdr;
	uint32_t		flags;
	uint32_t		len_cq;
	uint8_t			imm_data[47];
	uint8_t			intr_gen;
} __packed;
#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE	(1 << 3)
#define TX_SW_DESC_MAPPED	(1 << 4)

#define RSPQ_NSOP_NEOP		G_RSPD_SOP_EOP(0)
#define RSPQ_EOP		G_RSPD_SOP_EOP(F_RSPD_EOP)
#define RSPQ_SOP		G_RSPD_SOP_EOP(F_RSPD_SOP)
#define RSPQ_SOP_EOP		G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct mbuf	*m;
	bus_dmamap_t	map;
	int		flags;
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	caddr_t		rxsd_cl;
	struct mbuf	*m;
	bus_dmamap_t	map;
	int		flags;
};

struct txq_state {
	unsigned int	compl;
	unsigned int	gen;
	unsigned int	pidx;
};

struct refill_fl_cb_arg {
	int               error;
	bus_dma_segment_t seg;
	int               nseg;
};
/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static uint8_t flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
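
/*
 * Worked example of the table above: with SGE_NUM_GENBITS == 2 each
 * descriptor holds 15 usable flits (WR_FLITS here), so a work request of
 * 20 flits needs 1 + (20 - 2) / (15 - 1) = 2 descriptors, which is exactly
 * flit_desc_map[20].  The table simply precomputes that division.
 */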
#define	TXQ_LOCK_ASSERT(qs)	mtx_assert(&(qs)->lock, MA_OWNED)
#define	TXQ_TRYLOCK(qs)		mtx_trylock(&(qs)->lock)
#define	TXQ_LOCK(qs)		mtx_lock(&(qs)->lock)
#define	TXQ_UNLOCK(qs)		mtx_unlock(&(qs)->lock)
#define	TXQ_RING_EMPTY(qs)	drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_NEEDS_ENQUEUE(qs)					\
	drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_FLUSH(qs)	drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_DEQUEUE_COND(qs, func, arg)				\
	drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
#define	TXQ_RING_DEQUEUE(qs)						\
	drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)

static void sge_timer_cb(void *arg);
static void sge_timer_reclaim(void *arg, int ncount);
static void sge_txq_reclaim_handler(void *arg, int ncount);
static void cxgb_start_locked(struct sge_qset *qs);
/*
 * XXX need to cope with bursty scheduling by looking at a wider
 * window than we are now for determining the need for coalescing
 */
static __inline uint64_t
check_pkt_coalesce(struct sge_qset *qs)
{
	struct adapter *sc;
	struct sge_txq *txq;
	uint8_t *fill;

	if (__predict_false(cxgb_tx_coalesce_force))
		return (1);
	txq = &qs->txq[TXQ_ETH];
	sc = qs->port->adapter;
	fill = &sc->tunq_fill[qs->idx];

	if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
		cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
	if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
		cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
	/*
	 * If the hardware transmit queue is more than 1/8 full
	 * we mark it as coalescing - we drop back from coalescing
	 * when we go below 1/32 full and there are no packets enqueued;
	 * this provides us with some degree of hysteresis.
	 */
	if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
	    TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
		*fill = 0;
	else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
		*fill = 1;

	return (sc->tunq_coalesce);
}
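
/*
 * Example of the hysteresis above, assuming the default thresholds for a
 * 1024-entry ring: a queue that climbs to 512 in-use descriptors sets
 * *fill and begins coalescing; it keeps coalescing until it drains below
 * 256 with an empty software ring, at which point *fill is cleared again.
 */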
#if defined(__LP64__)
static __inline void
set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
{
	uint64_t wr_hilo;
#if _BYTE_ORDER == _LITTLE_ENDIAN
	wr_hilo = wr_hi;
	wr_hilo |= (((uint64_t)wr_lo)<<32);
#else
	wr_hilo = wr_lo;
	wr_hilo |= (((uint64_t)wr_hi)<<32);
#endif
	wrp->wrh_hilo = wr_hilo;
}
#else
static __inline void
set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
{
	wrp->wrh_hi = wr_hi;
	wmb();
	wrp->wrh_lo = wr_lo;
}
#endif

struct coalesce_info {
	int count;
	int nbytes;
};

static int
coalesce_check(struct mbuf *m, void *arg)
{
	struct coalesce_info *ci = arg;
	int *count = &ci->count;
	int *nbytes = &ci->nbytes;

	if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
	    (*count < 7) && (m->m_next == NULL))) {
		*count += 1;
		*nbytes += m->m_len;
		return (1);
	}
	return (0);
}
static struct mbuf *
cxgb_dequeue(struct sge_qset *qs)
{
	struct mbuf *m, *m_head, *m_tail;
	struct coalesce_info ci;

	if (check_pkt_coalesce(qs) == 0)
		return TXQ_RING_DEQUEUE(qs);

	m_head = m_tail = NULL;
	ci.count = ci.nbytes = 0;
	do {
		m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
		if (m_head == NULL) {
			m_tail = m_head = m;
		} else if (m != NULL) {
			m_tail->m_nextpkt = m;
			m_tail = m;
		}
	} while (m != NULL);
	if (ci.count > 7)
		panic("trying to coalesce %d packets in to one WR", ci.count);
	return (m_head);
}
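
/*
 * Note on the limits used by coalesce_check() above: at most 7 single-mbuf
 * packets totalling no more than 10500 bytes are chained together through
 * m_nextpkt, matching the 7 cpl_tx_pkt_batch_entry slots that t3_encap()
 * below is willing to write into a single work request.
 */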
/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static __inline int
reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
{
	struct sge_txq *q = &qs->txq[queue];
	int reclaim = desc_reclaimable(q);

	if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
	    (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
		cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;

	if (reclaim < reclaim_min)
		return (0);

	mtx_assert(&qs->lock, MA_OWNED);
	if (reclaim > 0) {
		t3_free_tx_desc(qs, reclaim, queue);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	if (isset(&qs->txq_stopped, TXQ_ETH))
		clrbit(&qs->txq_stopped, TXQ_ETH);

	return (reclaim);
}
/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static __inline int
should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
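
/*
 * Example: for a 1024-entry queue with 600 descriptors in use, of which
 * 200 have been processed by the SGE but not yet cleaned, the effective
 * occupancy is 600 - 200 = 400 < 512, so the queue may be restarted.
 */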
/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 */
void
t3_sge_init(adapter_t *adap, struct sge_params *p)
{
	u_int ctrl, ups;

	ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static __inline unsigned int
sgl_len(unsigned int n)
{
	return ((3 * n) / 2 + (n & 1));
}
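
/*
 * Each sg_ent packs two address/length pairs into three flits, so e.g.
 * sgl_len(5) = (3 * 5) / 2 + 1 = 8 flits: two full entries (6 flits) plus
 * a half-used entry (2 flits) for the odd fifth segment.
 */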
/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static int
get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
{
	if (resp->rss_hdr.opcode == CPL_RX_DATA) {
		const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
	} else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
		const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
	} else
		m->m_len = IMMED_PKT_SIZE;
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_type = 0;
	memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
	return (0);
}

static __inline u_int
flits_to_desc(u_int n)
{
	return (flit_desc_map[n]);
}
#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		    F_HIRCQPARITYERROR)

#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)

#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
		      F_RSPQDISABLED)
/**
 *	t3_sge_err_intr_handler - SGE async event interrupt handler
 *	@adapter: the adapter
 *
 *	Interrupt handler for SGE asynchronous (non-data) events.
 */
void
t3_sge_err_intr_handler(adapter_t *adapter)
{
	unsigned int v, status;

	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	if (status & SGE_PARERR)
		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
			 status & SGE_PARERR);
	if (status & SGE_FRAMINGERR)
		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
			 status & SGE_FRAMINGERR);
	if (status & F_RSPQCREDITOVERFOW)
		CH_ALERT(adapter, "SGE response queue credit overflow\n");

	if (status & F_RSPQDISABLED) {
		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);

		CH_ALERT(adapter,
			 "packet delivered to disabled response queue (0x%x)\n",
			 (v >> S_RSPQ0DISABLED) & 0xff);
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & SGE_FATALERR)
		t3_fatal_err(adapter);
}
void
t3_sge_prep(adapter_t *adap, struct sge_params *p)
{
	int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;

	nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
	nqsets *= adap->params.nports;

	fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);

	while (!powerof2(fl_q_size))
		fl_q_size--;

	use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
	    is_offload(adap);

#if __FreeBSD_version >= 700111
	if (use_16k) {
		jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
		jumbo_buf_size = MJUM16BYTES;
	} else {
		jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
		jumbo_buf_size = MJUM9BYTES;
	}
#else
	jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
	jumbo_buf_size = MJUMPAGESIZE;
#endif
	while (!powerof2(jumbo_q_size))
		jumbo_q_size--;

	if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
		device_printf(adap->dev,
		    "Insufficient clusters and/or jumbo buffers.\n");

	p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		if (adap->params.nports > 2) {
			q->coalesce_usecs = 50;
		} else {
#ifdef INVARIANTS
			q->coalesce_usecs = 10;
#else
			q->coalesce_usecs = 5;
#endif
		}
		q->rspq_size = RSPQ_Q_SIZE;
		q->fl_size = fl_q_size;
		q->jumbo_size = jumbo_q_size;
		q->jumbo_buf_size = jumbo_buf_size;
		q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
		q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
		q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
	}
}
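
/*
 * Sizing example for the formulas above (illustrative numbers only): with
 * nmbclusters = 98304 and nqsets = 16, fl_q_size = min(98304 / 48,
 * FL_Q_SIZE) = min(2048, FL_Q_SIZE), then rounded down to a power of 2;
 * the jumbo free list is sized the same way from the matching jumbo zone.
 */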
int
t3_sge_alloc(adapter_t *sc)
{
	/* The parent tag. */
	if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				BUS_SPACE_UNRESTRICTED, /* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lock, lockarg */
				&sc->parent_dmat)) {
		device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * DMA tag for normal sized RX frames
	 */
	if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
		BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
		MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
		device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * DMA tag for jumbo sized RX frames.
	 */
	if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
		BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
		BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
		device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * DMA tag for TX frames.
	 */
	if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
		BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
		TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
		NULL, NULL, &sc->tx_dmat)) {
		device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
		return (ENOMEM);
	}

	return (0);
}
int
t3_sge_free(struct adapter * sc)
{
	if (sc->tx_dmat != NULL)
		bus_dma_tag_destroy(sc->tx_dmat);

	if (sc->rx_jumbo_dmat != NULL)
		bus_dma_tag_destroy(sc->rx_jumbo_dmat);

	if (sc->rx_dmat != NULL)
		bus_dma_tag_destroy(sc->rx_dmat);

	if (sc->parent_dmat != NULL)
		bus_dma_tag_destroy(sc->parent_dmat);

	return (0);
}
void
t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
	qs->rspq.polling = 0 /* p->polling */;
}

#if !defined(__i386__) && !defined(__amd64__)
static void
refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct refill_fl_cb_arg *cb_arg = arg;

	cb_arg->error = error;
	cb_arg->seg = segs[0];
	cb_arg->nseg = nseg;
}
#endif
/**
 *	refill_fl - refill an SGE free-buffer list
 *	@sc: the controller softc
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers.
 *	The caller must assure that @n does not exceed the queue's capacity.
 */
static void
refill_fl(adapter_t *sc, struct sge_fl *q, int n)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	struct refill_fl_cb_arg cb_arg;
	struct mbuf *m;
	caddr_t cl;
	int err;

	cb_arg.error = 0;
	while (n--) {
		/*
		 * We allocate an uninitialized mbuf + cluster, mbuf is
		 * initialized after rx.
		 */
		if (q->zone == zone_pack) {
			if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
				break;
			cl = m->m_ext.ext_buf;
		} else {
			if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
				break;
			if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
				uma_zfree(q->zone, cl);
				break;
			}
		}
		if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
			if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
				log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
				uma_zfree(q->zone, cl);
				goto done;
			}
			sd->flags |= RX_SW_DESC_MAP_CREATED;
		}
#if !defined(__i386__) && !defined(__amd64__)
		err = bus_dmamap_load(q->entry_tag, sd->map,
		    cl, q->buf_size, refill_fl_cb, &cb_arg, 0);

		if (err != 0 || cb_arg.error) {
			if (q->zone == zone_pack)
				uma_zfree(q->zone, cl);
			m_free(m);
			goto done;
		}
#else
		cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
#endif
		sd->flags |= RX_SW_DESC_INUSE;
		sd->rxsd_cl = cl;
		sd->m = m;
		d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
		d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
		d->len_gen = htobe32(V_FLD_GEN1(q->gen));
		d->gen2 = htobe32(V_FLD_GEN2(q->gen));

		d++;
		sd++;

		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
		q->db_pending++;
	}

done:
	if (q->db_pending >= 32) {
		q->db_pending = 0;
		t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}
/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@sc: the controller softc
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void
free_rx_bufs(adapter_t *sc, struct sge_fl *q)
{
	u_int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		if (d->flags & RX_SW_DESC_INUSE) {
			bus_dmamap_unload(q->entry_tag, d->map);
			bus_dmamap_destroy(q->entry_tag, d->map);
			if (q->zone == zone_pack) {
				m_init(d->m, zone_pack, MCLBYTES,
				    M_NOWAIT, MT_DATA, M_EXT);
				uma_zfree(zone_pack, d->m);
			} else {
				m_init(d->m, zone_mbuf, MLEN,
				    M_NOWAIT, MT_DATA, 0);
				uma_zfree(zone_mbuf, d->m);
				uma_zfree(q->zone, d->rxsd_cl);
			}
		}

		d->rxsd_cl = NULL;
		d->m = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}
static __inline void
__refill_fl(adapter_t *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits));
}

static __inline void
__refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
{
	uint32_t reclaimable = fl->size - fl->credits;

	if (reclaimable > 0)
		refill_fl(adap, fl, min(max, reclaimable));
}
/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adapter: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void
recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to   = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;        /* already big endian */
	to->addr_hi = from->addr_hi;        /* likewise */
	wmb();	/* necessary ? */
	to->len_gen = htobe32(V_FLD_GEN1(q->gen));
	to->gen2 = htobe32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
static void
alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

static int
alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
    bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = NULL;
	int err;

	if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
				      BUS_SPACE_MAXADDR_32BIT,
				      BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
				      len, 0, NULL, NULL, tag)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor tag\n");
		return (ENOMEM);
	}

	if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
				    map)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor memory\n");
		return (ENOMEM);
	}

	bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
	bzero(p, len);
	*(void **)desc = p;

	if (sw_size) {
		len = nelem * sw_size;
		s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
		*(void **)sdesc = s;
	}
	if (parent_entry_tag == NULL)
		return (0);

	if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
				      BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				      NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
				      TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
				      NULL, NULL, entry_tag)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
		return (ENOMEM);
	}

	return (0);
}
static void
sge_slow_intr_handler(void *arg, int ncount)
{
	adapter_t *sc = arg;

	t3_slow_intr_handler(sc);
	t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
}
/**
 *	sge_timer_cb - perform periodic maintenance of an SGE qset
 *	@data: the SGE queue set to maintain
 *
 *	Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It performs two tasks:
 *
 *	a) Cleans up any completed Tx descriptors that may still be pending.
 *	Normal descriptor cleanup happens when new packets are added to a Tx
 *	queue so this timer is relatively infrequent and does any cleanup only
 *	if the Tx queue has not seen any new packets in a while.  We make a
 *	best effort attempt to reclaim descriptors, in that we don't wait
 *	around if we cannot get a queue's lock (which most likely is because
 *	someone else is queueing new packets and so will also handle the clean
 *	up).  Since control queues use immediate data exclusively we don't
 *	bother cleaning them up here.
 *
 *	b) Replenishes Rx queues that have run out due to memory shortage.
 *	Normally new Rx buffers are added when existing ones are consumed but
 *	when out of memory a queue can become empty.  We try to add only a few
 *	buffers here, the queue will be replenished fully as these new buffers
 *	are used up if memory shortage has subsided.
 *
 *	c) Return coalesced response queue credits in case a response queue is
 *	starved.
 *
 *	d) Ring doorbells for T304 tunnel queues since we have seen doorbell
 *	fifo overflows and the FW doesn't implement any recovery scheme yet.
 */
static void
sge_timer_cb(void *arg)
{
	adapter_t *sc = arg;

	if ((sc->flags & USING_MSIX) == 0) {
		struct port_info *pi;
		struct sge_qset *qs;
		struct sge_txq  *txq;
		int i, j;
		int reclaim_ofl, refill_rx;

		if (sc->open_device_map == 0)
			return;

		for (i = 0; i < sc->params.nports; i++) {
			pi = &sc->port[i];
			for (j = 0; j < pi->nqsets; j++) {
				qs = &sc->sge.qs[pi->first_qset + j];
				txq = &qs->txq[0];
				reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
				refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
				    (qs->fl[1].credits < qs->fl[1].size));
				if (reclaim_ofl || refill_rx) {
					taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
					break;
				}
			}
		}
	}

	if (sc->params.nports > 2) {
		int i;

		for_each_port(sc, i) {
			struct port_info *pi = &sc->port[i];

			t3_write_reg(sc, A_SG_KDOORBELL,
				     F_SELEGRCNTX |
				     (FW_TUNNEL_SGEEC_START + pi->first_qset));
		}
	}
	if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
	    sc->open_device_map != 0)
		callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
}
/*
 * This is meant to be a catch-all function to keep sge state private
 * to sge.c
 */
int
t3_sge_init_adapter(adapter_t *sc)
{
	callout_init(&sc->sge_timer_ch, 1);
	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
	TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
	return (0);
}

int
t3_sge_reset_adapter(adapter_t *sc)
{
	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
	return (0);
}

int
t3_sge_init_port(struct port_info *pi)
{
	TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
	return (0);
}
/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static __inline void
refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
{
	/* mbufs are allocated on demand when a rspq entry is processed. */
	t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}
static void
sge_txq_reclaim_handler(void *arg, int ncount)
{
	struct sge_qset *qs = arg;
	int i;

	for (i = 0; i < 3; i++)
		reclaim_completed_tx(qs, 16, i);
}
static void
sge_timer_reclaim(void *arg, int ncount)
{
	struct port_info *pi = arg;
	int i, nqsets = pi->nqsets;
	adapter_t *sc = pi->adapter;
	struct sge_qset *qs;
	struct mtx *lock;

	KASSERT((sc->flags & USING_MSIX) == 0,
	    ("can't call timer reclaim for msi-x"));

	for (i = 0; i < nqsets; i++) {
		qs = &sc->sge.qs[pi->first_qset + i];

		reclaim_completed_tx(qs, 16, TXQ_OFLD);
		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
			    &sc->sge.qs[0].rspq.lock;

		if (mtx_trylock(lock)) {
			/* XXX currently assume that we are *NOT* polling */
			uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);

			if (qs->fl[0].credits < qs->fl[0].size - 16)
				__refill_fl(sc, &qs->fl[0]);
			if (qs->fl[1].credits < qs->fl[1].size - 16)
				__refill_fl(sc, &qs->fl[1]);

			if (status & (1 << qs->rspq.cntxt_id)) {
				if (qs->rspq.credits) {
					refill_rspq(sc, &qs->rspq, 1);
					qs->rspq.credits--;
					t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
					    1 << qs->rspq.cntxt_id);
				}
			}
			mtx_unlock(lock);
		}
	}
}
/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void
init_qset_cntxt(struct sge_qset *qs, u_int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;

	/* XXX: a sane limit is needed instead of INT_MAX */
	mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
	mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
	mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
}
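
/*
 * Example layout: queue set 2 gets response queue context 2, free-list
 * contexts 4 and 5, and Ethernet egress context FW_TUNNEL_SGEEC_START + 2;
 * each queue class has its own id space, offset by the FW_*_START bases.
 */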
static __inline void
txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
{
	txq->in_use += ndesc;
	/*
	 * XXX we don't handle stopping of queue
	 * presumably start handles this when we bump against the end
	 */
	txqs->gen = txq->gen;
	txq->unacked += ndesc;
	txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
	txq->unacked &= 31;
	txqs->pidx = txq->pidx;
	txq->pidx += ndesc;
#ifdef INVARIANTS
	if (((txqs->pidx > txq->cidx) &&
		(txq->pidx < txqs->pidx) &&
		(txq->pidx >= txq->cidx)) ||
	    ((txqs->pidx < txq->cidx) &&
		(txq->pidx >= txq->cidx)) ||
	    ((txqs->pidx < txq->cidx) &&
		(txq->cidx < txqs->pidx)))
		panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
		    txqs->pidx, txq->pidx, txq->cidx);
#endif
	if (txq->pidx >= txq->size) {
		txq->pidx -= txq->size;
		txq->gen ^= 1;
	}
}
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@m: the packet mbufs
 *	@nsegs: the number of segments
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static __inline unsigned int
calc_tx_descs(const struct mbuf *m, int nsegs)
{
	unsigned int flits;

	if (m->m_pkthdr.len <= PIO_LEN)
		return 1;

	flits = sgl_len(nsegs) + 2;
	if (m->m_pkthdr.csum_flags & CSUM_TSO)
		flits++;

	return flits_to_desc(flits);
}
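
/*
 * Example: a 4-segment packet larger than PIO_LEN needs
 * sgl_len(4) + 2 = 8 flits (plus one more for a TSO header), and
 * flits_to_desc(8) maps that to a single descriptor; packets that fit in
 * PIO_LEN are always written as immediate data in one descriptor.
 */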
/**
 *	make_sgl - populate a scatter/gather list for a packet
 *	@sgp: the SGL to populate
 *	@segs: the packet dma segments
 *	@nsegs: the number of segments
 *
 *	Generates a scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static __inline void
make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
{
	int i, idx;

	for (idx = 0, i = 0; i < nsegs; i++) {
		/*
		 * firmware doesn't like empty segments
		 */
		if (segs[i].ds_len == 0)
			continue;
		if (i && idx == 0)
			++sgp;

		sgp->len[idx] = htobe32(segs[i].ds_len);
		sgp->addr[idx] = htobe64(segs[i].ds_addr);
		idx ^= 1;
	}

	if (idx) {
		sgp->len[idx] = 0;
		sgp->addr[idx] = 0;
	}
}
/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 *	where the HW is going to sleep just after we checked, however,
 *	then the interrupt handler will detect the outstanding TX packet
 *	and ring the doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
static __inline void
check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
#ifdef T3_TRACE
		T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
			  q->cntxt_id);
#endif
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	if (mustring || ++q->db_pending >= 32) {
		wmb();            /* write descriptors before telling HW */
		t3_write_reg(adap, A_SG_KDOORBELL,
		    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
		q->db_pending = 0;
	}
#endif
}
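
/*
 * With GTS disabled, the batching above means a caller streaming packets
 * rings the doorbell at most once per 32 descriptors; callers that must
 * flush immediately (end of a burst, the watchdog) pass mustring != 0,
 * which rings unconditionally and resets the pending count.
 */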
static __inline void
wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
#endif
}
/**
 *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
 *	@ndesc: number of Tx descriptors spanned by the SGL
 *	@txd: first Tx descriptor to be written
 *	@txqs: txq state (generation and producer index)
 *	@txq: the SGE Tx queue
 *	@sgl: the SGL
 *	@flits: number of flits to the start of the SGL in the first descriptor
 *	@sgl_flits: the SGL size in flits
 *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
 *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 *	Write a work request header and an associated SGL.  If the SGL is
 *	small enough to fit into one Tx descriptor it has already been written
 *	and we just need to write the WR header.  Otherwise we distribute the
 *	SGL across the number of descriptors it spans.
 */
static void
write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
    const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
    unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
	struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];

	if (__predict_true(ndesc == 1)) {
		set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
		    V_WR_SGLSFLT(flits)) | wr_hi,
		    htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
		    wr_lo);

		wr_gen2(txd, txqs->gen);
	} else {
		unsigned int ogen = txqs->gen;
		const uint64_t *fp = (const uint64_t *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
		    V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			txd++;
			txsd++;
			if (++txqs->pidx == txq->size) {
				txqs->pidx = 0;
				txqs->gen ^= 1;
				txd = txq->desc;
				txsd = txq->sdesc;
			}

			/*
			 * when the head of the mbuf chain
			 * is freed all clusters will be freed
			 * with it
			 */
			wrp = (struct work_request_hdr *)txd;
			wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
			    V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
				    sgl_flits + 1)) |
			    V_WR_GEN(txqs->gen)) | wr_lo;
			wr_gen2(txd, txqs->gen);
			flits = 1;
		}
		wrp->wrh_hi |= htonl(F_WR_EOP);
		wmb();
		wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
	}
}
/* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
#define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)

#define GET_VTAG(cntrl, m) \
do { \
	if ((m)->m_flags & M_VLANTAG)					            \
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
} while (0)
static int
t3_encap(struct sge_qset *qs, struct mbuf **m)
{
	adapter_t *sc;
	struct mbuf *m0;
	struct sge_txq *txq;
	struct txq_state txqs;
	struct port_info *pi;
	unsigned int ndesc, flits, cntrl, mlen;
	int err, nsegs, tso_info = 0;

	struct work_request_hdr *wrp;
	struct tx_sw_desc *txsd;
	struct sg_ent *sgp, *sgl;
	uint32_t wr_hi, wr_lo, sgl_flits;
	bus_dma_segment_t segs[TX_MAX_SEGS];

	struct tx_desc *txd;

	pi = qs->port;
	sc = pi->adapter;
	txq = &qs->txq[TXQ_ETH];
	txd = &txq->desc[txq->pidx];
	txsd = &txq->sdesc[txq->pidx];
	sgl = txq->txq_sgl;
	m0 = *m;

	mtx_assert(&qs->lock, MA_OWNED);
	cntrl = V_TXPKT_INTF(pi->txpkt_intf);
	KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));

	if (m0->m_nextpkt == NULL && m0->m_next != NULL &&
	    m0->m_pkthdr.csum_flags & (CSUM_TSO))
		tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);

	if (m0->m_nextpkt != NULL) {
		busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
		ndesc = 1;
		mlen = 0;
	} else {
		if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
		    &m0, segs, &nsegs))) {
			if (cxgb_debug)
				printf("failed ... err=%d\n", err);
			return (err);
		}
		mlen = m0->m_pkthdr.len;
		ndesc = calc_tx_descs(m0, nsegs);
	}
	txq_prod(txq, ndesc, &txqs);

	KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
	txsd->m = m0;

	if (m0->m_nextpkt != NULL) {
		struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
		int i, fidx;

		if (nsegs > 7)
			panic("trying to coalesce %d packets in to one WR", nsegs);
		txq->txq_coalesced += nsegs;
		wrp = (struct work_request_hdr *)txd;
		flits = nsegs*2 + 1;

		for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
			struct cpl_tx_pkt_batch_entry *cbe;
			uint64_t flit;
			uint32_t *hflit = (uint32_t *)&flit;
			int cflags = m0->m_pkthdr.csum_flags;

			cntrl = V_TXPKT_INTF(pi->txpkt_intf);
			GET_VTAG(cntrl, m0);
			cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
			if (__predict_false(!(cflags & CSUM_IP)))
				cntrl |= F_TXPKT_IPCSUM_DIS;
			if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
			    CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
				cntrl |= F_TXPKT_L4CSUM_DIS;

			hflit[0] = htonl(cntrl);
			hflit[1] = htonl(segs[i].ds_len | 0x80000000);
			flit |= htobe64(1 << 24);
			cbe = &cpl_batch->pkt_entry[i];
			cbe->cntrl = hflit[0];
			cbe->len = hflit[1];
			cbe->addr = htobe64(segs[i].ds_addr);
		}

		wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
		    V_WR_SGLSFLT(flits)) |
		    htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
		wr_lo = htonl(V_WR_LEN(flits) |
		    V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
		set_wr_hdr(wrp, wr_hi, wr_lo);
		wmb();
		ETHER_BPF_MTAP(pi->ifp, m0);
		wr_gen2(txd, txqs.gen);
		check_ring_tx_db(sc, txq, 0);
		return (0);
	} else if (tso_info) {
		uint16_t eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
		struct ether_header *eh;
		void *l3hdr;
		struct tcphdr *tcp;

		txd->flit[2] = 0;
		GET_VTAG(cntrl, m0);
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		hdr->len = htonl(mlen | 0x80000000);

		if (__predict_false(mlen < TCPPKTHDRSIZE)) {
			printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
			    m0, mlen, m0->m_pkthdr.tso_segsz,
			    (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
			panic("tx tso packet too small");
		}

		/* Make sure that ether, ip, tcp headers are all in m0 */
		if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
			m0 = m_pullup(m0, TCPPKTHDRSIZE);
			if (__predict_false(m0 == NULL)) {
				/* XXX panic probably an overreaction */
				panic("couldn't fit header into mbuf");
			}
		}

		eh = mtod(m0, struct ether_header *);
		eth_type = eh->ether_type;
		if (eth_type == htons(ETHERTYPE_VLAN)) {
			struct ether_vlan_header *evh = (void *)eh;

			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
			l3hdr = evh + 1;
			eth_type = evh->evl_proto;
		} else {
			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
			l3hdr = eh + 1;
		}

		if (eth_type == htons(ETHERTYPE_IP)) {
			struct ip *ip = l3hdr;

			tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
			tcp = (struct tcphdr *)(ip + 1);
		} else if (eth_type == htons(ETHERTYPE_IPV6)) {
			struct ip6_hdr *ip6 = l3hdr;

			KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
			    ("%s: CSUM_TSO with ip6_nxt %d",
			    __func__, ip6->ip6_nxt));

			tso_info |= F_LSO_IPV6;
			tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
			tcp = (struct tcphdr *)(ip6 + 1);
		} else
			panic("%s: CSUM_TSO but neither ip nor ip6", __func__);

		tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
		hdr->lso_info = htonl(tso_info);

		if (__predict_false(mlen <= PIO_LEN)) {
			/*
			 * pkt not undersized but fits in PIO_LEN
			 * Indicates a TSO bug at the higher levels.
			 */
			txsd->m = NULL;
			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
			flits = (mlen + 7) / 8 + 3;
			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
			    V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
			    F_WR_SOP | F_WR_EOP | txqs.compl);
			wr_lo = htonl(V_WR_LEN(flits) |
			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
			set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
			wmb();
			ETHER_BPF_MTAP(pi->ifp, m0);
			wr_gen2(txd, txqs.gen);
			check_ring_tx_db(sc, txq, 0);
			m_freem(m0);
			return (0);
		}
		flits = 3;
	} else {
		struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;

		GET_VTAG(cntrl, m0);
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
			cntrl |= F_TXPKT_IPCSUM_DIS;
		if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
		    CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
			cntrl |= F_TXPKT_L4CSUM_DIS;
		cpl->cntrl = htonl(cntrl);
		cpl->len = htonl(mlen | 0x80000000);

		if (mlen <= PIO_LEN) {
			txsd->m = NULL;
			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
			flits = (mlen + 7) / 8 + 2;

			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
			    V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
			    F_WR_SOP | F_WR_EOP | txqs.compl);
			wr_lo = htonl(V_WR_LEN(flits) |
			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
			set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
			wmb();
			ETHER_BPF_MTAP(pi->ifp, m0);
			wr_gen2(txd, txqs.gen);
			check_ring_tx_db(sc, txq, 0);
			m_freem(m0);
			return (0);
		}
		flits = 2;
	}
	wrp = (struct work_request_hdr *)txd;
	sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
	make_sgl(sgp, segs, nsegs);

	sgl_flits = sgl_len(nsegs);

	ETHER_BPF_MTAP(pi->ifp, m0);

	KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
	wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
	wr_lo = htonl(V_WR_TID(txq->token));
	write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
	    sgl_flits, wr_hi, wr_lo);
	check_ring_tx_db(sc, txq, 0);

	return (0);
}
static void
cxgb_tx_watchdog(void *arg)
{
	struct sge_qset *qs = arg;
	struct sge_txq *txq = &qs->txq[TXQ_ETH];

	if (qs->coalescing != 0 &&
	    (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
	    TXQ_RING_EMPTY(qs))
		qs->coalescing = 0;
	else if (qs->coalescing == 0 &&
	    (txq->in_use >= cxgb_tx_coalesce_enable_start))
		qs->coalescing = 1;
	if (TXQ_TRYLOCK(qs)) {
		qs->qs_flags |= QS_FLUSHING;
		cxgb_start_locked(qs);
		qs->qs_flags &= ~QS_FLUSHING;
		TXQ_UNLOCK(qs);
	}
	if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
		callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
		    qs, txq->txq_watchdog.c_cpu);
}
static void
cxgb_tx_timeout(void *arg)
{
	struct sge_qset *qs = arg;
	struct sge_txq *txq = &qs->txq[TXQ_ETH];

	if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
		qs->coalescing = 1;
	if (TXQ_TRYLOCK(qs)) {
		qs->qs_flags |= QS_TIMEOUT;
		cxgb_start_locked(qs);
		qs->qs_flags &= ~QS_TIMEOUT;
		TXQ_UNLOCK(qs);
	}
}
static void
cxgb_start_locked(struct sge_qset *qs)
{
	struct mbuf *m_head = NULL;
	struct sge_txq *txq = &qs->txq[TXQ_ETH];
	struct port_info *pi = qs->port;
	struct ifnet *ifp = pi->ifp;

	if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
		reclaim_completed_tx(qs, 0, TXQ_ETH);

	if (!pi->link_config.link_ok) {
		TXQ_RING_FLUSH(qs);
		return;
	}
	TXQ_LOCK_ASSERT(qs);
	while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	    pi->link_config.link_ok) {
		reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);

		if (txq->size - txq->in_use <= TX_MAX_DESC)
			break;

		if ((m_head = cxgb_dequeue(qs)) == NULL)
			break;
		/*
		 * Encapsulation can modify our pointer, and or make it
		 * NULL on failure.  In that event, we can't requeue.
		 */
		if (t3_encap(qs, &m_head) || m_head == NULL)
			break;

		m_head = NULL;
	}

	if (txq->db_pending)
		check_ring_tx_db(pi->adapter, txq, 1);

	if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
	    pi->link_config.link_ok)
		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
		    qs, txq->txq_timer.c_cpu);
	if (m_head != NULL)
		m_freem(m_head);
}
static int
cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
{
	struct port_info *pi = qs->port;
	struct sge_txq *txq = &qs->txq[TXQ_ETH];
	struct buf_ring *br = txq->txq_mr;
	int error, avail;

	avail = txq->size - txq->in_use;
	TXQ_LOCK_ASSERT(qs);

	/*
	 * We can only do a direct transmit if the following are true:
	 * - we aren't coalescing (ring < 3/4 full)
	 * - the link is up -- checked in caller
	 * - there are no packets enqueued already
	 * - there is space in hardware transmit queue
	 */
	if (check_pkt_coalesce(qs) == 0 &&
	    !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
		if (t3_encap(qs, &m)) {
			if (m != NULL &&
			    (error = drbr_enqueue(ifp, br, m)) != 0)
				return (error);
		} else {
			if (txq->db_pending)
				check_ring_tx_db(pi->adapter, txq, 1);

			/*
			 * We've bypassed the buf ring so we need to update
			 * the stats directly
			 */
			txq->txq_direct_packets++;
			txq->txq_direct_bytes += m->m_pkthdr.len;
		}
	} else if ((error = drbr_enqueue(ifp, br, m)) != 0)
		return (error);

	reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
	if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
	    (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
		cxgb_start_locked(qs);
	else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
		    qs, txq->txq_timer.c_cpu);
	return (0);
}
int
cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct sge_qset *qs;
	struct port_info *pi = ifp->if_softc;
	int error, qidx = pi->first_qset;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
	    || (!pi->link_config.link_ok)) {
		m_freem(m);
		return (0);
	}

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;

	qs = &pi->adapter->sge.qs[qidx];

	if (TXQ_TRYLOCK(qs)) {
		error = cxgb_transmit_locked(ifp, qs, m);
		TXQ_UNLOCK(qs);
	} else
		error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);

	return (error);
}
void
cxgb_qflush(struct ifnet *ifp)
{
	/*
	 * flush any enqueued mbufs in the buf_rings
	 * and in the transmit queues
	 * no-op for now
	 */
	return;
}
/**
 *	write_imm - write a packet into a Tx descriptor as immediate data
 *	@d: the Tx descriptor to write
 *	@m: the packet
 *	@len: the length of packet data to write as immediate data
 *	@gen: the generation bit value to write
 *
 *	Writes a packet as immediate data into a Tx descriptor.  The packet
 *	contains a work request at its beginning.  We must write the packet
 *	carefully so the SGE doesn't read accidentally before it's written in
 *	its entirety.
 */
static __inline void
write_imm(struct tx_desc *d, caddr_t src,
	  unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)src;
	struct work_request_hdr *to = (struct work_request_hdr *)d;
	uint32_t wr_hi, wr_lo;

	KASSERT(len <= WR_LEN && len >= sizeof(*from),
	    ("%s: invalid len %d", __func__, len));

	memcpy(&to[1], &from[1], len - sizeof(*from));
	wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
	    V_WR_BCNTLFLT(len & 7));
	wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
	set_wr_hdr(to, wr_hi, wr_lo);
	wmb();
	wr_gen2(d, gen);
}
/**
 *	check_desc_avail - check descriptor availability on a send queue
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@m: the packet needing the descriptors
 *	@ndesc: the number of Tx descriptors needed
 *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 *	Checks if the requested number of Tx descriptors is available on an
 *	SGE send queue.  If the queue is already suspended or not enough
 *	descriptors are available the packet is queued for later transmission.
 *	Must be called with the Tx queue locked.
 *
 *	Returns 0 if enough descriptors are available, 1 if there aren't
 *	enough descriptors and the packet has been queued, and 2 if the caller
 *	needs to retry because there weren't enough descriptors at the
 *	beginning of the call but some freed up in the mean time.
 */
static __inline int
check_desc_avail(adapter_t *adap, struct sge_txq *q,
		 struct mbuf *m, unsigned int ndesc,
		 unsigned int qid)
{
	/*
	 * XXX We currently only use this for checking the control queue
	 * the control queue is only used for binding qsets which happens
	 * at init time so we are guaranteed enough descriptors
	 */
	if (__predict_false(mbufq_len(&q->sendq))) {
addq_exit:	(void)mbufq_enqueue(&q->sendq, m);
		return 1;
	}
	if (__predict_false(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		setbit(&qs->txq_stopped, qid);
		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}
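
/*
 * A minimal caller sketch (not compiled) of the 0/1/2 protocol above, in
 * the same shape ctrl_xmit() and ofld_xmit() use below:
 */
#if 0
	TXQ_LOCK(qs);
again:	reclaim_completed_tx_imm(q);
	ret = check_desc_avail(adap, q, m, ndesc, qid);
	if (ret == 1) {			/* queued on q->sendq for later */
		TXQ_UNLOCK(qs);
		return (ENOSPC);
	} else if (ret == 2)		/* descriptors freed up, retry */
		goto again;
	/* ret == 0: descriptors are reserved, write the WR */
#endif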
/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any mbufs to release.
 */
static __inline void
reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}
/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@adap: the adapter
 *	@q: the control queue
 *	@m: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data in a single Tx
 *	descriptor and have no page fragments.
 */
static int
ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
{
	int ret;
	struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
	struct sge_txq *q = &qs->txq[TXQ_CTRL];

	KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));

	wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wrh_lo = htonl(V_WR_TID(q->token));

	TXQ_LOCK(qs);
again:	reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
	if (__predict_false(ret)) {
		if (ret == 1) {
			TXQ_UNLOCK(qs);
			return (ENOSPC);
		}
		goto again;
	}
	write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	TXQ_UNLOCK(qs);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
	    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));

	m_free(m);
	return (0);
}
/**
 *	restart_ctrlq - restart a suspended control queue
 *	@qs: the queue set containing the control queue
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void
restart_ctrlq(void *data, int npending)
{
	struct mbuf *m;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];
	adapter_t *adap = qs->port->adapter;

	TXQ_LOCK(qs);
again:	reclaim_completed_tx_imm(q);

	while (q->in_use < q->size &&
	       (m = mbufq_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
		m_free(m);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}
	if (mbufq_len(&q->sendq)) {
		setbit(&qs->txq_stopped, TXQ_CTRL);

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}
	TXQ_UNLOCK(qs);
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
/*
 * Send a management message through control queue 0
 */
int
t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
{
	return ctrl_xmit(adap, &adap->sge.qs[0], m);
}
/**
 *	free_qset - free the resources of an SGE queue set
 *	@sc: the controller owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
static void
t3_free_qset(adapter_t *sc, struct sge_qset *q)
{
	int i;

	reclaim_completed_tx(q, 0, TXQ_ETH);
	if (q->txq[TXQ_ETH].txq_mr != NULL)
		buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
	if (q->txq[TXQ_ETH].txq_ifq != NULL) {
		ifq_delete(q->txq[TXQ_ETH].txq_ifq);
		free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
	}

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		if (q->fl[i].desc) {
			mtx_lock_spin(&sc->sge.reg_lock);
			t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
			mtx_unlock_spin(&sc->sge.reg_lock);
			bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
			bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
					q->fl[i].desc_map);
			bus_dma_tag_destroy(q->fl[i].desc_tag);
			bus_dma_tag_destroy(q->fl[i].entry_tag);
		}
		if (q->fl[i].sdesc) {
			free_rx_bufs(sc, &q->fl[i]);
			free(q->fl[i].sdesc, M_DEVBUF);
		}
	}

	mtx_unlock(&q->lock);
	MTX_DESTROY(&q->lock);
	for (i = 0; i < SGE_TXQ_PER_SET; i++) {
		if (q->txq[i].desc) {
			mtx_lock_spin(&sc->sge.reg_lock);
			t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
			mtx_unlock_spin(&sc->sge.reg_lock);
			bus_dmamap_unload(q->txq[i].desc_tag,
					q->txq[i].desc_map);
			bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
					q->txq[i].desc_map);
			bus_dma_tag_destroy(q->txq[i].desc_tag);
			bus_dma_tag_destroy(q->txq[i].entry_tag);
		}
		if (q->txq[i].sdesc) {
			free(q->txq[i].sdesc, M_DEVBUF);
		}
	}

	if (q->rspq.desc) {
		mtx_lock_spin(&sc->sge.reg_lock);
		t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
		mtx_unlock_spin(&sc->sge.reg_lock);

		bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
		bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
			        q->rspq.desc_map);
		bus_dma_tag_destroy(q->rspq.desc_tag);
		MTX_DESTROY(&q->rspq.lock);
	}

#if defined(INET6) || defined(INET)
	tcp_lro_free(&q->lro.ctrl);
#endif

	bzero(q, sizeof(*q));
}
/**
 *	t3_free_sge_resources - free SGE resources
 *	@sc: the adapter softc
 *
 *	Frees resources used by the SGE queue sets.
 */
void
t3_free_sge_resources(adapter_t *sc, int nqsets)
{
	int i;

	for (i = 0; i < nqsets; ++i) {
		TXQ_LOCK(&sc->sge.qs[i]);
		t3_free_qset(sc, &sc->sge.qs[i]);
	}
}
/**
 *	t3_sge_start - enable SGE
 *	@sc: the controller softc
 *
 *	Enables the SGE for DMAs.  This is the last step in starting packet
 *	transfers.
 */
void
t3_sge_start(adapter_t *sc)
{
	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}
/**
 *	t3_sge_stop - disable SGE operation
 *	@sc: the adapter
 *
 *	Disables the DMA engine.  This can be called in emergencies (e.g.,
 *	from error interrupts) or from normal process context.  In the latter
 *	case it also disables any pending queue restart tasklets.  Note that
 *	if it is called in interrupt context it cannot disable the restart
 *	tasklets as it cannot wait, however the tasklets will have no effect
 *	since the doorbells are disabled and the driver will call this again
 *	later from process context, at which time the tasklets will be stopped
 *	if they are still running.
 */
void
t3_sge_stop(adapter_t *sc)
{
	int i, nqsets;

	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);

	if (sc->tq == NULL)
		return;

	for (nqsets = i = 0; i < (sc)->params.nports; i++)
		nqsets += sc->port[i].nqsets;
#ifdef notyet
	/*
	 * XXX
	 */
	for (i = 0; i < nqsets; ++i) {
		struct sge_qset *qs = &sc->sge.qs[i];

		taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
		taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
	}
#endif
}
/**
 *	t3_free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@reclaimable: the number of descriptors to reclaim
 *	@m_vec_size: maximum number of buffers to reclaim
 *	@desc_reclaimed: returns the number of descriptors reclaimed
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 *
 *	Returns the number of buffers reclaimed.
 */
void
t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
{
	struct tx_sw_desc *txsd;
	unsigned int cidx, mask;
	struct sge_txq *q = &qs->txq[queue];

#ifdef T3_TRACE
	T3_TRACE2(sc->tb[q->cntxt_id & 7],
		  "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
#endif
	cidx = q->cidx;
	mask = q->size - 1;
	txsd = &q->sdesc[cidx];

	mtx_assert(&qs->lock, MA_OWNED);
	while (reclaimable--) {
		prefetch(q->sdesc[(cidx + 1) & mask].m);
		prefetch(q->sdesc[(cidx + 2) & mask].m);

		if (txsd->m != NULL) {
			if (txsd->flags & TX_SW_DESC_MAPPED) {
				bus_dmamap_unload(q->entry_tag, txsd->map);
				txsd->flags &= ~TX_SW_DESC_MAPPED;
			}
			m_freem_list(txsd->m);
			txsd->m = NULL;
		}

		++txsd;
		if (++cidx == q->size) {
			cidx = 0;
			txsd = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static __inline int
is_new_response(const struct rsp_desc *r,
    const struct sge_rspq *q)
{
	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
}
#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))

/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
#define NOMEM_INTR_DELAY 2500
/**
 *	write_ofld_wr - write an offload work request
 *	@adap: the adapter
 *	@m: the packet to send
 *	@q: the Tx queue
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@ndesc: number of descriptors the packet will occupy
 *
 *	Write an offload work request to send the supplied packet.  The packet
 *	data already carry the work request with most fields populated.
 */
static void
write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
    unsigned int pidx, unsigned int gen, unsigned int ndesc)
{
	unsigned int sgl_flits, flits;
	int i, idx, nsegs, wrlen;
	struct work_request_hdr *from;
	struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct txq_state txqs;
	struct sglist_seg *segs;
	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
	struct sglist *sgl;

	from = (void *)(oh + 1);	/* Start of WR within mbuf */
	wrlen = m->m_len - sizeof(*oh);

	if (!(oh->flags & F_HDR_SGL)) {
		write_imm(d, (caddr_t)from, wrlen, gen);

		/*
		 * mbuf with "real" immediate tx data will be enqueue_wr'd by
		 * t3_push_frames and freed in wr_ack.  Others, like those sent
		 * down by close_conn, t3_send_reset, etc. should be freed here.
		 */
		if (!(oh->flags & F_HDR_DF))
			m_free(m);
		return;
	}

	memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));

	sgl = oh->sgl;
	flits = wrlen / 8;
	sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;

	nsegs = sgl->sg_nseg;
	segs = sgl->sg_segs;
	for (idx = 0, i = 0; i < nsegs; i++) {
		KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
		if (i && idx == 0)
			++sgp;
		sgp->len[idx] = htobe32(segs[i].ss_len);
		sgp->addr[idx] = htobe64(segs[i].ss_paddr);
		idx ^= 1;
	}
	if (idx) {
		sgp->len[idx] = 0;
		sgp->addr[idx] = 0;
	}

	sgl_flits = sgl_len(nsegs);
	txqs.gen = gen;
	txqs.pidx = pidx;
	txqs.compl = 0;

	write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
	    from->wrh_hi, from->wrh_lo);
}
/**
 *	ofld_xmit - send a packet through an offload queue
 *	@adap: the adapter
 *	@q: the Tx offload queue
 *	@m: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int
ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
{
	int ret;
	unsigned int ndesc;
	unsigned int pidx, gen;
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);

	ndesc = G_HDR_NDESC(oh->flags);

	TXQ_LOCK(qs);
again:	reclaim_completed_tx(qs, 16, TXQ_OFLD);
	ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
	if (__predict_false(ret)) {
		if (ret == 1) {
			TXQ_UNLOCK(qs);
			return (EINTR);
		}
		goto again;
	}

	gen = q->gen;
	q->in_use += ndesc;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	write_ofld_wr(adap, m, q, pidx, gen, ndesc);
	check_ring_tx_db(adap, q, 1);
	TXQ_UNLOCK(qs);

	return (0);
}
/**
 *	restart_offloadq - restart a suspended offload queue
 *	@qs: the queue set containing the offload queue
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void
restart_offloadq(void *data, int npending)
{
	struct mbuf *m;
	struct sge_qset *qs = data;
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	adapter_t *adap = qs->port->adapter;
	int cleaned;

	TXQ_LOCK(qs);
again:	cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);

	while ((m = mbufq_first(&q->sendq)) != NULL) {
		unsigned int gen, pidx;
		struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
		unsigned int ndesc = G_HDR_NDESC(oh->flags);

		if (__predict_false(q->size - q->in_use < ndesc)) {
			setbit(&qs->txq_stopped, TXQ_OFLD);
			if (should_restart_tx(q) &&
			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
				goto again;
			q->stops++;
			break;
		}

		gen = q->gen;
		q->in_use += ndesc;
		pidx = q->pidx;
		q->pidx += ndesc;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->gen ^= 1;
		}

		(void)mbufq_dequeue(&q->sendq);
		TXQ_UNLOCK(qs);
		write_ofld_wr(adap, m, q, pidx, gen, ndesc);
		TXQ_LOCK(qs);
	}
#if USE_GTS
	set_bit(TXQ_RUNNING, &q->flags);
	set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
	TXQ_UNLOCK(qs);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
/**
 *	t3_offload_tx - send an offload packet
 *	@sc: the adapter
 *	@m: the packet
 *
 *	Sends an offload packet.  We use the packet priority to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-3 select the queue set.
 */
int
t3_offload_tx(struct adapter *sc, struct mbuf *m)
{
	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
	struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];

	if (oh->flags & F_HDR_CTRL) {
		m_adj(m, sizeof (*oh));	/* trim ofld_hdr off */
		return (ctrl_xmit(sc, qs, m));
	} else
		return (ofld_xmit(sc, qs, m));
}
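
/*
 * Sketch of how a caller could encode the routing described above into the
 * ofld_hdr before handing the mbuf to t3_offload_tx().  V_HDR_NDESC() and
 * V_HDR_QSET() are assumed to be the setter counterparts of the G_HDR_*
 * getters used in this file; the variables are hypothetical.
 */
#if 0	/* example only, not compiled */
	oh->flags = V_HDR_NDESC(ndesc) | V_HDR_QSET(qset_idx);
	if (is_control_wr)
		oh->flags |= F_HDR_CTRL;	/* route to the control queue */
	error = t3_offload_tx(sc, m);
#endif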
static void
restart_tx(struct sge_qset *qs)
{
	struct adapter *sc = qs->port->adapter;

	if (isset(&qs->txq_stopped, TXQ_OFLD) &&
	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
		qs->txq[TXQ_OFLD].restarts++;
		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
	}

	if (isset(&qs->txq_stopped, TXQ_CTRL) &&
	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
		qs->txq[TXQ_CTRL].restarts++;
		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
	}
}
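
/*
 * Note: txq_stopped is a bitmask with one bit per Tx queue, so the stop /
 * restart handshake needs no extra locking here: the transmit path sets a
 * queue's bit when it runs short of descriptors, and whichever context later
 * wins test_and_clear_bit() owns the restart, guaranteeing that the resume
 * task is enqueued at most once per stop event.
 */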
/**
 *	t3_sge_alloc_qset - initialize an SGE queue set
 *	@sc: the controller softc
 *	@id: the queue set id
 *	@nports: how many Ethernet ports will be using this queue set
 *	@irq_vec_idx: the IRQ vector index for response queue interrupts
 *	@p: configuration parameters for this queue set
 *	@ntxq: number of Tx queues for the queue set
 *	@pi: port info for queue set
 *
 *	Allocate resources and initialize an SGE queue set.  A queue set
 *	comprises a response queue, two Rx free-buffer queues, and up to 3
 *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
 *	queue, offload queue, and control queue.
 */
int
t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
    const struct qset_params *p, int ntxq, struct port_info *pi)
{
	struct sge_qset *q = &sc->sge.qs[id];
	int i, ret = 0;

	MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
	q->port = pi;
	q->adap = sc;

	if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
	    M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
		device_printf(sc->dev, "failed to allocate mbuf ring\n");
		goto err;
	}
	if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL) {
		device_printf(sc->dev, "failed to allocate ifq\n");
		goto err;
	}
	ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
	callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
	callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
	q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
	q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;

	init_qset_cntxt(q, id);

	if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
	    sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
	    &q->fl[0].desc, &q->fl[0].sdesc,
	    &q->fl[0].desc_tag, &q->fl[0].desc_map,
	    sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
		printf("error %d from alloc ring fl0\n", ret);
		goto err;
	}

	if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
	    sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
	    &q->fl[1].desc, &q->fl[1].sdesc,
	    &q->fl[1].desc_tag, &q->fl[1].desc_map,
	    sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
		printf("error %d from alloc ring fl1\n", ret);
		goto err;
	}

	if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
	    &q->rspq.phys_addr, &q->rspq.desc, NULL,
	    &q->rspq.desc_tag, &q->rspq.desc_map,
	    NULL, NULL)) != 0) {
		printf("error %d from alloc ring rspq\n", ret);
		goto err;
	}

	snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
	    device_get_unit(sc->dev), irq_vec_idx);
	MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);

	for (i = 0; i < ntxq; ++i) {
		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

		if ((ret = alloc_ring(sc, p->txq_size[i],
		    sizeof(struct tx_desc), sz,
		    &q->txq[i].phys_addr, &q->txq[i].desc,
		    &q->txq[i].sdesc, &q->txq[i].desc_tag,
		    &q->txq[i].desc_map,
		    sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
			printf("error %d from alloc ring tx %i\n", ret, i);
			goto err;
		}
		mbufq_init(&q->txq[i].sendq, INT_MAX);
		q->txq[i].gen = 1;
		q->txq[i].size = p->txq_size[i];
	}

#ifdef TCP_OFFLOAD
	TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
#endif
	TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
	TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
	TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);

	q->fl[0].gen = q->fl[1].gen = 1;
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	q->rspq.gen = 1;
	q->rspq.cidx = 0;
	q->rspq.size = p->rspq_size;

	q->txq[TXQ_ETH].stop_thres = nports *
	    flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);

	q->fl[0].buf_size = MCLBYTES;
	q->fl[0].zone = zone_pack;
	q->fl[0].type = EXT_PACKET;

	if (p->jumbo_buf_size == MJUM16BYTES) {
		q->fl[1].zone = zone_jumbo16;
		q->fl[1].type = EXT_JUMBO16;
	} else if (p->jumbo_buf_size == MJUM9BYTES) {
		q->fl[1].zone = zone_jumbo9;
		q->fl[1].type = EXT_JUMBO9;
	} else if (p->jumbo_buf_size == MJUMPAGESIZE) {
		q->fl[1].zone = zone_jumbop;
		q->fl[1].type = EXT_JUMBOP;
	} else {
		KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
		ret = EDOOFUS;
		goto err;
	}
	q->fl[1].buf_size = p->jumbo_buf_size;
	/* Allocate and setup the lro_ctrl structure */
	q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
#if defined(INET6) || defined(INET)
	ret = tcp_lro_init(&q->lro.ctrl);
	if (ret) {
		printf("error %d from tcp_lro_init\n", ret);
		goto err;
	}
#endif
	q->lro.ctrl.ifp = pi->ifp;

	mtx_lock_spin(&sc->sge.reg_lock);
	ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
	    q->rspq.phys_addr, q->rspq.size,
	    q->fl[0].buf_size, 1, 0);
	if (ret) {
		printf("error %d from t3_sge_init_rspcntxt\n", ret);
		goto err_unlock;
	}

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
		    q->fl[i].phys_addr, q->fl[i].size,
		    q->fl[i].buf_size, p->cong_thres, 1,
		    0);
		if (ret) {
			printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
			goto err_unlock;
		}
	}

	ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
	    SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
	    q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
	    1, 0);
	if (ret) {
		printf("error %d from t3_sge_init_ecntxt\n", ret);
		goto err_unlock;
	}

	if (ntxq > 1) {
		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
		    USE_GTS, SGE_CNTXT_OFLD, id,
		    q->txq[TXQ_OFLD].phys_addr,
		    q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret) {
			printf("error %d from t3_sge_init_ecntxt\n", ret);
			goto err_unlock;
		}
	}

	if (ntxq > 2) {
		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
		    SGE_CNTXT_CTRL, id,
		    q->txq[TXQ_CTRL].phys_addr,
		    q->txq[TXQ_CTRL].size,
		    q->txq[TXQ_CTRL].token, 1, 0);
		if (ret) {
			printf("error %d from t3_sge_init_ecntxt\n", ret);
			goto err_unlock;
		}
	}

	mtx_unlock_spin(&sc->sge.reg_lock);
	t3_update_qset_coalesce(q, p);

	refill_fl(sc, &q->fl[0], q->fl[0].size);
	refill_fl(sc, &q->fl[1], q->fl[1].size);
	refill_rspq(sc, &q->rspq, q->rspq.size - 1);

	t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
	    V_NEWTIMER(q->rspq.holdoff_tmr));

	return (0);

err_unlock:
	mtx_unlock_spin(&sc->sge.reg_lock);
err:
	TXQ_LOCK(q);
	t3_free_qset(sc, q);

	return (ret);
}
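
/*
 * Sketch of a typical call site (illustrative only; qset_idx, irq_vec_idx,
 * ntxq and pi are hypothetical locals): one call per queue set, using the
 * parameters the driver keeps in sc->params.sge.
 */
#if 0	/* example only, not compiled */
	ret = t3_sge_alloc_qset(sc, qset_idx, sc->params.nports, irq_vec_idx,
	    &sc->params.sge.qset[qset_idx], ntxq, pi);
	if (ret != 0)
		device_printf(sc->dev, "t3_sge_alloc_qset failed: %d\n", ret);
#endif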
/*
 * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
 * ethernet data.  Hardware assistance with various checksums and any vlan tag
 * will also be taken into account here.
 */
void
t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
{
	struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
	struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
	struct ifnet *ifp = pi->ifp;

	if (cpl->vlan_valid) {
		m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
		m->m_flags |= M_VLANTAG;
	}

	m->m_pkthdr.rcvif = ifp;
	/*
	 * adjust after conversion to mbuf chain
	 */
	m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
	m->m_len -= (sizeof(*cpl) + ethpad);
	m->m_data += (sizeof(*cpl) + ethpad);

	if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
		struct ether_header *eh = mtod(m, void *);
		uint16_t eh_type;

		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			struct ether_vlan_header *evh = mtod(m, void *);

			eh_type = evh->evl_proto;
		} else
			eh_type = eh->ether_type;

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    eh_type == htons(ETHERTYPE_IP)) {
			m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			m->m_pkthdr.csum_data = 0xffff;
		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
		    eh_type == htons(ETHERTYPE_IPV6)) {
			m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
			    CSUM_PSEUDO_HDR);
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
}
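
/*
 * Note: cpl->csum == 0xffff above means the hardware verified the full
 * checksum, so the driver can assert CSUM_DATA_VALID | CSUM_PSEUDO_HDR and
 * store the canonical 0xffff in csum_data rather than leaving verification
 * to the network stack.
 */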
/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *	@qs: the qset that the SGE free list holding the packet belongs to
 *	@mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
 *	@r: response descriptor
 *
 *	Get the next packet from a free list and complete setup of the
 *	mbuf chain.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static int
get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
    struct t3_mbuf_hdr *mh, struct rsp_desc *r)
{
	unsigned int len_cq = ntohl(r->len_cq);
	struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
	int mask, cidx = fl->cidx;
	struct rx_sw_desc *sd = &fl->sdesc[cidx];
	uint32_t len = G_RSPD_LEN(len_cq);
	uint32_t flags = M_EXT;
	uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
	caddr_t cl;
	struct mbuf *m;
	int ret = 0;

	mask = fl->size - 1;
	prefetch(fl->sdesc[(cidx + 1) & mask].m);
	prefetch(fl->sdesc[(cidx + 2) & mask].m);
	prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
	prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);

	fl->credits--;
	bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);

	if (recycle_enable && len <= SGE_RX_COPY_THRES &&
	    sopeop == RSPQ_SOP_EOP) {
		if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
			goto skip_recycle;
		cl = mtod(m, void *);
		memcpy(cl, sd->rxsd_cl, len);
		recycle_rx_buf(adap, fl, fl->cidx);
		m->m_pkthdr.len = m->m_len = len;

		mh->mh_head = mh->mh_tail = m;
		ret = 1;
		goto done;
	} else {
	skip_recycle:
		bus_dmamap_unload(fl->entry_tag, sd->map);
		cl = sd->rxsd_cl;
		m = sd->m;

		if ((sopeop == RSPQ_SOP_EOP) ||
		    (sopeop == RSPQ_SOP))
			flags |= M_PKTHDR;
		m_init(m, fl->zone, fl->buf_size, M_NOWAIT, MT_DATA, flags);
		if (fl->zone == zone_pack) {
			/*
			 * restore clobbered data pointer
			 */
			m->m_data = m->m_ext.ext_buf;
		} else {
			m_cljset(m, cl, fl->type);
		}
		m->m_len = len;
	}
	switch(sopeop) {
	case RSPQ_SOP_EOP:
		ret = 1;
		/* FALLTHROUGH */
	case RSPQ_SOP:
		mh->mh_head = mh->mh_tail = m;
		m->m_pkthdr.len = len;
		break;
	case RSPQ_EOP:
		ret = 1;
		/* FALLTHROUGH */
	case RSPQ_NSOP_NEOP:
		if (mh->mh_tail == NULL) {
			log(LOG_ERR, "discarding intermediate descriptor entry\n");
			m_freem(m);
			m = NULL;
			break;
		}
		mh->mh_tail->m_next = m;
		mh->mh_tail = m;
		mh->mh_head->m_pkthdr.len += len;
		break;
	}
	if (cxgb_debug && m != NULL)
		printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
done:
	if (++fl->cidx == fl->size)
		fl->cidx = 0;

	return (ret);
}
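
/*
 * Note: SGE_RX_COPY_THRES above is a copybreak threshold.  For short frames
 * it is cheaper to copy the payload into a fresh mbuf and recycle the
 * still-mapped cluster in place than to unload the DMA map and replenish the
 * free list with a newly allocated cluster.
 */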
/**
 *	handle_rsp_cntrl_info - handles control information in a response
 *	@qs: the queue set corresponding to the response
 *	@flags: the response control flags
 *
 *	Handles the control information of an SGE response, such as GTS
 *	indications and completion credits for the queue set's Tx queues.
 *	HW coalesces credits, we don't do any extra SW coalescing.
 */
static __inline void
handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
{
	unsigned int credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ0_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
#endif
	credits = G_RSPD_TXQ0_CR(flags);
	if (credits)
		qs->txq[TXQ_ETH].processed += credits;

	credits = G_RSPD_TXQ2_CR(flags);
	if (credits)
		qs->txq[TXQ_CTRL].processed += credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ1_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
#endif
	credits = G_RSPD_TXQ1_CR(flags);
	if (credits)
		qs->txq[TXQ_OFLD].processed += credits;
}
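
/*
 * Note: the hardware reports completion credits per egress context; the
 * mapping above is TXQ0 -> TXQ_ETH, TXQ1 -> TXQ_OFLD and TXQ2 -> TXQ_CTRL.
 * The "processed" counts accumulated here are what the reclaim path later
 * compares against "cleaned" to decide how many Tx descriptors to free.
 */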
static __inline void
check_ring_db(adapter_t *adap, struct sge_qset *qs,
    unsigned int sleeping)
{
	;
}
/**
 *	process_responses - process responses from an SGE response queue
 *	@adap: the adapter
 *	@qs: the queue set to which the response queue belongs
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as credits and other events
 *	for the queues that belong to the response queue's queue set.
 *	A negative budget is effectively unlimited.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int
process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
{
	struct sge_rspq *rspq = &qs->rspq;
	struct rsp_desc *r = &rspq->desc[rspq->cidx];
	int budget_left = budget;
	unsigned int sleeping = 0;
#if defined(INET6) || defined(INET)
	int lro_enabled = qs->lro.enabled;
	int skip_lro;
	struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
#endif
	struct t3_mbuf_hdr *mh = &rspq->rspq_mh;

	static int last_holdoff = 0;
	if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
		printf("next_holdoff=%d\n", rspq->holdoff_tmr);
		last_holdoff = rspq->holdoff_tmr;
	}

	rspq->next_holdoff = rspq->holdoff_tmr;

	while (__predict_true(budget_left && is_new_response(r, rspq))) {
		int eth, eop = 0, ethpad = 0;
		uint32_t flags = ntohl(r->flags);
		uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
		uint8_t opcode = r->rss_hdr.opcode;

		eth = (opcode == CPL_RX_PKT);

		if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
			struct mbuf *m;

			if (cxgb_debug)
				printf("async notification\n");

			if (mh->mh_head == NULL) {
				mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
				m = mh->mh_head;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
			}
			if (m == NULL)
				goto no_mem;

			memcpy(mtod(m, char *), r, AN_PKT_SIZE);
			m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
			*mtod(m, char *) = CPL_ASYNC_NOTIF;
			opcode = CPL_ASYNC_NOTIF;
			eop = 1;
			rspq->async_notif++;
		} else if (flags & F_RSPD_IMM_DATA_VALID) {
			struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);

			if (m == NULL) {
		no_mem:
				rspq->next_holdoff = NOMEM_INTR_DELAY;
				budget_left--;
				break;
			}
			if (mh->mh_head == NULL)
				mh->mh_head = m;
			else
				mh->mh_tail->m_next = m;
			mh->mh_tail = m;

			get_imm_packet(adap, r, m);
			mh->mh_head->m_pkthdr.len += m->m_len;
			eop = 1;
		} else if (r->len_cq) {
			int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;

			eop = get_packet(adap, drop_thresh, qs, mh, r);
			if (eop) {
				if (r->rss_hdr.hash_type && !adap->timestamp) {
					M_HASHTYPE_SET(mh->mh_head, M_HASHTYPE_OPAQUE);
					mh->mh_head->m_pkthdr.flowid = rss_hash;
				}
			}
			ethpad = 2;
		} else {
			rspq->pure_rsps++;
		}

		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		if (!eth && eop) {
			rspq->offload_pkts++;
#ifdef TCP_OFFLOAD
			adap->cpl_handler[opcode](qs, r, mh->mh_head);
#else
			m_freem(mh->mh_head);
#endif
			mh->mh_head = NULL;
		} else if (eth && eop) {
			struct mbuf *m = mh->mh_head;

			t3_rx_eth(adap, m, ethpad);

			/*
			 * The T304 sends incoming packets on any qset.  If LRO
			 * is also enabled, we could end up sending packet up
			 * lro_ctrl->ifp's input.  That is incorrect.
			 *
			 * The mbuf's rcvif was derived from the cpl header and
			 * is accurate.  Skip LRO and just use that.
			 */
#if defined(INET6) || defined(INET)
			skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);

			if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
			    && (tcp_lro_rx(lro_ctrl, m, 0) == 0)) {
				/* successfully queue'd for LRO */
			} else
#endif
			{
				/*
				 * LRO not enabled, packet unsuitable for LRO,
				 * or unable to queue.  Pass it up right now in
				 * softint mode.
				 */
				struct ifnet *ifp = m->m_pkthdr.rcvif;
				(*ifp->if_input)(ifp, m);
			}
			mh->mh_head = NULL;
		}

		r++;
		if (__predict_false(++rspq->cidx == rspq->size)) {
			rspq->cidx = 0;
			rspq->gen ^= 1;
			r = rspq->desc;
		}

		if (++rspq->credits >= 64) {
			refill_rspq(adap, rspq, rspq->credits);
			rspq->credits = 0;
		}
		__refill_fl_lt(adap, &qs->fl[0], 32);
		__refill_fl_lt(adap, &qs->fl[1], 32);
		--budget_left;
	}

#if defined(INET6) || defined(INET)
	/* Flush LRO */
	while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
		struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
		SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
		tcp_lro_flush(lro_ctrl, queued);
	}
#endif

	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	mb();  /* commit Tx queue processed updates */
	if (__predict_false(qs->txq_stopped > 1))
		restart_tx(qs);

	__refill_fl_lt(adap, &qs->fl[0], 512);
	__refill_fl_lt(adap, &qs->fl[1], 512);
	budget -= budget_left;
	return (budget);
}
/*
 * A helper function that processes responses and issues GTS.
 */
static __inline int
process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
{
	int work;
	static int last_holdoff = 0;

	work = process_responses(adap, rspq_to_qset(rq), -1);

	if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
		printf("next_holdoff=%d\n", rq->next_holdoff);
		last_holdoff = rq->next_holdoff;
	}
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
	    V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));

	return (work);
}
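
/*
 * Note: the A_SG_GTS write above performs two jobs in one register access:
 * V_NEWINDEX acknowledges the responses consumed so far, and V_NEWTIMER
 * programs the holdoff timer chosen by process_responses(), re-arming the
 * interrupt for this response queue.
 */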
/*
 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
void
t3b_intr(void *data)
{
	uint32_t i, map;
	adapter_t *adap = data;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (!map)
		return;

	if (__predict_false(map & F_ERRINTR)) {
		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
	}

	mtx_lock(&q0->lock);
	for_each_port(adap, i)
		if (map & (1 << i))
			process_responses_gts(adap, &adap->sge.qs[i].rspq);
	mtx_unlock(&q0->lock);
}
/*
 * The MSI interrupt handler.  This needs to handle data events from SGE
 * response queues as well as error and other async events as they all use
 * the same MSI vector.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
void
t3_intr_msi(void *data)
{
	adapter_t *adap = data;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
	int i, new_packets = 0;

	mtx_lock(&q0->lock);

	for_each_port(adap, i)
		if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
			new_packets = 1;
	mtx_unlock(&q0->lock);
	if (new_packets == 0) {
		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
	}
}
void
t3_intr_msix(void *data)
{
	struct sge_qset *qs = data;
	adapter_t *adap = qs->port->adapter;
	struct sge_rspq *rspq = &qs->rspq;

	if (process_responses_gts(adap, rspq) == 0)
		rspq->unhandled_irqs++;
}
#define QDUMP_SBUF_SIZE (32 * 400)
static int
t3_dump_rspq(SYSCTL_HANDLER_ARGS)
{
	struct sge_rspq *rspq;
	struct sge_qset *qs;
	int i, err, dump_end, idx;
	struct sbuf *sb;
	struct rsp_desc *rspd;
	uint32_t data[4];

	rspq = arg1;
	qs = rspq_to_qset(rspq);
	if (rspq->rspq_dump_count == 0)
		return (0);
	if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
		log(LOG_WARNING,
		    "dump count is too large %d\n", rspq->rspq_dump_count);
		rspq->rspq_dump_count = 0;
		return (EINVAL);
	}
	if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
		log(LOG_WARNING,
		    "dump start of %d is greater than queue size\n",
		    rspq->rspq_dump_start);
		rspq->rspq_dump_start = 0;
		return (EINVAL);
	}
	err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
	if (err)
		return (err);
	err = sysctl_wire_old_buffer(req, 0);
	if (err)
		return (err);
	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);

	sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
	    (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
	    ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
	sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
	    ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);

	sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
	    (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));

	dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
	for (i = rspq->rspq_dump_start; i < dump_end; i++) {
		idx = i & (RSPQ_Q_SIZE-1);

		rspd = &rspq->desc[idx];
		sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
		    idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
		    rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
		sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
		    rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
		    be32toh(rspd->len_cq), rspd->intr_gen);
	}

	err = sbuf_finish(sb);
	sbuf_delete(sb);
	return (err);
}
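
/*
 * Usage sketch (illustrative; the exact sysctl node names depend on how the
 * device attached): select a window with the dump_start/dump_count knobs and
 * then read qdump, e.g.
 *
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.dump_start=0
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.dump_count=32
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.qdump
 */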
static int
t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
{
	struct sge_txq *txq;
	struct sge_qset *qs;
	int i, j, err, dump_end;
	struct sbuf *sb;
	struct tx_desc *txd;
	uint32_t *WR, wr_hi, wr_lo, gen;
	uint32_t data[4];

	txq = arg1;
	qs = txq_to_qset(txq, TXQ_ETH);
	if (txq->txq_dump_count == 0) {
		return (0);
	}
	if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
		log(LOG_WARNING,
		    "dump count is too large %d\n", txq->txq_dump_count);
		txq->txq_dump_count = 1;
		return (EINVAL);
	}
	if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
		log(LOG_WARNING,
		    "dump start of %d is greater than queue size\n",
		    txq->txq_dump_start);
		txq->txq_dump_start = 0;
		return (EINVAL);
	}
	err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
	if (err)
		return (err);
	err = sysctl_wire_old_buffer(req, 0);
	if (err)
		return (err);
	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);

	sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
	    (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
	    (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
	sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
	    ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
	    ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
	    txq->txq_dump_start,
	    (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));

	dump_end = txq->txq_dump_start + txq->txq_dump_count;
	for (i = txq->txq_dump_start; i < dump_end; i++) {
		txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
		WR = (uint32_t *)txd->flit;
		wr_hi = ntohl(WR[0]);
		wr_lo = ntohl(WR[1]);
		gen = G_WR_GEN(wr_lo);

		sbuf_printf(sb, " wr_hi %08x wr_lo %08x gen %d\n",
		    wr_hi, wr_lo, gen);
		for (j = 2; j < 30; j += 4)
			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
	}

	err = sbuf_finish(sb);
	sbuf_delete(sb);
	return (err);
}
static int
t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
{
	struct sge_txq *txq;
	struct sge_qset *qs;
	int i, j, err, dump_end;
	struct sbuf *sb;
	struct tx_desc *txd;
	uint32_t *WR, wr_hi, wr_lo, gen;

	txq = arg1;
	qs = txq_to_qset(txq, TXQ_CTRL);
	if (txq->txq_dump_count == 0) {
		return (0);
	}
	if (txq->txq_dump_count > 256) {
		log(LOG_WARNING,
		    "dump count is too large %d\n", txq->txq_dump_count);
		txq->txq_dump_count = 1;
		return (EINVAL);
	}
	if (txq->txq_dump_start > 255) {
		log(LOG_WARNING,
		    "dump start of %d is greater than queue size\n",
		    txq->txq_dump_start);
		txq->txq_dump_start = 0;
		return (EINVAL);
	}

	err = sysctl_wire_old_buffer(req, 0);
	if (err != 0)
		return (err);
	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
	    txq->txq_dump_start,
	    (txq->txq_dump_start + txq->txq_dump_count) & 255);

	dump_end = txq->txq_dump_start + txq->txq_dump_count;
	for (i = txq->txq_dump_start; i < dump_end; i++) {
		txd = &txq->desc[i & (255)];
		WR = (uint32_t *)txd->flit;
		wr_hi = ntohl(WR[0]);
		wr_lo = ntohl(WR[1]);
		gen = G_WR_GEN(wr_lo);

		sbuf_printf(sb, " wr_hi %08x wr_lo %08x gen %d\n",
		    wr_hi, wr_lo, gen);
		for (j = 2; j < 30; j += 4)
			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
	}

	err = sbuf_finish(sb);
	sbuf_delete(sb);
	return (err);
}
static int
t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
{
	adapter_t *sc = arg1;
	struct qset_params *qsp = &sc->params.sge.qset[0];
	int coalesce_usecs;
	struct sge_qset *qs;
	int i, j, err, nqsets = 0;
	struct mtx *lock;

	if ((sc->flags & FULL_INIT_DONE) == 0)
		return (ENXIO);

	coalesce_usecs = qsp->coalesce_usecs;
	err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);

	if (err != 0)
		return (err);
	if (coalesce_usecs == qsp->coalesce_usecs)
		return (0);

	for (i = 0; i < sc->params.nports; i++)
		for (j = 0; j < sc->port[i].nqsets; j++)
			nqsets++;

	coalesce_usecs = max(1, coalesce_usecs);

	for (i = 0; i < nqsets; i++) {
		qs = &sc->sge.qs[i];
		qsp = &sc->params.sge.qset[i];
		qsp->coalesce_usecs = coalesce_usecs;

		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
		    &sc->sge.qs[0].rspq.lock;

		mtx_lock(lock);
		t3_update_qset_coalesce(qs, qsp);
		t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
		    V_NEWTIMER(qs->rspq.holdoff_tmr));
		mtx_unlock(lock);
	}

	return (0);
}
static int
t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
{
	adapter_t *sc = arg1;
	int rc, timestamp;

	if ((sc->flags & FULL_INIT_DONE) == 0)
		return (ENXIO);

	timestamp = sc->timestamp;
	rc = sysctl_handle_int(oidp, &timestamp, arg2, req);

	if (rc != 0)
		return (rc);

	if (timestamp != sc->timestamp) {
		t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
		    timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
		sc->timestamp = timestamp;
	}

	return (0);
}
void
t3_add_attach_sysctls(adapter_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	/* random information */
	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
	    "firmware_version",
	    CTLFLAG_RD, sc->fw_version,
	    0, "firmware version");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "hw_revision",
	    CTLFLAG_RD, &sc->params.rev,
	    0, "chip model");
	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
	    "port_types",
	    CTLFLAG_RD, sc->port_types,
	    0, "type of ports");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "enable_debug",
	    CTLFLAG_RW, &cxgb_debug,
	    0, "enable verbose debugging output");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
	    CTLFLAG_RD, &sc->tunq_coalesce,
	    "#tunneled packets freed");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "txq_overrun",
	    CTLFLAG_RD, &txq_fills,
	    0, "#times txq overrun");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "core_clock",
	    CTLFLAG_RD, &sc->params.vpd.cclk,
	    0, "core clock frequency (in KHz)");
}
static const char *rspq_name = "rspq";
static const char *txq_names[] =
{
	"txq_eth",
	"txq_ofld",
	"txq_ctrl"
};

static int
sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
{
	struct port_info *p = arg1;
	uint64_t *parg;

	if (!p)
		return (EINVAL);

	cxgb_refresh_stats(p);
	parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);

	return (sysctl_handle_64(oidp, parg, 0, req));
}
void
t3_add_configured_sysctls(adapter_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	int i, j;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "intr_coal",
	    CTLTYPE_INT|CTLFLAG_RW, sc,
	    0, t3_set_coalesce_usecs,
	    "I", "interrupt coalescing timer (us)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "pkt_timestamp",
	    CTLTYPE_INT | CTLFLAG_RW, sc,
	    0, t3_pkt_timestamp,
	    "I", "provide packet timestamp instead of connection hash");
	for (i = 0; i < sc->params.nports; i++) {
		struct port_info *pi = &sc->port[i];
		struct sysctl_oid *poid;
		struct sysctl_oid_list *poidlist;
		struct mac_stats *mstats = &pi->mac.stats;

		snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
		poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
		    pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
		poidlist = SYSCTL_CHILDREN(poid);
		SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
		    "nqsets", CTLFLAG_RD, &pi->nqsets,
		    0, "#queue sets");

		for (j = 0; j < pi->nqsets; j++) {
			struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
			struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
			    *ctrlqpoid, *lropoid;
			struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
			    *txqpoidlist, *ctrlqpoidlist,
			    *lropoidlist;
			struct sge_txq *txq = &qs->txq[TXQ_ETH];

			snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);

			qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
			    qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
			qspoidlist = SYSCTL_CHILDREN(qspoid);

			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
			    CTLFLAG_RD, &qs->fl[0].empty, 0,
			    "freelist #0 empty");
			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
			    CTLFLAG_RD, &qs->fl[1].empty, 0,
			    "freelist #1 empty");

			rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
			    rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
			rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);

			txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
			    txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
			txqpoidlist = SYSCTL_CHILDREN(txqpoid);

			ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
			    txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
			ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);

			lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
			    "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
			lropoidlist = SYSCTL_CHILDREN(lropoid);
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
			    CTLFLAG_RD, &qs->rspq.size,
			    0, "#entries in response queue");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
			    CTLFLAG_RD, &qs->rspq.cidx,
			    0, "consumer index");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
			    CTLFLAG_RD, &qs->rspq.credits,
			    0, "#credits");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
			    CTLFLAG_RD, &qs->rspq.starved,
			    0, "#times starved");
			SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
			    CTLFLAG_RD, &qs->rspq.phys_addr,
			    "physical address of the queue");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
			    CTLFLAG_RW, &qs->rspq.rspq_dump_start,
			    0, "start rspq dump entry");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
			    CTLFLAG_RW, &qs->rspq.rspq_dump_count,
			    0, "#rspq entries to dump");
			SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
			    CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
			    0, t3_dump_rspq, "A", "dump of the response queue");

			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
			    CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
			    "#tunneled packets dropped");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
			    0, "#tunneled packets waiting to be sent");

			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_prod,
			    0, "#tunneled packets queue producer index");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_cons,
			    0, "#tunneled packets queue consumer index");

			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
			    CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
			    0, "#tunneled packets processed by the card");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
			    CTLFLAG_RD, &txq->cleaned,
			    0, "#tunneled packets cleaned");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
			    CTLFLAG_RD, &txq->in_use,
			    0, "#tunneled packet slots in use");
			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "frees",
			    CTLFLAG_RD, &txq->txq_frees,
			    "#tunneled packets freed");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
			    CTLFLAG_RD, &txq->txq_skipped,
			    0, "#tunneled packet descriptors skipped");
			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
			    CTLFLAG_RD, &txq->txq_coalesced,
			    "#tunneled packets coalesced");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
			    CTLFLAG_RD, &txq->txq_enqueued,
			    0, "#tunneled packets enqueued to hardware");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
			    CTLFLAG_RD, &qs->txq_stopped,
			    0, "tx queues stopped");
			SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO, "phys_addr",
			    CTLFLAG_RD, &txq->phys_addr,
			    "physical address of the queue");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
			    CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
			    0, "txq generation");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
			    CTLFLAG_RD, &txq->cidx,
			    0, "hardware queue cidx");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
			    CTLFLAG_RD, &txq->pidx,
			    0, "hardware queue pidx");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
			    0, "txq start idx for dump");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
			    0, "txq #entries to dump");
			SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
			    0, t3_dump_txq_eth, "A", "dump of the transmit queue");

			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
			    0, "ctrlq start idx for dump");
			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
			    0, "ctrl #entries to dump");
			SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
			    0, t3_dump_txq_ctrl, "A", "dump of the transmit queue");

			SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_queued",
			    CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
			SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_flushed",
			    CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
			SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
			    CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
			    CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
		}

		/* Now add a node for mac stats. */
		poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
		    CTLFLAG_RD, NULL, "MAC statistics");
		poidlist = SYSCTL_CHILDREN(poid);
		/*
		 * We (ab)use the length argument (arg2) to pass on the offset
		 * of the data that we are interested in.  This is only required
		 * for the quad counters that are updated from the hardware (we
		 * make sure that we return the latest value).
		 * sysctl_handle_macstat first updates *all* the counters from
		 * the hardware, and then returns the latest value of the
		 * requested counter.  Best would be to update only the
		 * requested counter from hardware, but t3_mac_update_stats()
		 * hides all the register details and we don't want to dive
		 * into that.
		 */
#define CXGB_SYSCTL_ADD_QUAD(a)	SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
    (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
    sysctl_handle_macstat, "QU", 0)
		CXGB_SYSCTL_ADD_QUAD(tx_octets);
		CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
		CXGB_SYSCTL_ADD_QUAD(tx_frames);
		CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
		CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
		CXGB_SYSCTL_ADD_QUAD(tx_pause);
		CXGB_SYSCTL_ADD_QUAD(tx_deferred);
		CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
		CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
		CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
		CXGB_SYSCTL_ADD_QUAD(tx_underrun);
		CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
		CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
		CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
		CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
		CXGB_SYSCTL_ADD_QUAD(rx_octets);
		CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
		CXGB_SYSCTL_ADD_QUAD(rx_frames);
		CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
		CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
		CXGB_SYSCTL_ADD_QUAD(rx_pause);
		CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_runt);
		CXGB_SYSCTL_ADD_QUAD(rx_jabber);
		CXGB_SYSCTL_ADD_QUAD(rx_short);
		CXGB_SYSCTL_ADD_QUAD(rx_too_long);
		CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
#undef CXGB_SYSCTL_ADD_QUAD

#define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
    CTLFLAG_RD, &mstats->a, 0)
		CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
		CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
		CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
		CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
		CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
		CXGB_SYSCTL_ADD_ULONG(num_toggled);
		CXGB_SYSCTL_ADD_ULONG(num_resets);
		CXGB_SYSCTL_ADD_ULONG(link_faults);
#undef CXGB_SYSCTL_ADD_ULONG
	}
}
/**
 *	t3_get_desc - dump an SGE descriptor for debugging purposes
 *	@qs: the queue set
 *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 *	@idx: the descriptor index in the queue
 *	@data: where to dump the descriptor contents
 *
 *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 *	size of the descriptor.
 */
int
t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data)
{
	if (qnum >= 6)
		return (EINVAL);

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return (EINVAL);
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return (EINVAL);
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return (EINVAL);
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}
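
/*
 * Usage sketch (illustrative): dump the response descriptor at the current
 * consumer index of qset 0.  The qnum encoding matches the comment above:
 * 0..2 are the Tx queues, 3 the response queue, 4..5 the free lists.
 */
#if 0	/* example only, not compiled */
	unsigned char buf[sizeof(struct tx_desc)];	/* largest descriptor */
	int len;

	len = t3_get_desc(&sc->sge.qs[0], 3, sc->sge.qs[0].rspq.cidx, buf);
	if (len > 0)
		printf("rsp_desc is %d bytes\n", len);
#endif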