1 /**************************************************************************
3 Copyright (c) 2007-2009, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include "opt_inet6.h"
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/bus_dma.h>
46 #include <sys/queue.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
52 #include <sys/sched.h>
54 #include <sys/systm.h>
55 #include <sys/syslog.h>
56 #include <sys/socket.h>
57 #include <sys/sglist.h>
60 #include <net/if_var.h>
62 #include <net/ethernet.h>
63 #include <net/if_vlan_var.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip6.h>
69 #include <netinet/tcp.h>
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
77 #include <cxgb_include.h>
81 int multiq_tx_enable = 1;
84 CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
87 extern struct sysctl_oid_list sysctl__hw_cxgb_children;
88 int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
89 TUNABLE_INT("hw.cxgb.txq_mr_size", &cxgb_txq_buf_ring_size);
90 SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
91 "size of per-queue mbuf ring");
93 static int cxgb_tx_coalesce_force = 0;
94 TUNABLE_INT("hw.cxgb.tx_coalesce_force", &cxgb_tx_coalesce_force);
95 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RW,
96 &cxgb_tx_coalesce_force, 0,
97 "coalesce small packets into a single work request regardless of ring state");
99 #define COALESCE_START_DEFAULT TX_ETH_Q_SIZE>>1
100 #define COALESCE_START_MAX (TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
101 #define COALESCE_STOP_DEFAULT TX_ETH_Q_SIZE>>2
102 #define COALESCE_STOP_MIN TX_ETH_Q_SIZE>>5
103 #define TX_RECLAIM_DEFAULT TX_ETH_Q_SIZE>>5
104 #define TX_RECLAIM_MAX TX_ETH_Q_SIZE>>2
105 #define TX_RECLAIM_MIN TX_ETH_Q_SIZE>>6
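/*
 * Illustrative values for the thresholds above, assuming (purely for the
 * sake of the example) a TX_ETH_Q_SIZE of 1024 descriptors: coalescing
 * starts by default at 512 descriptors in use and no later than 896,
 * stops by default at 256 but never below 32, and reclaim runs in batches
 * of 32 descriptors by default, clamped to the 16..256 range.
 */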
108 static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
109 TUNABLE_INT("hw.cxgb.tx_coalesce_enable_start",
110 &cxgb_tx_coalesce_enable_start);
111 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RW,
112 &cxgb_tx_coalesce_enable_start, 0,
113 "coalesce enable threshold");
114 static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
115 TUNABLE_INT("hw.cxgb.tx_coalesce_enable_stop", &cxgb_tx_coalesce_enable_stop);
116 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RW,
117 &cxgb_tx_coalesce_enable_stop, 0,
118 "coalesce disable threshold");
119 static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
120 TUNABLE_INT("hw.cxgb.tx_reclaim_threshold", &cxgb_tx_reclaim_threshold);
121 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RW,
122 &cxgb_tx_reclaim_threshold, 0,
123 "tx cleaning minimum threshold");
/*
 * XXX don't re-enable this until TOE stops assuming
 * we have an m_ext
 */
static int recycle_enable = 0;
131 extern int cxgb_use_16k_clusters;
132 extern int nmbjumbop;
133 extern int nmbjumbo9;
134 extern int nmbjumbo16;
138 #define SGE_RX_SM_BUF_SIZE 1536
139 #define SGE_RX_DROP_THRES 16
140 #define SGE_RX_COPY_THRES 128
143 * Period of the Tx buffer reclaim timer. This timer does not need to run
144 * frequently as Tx buffers are usually reclaimed by new Tx packets.
146 #define TX_RECLAIM_PERIOD (hz >> 1)
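/*
 * (hz >> 1) is half a second's worth of ticks whatever hz is configured to,
 * so the reclaim timer fires roughly every 500 ms.
 */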
/*
 * Values for sge_txq.flags
 */
enum {
	TXQ_RUNNING	= 1 << 0,  /* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
};

struct tx_desc {
	uint64_t	flit[TX_DESC_FLITS];
} __packed;

struct rx_desc {
	uint32_t	addr_lo;
	uint32_t	len_gen;
	uint32_t	gen2;
	uint32_t	addr_hi;
} __packed;

struct rsp_desc {               /* response queue descriptor */
	struct rss_header	rss_hdr;
	uint32_t		flags;
	uint32_t		len_cq;
	uint8_t			imm_data[47];
	uint8_t			intr_gen;
} __packed;
175 #define RX_SW_DESC_MAP_CREATED (1 << 0)
176 #define TX_SW_DESC_MAP_CREATED (1 << 1)
177 #define RX_SW_DESC_INUSE (1 << 3)
178 #define TX_SW_DESC_MAPPED (1 << 4)
180 #define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0)
181 #define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP)
182 #define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP)
183 #define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
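/*
 * These four values cover every SOP/EOP combination a response descriptor
 * can carry: neither flag (middle piece of a multi-descriptor packet),
 * EOP only (final piece), SOP only (first piece), or both flags (a complete
 * packet delivered in a single response).
 */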
struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct mbuf	*m;
	bus_dmamap_t	map;
	int		flags;
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	caddr_t		rxsd_cl;
	struct mbuf	*m;
	bus_dmamap_t	map;
	int		flags;
};

struct txq_state {
	unsigned int	compl;
	unsigned int	gen;
	unsigned int	pidx;
};

struct refill_fl_cb_arg {
	int               error;
	bus_dma_segment_t seg;
	int               nseg;
};
/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 *	desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static uint8_t flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
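/*
 * Reading the SGE_NUM_GENBITS == 2 table above (which implies a WR_FLITS of
 * 15): 1-15 flits fit in one descriptor, 16-29 need two, 30-43 need three
 * and 44-57 need four, matching desc = 1 + (flits - 2) / (WR_FLITS - 1),
 * e.g. 1 + (16 - 2) / 14 = 2.
 */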
236 #define TXQ_LOCK_ASSERT(qs) mtx_assert(&(qs)->lock, MA_OWNED)
237 #define TXQ_TRYLOCK(qs) mtx_trylock(&(qs)->lock)
238 #define TXQ_LOCK(qs) mtx_lock(&(qs)->lock)
239 #define TXQ_UNLOCK(qs) mtx_unlock(&(qs)->lock)
240 #define TXQ_RING_EMPTY(qs) drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
241 #define TXQ_RING_NEEDS_ENQUEUE(qs) \
242 drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
243 #define TXQ_RING_FLUSH(qs) drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
244 #define TXQ_RING_DEQUEUE_COND(qs, func, arg) \
245 drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
246 #define TXQ_RING_DEQUEUE(qs) \
247 drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
251 static void sge_timer_cb(void *arg);
252 static void sge_timer_reclaim(void *arg, int ncount);
253 static void sge_txq_reclaim_handler(void *arg, int ncount);
254 static void cxgb_start_locked(struct sge_qset *qs);
/*
 * XXX need to cope with bursty scheduling by looking at a wider
 * window than we are now for determining the need for coalescing
 */
static __inline uint64_t
check_pkt_coalesce(struct sge_qset *qs)
{
	struct adapter *sc;
	struct sge_txq *txq;
	uint8_t *fill;

	if (__predict_false(cxgb_tx_coalesce_force))
		return (1);
	txq = &qs->txq[TXQ_ETH];
	sc = qs->port->adapter;
	fill = &sc->tunq_fill[qs->idx];

	if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
		cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
	if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
		cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
	/*
	 * if the hardware transmit queue is more than 1/8 full
	 * we mark it as coalescing - we drop back from coalescing
	 * when we go below 1/32 full and there are no packets enqueued,
	 * this provides us with some degree of hysteresis
	 */
	if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
	    TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
		*fill = 0;
	else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
		*fill = 1;

	return (sc->tunq_coalesce);
}
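/*
 * A non-zero return from check_pkt_coalesce() is what makes cxgb_dequeue()
 * below pull several small packets off the buf_ring in one call (via
 * coalesce_check()) instead of dequeueing a single mbuf.
 */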
295 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
298 #if _BYTE_ORDER == _LITTLE_ENDIAN
300 wr_hilo |= (((uint64_t)wr_lo)<<32);
303 wr_hilo |= (((uint64_t)wr_hi)<<32);
305 wrp->wrh_hilo = wr_hilo;
309 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
struct coalesce_info {
	int count;
	int nbytes;
};

static int
coalesce_check(struct mbuf *m, void *arg)
{
	struct coalesce_info *ci = arg;
	int *count = &ci->count;
	int *nbytes = &ci->nbytes;

	if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
	    (*count < 7) && (m->m_next == NULL))) {
		*count += 1;
		*nbytes += m->m_len;
		return (1);
	}
	return (0);
}
340 cxgb_dequeue(struct sge_qset *qs)
342 struct mbuf *m, *m_head, *m_tail;
343 struct coalesce_info ci;
	if (check_pkt_coalesce(qs) == 0)
		return TXQ_RING_DEQUEUE(qs);

	m_head = m_tail = NULL;
	ci.count = ci.nbytes = 0;
	do {
		m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
		if (m_head == NULL) {
			m_tail = m_head = m;
		} else if (m != NULL) {
			m_tail->m_nextpkt = m;
			m_tail = m;
		}
	} while (m != NULL);
	if (ci.count > 7)
		panic("trying to coalesce %d packets in to one WR", ci.count);

	return (m_head);
}
366 * reclaim_completed_tx - reclaims completed Tx descriptors
367 * @adapter: the adapter
368 * @q: the Tx queue to reclaim completed descriptors from
370 * Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue lock held.
 */
static int
375 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
377 struct sge_txq *q = &qs->txq[queue];
378 int reclaim = desc_reclaimable(q);
380 if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
381 (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
382 cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
384 if (reclaim < reclaim_min)
387 mtx_assert(&qs->lock, MA_OWNED);
389 t3_free_tx_desc(qs, reclaim, queue);
390 q->cleaned += reclaim;
391 q->in_use -= reclaim;
393 if (isset(&qs->txq_stopped, TXQ_ETH))
394 clrbit(&qs->txq_stopped, TXQ_ETH);
400 * should_restart_tx - are there enough resources to restart a Tx queue?
403 * Checks if there are enough descriptors to restart a suspended Tx queue.
406 should_restart_tx(const struct sge_txq *q)
408 unsigned int r = q->processed - q->cleaned;
	return q->in_use - r < (q->size >> 1);
}
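/*
 * Illustrative reading: for a 1024-descriptor queue (example size only),
 * the suspended queue is restarted once fewer than 512 descriptors remain
 * effectively outstanding, i.e. in_use minus those the SGE has already
 * processed but we have not yet cleaned.
 */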
414 * t3_sge_init - initialize SGE
416 * @p: the SGE parameters
418 * Performs SGE initialization needed every time after a chip reset.
419 * We do not initialize any of the queue sets here, instead the driver
420 * top-level must request those individually. We also do not enable DMA
421 * here, that should be done after the queues have been set up.
424 t3_sge_init(adapter_t *adap, struct sge_params *p)
428 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
430 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
431 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
432 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
433 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
434 #if SGE_NUM_GENBITS == 1
435 ctrl |= F_EGRGENCTRL;
437 if (adap->params.rev > 0) {
438 if (!(adap->flags & (USING_MSIX | USING_MSI)))
439 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
441 t3_write_reg(adap, A_SG_CONTROL, ctrl);
442 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
443 V_LORCQDRBTHRSH(512));
444 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
445 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
446 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
447 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
448 adap->params.rev < T3_REV_C ? 1000 : 500);
449 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
450 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
451 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
452 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
453 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static __inline unsigned int
sgl_len(unsigned int n)
{
	return ((3 * n) / 2 + (n & 1));
}
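/*
 * Worked example: each SGL entry is an 8-byte address plus a 4-byte length,
 * so two entries pack into three 8-byte flits.  Hence sgl_len(1) == 2,
 * sgl_len(2) == 3, sgl_len(3) == 5 and sgl_len(4) == 6.
 */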
471 * get_imm_packet - return the next ingress packet buffer from a response
472 * @resp: the response descriptor containing the packet data
474 * Return a packet containing the immediate data of the given response.
477 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
480 if (resp->rss_hdr.opcode == CPL_RX_DATA) {
481 const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
482 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
483 } else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
484 const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
485 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
487 m->m_len = IMMED_PKT_SIZE;
488 m->m_ext.ext_buf = NULL;
489 m->m_ext.ext_type = 0;
490 memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
494 static __inline u_int
495 flits_to_desc(u_int n)
497 return (flit_desc_map[n]);
500 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
501 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
502 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
503 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
505 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
506 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
510 * t3_sge_err_intr_handler - SGE async event interrupt handler
511 * @adapter: the adapter
513 * Interrupt handler for SGE asynchronous (non-data) events.
516 t3_sge_err_intr_handler(adapter_t *adapter)
518 unsigned int v, status;
520 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
521 if (status & SGE_PARERR)
522 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
523 status & SGE_PARERR);
524 if (status & SGE_FRAMINGERR)
525 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
526 status & SGE_FRAMINGERR);
527 if (status & F_RSPQCREDITOVERFOW)
528 CH_ALERT(adapter, "SGE response queue credit overflow\n");
530 if (status & F_RSPQDISABLED) {
531 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
534 "packet delivered to disabled response queue (0x%x)\n",
535 (v >> S_RSPQ0DISABLED) & 0xff);
538 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
539 if (status & SGE_FATALERR)
540 t3_fatal_err(adapter);
544 t3_sge_prep(adapter_t *adap, struct sge_params *p)
546 int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
548 nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
549 nqsets *= adap->params.nports;
551 fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
	while (!powerof2(fl_q_size))
		fl_q_size--;
	use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
	    is_offload(adap);

#if __FreeBSD_version >= 700111
	if (use_16k) {
		jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
		jumbo_buf_size = MJUM16BYTES;
	} else {
		jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
		jumbo_buf_size = MJUM9BYTES;
	}
#else
	jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
	jumbo_buf_size = MJUMPAGESIZE;
#endif
	while (!powerof2(jumbo_q_size))
		jumbo_q_size--;
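/*
 * In short: the jumbo free list uses 16KB clusters when 16k clusters are
 * enabled and available, 9KB clusters otherwise, and falls back to
 * page-sized clusters on FreeBSD versions without the larger jumbo zones.
 */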
574 if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
575 device_printf(adap->dev,
576 "Insufficient clusters and/or jumbo buffers.\n");
578 p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
580 for (i = 0; i < SGE_QSETS; ++i) {
581 struct qset_params *q = p->qset + i;
583 if (adap->params.nports > 2) {
584 q->coalesce_usecs = 50;
587 q->coalesce_usecs = 10;
589 q->coalesce_usecs = 5;
593 q->rspq_size = RSPQ_Q_SIZE;
594 q->fl_size = fl_q_size;
595 q->jumbo_size = jumbo_q_size;
596 q->jumbo_buf_size = jumbo_buf_size;
597 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
598 q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
599 q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
605 t3_sge_alloc(adapter_t *sc)
608 /* The parent tag. */
609 if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
610 1, 0, /* algnmnt, boundary */
611 BUS_SPACE_MAXADDR, /* lowaddr */
612 BUS_SPACE_MAXADDR, /* highaddr */
613 NULL, NULL, /* filter, filterarg */
614 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
615 BUS_SPACE_UNRESTRICTED, /* nsegments */
616 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
618 NULL, NULL, /* lock, lockarg */
620 device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
625 * DMA tag for normal sized RX frames
627 if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
628 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
629 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
630 device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
635 * DMA tag for jumbo sized RX frames.
637 if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
638 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
639 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
640 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
645 * DMA tag for TX frames.
647 if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
648 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
649 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
650 NULL, NULL, &sc->tx_dmat)) {
651 device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
659 t3_sge_free(struct adapter * sc)
662 if (sc->tx_dmat != NULL)
663 bus_dma_tag_destroy(sc->tx_dmat);
665 if (sc->rx_jumbo_dmat != NULL)
666 bus_dma_tag_destroy(sc->rx_jumbo_dmat);
668 if (sc->rx_dmat != NULL)
669 bus_dma_tag_destroy(sc->rx_dmat);
671 if (sc->parent_dmat != NULL)
672 bus_dma_tag_destroy(sc->parent_dmat);
678 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
681 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
682 qs->rspq.polling = 0 /* p->polling */;
685 #if !defined(__i386__) && !defined(__amd64__)
687 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
689 struct refill_fl_cb_arg *cb_arg = arg;
691 cb_arg->error = error;
692 cb_arg->seg = segs[0];
698 * refill_fl - refill an SGE free-buffer list
699 * @sc: the controller softc
700 * @q: the free-list to refill
701 * @n: the number of new buffers to allocate
703 * (Re)populate an SGE free-buffer list with up to @n new packet buffers.
704 * The caller must assure that @n does not exceed the queue's capacity.
707 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
709 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
710 struct rx_desc *d = &q->desc[q->pidx];
711 struct refill_fl_cb_arg cb_arg;
719 * We allocate an uninitialized mbuf + cluster, mbuf is
720 * initialized after rx.
722 if (q->zone == zone_pack) {
723 if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
725 cl = m->m_ext.ext_buf;
727 if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
729 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
730 uma_zfree(q->zone, cl);
734 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
735 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
736 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
737 uma_zfree(q->zone, cl);
740 sd->flags |= RX_SW_DESC_MAP_CREATED;
742 #if !defined(__i386__) && !defined(__amd64__)
743 err = bus_dmamap_load(q->entry_tag, sd->map,
744 cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
746 if (err != 0 || cb_arg.error) {
747 if (q->zone == zone_pack)
748 uma_zfree(q->zone, cl);
753 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
755 sd->flags |= RX_SW_DESC_INUSE;
758 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
759 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
760 d->len_gen = htobe32(V_FLD_GEN1(q->gen));
761 d->gen2 = htobe32(V_FLD_GEN2(q->gen));
766 if (++q->pidx == q->size) {
777 if (q->db_pending >= 32) {
779 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
785 * free_rx_bufs - free the Rx buffers on an SGE free list
 *	@sc: the controller softc
787 * @q: the SGE free list to clean up
789 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
790 * this queue should be stopped before calling this function.
793 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
795 u_int cidx = q->cidx;
797 while (q->credits--) {
798 struct rx_sw_desc *d = &q->sdesc[cidx];
800 if (d->flags & RX_SW_DESC_INUSE) {
801 bus_dmamap_unload(q->entry_tag, d->map);
802 bus_dmamap_destroy(q->entry_tag, d->map);
803 if (q->zone == zone_pack) {
804 m_init(d->m, zone_pack, MCLBYTES,
805 M_NOWAIT, MT_DATA, M_EXT);
806 uma_zfree(zone_pack, d->m);
808 m_init(d->m, zone_mbuf, MLEN,
809 M_NOWAIT, MT_DATA, 0);
810 uma_zfree(zone_mbuf, d->m);
811 uma_zfree(q->zone, d->rxsd_cl);
817 if (++cidx == q->size)
823 __refill_fl(adapter_t *adap, struct sge_fl *fl)
825 refill_fl(adap, fl, min(16U, fl->size - fl->credits));
829 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
831 uint32_t reclaimable = fl->size - fl->credits;
834 refill_fl(adap, fl, min(max, reclaimable));
838 * recycle_rx_buf - recycle a receive buffer
839 * @adapter: the adapter
840 * @q: the SGE free list
841 * @idx: index of buffer to recycle
843 * Recycles the specified buffer on the given free list by adding it at
844 * the next available slot on the list.
847 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
849 struct rx_desc *from = &q->desc[idx];
850 struct rx_desc *to = &q->desc[q->pidx];
852 q->sdesc[q->pidx] = q->sdesc[idx];
853 to->addr_lo = from->addr_lo; // already big endian
854 to->addr_hi = from->addr_hi; // likewise
855 wmb(); /* necessary ? */
856 to->len_gen = htobe32(V_FLD_GEN1(q->gen));
857 to->gen2 = htobe32(V_FLD_GEN2(q->gen));
860 if (++q->pidx == q->size) {
864 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
868 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
873 *addr = segs[0].ds_addr;
877 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
878 bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
879 bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
881 size_t len = nelem * elem_size;
886 if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
887 BUS_SPACE_MAXADDR_32BIT,
888 BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
889 len, 0, NULL, NULL, tag)) != 0) {
890 device_printf(sc->dev, "Cannot allocate descriptor tag\n");
894 if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
896 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
900 bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
905 len = nelem * sw_size;
906 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
909 if (parent_entry_tag == NULL)
912 if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
913 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
914 NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
915 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
916 NULL, NULL, entry_tag)) != 0) {
917 device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
924 sge_slow_intr_handler(void *arg, int ncount)
928 t3_slow_intr_handler(sc);
929 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
930 (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
934 * sge_timer_cb - perform periodic maintenance of an SGE qset
935 * @data: the SGE queue set to maintain
937 * Runs periodically from a timer to perform maintenance of an SGE queue
938 * set. It performs two tasks:
940 * a) Cleans up any completed Tx descriptors that may still be pending.
941 * Normal descriptor cleanup happens when new packets are added to a Tx
942 * queue so this timer is relatively infrequent and does any cleanup only
943 * if the Tx queue has not seen any new packets in a while. We make a
944 * best effort attempt to reclaim descriptors, in that we don't wait
945 * around if we cannot get a queue's lock (which most likely is because
946 * someone else is queueing new packets and so will also handle the clean
947 * up). Since control queues use immediate data exclusively we don't
948 * bother cleaning them up here.
950 * b) Replenishes Rx queues that have run out due to memory shortage.
951 * Normally new Rx buffers are added when existing ones are consumed but
952 * when out of memory a queue can become empty. We try to add only a few
953 * buffers here, the queue will be replenished fully as these new buffers
954 * are used up if memory shortage has subsided.
956 * c) Return coalesced response queue credits in case a response queue is
959 * d) Ring doorbells for T304 tunnel queues since we have seen doorbell
960 * fifo overflows and the FW doesn't implement any recovery scheme yet.
963 sge_timer_cb(void *arg)
966 if ((sc->flags & USING_MSIX) == 0) {
968 struct port_info *pi;
972 int reclaim_ofl, refill_rx;
974 if (sc->open_device_map == 0)
977 for (i = 0; i < sc->params.nports; i++) {
979 for (j = 0; j < pi->nqsets; j++) {
980 qs = &sc->sge.qs[pi->first_qset + j];
982 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
983 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
984 (qs->fl[1].credits < qs->fl[1].size));
985 if (reclaim_ofl || refill_rx) {
986 taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
993 if (sc->params.nports > 2) {
996 for_each_port(sc, i) {
997 struct port_info *pi = &sc->port[i];
999 t3_write_reg(sc, A_SG_KDOORBELL,
1001 (FW_TUNNEL_SGEEC_START + pi->first_qset));
1004 if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
1005 sc->open_device_map != 0)
1006 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1010 * This is meant to be a catch-all function to keep sge state private
1015 t3_sge_init_adapter(adapter_t *sc)
1017 callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
1018 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1019 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
1024 t3_sge_reset_adapter(adapter_t *sc)
1026 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1031 t3_sge_init_port(struct port_info *pi)
1033 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
1038 * refill_rspq - replenish an SGE response queue
1039 * @adapter: the adapter
1040 * @q: the response queue to replenish
1041 * @credits: how many new responses to make available
1043 * Replenishes a response queue by making the supplied number of responses
1046 static __inline void
1047 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
1050 /* mbufs are allocated on demand when a rspq entry is processed. */
1051 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
1052 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
1056 sge_txq_reclaim_handler(void *arg, int ncount)
1058 struct sge_qset *qs = arg;
1061 for (i = 0; i < 3; i++)
1062 reclaim_completed_tx(qs, 16, i);
1066 sge_timer_reclaim(void *arg, int ncount)
1068 struct port_info *pi = arg;
1069 int i, nqsets = pi->nqsets;
1070 adapter_t *sc = pi->adapter;
1071 struct sge_qset *qs;
1074 KASSERT((sc->flags & USING_MSIX) == 0,
1075 ("can't call timer reclaim for msi-x"));
1077 for (i = 0; i < nqsets; i++) {
1078 qs = &sc->sge.qs[pi->first_qset + i];
1080 reclaim_completed_tx(qs, 16, TXQ_OFLD);
1081 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1082 &sc->sge.qs[0].rspq.lock;
1084 if (mtx_trylock(lock)) {
1085 /* XXX currently assume that we are *NOT* polling */
1086 uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
1088 if (qs->fl[0].credits < qs->fl[0].size - 16)
1089 __refill_fl(sc, &qs->fl[0]);
1090 if (qs->fl[1].credits < qs->fl[1].size - 16)
1091 __refill_fl(sc, &qs->fl[1]);
1093 if (status & (1 << qs->rspq.cntxt_id)) {
1094 if (qs->rspq.credits) {
1095 refill_rspq(sc, &qs->rspq, 1);
1097 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
1098 1 << qs->rspq.cntxt_id);
1107 * init_qset_cntxt - initialize an SGE queue set context info
1108 * @qs: the queue set
1109 * @id: the queue set id
1111 * Initializes the TIDs and context ids for the queues of a queue set.
1114 init_qset_cntxt(struct sge_qset *qs, u_int id)
1117 qs->rspq.cntxt_id = id;
1118 qs->fl[0].cntxt_id = 2 * id;
1119 qs->fl[1].cntxt_id = 2 * id + 1;
1120 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1121 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1122 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1123 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1124 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1126 mbufq_init(&qs->txq[TXQ_ETH].sendq);
1127 mbufq_init(&qs->txq[TXQ_OFLD].sendq);
1128 mbufq_init(&qs->txq[TXQ_CTRL].sendq);
1133 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
1135 txq->in_use += ndesc;
1137 * XXX we don't handle stopping of queue
1138 * presumably start handles this when we bump against the end
1140 txqs->gen = txq->gen;
1141 txq->unacked += ndesc;
1142 txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
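	/*
	 * (txq->unacked & 32) is either 0 or (1 << 5); shifting it left by
	 * (S_WR_COMPL - 5) moves that bit into the WR_COMPL flag position,
	 * so completion requests are derived from bit 5 of the running
	 * unacked count rather than being asked for on every work request.
	 */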
1144 txqs->pidx = txq->pidx;
1147 if (((txqs->pidx > txq->cidx) &&
1148 (txq->pidx < txqs->pidx) &&
1149 (txq->pidx >= txq->cidx)) ||
1150 ((txqs->pidx < txq->cidx) &&
1151 (txq->pidx >= txq-> cidx)) ||
1152 ((txqs->pidx < txq->cidx) &&
1153 (txq->cidx < txqs->pidx)))
1154 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1155 txqs->pidx, txq->pidx, txq->cidx);
1157 if (txq->pidx >= txq->size) {
1158 txq->pidx -= txq->size;
1165 * calc_tx_descs - calculate the number of Tx descriptors for a packet
1166 * @m: the packet mbufs
1167 * @nsegs: the number of segments
1169 * Returns the number of Tx descriptors needed for the given Ethernet
1170 * packet. Ethernet packets require addition of WR and CPL headers.
1172 static __inline unsigned int
1173 calc_tx_descs(const struct mbuf *m, int nsegs)
1177 if (m->m_pkthdr.len <= PIO_LEN)
1180 flits = sgl_len(nsegs) + 2;
1181 if (m->m_pkthdr.csum_flags & CSUM_TSO)
1184 return flits_to_desc(flits);
1188 * make_sgl - populate a scatter/gather list for a packet
1189 * @sgp: the SGL to populate
1190 * @segs: the packet dma segments
1191 * @nsegs: the number of segments
1193 * Generates a scatter/gather list for the buffers that make up a packet
1194 * and returns the SGL size in 8-byte words. The caller must size the SGL
1197 static __inline void
1198 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1202 for (idx = 0, i = 0; i < nsegs; i++) {
1204 * firmware doesn't like empty segments
1206 if (segs[i].ds_len == 0)
1211 sgp->len[idx] = htobe32(segs[i].ds_len);
1212 sgp->addr[idx] = htobe64(segs[i].ds_addr);
1223 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1224 * @adap: the adapter
1227 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
1228 * where the HW is going to sleep just after we checked, however,
1229 * then the interrupt handler will detect the outstanding TX packet
1230 * and ring the doorbell for us.
1232 * When GTS is disabled we unconditionally ring the doorbell.
1234 static __inline void
1235 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1238 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1239 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1240 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1242 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1245 t3_write_reg(adap, A_SG_KDOORBELL,
1246 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1249 if (mustring || ++q->db_pending >= 32) {
1250 wmb(); /* write descriptors before telling HW */
1251 t3_write_reg(adap, A_SG_KDOORBELL,
1252 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1258 static __inline void
1259 wr_gen2(struct tx_desc *d, unsigned int gen)
1261 #if SGE_NUM_GENBITS == 2
1262 d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1267 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1268 * @ndesc: number of Tx descriptors spanned by the SGL
1269 * @txd: first Tx descriptor to be written
1270 * @txqs: txq state (generation and producer index)
1271 * @txq: the SGE Tx queue
1273 * @flits: number of flits to the start of the SGL in the first descriptor
1274 * @sgl_flits: the SGL size in flits
1275 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1276 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1278 * Write a work request header and an associated SGL. If the SGL is
1279 * small enough to fit into one Tx descriptor it has already been written
1280 * and we just need to write the WR header. Otherwise we distribute the
1281 * SGL across the number of descriptors it spans.
1284 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1285 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1286 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1289 struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1290 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1292 if (__predict_true(ndesc == 1)) {
1293 set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1294 V_WR_SGLSFLT(flits)) | wr_hi,
1295 htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1298 wr_gen2(txd, txqs->gen);
1301 unsigned int ogen = txqs->gen;
1302 const uint64_t *fp = (const uint64_t *)sgl;
1303 struct work_request_hdr *wp = wrp;
1305 wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1306 V_WR_SGLSFLT(flits)) | wr_hi;
1309 unsigned int avail = WR_FLITS - flits;
1311 if (avail > sgl_flits)
1313 memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1322 if (++txqs->pidx == txq->size) {
1330 * when the head of the mbuf chain
1331 * is freed all clusters will be freed
1334 wrp = (struct work_request_hdr *)txd;
1335 wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1336 V_WR_SGLSFLT(1)) | wr_hi;
1337 wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1339 V_WR_GEN(txqs->gen)) | wr_lo;
1340 wr_gen2(txd, txqs->gen);
1343 wrp->wrh_hi |= htonl(F_WR_EOP);
1345 wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1346 wr_gen2((struct tx_desc *)wp, ogen);
1350 /* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1351 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
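/*
 * With ETHER_HDR_LEN == 14 this works out to 54 bytes: the smallest
 * possible Ethernet + IPv4 + TCP header combination (20-byte IP header and
 * 20-byte TCP header, no options).
 */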
#define GET_VTAG(cntrl, m) \
do { \
	if ((m)->m_flags & M_VLANTAG) \
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
} while (0)

static int
1360 t3_encap(struct sge_qset *qs, struct mbuf **m)
1364 struct sge_txq *txq;
1365 struct txq_state txqs;
1366 struct port_info *pi;
1367 unsigned int ndesc, flits, cntrl, mlen;
1368 int err, nsegs, tso_info = 0;
1370 struct work_request_hdr *wrp;
1371 struct tx_sw_desc *txsd;
1372 struct sg_ent *sgp, *sgl;
1373 uint32_t wr_hi, wr_lo, sgl_flits;
1374 bus_dma_segment_t segs[TX_MAX_SEGS];
1376 struct tx_desc *txd;
1380 txq = &qs->txq[TXQ_ETH];
1381 txd = &txq->desc[txq->pidx];
1382 txsd = &txq->sdesc[txq->pidx];
1388 mtx_assert(&qs->lock, MA_OWNED);
1389 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1390 KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));
1392 if (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1393 m0->m_pkthdr.csum_flags & (CSUM_TSO))
1394 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1396 if (m0->m_nextpkt != NULL) {
1397 busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1401 if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1402 &m0, segs, &nsegs))) {
1404 printf("failed ... err=%d\n", err);
1407 mlen = m0->m_pkthdr.len;
1408 ndesc = calc_tx_descs(m0, nsegs);
1410 txq_prod(txq, ndesc, &txqs);
1412 KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1415 if (m0->m_nextpkt != NULL) {
1416 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1420 panic("trying to coalesce %d packets in to one WR", nsegs);
1421 txq->txq_coalesced += nsegs;
1422 wrp = (struct work_request_hdr *)txd;
1423 flits = nsegs*2 + 1;
1425 for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1426 struct cpl_tx_pkt_batch_entry *cbe;
1428 uint32_t *hflit = (uint32_t *)&flit;
1429 int cflags = m0->m_pkthdr.csum_flags;
1431 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1432 GET_VTAG(cntrl, m0);
1433 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1434 if (__predict_false(!(cflags & CSUM_IP)))
1435 cntrl |= F_TXPKT_IPCSUM_DIS;
1436 if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
1437 CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1438 cntrl |= F_TXPKT_L4CSUM_DIS;
1440 hflit[0] = htonl(cntrl);
1441 hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1442 flit |= htobe64(1 << 24);
1443 cbe = &cpl_batch->pkt_entry[i];
1444 cbe->cntrl = hflit[0];
1445 cbe->len = hflit[1];
1446 cbe->addr = htobe64(segs[i].ds_addr);
1449 wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1450 V_WR_SGLSFLT(flits)) |
1451 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1452 wr_lo = htonl(V_WR_LEN(flits) |
1453 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1454 set_wr_hdr(wrp, wr_hi, wr_lo);
1456 ETHER_BPF_MTAP(pi->ifp, m0);
1457 wr_gen2(txd, txqs.gen);
1458 check_ring_tx_db(sc, txq, 0);
1460 } else if (tso_info) {
1462 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1463 struct ether_header *eh;
1468 GET_VTAG(cntrl, m0);
1469 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1470 hdr->cntrl = htonl(cntrl);
1471 hdr->len = htonl(mlen | 0x80000000);
1473 if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1474 printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
1475 m0, mlen, m0->m_pkthdr.tso_segsz,
1476 (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
1477 panic("tx tso packet too small");
1480 /* Make sure that ether, ip, tcp headers are all in m0 */
1481 if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1482 m0 = m_pullup(m0, TCPPKTHDRSIZE);
1483 if (__predict_false(m0 == NULL)) {
1484 /* XXX panic probably an overreaction */
1485 panic("couldn't fit header into mbuf");
1489 eh = mtod(m0, struct ether_header *);
1490 eth_type = eh->ether_type;
1491 if (eth_type == htons(ETHERTYPE_VLAN)) {
1492 struct ether_vlan_header *evh = (void *)eh;
1494 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1496 eth_type = evh->evl_proto;
1498 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1502 if (eth_type == htons(ETHERTYPE_IP)) {
1503 struct ip *ip = l3hdr;
1505 tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1506 tcp = (struct tcphdr *)(ip + 1);
1507 } else if (eth_type == htons(ETHERTYPE_IPV6)) {
1508 struct ip6_hdr *ip6 = l3hdr;
1510 KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1511 ("%s: CSUM_TSO with ip6_nxt %d",
1512 __func__, ip6->ip6_nxt));
1514 tso_info |= F_LSO_IPV6;
1515 tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1516 tcp = (struct tcphdr *)(ip6 + 1);
1518 panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1520 tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1521 hdr->lso_info = htonl(tso_info);
1523 if (__predict_false(mlen <= PIO_LEN)) {
1525 * pkt not undersized but fits in PIO_LEN
1526 * Indicates a TSO bug at the higher levels.
1529 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1530 flits = (mlen + 7) / 8 + 3;
1531 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1532 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1533 F_WR_SOP | F_WR_EOP | txqs.compl);
1534 wr_lo = htonl(V_WR_LEN(flits) |
1535 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1536 set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1538 ETHER_BPF_MTAP(pi->ifp, m0);
1539 wr_gen2(txd, txqs.gen);
1540 check_ring_tx_db(sc, txq, 0);
1546 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1548 GET_VTAG(cntrl, m0);
1549 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1550 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1551 cntrl |= F_TXPKT_IPCSUM_DIS;
1552 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
1553 CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1554 cntrl |= F_TXPKT_L4CSUM_DIS;
1555 cpl->cntrl = htonl(cntrl);
1556 cpl->len = htonl(mlen | 0x80000000);
1558 if (mlen <= PIO_LEN) {
1560 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1561 flits = (mlen + 7) / 8 + 2;
1563 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1564 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1565 F_WR_SOP | F_WR_EOP | txqs.compl);
1566 wr_lo = htonl(V_WR_LEN(flits) |
1567 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1568 set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1570 ETHER_BPF_MTAP(pi->ifp, m0);
1571 wr_gen2(txd, txqs.gen);
1572 check_ring_tx_db(sc, txq, 0);
1578 wrp = (struct work_request_hdr *)txd;
1579 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1580 make_sgl(sgp, segs, nsegs);
1582 sgl_flits = sgl_len(nsegs);
1584 ETHER_BPF_MTAP(pi->ifp, m0);
1586 KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1587 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1588 wr_lo = htonl(V_WR_TID(txq->token));
1589 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1590 sgl_flits, wr_hi, wr_lo);
1591 check_ring_tx_db(sc, txq, 0);
1597 cxgb_tx_watchdog(void *arg)
1599 struct sge_qset *qs = arg;
1600 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1602 if (qs->coalescing != 0 &&
1603 (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1606 else if (qs->coalescing == 0 &&
1607 (txq->in_use >= cxgb_tx_coalesce_enable_start))
1609 if (TXQ_TRYLOCK(qs)) {
1610 qs->qs_flags |= QS_FLUSHING;
1611 cxgb_start_locked(qs);
1612 qs->qs_flags &= ~QS_FLUSHING;
1615 if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1616 callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1617 qs, txq->txq_watchdog.c_cpu);
1621 cxgb_tx_timeout(void *arg)
1623 struct sge_qset *qs = arg;
1624 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1626 if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1628 if (TXQ_TRYLOCK(qs)) {
1629 qs->qs_flags |= QS_TIMEOUT;
1630 cxgb_start_locked(qs);
1631 qs->qs_flags &= ~QS_TIMEOUT;
1637 cxgb_start_locked(struct sge_qset *qs)
1639 struct mbuf *m_head = NULL;
1640 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1641 struct port_info *pi = qs->port;
1642 struct ifnet *ifp = pi->ifp;
1644 if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1645 reclaim_completed_tx(qs, 0, TXQ_ETH);
1647 if (!pi->link_config.link_ok) {
1651 TXQ_LOCK_ASSERT(qs);
1652 while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1653 pi->link_config.link_ok) {
1654 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1656 if (txq->size - txq->in_use <= TX_MAX_DESC)
1659 if ((m_head = cxgb_dequeue(qs)) == NULL)
1662 * Encapsulation can modify our pointer, and or make it
1663 * NULL on failure. In that event, we can't requeue.
1665 if (t3_encap(qs, &m_head) || m_head == NULL)
1671 if (txq->db_pending)
1672 check_ring_tx_db(pi->adapter, txq, 1);
1674 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1675 pi->link_config.link_ok)
1676 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1677 qs, txq->txq_timer.c_cpu);
1683 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1685 struct port_info *pi = qs->port;
1686 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1687 struct buf_ring *br = txq->txq_mr;
1690 avail = txq->size - txq->in_use;
1691 TXQ_LOCK_ASSERT(qs);
1694 * We can only do a direct transmit if the following are true:
1695 * - we aren't coalescing (ring < 3/4 full)
1696 * - the link is up -- checked in caller
1697 * - there are no packets enqueued already
1698 * - there is space in hardware transmit queue
1700 if (check_pkt_coalesce(qs) == 0 &&
1701 !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1702 if (t3_encap(qs, &m)) {
1704 (error = drbr_enqueue(ifp, br, m)) != 0)
1707 if (txq->db_pending)
1708 check_ring_tx_db(pi->adapter, txq, 1);
1711 * We've bypassed the buf ring so we need to update
1712 * the stats directly
1714 txq->txq_direct_packets++;
1715 txq->txq_direct_bytes += m->m_pkthdr.len;
1717 } else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1720 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1721 if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1722 (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1723 cxgb_start_locked(qs);
1724 else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1725 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1726 qs, txq->txq_timer.c_cpu);
1731 cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1733 struct sge_qset *qs;
1734 struct port_info *pi = ifp->if_softc;
1735 int error, qidx = pi->first_qset;
1737 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
1738 ||(!pi->link_config.link_ok)) {
1743 if (m->m_flags & M_FLOWID)
1744 qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1746 qs = &pi->adapter->sge.qs[qidx];
1748 if (TXQ_TRYLOCK(qs)) {
1750 error = cxgb_transmit_locked(ifp, qs, m);
1753 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1758 cxgb_qflush(struct ifnet *ifp)
1761 * flush any enqueued mbufs in the buf_rings
1762 * and in the transmit queues
1769 * write_imm - write a packet into a Tx descriptor as immediate data
1770 * @d: the Tx descriptor to write
1772 * @len: the length of packet data to write as immediate data
1773 * @gen: the generation bit value to write
1775 * Writes a packet as immediate data into a Tx descriptor. The packet
1776 * contains a work request at its beginning. We must write the packet
1777 * carefully so the SGE doesn't read accidentally before it's written in
1780 static __inline void
1781 write_imm(struct tx_desc *d, caddr_t src,
1782 unsigned int len, unsigned int gen)
1784 struct work_request_hdr *from = (struct work_request_hdr *)src;
1785 struct work_request_hdr *to = (struct work_request_hdr *)d;
1786 uint32_t wr_hi, wr_lo;
1788 KASSERT(len <= WR_LEN && len >= sizeof(*from),
1789 ("%s: invalid len %d", __func__, len));
1791 memcpy(&to[1], &from[1], len - sizeof(*from));
1792 wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1793 V_WR_BCNTLFLT(len & 7));
1794 wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
1795 set_wr_hdr(to, wr_hi, wr_lo);
1801 * check_desc_avail - check descriptor availability on a send queue
1802 * @adap: the adapter
1804 * @m: the packet needing the descriptors
1805 * @ndesc: the number of Tx descriptors needed
1806 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1808 * Checks if the requested number of Tx descriptors is available on an
1809 * SGE send queue. If the queue is already suspended or not enough
1810 * descriptors are available the packet is queued for later transmission.
1811 * Must be called with the Tx queue locked.
1813 * Returns 0 if enough descriptors are available, 1 if there aren't
1814 * enough descriptors and the packet has been queued, and 2 if the caller
1815 * needs to retry because there weren't enough descriptors at the
1816 * beginning of the call but some freed up in the mean time.
1819 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1820 struct mbuf *m, unsigned int ndesc,
1824 * XXX We currently only use this for checking the control queue
1825 * the control queue is only used for binding qsets which happens
1826 * at init time so we are guaranteed enough descriptors
	if (__predict_false(!mbufq_empty(&q->sendq))) {
addq_exit:	mbufq_tail(&q->sendq, m);
		return 1;
	}
	if (__predict_false(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		setbit(&qs->txq_stopped, qid);
		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			goto addq_exit;
		return 2;
	}
	return 0;
}
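/*
 * Typical caller pattern for the return values above, as used by ctrl_xmit()
 * and ofld_xmit() below: 0 means the WR can be written immediately, 1 means
 * the mbuf was placed on q->sendq and now belongs to the queue, and 2 means
 * descriptors were freed while checking, so the caller goes back to its
 * reclaim label and retries.
 */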
1849 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1850 * @q: the SGE control Tx queue
1852 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1853 * that send only immediate data (presently just the control queues) and
1854 * thus do not have any mbufs
1856 static __inline void
1857 reclaim_completed_tx_imm(struct sge_txq *q)
1859 unsigned int reclaim = q->processed - q->cleaned;
1861 q->in_use -= reclaim;
1862 q->cleaned += reclaim;
1866 * ctrl_xmit - send a packet through an SGE control Tx queue
1867 * @adap: the adapter
1868 * @q: the control queue
1871 * Send a packet through an SGE control Tx queue. Packets sent through
1872 * a control queue must fit entirely as immediate data in a single Tx
1873 * descriptor and have no page fragments.
1876 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1879 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1880 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1882 KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1884 wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1885 wrp->wrh_lo = htonl(V_WR_TID(q->token));
1888 again: reclaim_completed_tx_imm(q);
1890 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1891 if (__predict_false(ret)) {
1898 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1901 if (++q->pidx >= q->size) {
1907 t3_write_reg(adap, A_SG_KDOORBELL,
1908 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1916 * restart_ctrlq - restart a suspended control queue
 *	@qs: the queue set containing the control queue
1919 * Resumes transmission on a suspended Tx control queue.
1922 restart_ctrlq(void *data, int npending)
1925 struct sge_qset *qs = (struct sge_qset *)data;
1926 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1927 adapter_t *adap = qs->port->adapter;
1930 again: reclaim_completed_tx_imm(q);
1932 while (q->in_use < q->size &&
1933 (m = mbufq_dequeue(&q->sendq)) != NULL) {
1935 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1938 if (++q->pidx >= q->size) {
1944 if (!mbufq_empty(&q->sendq)) {
1945 setbit(&qs->txq_stopped, TXQ_CTRL);
1947 if (should_restart_tx(q) &&
1948 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1953 t3_write_reg(adap, A_SG_KDOORBELL,
1954 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1959 * Send a management message through control queue 0
1962 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1964 return ctrl_xmit(adap, &adap->sge.qs[0], m);
1968 * free_qset - free the resources of an SGE queue set
1969 * @sc: the controller owning the queue set
1972 * Release the HW and SW resources associated with an SGE queue set, such
1973 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
1974 * queue set must be quiesced prior to calling this.
1977 t3_free_qset(adapter_t *sc, struct sge_qset *q)
1981 reclaim_completed_tx(q, 0, TXQ_ETH);
1982 if (q->txq[TXQ_ETH].txq_mr != NULL)
1983 buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
1984 if (q->txq[TXQ_ETH].txq_ifq != NULL) {
1985 ifq_delete(q->txq[TXQ_ETH].txq_ifq);
1986 free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
1989 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1990 if (q->fl[i].desc) {
1991 mtx_lock_spin(&sc->sge.reg_lock);
1992 t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1993 mtx_unlock_spin(&sc->sge.reg_lock);
1994 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1995 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1997 bus_dma_tag_destroy(q->fl[i].desc_tag);
1998 bus_dma_tag_destroy(q->fl[i].entry_tag);
2000 if (q->fl[i].sdesc) {
2001 free_rx_bufs(sc, &q->fl[i]);
2002 free(q->fl[i].sdesc, M_DEVBUF);
2006 mtx_unlock(&q->lock);
2007 MTX_DESTROY(&q->lock);
2008 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2009 if (q->txq[i].desc) {
2010 mtx_lock_spin(&sc->sge.reg_lock);
2011 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2012 mtx_unlock_spin(&sc->sge.reg_lock);
2013 bus_dmamap_unload(q->txq[i].desc_tag,
2014 q->txq[i].desc_map);
2015 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2016 q->txq[i].desc_map);
2017 bus_dma_tag_destroy(q->txq[i].desc_tag);
2018 bus_dma_tag_destroy(q->txq[i].entry_tag);
2020 if (q->txq[i].sdesc) {
2021 free(q->txq[i].sdesc, M_DEVBUF);
2026 mtx_lock_spin(&sc->sge.reg_lock);
2027 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2028 mtx_unlock_spin(&sc->sge.reg_lock);
2030 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2031 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2033 bus_dma_tag_destroy(q->rspq.desc_tag);
2034 MTX_DESTROY(&q->rspq.lock);
2037 #if defined(INET6) || defined(INET)
2038 tcp_lro_free(&q->lro.ctrl);
2041 bzero(q, sizeof(*q));
2045 * t3_free_sge_resources - free SGE resources
2046 * @sc: the adapter softc
2048 * Frees resources used by the SGE queue sets.
2051 t3_free_sge_resources(adapter_t *sc, int nqsets)
2055 for (i = 0; i < nqsets; ++i) {
2056 TXQ_LOCK(&sc->sge.qs[i]);
2057 t3_free_qset(sc, &sc->sge.qs[i]);
2062 * t3_sge_start - enable SGE
2063 * @sc: the controller softc
2065 * Enables the SGE for DMAs. This is the last step in starting packet
2069 t3_sge_start(adapter_t *sc)
2071 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2075 * t3_sge_stop - disable SGE operation
 *	Disables the DMA engine.  This can be called in emergencies (e.g.,
2079 * from error interrupts) or from normal process context. In the latter
2080 * case it also disables any pending queue restart tasklets. Note that
2081 * if it is called in interrupt context it cannot disable the restart
2082 * tasklets as it cannot wait, however the tasklets will have no effect
2083 * since the doorbells are disabled and the driver will call this again
2084 * later from process context, at which time the tasklets will be stopped
2085 * if they are still running.
2088 t3_sge_stop(adapter_t *sc)
2092 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2097 for (nqsets = i = 0; i < (sc)->params.nports; i++)
2098 nqsets += sc->port[i].nqsets;
2104 for (i = 0; i < nqsets; ++i) {
2105 struct sge_qset *qs = &sc->sge.qs[i];
2107 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2108 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2114 * t3_free_tx_desc - reclaims Tx descriptors and their buffers
2115 * @adapter: the adapter
2116 * @q: the Tx queue to reclaim descriptors from
2117 * @reclaimable: the number of descriptors to reclaim
2118 * @m_vec_size: maximum number of buffers to reclaim
2119 * @desc_reclaimed: returns the number of descriptors reclaimed
2121 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
2122 * Tx buffers. Called with the Tx queue lock held.
 *	Returns the number of buffers reclaimed.
2127 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2129 struct tx_sw_desc *txsd;
2130 unsigned int cidx, mask;
2131 struct sge_txq *q = &qs->txq[queue];
2134 T3_TRACE2(sc->tb[q->cntxt_id & 7],
2135 "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2139 txsd = &q->sdesc[cidx];
2141 mtx_assert(&qs->lock, MA_OWNED);
2142 while (reclaimable--) {
2143 prefetch(q->sdesc[(cidx + 1) & mask].m);
2144 prefetch(q->sdesc[(cidx + 2) & mask].m);
2146 if (txsd->m != NULL) {
2147 if (txsd->flags & TX_SW_DESC_MAPPED) {
2148 bus_dmamap_unload(q->entry_tag, txsd->map);
2149 txsd->flags &= ~TX_SW_DESC_MAPPED;
2151 m_freem_list(txsd->m);
2157 if (++cidx == q->size) {
2167 * is_new_response - check if a response is newly written
2168 * @r: the response descriptor
2169 * @q: the response queue
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static __inline int
2175 is_new_response(const struct rsp_desc *r,
2176 const struct sge_rspq *q)
{
	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
}
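/*
 * This is the usual generation-bit ring technique: the driver flips its
 * expected gen value each time it wraps the response ring, so a descriptor
 * left over from the previous pass still carries the old generation and
 * compares unequal until the hardware rewrites it.
 */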
2181 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2182 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2183 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2184 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2185 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2187 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2188 #define NOMEM_INTR_DELAY 2500
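/*
 * Per the comment above the value is in units of 0.1 us, so 2500
 * corresponds to a 250 us interrupt holdoff while mbufs are scarce.
 */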
2192 * write_ofld_wr - write an offload work request
2193 * @adap: the adapter
2194 * @m: the packet to send
2196 * @pidx: index of the first Tx descriptor to write
2197 * @gen: the generation value to use
2198 * @ndesc: number of descriptors the packet will occupy
2200 * Write an offload work request to send the supplied packet. The packet
2201 * data already carry the work request with most fields populated.
2204 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2205 unsigned int pidx, unsigned int gen, unsigned int ndesc)
2207 unsigned int sgl_flits, flits;
2208 int i, idx, nsegs, wrlen;
2209 struct work_request_hdr *from;
2210 struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2211 struct tx_desc *d = &q->desc[pidx];
2212 struct txq_state txqs;
2213 struct sglist_seg *segs;
2214 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2217 from = (void *)(oh + 1); /* Start of WR within mbuf */
2218 wrlen = m->m_len - sizeof(*oh);
2220 if (!(oh->flags & F_HDR_SGL)) {
2221 write_imm(d, (caddr_t)from, wrlen, gen);
2224 * mbuf with "real" immediate tx data will be enqueue_wr'd by
2225 * t3_push_frames and freed in wr_ack. Others, like those sent
2226 * down by close_conn, t3_send_reset, etc. should be freed here.
2228 if (!(oh->flags & F_HDR_DF))
2233 memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2237 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2239 nsegs = sgl->sg_nseg;
2240 segs = sgl->sg_segs;
2241 for (idx = 0, i = 0; i < nsegs; i++) {
2242 KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2245 sgp->len[idx] = htobe32(segs[i].ss_len);
2246 sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2254 sgl_flits = sgl_len(nsegs);
2259 write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2260 from->wrh_hi, from->wrh_lo);
2264 * ofld_xmit - send a packet through an offload queue
2265 * @adap: the adapter
2266 * @q: the Tx offload queue
2269 * Send an offload packet through an SGE offload queue.
2272 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2276 unsigned int pidx, gen;
2277 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2278 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2280 ndesc = G_HDR_NDESC(oh->flags);
2283 again: reclaim_completed_tx(qs, 16, TXQ_OFLD);
2284 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2285 if (__predict_false(ret)) {
2297 if (q->pidx >= q->size) {
2302 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2303 check_ring_tx_db(adap, q, 1);
2310 * restart_offloadq - restart a suspended offload queue
2311 * @qs: the queue set containing the offload queue
2313 * Resumes transmission on a suspended Tx offload queue.
2316 restart_offloadq(void *data, int npending)
2319 struct sge_qset *qs = data;
2320 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2321 adapter_t *adap = qs->port->adapter;
2325 again: cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2327 while ((m = mbufq_peek(&q->sendq)) != NULL) {
2328 unsigned int gen, pidx;
2329 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2330 unsigned int ndesc = G_HDR_NDESC(oh->flags);
2332 if (__predict_false(q->size - q->in_use < ndesc)) {
2333 setbit(&qs->txq_stopped, TXQ_OFLD);
2334 if (should_restart_tx(q) &&
2335 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2345 if (q->pidx >= q->size) {
2350 (void)mbufq_dequeue(&q->sendq);
2352 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2356 set_bit(TXQ_RUNNING, &q->flags);
2357 set_bit(TXQ_LAST_PKT_DB, &q->flags);
2361 t3_write_reg(adap, A_SG_KDOORBELL,
2362 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2366 * t3_offload_tx - send an offload packet
2369 * Sends an offload packet.  The ofld_hdr at the start of the mbuf selects the
2370 * destination: F_HDR_CTRL indicates whether the packet goes to the control
2371 * queue or the regular offload queue, and G_HDR_QSET selects the queue set.
2374 t3_offload_tx(struct adapter *sc, struct mbuf *m)
2376 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2377 struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2379 if (oh->flags & F_HDR_CTRL) {
2380 m_adj(m, sizeof (*oh)); /* trim ofld_hdr off */
2381 return (ctrl_xmit(sc, qs, m));
2383 return (ofld_xmit(sc, qs, m));
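/*
 * Illustrative sketch (not part of the driver): the ofld_hdr prepended to an
 * offload mbuf carries everything needed to route it, and the decode above
 * follows this shape (macro names as used elsewhere in this file):
 *
 *	oh = mtod(m, struct ofld_hdr *);
 *	qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];	// target queue set
 *	ndesc = G_HDR_NDESC(oh->flags);			// Tx descriptors needed
 *	if (oh->flags & F_HDR_CTRL)
 *		... control queue; the ofld_hdr is trimmed off ...
 *	else
 *		... regular offload queue; the header is kept ...
 */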
2388 restart_tx(struct sge_qset *qs)
2390 struct adapter *sc = qs->port->adapter;
2392 if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2393 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2394 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2395 qs->txq[TXQ_OFLD].restarts++;
2396 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2399 if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2400 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2401 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2402 qs->txq[TXQ_CTRL].restarts++;
2403 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2408 * t3_sge_alloc_qset - initialize an SGE queue set
2409 * @sc: the controller softc
2410 * @id: the queue set id
2411 * @nports: how many Ethernet ports will be using this queue set
2412 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2413 * @p: configuration parameters for this queue set
2414 * @ntxq: number of Tx queues for the queue set
2415 * @pi: port info for queue set
2417 * Allocate resources and initialize an SGE queue set. A queue set
2418 * comprises a response queue, two Rx free-buffer queues, and up to 3
2419 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2420 * queue, offload queue, and control queue.
2423 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2424 const struct qset_params *p, int ntxq, struct port_info *pi)
2426 struct sge_qset *q = &sc->sge.qs[id];
2429 MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2433 if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2434 M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2435 device_printf(sc->dev, "failed to allocate mbuf ring\n");
2438 if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2439 M_NOWAIT | M_ZERO)) == NULL) {
2440 device_printf(sc->dev, "failed to allocate ifq\n");
2443 ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2444 callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2445 callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2446 q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2447 q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2449 init_qset_cntxt(q, id);
2451 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2452 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2453 &q->fl[0].desc, &q->fl[0].sdesc,
2454 &q->fl[0].desc_tag, &q->fl[0].desc_map,
2455 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2456 printf("error %d from alloc ring fl0\n", ret);
2460 if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2461 sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2462 &q->fl[1].desc, &q->fl[1].sdesc,
2463 &q->fl[1].desc_tag, &q->fl[1].desc_map,
2464 sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2465 printf("error %d from alloc ring fl1\n", ret);
2469 if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2470 &q->rspq.phys_addr, &q->rspq.desc, NULL,
2471 &q->rspq.desc_tag, &q->rspq.desc_map,
2472 NULL, NULL)) != 0) {
2473 printf("error %d from alloc ring rspq\n", ret);
2477 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2478 device_get_unit(sc->dev), irq_vec_idx);
2479 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2481 for (i = 0; i < ntxq; ++i) {
2482 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2484 if ((ret = alloc_ring(sc, p->txq_size[i],
2485 sizeof(struct tx_desc), sz,
2486 &q->txq[i].phys_addr, &q->txq[i].desc,
2487 &q->txq[i].sdesc, &q->txq[i].desc_tag,
2488 &q->txq[i].desc_map,
2489 sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2490 printf("error %d from alloc ring tx %i\n", ret, i);
2493 mbufq_init(&q->txq[i].sendq);
2495 q->txq[i].size = p->txq_size[i];
2499 TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2501 TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2502 TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2503 TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2505 q->fl[0].gen = q->fl[1].gen = 1;
2506 q->fl[0].size = p->fl_size;
2507 q->fl[1].size = p->jumbo_size;
2511 q->rspq.size = p->rspq_size;
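/*
 * Descriptive note (an inference from how stop_thres is used in this file):
 * the assignment below sets the free-descriptor threshold at which the
 * Ethernet Tx queue is stopped, sized so that each port sharing the queue
 * set can still post one worst-case request (a maximally fragmented packet's
 * scatter/gather list plus the work-request header flits).
 */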
2513 q->txq[TXQ_ETH].stop_thres = nports *
2514 flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
2516 q->fl[0].buf_size = MCLBYTES;
2517 q->fl[0].zone = zone_pack;
2518 q->fl[0].type = EXT_PACKET;
2520 if (p->jumbo_buf_size == MJUM16BYTES) {
2521 q->fl[1].zone = zone_jumbo16;
2522 q->fl[1].type = EXT_JUMBO16;
2523 } else if (p->jumbo_buf_size == MJUM9BYTES) {
2524 q->fl[1].zone = zone_jumbo9;
2525 q->fl[1].type = EXT_JUMBO9;
2526 } else if (p->jumbo_buf_size == MJUMPAGESIZE) {
2527 q->fl[1].zone = zone_jumbop;
2528 q->fl[1].type = EXT_JUMBOP;
2530 KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2534 q->fl[1].buf_size = p->jumbo_buf_size;
2536 /* Allocate and setup the lro_ctrl structure */
2537 q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2538 #if defined(INET6) || defined(INET)
2539 ret = tcp_lro_init(&q->lro.ctrl);
2541 printf("error %d from tcp_lro_init\n", ret);
2545 q->lro.ctrl.ifp = pi->ifp;
2547 mtx_lock_spin(&sc->sge.reg_lock);
2548 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2549 q->rspq.phys_addr, q->rspq.size,
2550 q->fl[0].buf_size, 1, 0);
2552 printf("error %d from t3_sge_init_rspcntxt\n", ret);
2556 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2557 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2558 q->fl[i].phys_addr, q->fl[i].size,
2559 q->fl[i].buf_size, p->cong_thres, 1,
2562 printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2567 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2568 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2569 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2572 printf("error %d from t3_sge_init_ecntxt\n", ret);
2577 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2578 USE_GTS, SGE_CNTXT_OFLD, id,
2579 q->txq[TXQ_OFLD].phys_addr,
2580 q->txq[TXQ_OFLD].size, 0, 1, 0);
2582 printf("error %d from t3_sge_init_ecntxt\n", ret);
2588 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2590 q->txq[TXQ_CTRL].phys_addr,
2591 q->txq[TXQ_CTRL].size,
2592 q->txq[TXQ_CTRL].token, 1, 0);
2594 printf("error %d from t3_sge_init_ecntxt\n", ret);
2599 mtx_unlock_spin(&sc->sge.reg_lock);
2600 t3_update_qset_coalesce(q, p);
2602 refill_fl(sc, &q->fl[0], q->fl[0].size);
2603 refill_fl(sc, &q->fl[1], q->fl[1].size);
2604 refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2606 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2607 V_NEWTIMER(q->rspq.holdoff_tmr));
2612 mtx_unlock_spin(&sc->sge.reg_lock);
2615 t3_free_qset(sc, q);
2621 * Strip the CPL_RX_PKT header (and any padding) from the mbuf, leaving a
2622 * regular mbuf that starts with the Ethernet header.  Checksums verified by
2623 * hardware and any VLAN tag are recorded in the mbuf packet header here too.
2626 t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
2628 struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2629 struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2630 struct ifnet *ifp = pi->ifp;
2632 if (cpl->vlan_valid) {
2633 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2634 m->m_flags |= M_VLANTAG;
2637 m->m_pkthdr.rcvif = ifp;
2639 * adjust after conversion to mbuf chain
2641 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2642 m->m_len -= (sizeof(*cpl) + ethpad);
2643 m->m_data += (sizeof(*cpl) + ethpad);
2645 if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
2646 struct ether_header *eh = mtod(m, void *);
2649 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2650 struct ether_vlan_header *evh = mtod(m, void *);
2652 eh_type = evh->evl_proto;
2654 eh_type = eh->ether_type;
2656 if (ifp->if_capenable & IFCAP_RXCSUM &&
2657 eh_type == htons(ETHERTYPE_IP)) {
2658 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
2659 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2660 m->m_pkthdr.csum_data = 0xffff;
2661 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2662 eh_type == htons(ETHERTYPE_IPV6)) {
2663 m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
2665 m->m_pkthdr.csum_data = 0xffff;
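/*
 * Illustrative note (not part of the driver): with the flags set above, later
 * layers can skip software checksum verification.  For the IPv4 case a
 * hypothetical consumer would check them roughly like this:
 *
 *	if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
 *	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
 *		// L4 checksum already verified by hardware; csum_data is 0xffff
 *	}
 */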
2671 * get_packet - return the next ingress packet buffer from a free list
2672 * @adap: the adapter that received the packet
2673 * @drop_thres: # of remaining buffers before we start dropping packets
2674 * @qs: the qset that the SGE free list holding the packet belongs to
2675 * @mh: the mbuf header, containing pointers to the head and tail of the mbuf chain
2676 * @r: response descriptor
2678 * Get the next packet from a free list and complete setup of the
2680 * mbuf.  If the packet is small we make a copy and recycle the
2680 * original buffer, otherwise we use the original buffer itself. If a
2681 * positive drop threshold is supplied packets are dropped and their
2682 * buffers recycled if (a) the number of remaining buffers is under the
2683 * threshold and the packet is too big to copy, or (b) the packet should
2684 * be copied but there is no memory for the copy.
2687 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2688 struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2691 unsigned int len_cq = ntohl(r->len_cq);
2692 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2693 int mask, cidx = fl->cidx;
2694 struct rx_sw_desc *sd = &fl->sdesc[cidx];
2695 uint32_t len = G_RSPD_LEN(len_cq);
2696 uint32_t flags = M_EXT;
2697 uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
2702 mask = fl->size - 1;
2703 prefetch(fl->sdesc[(cidx + 1) & mask].m);
2704 prefetch(fl->sdesc[(cidx + 2) & mask].m);
2705 prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
2706 prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);
2709 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2711 if (recycle_enable && len <= SGE_RX_COPY_THRES &&
2712 sopeop == RSPQ_SOP_EOP) {
2713 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
2715 cl = mtod(m, void *);
2716 memcpy(cl, sd->rxsd_cl, len);
2717 recycle_rx_buf(adap, fl, fl->cidx);
2718 m->m_pkthdr.len = m->m_len = len;
2720 mh->mh_head = mh->mh_tail = m;
2725 bus_dmamap_unload(fl->entry_tag, sd->map);
2729 if ((sopeop == RSPQ_SOP_EOP) ||
2730 (sopeop == RSPQ_SOP))
2732 m_init(m, fl->zone, fl->buf_size, M_NOWAIT, MT_DATA, flags);
2733 if (fl->zone == zone_pack) {
2735 * restore clobbered data pointer
2737 m->m_data = m->m_ext.ext_buf;
2739 m_cljset(m, cl, fl->type);
2748 mh->mh_head = mh->mh_tail = m;
2749 m->m_pkthdr.len = len;
2754 case RSPQ_NSOP_NEOP:
2755 if (mh->mh_tail == NULL) {
2756 log(LOG_ERR, "discarding intermediate descriptor entry\n");
2760 mh->mh_tail->m_next = m;
2762 mh->mh_head->m_pkthdr.len += len;
2766 printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2768 if (++fl->cidx == fl->size)
2775 * handle_rsp_cntrl_info - handles control information in a response
2776 * @qs: the queue set corresponding to the response
2777 * @flags: the response control flags
2779 * Handles the control information of an SGE response, such as GTS
2780 * indications and completion credits for the queue set's Tx queues.
2781 * HW coalesces credits; we don't do any extra SW coalescing.
2783 static __inline void
2784 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2786 unsigned int credits;
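	/*
	 * Hardware reports completion credits for three Tx queues per queue
	 * set; as the code below shows, TXQ0 corresponds to the Ethernet
	 * queue, TXQ1 to the offload queue, and TXQ2 to the control queue.
	 */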
2789 if (flags & F_RSPD_TXQ0_GTS)
2790 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2792 credits = G_RSPD_TXQ0_CR(flags);
2794 qs->txq[TXQ_ETH].processed += credits;
2796 credits = G_RSPD_TXQ2_CR(flags);
2798 qs->txq[TXQ_CTRL].processed += credits;
2801 if (flags & F_RSPD_TXQ1_GTS)
2802 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2804 credits = G_RSPD_TXQ1_CR(flags);
2806 qs->txq[TXQ_OFLD].processed += credits;
2811 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2812 unsigned int sleeping)
2818 * process_responses - process responses from an SGE response queue
2819 * @adap: the adapter
2820 * @qs: the queue set to which the response queue belongs
2821 * @budget: how many responses can be processed in this round
2823 * Process responses from an SGE response queue up to the supplied budget.
2824 * Responses include received packets as well as credits and other events
2825 * for the queues that belong to the response queue's queue set.
2826 * A negative budget is effectively unlimited.
2828 * Additionally choose the interrupt holdoff time for the next interrupt
2829 * on this queue. If the system is under memory shortage use a fairly
2830 * long delay to help recovery.
2833 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2835 struct sge_rspq *rspq = &qs->rspq;
2836 struct rsp_desc *r = &rspq->desc[rspq->cidx];
2837 int budget_left = budget;
2838 unsigned int sleeping = 0;
2839 #if defined(INET6) || defined(INET)
2840 int lro_enabled = qs->lro.enabled;
2842 struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2844 struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
2846 static int last_holdoff = 0;
2847 if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2848 printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2849 last_holdoff = rspq->holdoff_tmr;
2852 rspq->next_holdoff = rspq->holdoff_tmr;
2854 while (__predict_true(budget_left && is_new_response(r, rspq))) {
2855 int eth, eop = 0, ethpad = 0;
2856 uint32_t flags = ntohl(r->flags);
2857 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2858 uint8_t opcode = r->rss_hdr.opcode;
2860 eth = (opcode == CPL_RX_PKT);
2862 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2866 printf("async notification\n");
2868 if (mh->mh_head == NULL) {
2869 mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
2872 m = m_gethdr(M_NOWAIT, MT_DATA);
2877 memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2878 m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2879 *mtod(m, char *) = CPL_ASYNC_NOTIF;
2880 opcode = CPL_ASYNC_NOTIF;
2882 rspq->async_notif++;
2884 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2885 struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
2889 rspq->next_holdoff = NOMEM_INTR_DELAY;
2893 if (mh->mh_head == NULL)
2896 mh->mh_tail->m_next = m;
2899 get_imm_packet(adap, r, m);
2900 mh->mh_head->m_pkthdr.len += m->m_len;
2903 } else if (r->len_cq) {
2904 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2906 eop = get_packet(adap, drop_thresh, qs, mh, r);
2908 if (r->rss_hdr.hash_type && !adap->timestamp)
2909 mh->mh_head->m_flags |= M_FLOWID;
2910 mh->mh_head->m_pkthdr.flowid = rss_hash;
2918 if (flags & RSPD_CTRL_MASK) {
2919 sleeping |= flags & RSPD_GTS_MASK;
2920 handle_rsp_cntrl_info(qs, flags);
2924 rspq->offload_pkts++;
2926 adap->cpl_handler[opcode](qs, r, mh->mh_head);
2928 m_freem(mh->mh_head);
2931 } else if (eth && eop) {
2932 struct mbuf *m = mh->mh_head;
2934 t3_rx_eth(adap, m, ethpad);
2937 * The T304 sends incoming packets on any qset. If LRO
2938 * is also enabled, we could end up sending the packet up
2939 * lro_ctrl->ifp's input. That is incorrect.
2941 * The mbuf's rcvif was derived from the cpl header and
2942 * is accurate. Skip LRO and just use that.
2944 #if defined(INET6) || defined(INET)
2945 skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2947 if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
2948 && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
2950 /* successfully queued for LRO */
2955 * LRO not enabled, packet unsuitable for LRO,
2956 * or unable to queue.  Pass it up right now in the regular path.
2959 struct ifnet *ifp = m->m_pkthdr.rcvif;
2960 (*ifp->if_input)(ifp, m);
2967 if (__predict_false(++rspq->cidx == rspq->size)) {
2973 if (++rspq->credits >= 64) {
2974 refill_rspq(adap, rspq, rspq->credits);
2977 __refill_fl_lt(adap, &qs->fl[0], 32);
2978 __refill_fl_lt(adap, &qs->fl[1], 32);
2982 #if defined(INET6) || defined(INET)
2984 while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
2985 struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
2986 SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
2987 tcp_lro_flush(lro_ctrl, queued);
2992 check_ring_db(adap, qs, sleeping);
2994 mb(); /* commit Tx queue processed updates */
2995 if (__predict_false(qs->txq_stopped > 1))
2998 __refill_fl_lt(adap, &qs->fl[0], 512);
2999 __refill_fl_lt(adap, &qs->fl[1], 512);
3000 budget -= budget_left;
3005 * A helper function that processes responses and issues GTS.
3008 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
3011 static int last_holdoff = 0;
3013 work = process_responses(adap, rspq_to_qset(rq), -1);
3015 if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
3016 printf("next_holdoff=%d\n", rq->next_holdoff);
3017 last_holdoff = rq->next_holdoff;
3019 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3020 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
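/*
 * Descriptive note: the A_SG_GTS write above acknowledges the processed
 * responses by returning the new consumer index (V_NEWINDEX) and programs
 * the holdoff timer for the next interrupt (V_NEWTIMER), which is how
 * interrupt coalescing is applied per response queue.
 */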
3027 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3028 * Handles data events from SGE response queues as well as error and other
3029 * async events as they all use the same interrupt pin. We use one SGE
3030 * response queue per port in this mode and protect all response queues with queue 0's lock.
3034 t3b_intr(void *data)
3037 adapter_t *adap = data;
3038 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3040 t3_write_reg(adap, A_PL_CLI, 0);
3041 map = t3_read_reg(adap, A_SG_DATA_INTR);
3046 if (__predict_false(map & F_ERRINTR)) {
3047 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3048 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3049 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3052 mtx_lock(&q0->lock);
3053 for_each_port(adap, i)
3055 process_responses_gts(adap, &adap->sge.qs[i].rspq);
3056 mtx_unlock(&q0->lock);
3060 * The MSI interrupt handler. This needs to handle data events from SGE
3061 * response queues as well as error and other async events as they all use
3062 * the same MSI vector. We use one SGE response queue per port in this mode
3063 * and protect all response queues with queue 0's lock.
3066 t3_intr_msi(void *data)
3068 adapter_t *adap = data;
3069 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3070 int i, new_packets = 0;
3072 mtx_lock(&q0->lock);
3074 for_each_port(adap, i)
3075 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3077 mtx_unlock(&q0->lock);
3078 if (new_packets == 0) {
3079 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3080 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3081 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3086 t3_intr_msix(void *data)
3088 struct sge_qset *qs = data;
3089 adapter_t *adap = qs->port->adapter;
3090 struct sge_rspq *rspq = &qs->rspq;
3092 if (process_responses_gts(adap, rspq) == 0)
3093 rspq->unhandled_irqs++;
3096 #define QDUMP_SBUF_SIZE (32 * 400)
3098 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
3100 struct sge_rspq *rspq;
3101 struct sge_qset *qs;
3102 int i, err, dump_end, idx;
3104 struct rsp_desc *rspd;
3108 qs = rspq_to_qset(rspq);
3109 if (rspq->rspq_dump_count == 0)
3111 if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3113 "dump count is too large %d\n", rspq->rspq_dump_count);
3114 rspq->rspq_dump_count = 0;
3117 if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3119 "dump start of %d is greater than queue size\n",
3120 rspq->rspq_dump_start);
3121 rspq->rspq_dump_start = 0;
3124 err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3127 err = sysctl_wire_old_buffer(req, 0);
3130 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3132 sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3133 (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3134 ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3135 sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3136 ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3138 sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3139 (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3141 dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3142 for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3143 idx = i & (RSPQ_Q_SIZE-1);
3145 rspd = &rspq->desc[idx];
3146 sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3147 idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3148 rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3149 sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3150 rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3151 be32toh(rspd->len_cq), rspd->intr_gen);
3154 err = sbuf_finish(sb);
3155 /* Output a trailing NUL. */
3157 err = SYSCTL_OUT(req, "", 1);
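/*
 * Illustrative sketch: the dump handlers in this file all follow the same
 * sbuf-backed sysctl pattern.  A minimal skeleton, using only calls that
 * already appear in these handlers plus sbuf_delete() to release the sbuf
 * (the handler name is hypothetical), looks roughly like:
 *
 *	static int
 *	example_dump_handler(SYSCTL_HANDLER_ARGS)
 *	{
 *		struct sbuf *sb;
 *		int err;
 *
 *		err = sysctl_wire_old_buffer(req, 0);
 *		if (err)
 *			return (err);
 *		sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
 *		sbuf_printf(sb, "...formatted state...\n");
 *		err = sbuf_finish(sb);
 *		if (err == 0)
 *			err = SYSCTL_OUT(req, "", 1);	// trailing NUL
 *		sbuf_delete(sb);
 *		return (err);
 *	}
 */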
3163 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3165 struct sge_txq *txq;
3166 struct sge_qset *qs;
3167 int i, j, err, dump_end;
3169 struct tx_desc *txd;
3170 uint32_t *WR, wr_hi, wr_lo, gen;
3174 qs = txq_to_qset(txq, TXQ_ETH);
3175 if (txq->txq_dump_count == 0) {
3178 if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3180 "dump count is too large %d\n", txq->txq_dump_count);
3181 txq->txq_dump_count = 1;
3184 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3186 "dump start of %d is greater than queue size\n",
3187 txq->txq_dump_start);
3188 txq->txq_dump_start = 0;
3191 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3194 err = sysctl_wire_old_buffer(req, 0);
3197 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3199 sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3200 (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3201 (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3202 sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
3203 ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3204 ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3205 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3206 txq->txq_dump_start,
3207 (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3209 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3210 for (i = txq->txq_dump_start; i < dump_end; i++) {
3211 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3212 WR = (uint32_t *)txd->flit;
3213 wr_hi = ntohl(WR[0]);
3214 wr_lo = ntohl(WR[1]);
3215 gen = G_WR_GEN(wr_lo);
3217 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3219 for (j = 2; j < 30; j += 4)
3220 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3221 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3224 err = sbuf_finish(sb);
3225 /* Output a trailing NUL. */
3227 err = SYSCTL_OUT(req, "", 1);
3233 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3235 struct sge_txq *txq;
3236 struct sge_qset *qs;
3237 int i, j, err, dump_end;
3239 struct tx_desc *txd;
3240 uint32_t *WR, wr_hi, wr_lo, gen;
3243 qs = txq_to_qset(txq, TXQ_CTRL);
3244 if (txq->txq_dump_count == 0) {
3247 if (txq->txq_dump_count > 256) {
3249 "dump count is too large %d\n", txq->txq_dump_count);
3250 txq->txq_dump_count = 1;
3253 if (txq->txq_dump_start > 255) {
3255 "dump start of %d is greater than queue size\n",
3256 txq->txq_dump_start);
3257 txq->txq_dump_start = 0;
3261 err = sysctl_wire_old_buffer(req, 0);
3264 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3265 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3266 txq->txq_dump_start,
3267 (txq->txq_dump_start + txq->txq_dump_count) & 255);
3269 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3270 for (i = txq->txq_dump_start; i < dump_end; i++) {
3271 txd = &txq->desc[i & (255)];
3272 WR = (uint32_t *)txd->flit;
3273 wr_hi = ntohl(WR[0]);
3274 wr_lo = ntohl(WR[1]);
3275 gen = G_WR_GEN(wr_lo);
3277 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3279 for (j = 2; j < 30; j += 4)
3280 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3281 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3284 err = sbuf_finish(sb);
3285 /* Output a trailing NUL. */
3287 err = SYSCTL_OUT(req, "", 1);
3293 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3295 adapter_t *sc = arg1;
3296 struct qset_params *qsp = &sc->params.sge.qset[0];
3298 struct sge_qset *qs;
3299 int i, j, err, nqsets = 0;
3302 if ((sc->flags & FULL_INIT_DONE) == 0)
3305 coalesce_usecs = qsp->coalesce_usecs;
3306 err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3311 if (coalesce_usecs == qsp->coalesce_usecs)
3314 for (i = 0; i < sc->params.nports; i++)
3315 for (j = 0; j < sc->port[i].nqsets; j++)
3318 coalesce_usecs = max(1, coalesce_usecs);
3320 for (i = 0; i < nqsets; i++) {
3321 qs = &sc->sge.qs[i];
3322 qsp = &sc->params.sge.qset[i];
3323 qsp->coalesce_usecs = coalesce_usecs;
3325 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3326 &sc->sge.qs[0].rspq.lock;
3329 t3_update_qset_coalesce(qs, qsp);
3330 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3331 V_NEWTIMER(qs->rspq.holdoff_tmr));
3339 t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
3341 adapter_t *sc = arg1;
3344 if ((sc->flags & FULL_INIT_DONE) == 0)
3347 timestamp = sc->timestamp;
3348 rc = sysctl_handle_int(oidp, ×tamp, arg2, req);
3353 if (timestamp != sc->timestamp) {
3354 t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
3355 timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
3356 sc->timestamp = timestamp;
3363 t3_add_attach_sysctls(adapter_t *sc)
3365 struct sysctl_ctx_list *ctx;
3366 struct sysctl_oid_list *children;
3368 ctx = device_get_sysctl_ctx(sc->dev);
3369 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3371 /* general adapter information */
3372 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3374 CTLFLAG_RD, &sc->fw_version,
3375 0, "firmware version");
3376 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3378 CTLFLAG_RD, &sc->params.rev,
3380 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3382 CTLFLAG_RD, &sc->port_types,
3383 0, "type of ports");
3384 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3386 CTLFLAG_RW, &cxgb_debug,
3387 0, "enable verbose debugging output");
3388 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
3389 CTLFLAG_RD, &sc->tunq_coalesce,
3390 "#tunneled packets freed");
3391 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3393 CTLFLAG_RD, &txq_fills,
3394 0, "#times txq overrun");
3395 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3397 CTLFLAG_RD, &sc->params.vpd.cclk,
3398 0, "core clock frequency (in KHz)");
3402 static const char *rspq_name = "rspq";
3403 static const char *txq_names[] =
3411 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3413 struct port_info *p = arg1;
3419 parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3421 t3_mac_update_stats(&p->mac);
3424 return (sysctl_handle_64(oidp, parg, 0, req));
3428 t3_add_configured_sysctls(adapter_t *sc)
3430 struct sysctl_ctx_list *ctx;
3431 struct sysctl_oid_list *children;
3434 ctx = device_get_sysctl_ctx(sc->dev);
3435 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3437 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3439 CTLTYPE_INT|CTLFLAG_RW, sc,
3440 0, t3_set_coalesce_usecs,
3441 "I", "interrupt coalescing timer (us)");
3443 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3445 CTLTYPE_INT | CTLFLAG_RW, sc,
3446 0, t3_pkt_timestamp,
3447 "I", "provide packet timestamp instead of connection hash");
3449 for (i = 0; i < sc->params.nports; i++) {
3450 struct port_info *pi = &sc->port[i];
3451 struct sysctl_oid *poid;
3452 struct sysctl_oid_list *poidlist;
3453 struct mac_stats *mstats = &pi->mac.stats;
3455 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3456 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3457 pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
3458 poidlist = SYSCTL_CHILDREN(poid);
3459 SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3460 "nqsets", CTLFLAG_RD, &pi->nqsets,
3463 for (j = 0; j < pi->nqsets; j++) {
3464 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3465 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3466 *ctrlqpoid, *lropoid;
3467 struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3468 *txqpoidlist, *ctrlqpoidlist,
3470 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3472 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3474 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3475 qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
3476 qspoidlist = SYSCTL_CHILDREN(qspoid);
3478 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3479 CTLFLAG_RD, &qs->fl[0].empty, 0,
3480 "freelist #0 empty");
3481 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3482 CTLFLAG_RD, &qs->fl[1].empty, 0,
3483 "freelist #1 empty");
3485 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3486 rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
3487 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3489 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3490 txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3491 txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3493 ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3494 txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
3495 ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3497 lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3498 "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
3499 lropoidlist = SYSCTL_CHILDREN(lropoid);
3501 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3502 CTLFLAG_RD, &qs->rspq.size,
3503 0, "#entries in response queue");
3504 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3505 CTLFLAG_RD, &qs->rspq.cidx,
3506 0, "consumer index");
3507 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3508 CTLFLAG_RD, &qs->rspq.credits,
3510 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
3511 CTLFLAG_RD, &qs->rspq.starved,
3512 0, "#times starved");
3513 SYSCTL_ADD_ULONG(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3514 CTLFLAG_RD, &qs->rspq.phys_addr,
3515 "physical_address_of the queue");
3516 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3517 CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3518 0, "start rspq dump entry");
3519 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3520 CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3521 0, "#rspq entries to dump");
3522 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3523 CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
3524 0, t3_dump_rspq, "A", "dump of the response queue");
3526 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
3527 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3528 "#tunneled packets dropped");
3529 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3530 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
3531 0, "#tunneled packets waiting to be sent");
3533 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3534 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_prod_tail,
3535 0, "#tunneled packets queue producer index");
3536 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3537 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_cons_tail,
3538 0, "#tunneled packets queue consumer index");
3540 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
3541 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3542 0, "#tunneled packets processed by the card");
3543 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3544 CTLFLAG_RD, &txq->cleaned,
3545 0, "#tunneled packets cleaned");
3546 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3547 CTLFLAG_RD, &txq->in_use,
3548 0, "#tunneled packet slots in use");
3549 SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "frees",
3550 CTLFLAG_RD, &txq->txq_frees,
3551 "#tunneled packets freed");
3552 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3553 CTLFLAG_RD, &txq->txq_skipped,
3554 0, "#tunneled packet descriptors skipped");
3555 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
3556 CTLFLAG_RD, &txq->txq_coalesced,
3557 "#tunneled packets coalesced");
3558 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3559 CTLFLAG_RD, &txq->txq_enqueued,
3560 0, "#tunneled packets enqueued to hardware");
3561 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3562 CTLFLAG_RD, &qs->txq_stopped,
3563 0, "tx queues stopped");
3564 SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3565 CTLFLAG_RD, &txq->phys_addr,
3566 "physical_address_of the queue");
3567 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3568 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3569 0, "txq generation");
3570 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3571 CTLFLAG_RD, &txq->cidx,
3572 0, "hardware queue cidx");
3573 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3574 CTLFLAG_RD, &txq->pidx,
3575 0, "hardware queue pidx");
3576 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3577 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3578 0, "txq start idx for dump");
3579 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3580 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3581 0, "txq #entries to dump");
3582 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3583 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3584 0, t3_dump_txq_eth, "A", "dump of the transmit queue");
3586 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3587 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3588 0, "ctrlq start idx for dump");
3589 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3590 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3591 0, "ctrl #entries to dump");
3592 SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3593 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
3594 0, t3_dump_txq_ctrl, "A", "dump of the transmit queue");
3596 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_queued",
3597 CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
3598 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3599 CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
3600 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3601 CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
3602 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3603 CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
3606 /* Now add a node for mac stats. */
3607 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3608 CTLFLAG_RD, NULL, "MAC statistics");
3609 poidlist = SYSCTL_CHILDREN(poid);
3612 * We (ab)use the length argument (arg2) to pass on the offset
3613 * of the data that we are interested in. This is only required
3614 * for the quad counters that are updated from the hardware (we
3615 * make sure that we return the latest value).
3616 * sysctl_handle_macstat first updates *all* the counters from
3617 * the hardware, and then returns the latest value of the
3618 * requested counter. Best would be to update only the
3619 * requested counter from hardware, but t3_mac_update_stats()
3620 * hides all the register details and we don't want to dive into that here.
3623 #define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3624 (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
3625 sysctl_handle_macstat, "QU", 0)
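/*
 * For illustration, CXGB_SYSCTL_ADD_QUAD(tx_octets) expands to:
 *
 *	SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, "tx_octets",
 *	    (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, tx_octets),
 *	    sysctl_handle_macstat, "QU", 0)
 *
 * i.e. arg1 is the port_info and arg2 is the byte offset of the counter
 * within struct mac_stats, which sysctl_handle_macstat uses to locate it.
 */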
3626 CXGB_SYSCTL_ADD_QUAD(tx_octets);
3627 CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3628 CXGB_SYSCTL_ADD_QUAD(tx_frames);
3629 CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3630 CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3631 CXGB_SYSCTL_ADD_QUAD(tx_pause);
3632 CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3633 CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3634 CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3635 CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3636 CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3637 CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3638 CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3639 CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3640 CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3641 CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3642 CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3643 CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3644 CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3645 CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3646 CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3647 CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3648 CXGB_SYSCTL_ADD_QUAD(rx_octets);
3649 CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3650 CXGB_SYSCTL_ADD_QUAD(rx_frames);
3651 CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3652 CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3653 CXGB_SYSCTL_ADD_QUAD(rx_pause);
3654 CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3655 CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3656 CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3657 CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3658 CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3659 CXGB_SYSCTL_ADD_QUAD(rx_runt);
3660 CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3661 CXGB_SYSCTL_ADD_QUAD(rx_short);
3662 CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3663 CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3664 CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3665 CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3666 CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3667 CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3668 CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3669 CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3670 CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3671 CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3672 #undef CXGB_SYSCTL_ADD_QUAD
3674 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3675 CTLFLAG_RD, &mstats->a, 0)
3676 CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3677 CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3678 CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3679 CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3680 CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3681 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3682 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3683 CXGB_SYSCTL_ADD_ULONG(num_toggled);
3684 CXGB_SYSCTL_ADD_ULONG(num_resets);
3685 CXGB_SYSCTL_ADD_ULONG(link_faults);
3686 #undef CXGB_SYSCTL_ADD_ULONG
3691 * t3_get_desc - dump an SGE descriptor for debugging purposes
3692 * @qs: the queue set
3693 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
3694 * @idx: the descriptor index in the queue
3695 * @data: where to dump the descriptor contents
3697 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3698 * size of the descriptor.
3701 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3702 unsigned char *data)
3708 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3710 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3711 return sizeof(struct tx_desc);
3715 if (!qs->rspq.desc || idx >= qs->rspq.size)
3717 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3718 return sizeof(struct rsp_desc);
3722 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3724 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3725 return sizeof(struct rx_desc);