1 /**************************************************************************
2 SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 Copyright (c) 2007-2009, Chelsio Inc.
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
13 2. Neither the name of the Chelsio Corporation nor the names of its
14 contributors may be used to endorse or promote products derived from
15 this software without specific prior written permission.
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 POSSIBILITY OF SUCH DAMAGE.
29 ***************************************************************************/
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include "opt_inet6.h"
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
43 #include <machine/bus.h>
44 #include <machine/resource.h>
46 #include <sys/queue.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
52 #include <sys/sched.h>
54 #include <sys/systm.h>
55 #include <sys/syslog.h>
56 #include <sys/socket.h>
57 #include <sys/sglist.h>
60 #include <net/if_var.h>
62 #include <net/ethernet.h>
63 #include <net/if_vlan_var.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
68 #include <netinet/ip6.h>
69 #include <netinet/tcp.h>
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
77 #include <cxgb_include.h>
81 int multiq_tx_enable = 1;
84 CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
87 extern struct sysctl_oid_list sysctl__hw_cxgb_children;
88 int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
89 SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
90 "size of per-queue mbuf ring");
92 static int cxgb_tx_coalesce_force = 0;
93 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RWTUN,
94 &cxgb_tx_coalesce_force, 0,
95 "coalesce small packets into a single work request regardless of ring state");
97 #define COALESCE_START_DEFAULT TX_ETH_Q_SIZE>>1
98 #define COALESCE_START_MAX (TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
99 #define COALESCE_STOP_DEFAULT TX_ETH_Q_SIZE>>2
100 #define COALESCE_STOP_MIN TX_ETH_Q_SIZE>>5
101 #define TX_RECLAIM_DEFAULT TX_ETH_Q_SIZE>>5
102 #define TX_RECLAIM_MAX TX_ETH_Q_SIZE>>2
103 #define TX_RECLAIM_MIN TX_ETH_Q_SIZE>>6
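/*
 * All of the above are counts of Tx descriptors: coalescing starts by default
 * once the ring is half full and stops at a quarter full; the reclaim
 * threshold defaults to 1/32 of the ring and is clamped between 1/64 and 1/4.
 */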
106 static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
107 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RWTUN,
108 &cxgb_tx_coalesce_enable_start, 0,
109 "coalesce enable threshold");
110 static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
111 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RWTUN,
112 &cxgb_tx_coalesce_enable_stop, 0,
113 "coalesce disable threshold");
114 static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
115 SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RWTUN,
116 &cxgb_tx_reclaim_threshold, 0,
117 "tx cleaning minimum threshold");
120 * XXX don't re-enable this until TOE stops assuming we have an m_ext
123 static int recycle_enable = 0;
125 extern int cxgb_use_16k_clusters;
126 extern int nmbjumbop;
127 extern int nmbjumbo9;
128 extern int nmbjumbo16;
132 #define SGE_RX_SM_BUF_SIZE 1536
133 #define SGE_RX_DROP_THRES 16
134 #define SGE_RX_COPY_THRES 128
137 * Period of the Tx buffer reclaim timer. This timer does not need to run
138 * frequently as Tx buffers are usually reclaimed by new Tx packets.
140 #define TX_RECLAIM_PERIOD (hz >> 1)
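/* hz is the system tick rate, so the reclaim timer fires roughly twice a second. */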
143 * Values for sge_txq.flags
146 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
147 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
151 uint64_t flit[TX_DESC_FLITS];
161 struct rsp_desc { /* response queue descriptor */
162 struct rss_header rss_hdr;
165 uint8_t imm_data[47];
169 #define RX_SW_DESC_MAP_CREATED (1 << 0)
170 #define TX_SW_DESC_MAP_CREATED (1 << 1)
171 #define RX_SW_DESC_INUSE (1 << 3)
172 #define TX_SW_DESC_MAPPED (1 << 4)
174 #define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0)
175 #define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP)
176 #define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP)
177 #define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
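/*
 * Start/end-of-packet markers decoded from a response descriptor's flags:
 * SOP_EOP marks a complete packet in one entry, SOP and EOP mark the first
 * and last entries of a multi-entry packet, and NSOP_NEOP marks neither.
 */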
179 struct tx_sw_desc { /* SW state per Tx descriptor */
185 struct rx_sw_desc { /* SW state per Rx descriptor */
198 struct refill_fl_cb_arg {
200 bus_dma_segment_t seg;
206 * Maps a number of flits to the number of Tx descriptors that can hold them.
209 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
211 * HW allows up to 4 descriptors to be combined into a WR.
213 static uint8_t flit_desc_map[] = {
215 #if SGE_NUM_GENBITS == 1
216 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
217 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
218 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
219 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
220 #elif SGE_NUM_GENBITS == 2
221 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
222 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
223 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
224 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
226 # error "SGE_NUM_GENBITS must be 1 or 2"
230 #define TXQ_LOCK_ASSERT(qs) mtx_assert(&(qs)->lock, MA_OWNED)
231 #define TXQ_TRYLOCK(qs) mtx_trylock(&(qs)->lock)
232 #define TXQ_LOCK(qs) mtx_lock(&(qs)->lock)
233 #define TXQ_UNLOCK(qs) mtx_unlock(&(qs)->lock)
234 #define TXQ_RING_EMPTY(qs) drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
235 #define TXQ_RING_NEEDS_ENQUEUE(qs) \
236 drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
237 #define TXQ_RING_FLUSH(qs) drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
238 #define TXQ_RING_DEQUEUE_COND(qs, func, arg) \
239 drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
240 #define TXQ_RING_DEQUEUE(qs) \
241 drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
245 static void sge_timer_cb(void *arg);
246 static void sge_timer_reclaim(void *arg, int ncount);
247 static void sge_txq_reclaim_handler(void *arg, int ncount);
248 static void cxgb_start_locked(struct sge_qset *qs);
251 * XXX need to cope with bursty scheduling by looking at a wider
252 * window than we do now when determining the need for coalescing
255 static __inline uint64_t
256 check_pkt_coalesce(struct sge_qset *qs)
262 if (__predict_false(cxgb_tx_coalesce_force))
264 txq = &qs->txq[TXQ_ETH];
265 sc = qs->port->adapter;
266 fill = &sc->tunq_fill[qs->idx];
268 if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
269 cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
270 if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
271 cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
273 * If the hardware transmit queue is more than 1/8 full
274 * we mark it as coalescing; we drop back from coalescing
275 * when we go below 1/32 full and there are no packets enqueued.
276 * This provides some degree of hysteresis.
278 if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
279 TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
281 else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
284 return (sc->tunq_coalesce);
289 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
292 #if _BYTE_ORDER == _LITTLE_ENDIAN
294 wr_hilo |= (((uint64_t)wr_lo)<<32);
297 wr_hilo |= (((uint64_t)wr_hi)<<32);
299 wrp->wrh_hilo = wr_hilo;
303 set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
312 struct coalesce_info {
319 coalesce_check(struct mbuf *m, void *arg)
321 struct coalesce_info *ci = arg;
323 if ((m->m_next != NULL) ||
324 ((mtod(m, vm_offset_t) & PAGE_MASK) + m->m_len > PAGE_SIZE))
327 if ((ci->count == 0) || (ci->noncoal == 0 && (ci->count < 7) &&
328 (ci->nbytes + m->m_len <= 10500))) {
330 ci->nbytes += m->m_len;
337 cxgb_dequeue(struct sge_qset *qs)
339 struct mbuf *m, *m_head, *m_tail;
340 struct coalesce_info ci;
343 if (check_pkt_coalesce(qs) == 0)
344 return TXQ_RING_DEQUEUE(qs);
346 m_head = m_tail = NULL;
347 ci.count = ci.nbytes = ci.noncoal = 0;
349 m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
350 if (m_head == NULL) {
352 } else if (m != NULL) {
353 m_tail->m_nextpkt = m;
358 panic("trying to coalesce %d packets into one WR", ci.count);
363 * reclaim_completed_tx - reclaims completed Tx descriptors
364 * @adapter: the adapter
365 * @q: the Tx queue to reclaim completed descriptors from
367 * Reclaims Tx descriptors that the SGE has indicated it has processed,
368 * and frees the associated buffers if possible. Called with the Tx
372 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
374 struct sge_txq *q = &qs->txq[queue];
375 int reclaim = desc_reclaimable(q);
377 if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
378 (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
379 cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
381 if (reclaim < reclaim_min)
384 mtx_assert(&qs->lock, MA_OWNED);
386 t3_free_tx_desc(qs, reclaim, queue);
387 q->cleaned += reclaim;
388 q->in_use -= reclaim;
390 if (isset(&qs->txq_stopped, TXQ_ETH))
391 clrbit(&qs->txq_stopped, TXQ_ETH);
398 cxgb_debugnet_poll_tx(struct sge_qset *qs)
401 return (reclaim_completed_tx(qs, TX_RECLAIM_MAX, TXQ_ETH));
406 * should_restart_tx - are there enough resources to restart a Tx queue?
409 * Checks if there are enough descriptors to restart a suspended Tx queue.
412 should_restart_tx(const struct sge_txq *q)
414 unsigned int r = q->processed - q->cleaned;
416 return q->in_use - r < (q->size >> 1);
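/*
 * That is, restart once fewer than half of the queue's descriptors are still
 * outstanding, where descriptors the SGE has already processed but we have
 * not yet cleaned (q->processed - q->cleaned) no longer count as in use.
 */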
420 * t3_sge_init - initialize SGE
422 * @p: the SGE parameters
424 * Performs SGE initialization needed every time after a chip reset.
425 * We do not initialize any of the queue sets here, instead the driver
426 * top-level must request those individually. We also do not enable DMA
427 * here, that should be done after the queues have been set up.
430 t3_sge_init(adapter_t *adap, struct sge_params *p)
434 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
436 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
437 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
438 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
439 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
440 #if SGE_NUM_GENBITS == 1
441 ctrl |= F_EGRGENCTRL;
443 if (adap->params.rev > 0) {
444 if (!(adap->flags & (USING_MSIX | USING_MSI)))
445 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
447 t3_write_reg(adap, A_SG_CONTROL, ctrl);
448 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
449 V_LORCQDRBTHRSH(512));
450 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
451 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
452 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
453 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
454 adap->params.rev < T3_REV_C ? 1000 : 500);
455 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
456 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
457 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
458 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
459 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
464 * sgl_len - calculates the size of an SGL of the given capacity
465 * @n: the number of SGL entries
467 * Calculates the number of flits needed for a scatter/gather list that
468 * can hold the given number of entries.
470 static __inline unsigned int
471 sgl_len(unsigned int n)
473 return ((3 * n) / 2 + (n & 1));
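/*
 * A pair of SGL entries packs two 64-bit addresses and two 32-bit lengths
 * into 3 flits (see struct sg_ent), i.e. 1.5 flits per entry, and an odd
 * final entry takes 2 flits.  For example, sgl_len(3) == 5.
 */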
477 * get_imm_packet - return the next ingress packet buffer from a response
478 * @resp: the response descriptor containing the packet data
480 * Return a packet containing the immediate data of the given response.
483 get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
486 if (resp->rss_hdr.opcode == CPL_RX_DATA) {
487 const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
488 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
489 } else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
490 const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
491 m->m_len = sizeof(*cpl) + ntohs(cpl->len);
493 m->m_len = IMMED_PKT_SIZE;
494 m->m_ext.ext_buf = NULL;
495 m->m_ext.ext_type = 0;
496 memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
500 static __inline u_int
501 flits_to_desc(u_int n)
503 return (flit_desc_map[n]);
506 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
507 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
508 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
509 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
511 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
512 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
516 * t3_sge_err_intr_handler - SGE async event interrupt handler
517 * @adapter: the adapter
519 * Interrupt handler for SGE asynchronous (non-data) events.
522 t3_sge_err_intr_handler(adapter_t *adapter)
524 unsigned int v, status;
526 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
527 if (status & SGE_PARERR)
528 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
529 status & SGE_PARERR);
530 if (status & SGE_FRAMINGERR)
531 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
532 status & SGE_FRAMINGERR);
533 if (status & F_RSPQCREDITOVERFOW)
534 CH_ALERT(adapter, "SGE response queue credit overflow\n");
536 if (status & F_RSPQDISABLED) {
537 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
540 "packet delivered to disabled response queue (0x%x)\n",
541 (v >> S_RSPQ0DISABLED) & 0xff);
544 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
545 if (status & SGE_FATALERR)
546 t3_fatal_err(adapter);
550 t3_sge_prep(adapter_t *adap, struct sge_params *p)
552 int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;
554 nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
555 nqsets *= adap->params.nports;
557 fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
559 while (!powerof2(fl_q_size))
562 use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
566 jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
567 jumbo_buf_size = MJUM16BYTES;
569 jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
570 jumbo_buf_size = MJUM9BYTES;
572 while (!powerof2(jumbo_q_size))
575 if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
576 device_printf(adap->dev,
577 "Insufficient clusters and/or jumbo buffers.\n");
579 p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);
581 for (i = 0; i < SGE_QSETS; ++i) {
582 struct qset_params *q = p->qset + i;
584 if (adap->params.nports > 2) {
585 q->coalesce_usecs = 50;
588 q->coalesce_usecs = 10;
590 q->coalesce_usecs = 5;
594 q->rspq_size = RSPQ_Q_SIZE;
595 q->fl_size = fl_q_size;
596 q->jumbo_size = jumbo_q_size;
597 q->jumbo_buf_size = jumbo_buf_size;
598 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
599 q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
600 q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
606 t3_sge_alloc(adapter_t *sc)
609 /* The parent tag. */
610 if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
611 1, 0, /* algnmnt, boundary */
612 BUS_SPACE_MAXADDR, /* lowaddr */
613 BUS_SPACE_MAXADDR, /* highaddr */
614 NULL, NULL, /* filter, filterarg */
615 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
616 BUS_SPACE_UNRESTRICTED, /* nsegments */
617 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
619 NULL, NULL, /* lock, lockarg */
621 device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
626 * DMA tag for normal sized RX frames
628 if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
629 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
630 MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
631 device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
636 * DMA tag for jumbo sized RX frames.
638 if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
639 BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
640 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
641 device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
646 * DMA tag for TX frames.
648 if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
649 BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
650 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
651 NULL, NULL, &sc->tx_dmat)) {
652 device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
660 t3_sge_free(struct adapter * sc)
663 if (sc->tx_dmat != NULL)
664 bus_dma_tag_destroy(sc->tx_dmat);
666 if (sc->rx_jumbo_dmat != NULL)
667 bus_dma_tag_destroy(sc->rx_jumbo_dmat);
669 if (sc->rx_dmat != NULL)
670 bus_dma_tag_destroy(sc->rx_dmat);
672 if (sc->parent_dmat != NULL)
673 bus_dma_tag_destroy(sc->parent_dmat);
679 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
682 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
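/*
 * The holdoff timer counts in units of the SG timer tick programmed in
 * t3_sge_init() (core_ticks_per_usec / 10, i.e. roughly 100ns), hence the
 * microseconds-to-units conversion by a factor of 10 above.
 */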
683 qs->rspq.polling = 0 /* p->polling */;
686 #if !defined(__i386__) && !defined(__amd64__)
688 refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
690 struct refill_fl_cb_arg *cb_arg = arg;
692 cb_arg->error = error;
693 cb_arg->seg = segs[0];
699 * refill_fl - refill an SGE free-buffer list
700 * @sc: the controller softc
701 * @q: the free-list to refill
702 * @n: the number of new buffers to allocate
704 * (Re)populate an SGE free-buffer list with up to @n new packet buffers.
705 * The caller must assure that @n does not exceed the queue's capacity.
708 refill_fl(adapter_t *sc, struct sge_fl *q, int n)
710 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
711 struct rx_desc *d = &q->desc[q->pidx];
712 struct refill_fl_cb_arg cb_arg;
720 * We allocate an uninitialized mbuf + cluster; the mbuf is
721 * initialized after rx.
723 if (q->zone == zone_pack) {
724 if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
726 cl = m->m_ext.ext_buf;
728 if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
730 if ((m = m_gethdr_raw(M_NOWAIT, 0)) == NULL) {
731 uma_zfree(q->zone, cl);
735 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
736 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
737 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
738 uma_zfree(q->zone, cl);
741 sd->flags |= RX_SW_DESC_MAP_CREATED;
743 #if !defined(__i386__) && !defined(__amd64__)
744 err = bus_dmamap_load(q->entry_tag, sd->map,
745 cl, q->buf_size, refill_fl_cb, &cb_arg, 0);
747 if (err != 0 || cb_arg.error) {
748 if (q->zone != zone_pack)
749 uma_zfree(q->zone, cl);
754 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
756 sd->flags |= RX_SW_DESC_INUSE;
759 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
760 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
761 d->len_gen = htobe32(V_FLD_GEN1(q->gen));
762 d->gen2 = htobe32(V_FLD_GEN2(q->gen));
767 if (++q->pidx == q->size) {
778 if (q->db_pending >= 32) {
780 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
786 * free_rx_bufs - free the Rx buffers on an SGE free list
787 * @sc: the controller softc
788 * @q: the SGE free list to clean up
790 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
791 * this queue should be stopped before calling this function.
794 free_rx_bufs(adapter_t *sc, struct sge_fl *q)
796 u_int cidx = q->cidx;
798 while (q->credits--) {
799 struct rx_sw_desc *d = &q->sdesc[cidx];
801 if (d->flags & RX_SW_DESC_INUSE) {
802 bus_dmamap_unload(q->entry_tag, d->map);
803 bus_dmamap_destroy(q->entry_tag, d->map);
804 if (q->zone == zone_pack) {
805 m_init(d->m, M_NOWAIT, MT_DATA, M_EXT);
806 uma_zfree(zone_pack, d->m);
808 m_init(d->m, M_NOWAIT, MT_DATA, 0);
810 uma_zfree(q->zone, d->rxsd_cl);
816 if (++cidx == q->size)
822 __refill_fl(adapter_t *adap, struct sge_fl *fl)
824 refill_fl(adap, fl, min(16U, fl->size - fl->credits));
828 __refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
830 uint32_t reclaimable = fl->size - fl->credits;
833 refill_fl(adap, fl, min(max, reclaimable));
837 * recycle_rx_buf - recycle a receive buffer
838 * @adapter: the adapter
839 * @q: the SGE free list
840 * @idx: index of buffer to recycle
842 * Recycles the specified buffer on the given free list by adding it at
843 * the next available slot on the list.
846 recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
848 struct rx_desc *from = &q->desc[idx];
849 struct rx_desc *to = &q->desc[q->pidx];
851 q->sdesc[q->pidx] = q->sdesc[idx];
852 to->addr_lo = from->addr_lo; // already big endian
853 to->addr_hi = from->addr_hi; // likewise
854 wmb(); /* necessary ? */
855 to->len_gen = htobe32(V_FLD_GEN1(q->gen));
856 to->gen2 = htobe32(V_FLD_GEN2(q->gen));
859 if (++q->pidx == q->size) {
863 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
867 alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
872 *addr = segs[0].ds_addr;
876 alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
877 bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
878 bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
880 size_t len = nelem * elem_size;
885 if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
886 BUS_SPACE_MAXADDR_32BIT,
887 BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
888 len, 0, NULL, NULL, tag)) != 0) {
889 device_printf(sc->dev, "Cannot allocate descriptor tag\n");
893 if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
895 device_printf(sc->dev, "Cannot allocate descriptor memory\n");
899 bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
904 len = nelem * sw_size;
905 s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
908 if (parent_entry_tag == NULL)
911 if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
912 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
913 NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
914 TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
915 NULL, NULL, entry_tag)) != 0) {
916 device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
923 sge_slow_intr_handler(void *arg, int ncount)
927 t3_slow_intr_handler(sc);
928 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
929 (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
933 * sge_timer_cb - perform periodic maintenance of an SGE qset
934 * @data: the SGE queue set to maintain
936 * Runs periodically from a timer to perform maintenance of an SGE queue
937 * set. It performs two tasks:
939 * a) Cleans up any completed Tx descriptors that may still be pending.
940 * Normal descriptor cleanup happens when new packets are added to a Tx
941 * queue so this timer is relatively infrequent and does any cleanup only
942 * if the Tx queue has not seen any new packets in a while. We make a
943 * best effort attempt to reclaim descriptors, in that we don't wait
944 * around if we cannot get a queue's lock (which most likely is because
945 * someone else is queueing new packets and so will also handle the clean
946 * up). Since control queues use immediate data exclusively we don't
947 * bother cleaning them up here.
949 * b) Replenishes Rx queues that have run out due to memory shortage.
950 * Normally new Rx buffers are added when existing ones are consumed but
951 * when out of memory a queue can become empty. We try to add only a few
952 * buffers here, the queue will be replenished fully as these new buffers
953 * are used up if memory shortage has subsided.
955 * c) Return coalesced response queue credits in case a response queue is starved.
958 * d) Ring doorbells for T304 tunnel queues since we have seen doorbell
959 * fifo overflows and the FW doesn't implement any recovery scheme yet.
962 sge_timer_cb(void *arg)
965 if ((sc->flags & USING_MSIX) == 0) {
967 struct port_info *pi;
971 int reclaim_ofl, refill_rx;
973 if (sc->open_device_map == 0)
976 for (i = 0; i < sc->params.nports; i++) {
978 for (j = 0; j < pi->nqsets; j++) {
979 qs = &sc->sge.qs[pi->first_qset + j];
981 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
982 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
983 (qs->fl[1].credits < qs->fl[1].size));
984 if (reclaim_ofl || refill_rx) {
985 taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
992 if (sc->params.nports > 2) {
995 for_each_port(sc, i) {
996 struct port_info *pi = &sc->port[i];
998 t3_write_reg(sc, A_SG_KDOORBELL,
1000 (FW_TUNNEL_SGEEC_START + pi->first_qset));
1003 if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
1004 sc->open_device_map != 0)
1005 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1009 * This is meant to be a catch-all function to keep sge state private
1014 t3_sge_init_adapter(adapter_t *sc)
1016 callout_init(&sc->sge_timer_ch, 1);
1017 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1018 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
1023 t3_sge_reset_adapter(adapter_t *sc)
1025 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
1030 t3_sge_init_port(struct port_info *pi)
1032 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
1037 * refill_rspq - replenish an SGE response queue
1038 * @adapter: the adapter
1039 * @q: the response queue to replenish
1040 * @credits: how many new responses to make available
1042 * Replenishes a response queue by making the supplied number of responses
1045 static __inline void
1046 refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
1049 /* mbufs are allocated on demand when a rspq entry is processed. */
1050 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
1051 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
1055 sge_txq_reclaim_handler(void *arg, int ncount)
1057 struct sge_qset *qs = arg;
1060 for (i = 0; i < 3; i++)
1061 reclaim_completed_tx(qs, 16, i);
1065 sge_timer_reclaim(void *arg, int ncount)
1067 struct port_info *pi = arg;
1068 int i, nqsets = pi->nqsets;
1069 adapter_t *sc = pi->adapter;
1070 struct sge_qset *qs;
1073 KASSERT((sc->flags & USING_MSIX) == 0,
1074 ("can't call timer reclaim for msi-x"));
1076 for (i = 0; i < nqsets; i++) {
1077 qs = &sc->sge.qs[pi->first_qset + i];
1079 reclaim_completed_tx(qs, 16, TXQ_OFLD);
1080 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1081 &sc->sge.qs[0].rspq.lock;
1083 if (mtx_trylock(lock)) {
1084 /* XXX currently assume that we are *NOT* polling */
1085 uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
1087 if (qs->fl[0].credits < qs->fl[0].size - 16)
1088 __refill_fl(sc, &qs->fl[0]);
1089 if (qs->fl[1].credits < qs->fl[1].size - 16)
1090 __refill_fl(sc, &qs->fl[1]);
1092 if (status & (1 << qs->rspq.cntxt_id)) {
1093 if (qs->rspq.credits) {
1094 refill_rspq(sc, &qs->rspq, 1);
1096 t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
1097 1 << qs->rspq.cntxt_id);
1106 * init_qset_cntxt - initialize an SGE queue set context info
1107 * @qs: the queue set
1108 * @id: the queue set id
1110 * Initializes the TIDs and context ids for the queues of a queue set.
1113 init_qset_cntxt(struct sge_qset *qs, u_int id)
1116 qs->rspq.cntxt_id = id;
1117 qs->fl[0].cntxt_id = 2 * id;
1118 qs->fl[1].cntxt_id = 2 * id + 1;
1119 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1120 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1121 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1122 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1123 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1125 /* XXX: a sane limit is needed instead of INT_MAX */
1126 mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
1127 mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
1128 mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
1133 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
1135 txq->in_use += ndesc;
1137 * XXX we don't handle stopping of queue
1138 * presumably start handles this when we bump against the end
1140 txqs->gen = txq->gen;
1141 txq->unacked += ndesc;
1142 txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
1143 txq->unacked &= 31;
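/*
 * Request a completion from the SGE roughly once every 32 work requests:
 * whenever the running unacked count crosses 32, its bit 5 is shifted into
 * the WR_COMPL flag position and the count is masked back below 32.
 */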
1144 txqs->pidx = txq->pidx;
1147 if (((txqs->pidx > txq->cidx) &&
1148 (txq->pidx < txqs->pidx) &&
1149 (txq->pidx >= txq->cidx)) ||
1150 ((txqs->pidx < txq->cidx) &&
1151 (txq->pidx >= txq-> cidx)) ||
1152 ((txqs->pidx < txq->cidx) &&
1153 (txq->cidx < txqs->pidx)))
1154 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1155 txqs->pidx, txq->pidx, txq->cidx);
1157 if (txq->pidx >= txq->size) {
1158 txq->pidx -= txq->size;
1165 * calc_tx_descs - calculate the number of Tx descriptors for a packet
1166 * @m: the packet mbufs
1167 * @nsegs: the number of segments
1169 * Returns the number of Tx descriptors needed for the given Ethernet
1170 * packet. Ethernet packets require addition of WR and CPL headers.
1172 static __inline unsigned int
1173 calc_tx_descs(const struct mbuf *m, int nsegs)
1177 if (m->m_pkthdr.len <= PIO_LEN)
1180 flits = sgl_len(nsegs) + 2;
1181 if (m->m_pkthdr.csum_flags & CSUM_TSO)
1182 flits++;
1184 return flits_to_desc(flits);
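/*
 * Example: a non-TSO packet larger than PIO_LEN with 3 DMA segments needs
 * sgl_len(3) + 2 = 7 flits, which flit_desc_map translates to a single Tx
 * descriptor.
 */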
1188 * make_sgl - populate a scatter/gather list for a packet
1189 * @sgp: the SGL to populate
1190 * @segs: the packet dma segments
1191 * @nsegs: the number of segments
1193 * Generates a scatter/gather list for the buffers that make up a packet
1194 * and returns the SGL size in 8-byte words. The caller must size the SGL appropriately.
1197 static __inline void
1198 make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
1202 for (idx = 0, i = 0; i < nsegs; i++) {
1204 * firmware doesn't like empty segments
1206 if (segs[i].ds_len == 0)
1211 sgp->len[idx] = htobe32(segs[i].ds_len);
1212 sgp->addr[idx] = htobe64(segs[i].ds_addr);
1223 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1224 * @adap: the adapter
1227 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
1228 * where the HW is going to sleep just after we checked; however,
1229 * the interrupt handler will then detect the outstanding TX packet
1230 * and ring the doorbell for us.
1232 * When GTS is disabled we unconditionally ring the doorbell.
1234 static __inline void
1235 check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1238 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1239 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1240 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1242 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1245 t3_write_reg(adap, A_SG_KDOORBELL,
1246 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1249 if (mustring || ++q->db_pending >= 32) {
1250 wmb(); /* write descriptors before telling HW */
1251 t3_write_reg(adap, A_SG_KDOORBELL,
1252 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1258 static __inline void
1259 wr_gen2(struct tx_desc *d, unsigned int gen)
1261 #if SGE_NUM_GENBITS == 2
1262 d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1267 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1268 * @ndesc: number of Tx descriptors spanned by the SGL
1269 * @txd: first Tx descriptor to be written
1270 * @txqs: txq state (generation and producer index)
1271 * @txq: the SGE Tx queue
1273 * @flits: number of flits to the start of the SGL in the first descriptor
1274 * @sgl_flits: the SGL size in flits
1275 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1276 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1278 * Write a work request header and an associated SGL. If the SGL is
1279 * small enough to fit into one Tx descriptor it has already been written
1280 * and we just need to write the WR header. Otherwise we distribute the
1281 * SGL across the number of descriptors it spans.
1284 write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1285 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1286 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1289 struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1290 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1292 if (__predict_true(ndesc == 1)) {
1293 set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1294 V_WR_SGLSFLT(flits)) | wr_hi,
1295 htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1298 wr_gen2(txd, txqs->gen);
1301 unsigned int ogen = txqs->gen;
1302 const uint64_t *fp = (const uint64_t *)sgl;
1303 struct work_request_hdr *wp = wrp;
1305 wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1306 V_WR_SGLSFLT(flits)) | wr_hi;
1309 unsigned int avail = WR_FLITS - flits;
1311 if (avail > sgl_flits)
1313 memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1322 if (++txqs->pidx == txq->size) {
1330 * when the head of the mbuf chain
1331 * is freed all clusters will be freed with it
1334 wrp = (struct work_request_hdr *)txd;
1335 wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1336 V_WR_SGLSFLT(1)) | wr_hi;
1337 wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1339 V_WR_GEN(txqs->gen)) | wr_lo;
1340 wr_gen2(txd, txqs->gen);
1343 wrp->wrh_hi |= htonl(F_WR_EOP);
1345 wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1346 wr_gen2((struct tx_desc *)wp, ogen);
1350 /* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1351 #define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
1353 #define GET_VTAG(cntrl, m) \
1355 if ((m)->m_flags & M_VLANTAG) \
1356 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1360 t3_encap(struct sge_qset *qs, struct mbuf **m)
1364 struct sge_txq *txq;
1365 struct txq_state txqs;
1366 struct port_info *pi;
1367 unsigned int ndesc, flits, cntrl, mlen;
1368 int err, nsegs, tso_info = 0;
1370 struct work_request_hdr *wrp;
1371 struct tx_sw_desc *txsd;
1372 struct sg_ent *sgp, *sgl;
1373 uint32_t wr_hi, wr_lo, sgl_flits;
1374 bus_dma_segment_t segs[TX_MAX_SEGS];
1376 struct tx_desc *txd;
1380 txq = &qs->txq[TXQ_ETH];
1381 txd = &txq->desc[txq->pidx];
1382 txsd = &txq->sdesc[txq->pidx];
1388 mtx_assert(&qs->lock, MA_OWNED);
1389 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1390 KASSERT(m0->m_flags & M_PKTHDR, ("not packet header\n"));
1392 if (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1393 m0->m_pkthdr.csum_flags & (CSUM_TSO))
1394 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1396 if (m0->m_nextpkt != NULL) {
1397 busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1401 if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1402 &m0, segs, &nsegs))) {
1404 printf("failed ... err=%d\n", err);
1407 mlen = m0->m_pkthdr.len;
1408 ndesc = calc_tx_descs(m0, nsegs);
1410 txq_prod(txq, ndesc, &txqs);
1412 KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1415 if (m0->m_nextpkt != NULL) {
1416 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1420 panic("trying to coalesce %d packets into one WR", nsegs);
1421 txq->txq_coalesced += nsegs;
1422 wrp = (struct work_request_hdr *)txd;
1423 flits = nsegs*2 + 1;
1425 for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1426 struct cpl_tx_pkt_batch_entry *cbe;
1428 uint32_t *hflit = (uint32_t *)&flit;
1429 int cflags = m0->m_pkthdr.csum_flags;
1431 cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1432 GET_VTAG(cntrl, m0);
1433 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1434 if (__predict_false(!(cflags & CSUM_IP)))
1435 cntrl |= F_TXPKT_IPCSUM_DIS;
1436 if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
1437 CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1438 cntrl |= F_TXPKT_L4CSUM_DIS;
1440 hflit[0] = htonl(cntrl);
1441 hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1442 flit |= htobe64(1 << 24);
1443 cbe = &cpl_batch->pkt_entry[i];
1444 cbe->cntrl = hflit[0];
1445 cbe->len = hflit[1];
1446 cbe->addr = htobe64(segs[i].ds_addr);
1449 wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1450 V_WR_SGLSFLT(flits)) |
1451 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1452 wr_lo = htonl(V_WR_LEN(flits) |
1453 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1454 set_wr_hdr(wrp, wr_hi, wr_lo);
1456 ETHER_BPF_MTAP(pi->ifp, m0);
1457 wr_gen2(txd, txqs.gen);
1458 check_ring_tx_db(sc, txq, 0);
1460 } else if (tso_info) {
1462 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1463 struct ether_header *eh;
1468 GET_VTAG(cntrl, m0);
1469 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1470 hdr->cntrl = htonl(cntrl);
1471 hdr->len = htonl(mlen | 0x80000000);
1473 if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1474 printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
1475 m0, mlen, m0->m_pkthdr.tso_segsz,
1476 (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
1477 panic("tx tso packet too small");
1480 /* Make sure that ether, ip, tcp headers are all in m0 */
1481 if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1482 m0 = m_pullup(m0, TCPPKTHDRSIZE);
1483 if (__predict_false(m0 == NULL)) {
1484 /* XXX panic probably an overreaction */
1485 panic("couldn't fit header into mbuf");
1489 eh = mtod(m0, struct ether_header *);
1490 eth_type = eh->ether_type;
1491 if (eth_type == htons(ETHERTYPE_VLAN)) {
1492 struct ether_vlan_header *evh = (void *)eh;
1494 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1496 eth_type = evh->evl_proto;
1498 tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1502 if (eth_type == htons(ETHERTYPE_IP)) {
1503 struct ip *ip = l3hdr;
1505 tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1506 tcp = (struct tcphdr *)(ip + 1);
1507 } else if (eth_type == htons(ETHERTYPE_IPV6)) {
1508 struct ip6_hdr *ip6 = l3hdr;
1510 KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1511 ("%s: CSUM_TSO with ip6_nxt %d",
1512 __func__, ip6->ip6_nxt));
1514 tso_info |= F_LSO_IPV6;
1515 tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1516 tcp = (struct tcphdr *)(ip6 + 1);
1518 panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1520 tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1521 hdr->lso_info = htonl(tso_info);
1523 if (__predict_false(mlen <= PIO_LEN)) {
1525 * The packet is not undersized but still fits in PIO_LEN.
1526 * This indicates a TSO bug at the higher levels.
1529 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1530 flits = (mlen + 7) / 8 + 3;
1531 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1532 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1533 F_WR_SOP | F_WR_EOP | txqs.compl);
1534 wr_lo = htonl(V_WR_LEN(flits) |
1535 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1536 set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1538 ETHER_BPF_MTAP(pi->ifp, m0);
1539 wr_gen2(txd, txqs.gen);
1540 check_ring_tx_db(sc, txq, 0);
1546 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1548 GET_VTAG(cntrl, m0);
1549 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1550 if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1551 cntrl |= F_TXPKT_IPCSUM_DIS;
1552 if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
1553 CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1554 cntrl |= F_TXPKT_L4CSUM_DIS;
1555 cpl->cntrl = htonl(cntrl);
1556 cpl->len = htonl(mlen | 0x80000000);
1558 if (mlen <= PIO_LEN) {
1560 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1561 flits = (mlen + 7) / 8 + 2;
1563 wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1564 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1565 F_WR_SOP | F_WR_EOP | txqs.compl);
1566 wr_lo = htonl(V_WR_LEN(flits) |
1567 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1568 set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1570 ETHER_BPF_MTAP(pi->ifp, m0);
1571 wr_gen2(txd, txqs.gen);
1572 check_ring_tx_db(sc, txq, 0);
1578 wrp = (struct work_request_hdr *)txd;
1579 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1580 make_sgl(sgp, segs, nsegs);
1582 sgl_flits = sgl_len(nsegs);
1584 ETHER_BPF_MTAP(pi->ifp, m0);
1586 KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1587 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1588 wr_lo = htonl(V_WR_TID(txq->token));
1589 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1590 sgl_flits, wr_hi, wr_lo);
1591 check_ring_tx_db(sc, txq, 0);
1598 cxgb_debugnet_encap(struct sge_qset *qs, struct mbuf **m)
1602 error = t3_encap(qs, m);
1604 check_ring_tx_db(qs->port->adapter, &qs->txq[TXQ_ETH], 1);
1605 else if (*m != NULL) {
1614 cxgb_tx_watchdog(void *arg)
1616 struct sge_qset *qs = arg;
1617 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1619 if (qs->coalescing != 0 &&
1620 (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1623 else if (qs->coalescing == 0 &&
1624 (txq->in_use >= cxgb_tx_coalesce_enable_start))
1626 if (TXQ_TRYLOCK(qs)) {
1627 qs->qs_flags |= QS_FLUSHING;
1628 cxgb_start_locked(qs);
1629 qs->qs_flags &= ~QS_FLUSHING;
1632 if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1633 callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1634 qs, txq->txq_watchdog.c_cpu);
1638 cxgb_tx_timeout(void *arg)
1640 struct sge_qset *qs = arg;
1641 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1643 if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1645 if (TXQ_TRYLOCK(qs)) {
1646 qs->qs_flags |= QS_TIMEOUT;
1647 cxgb_start_locked(qs);
1648 qs->qs_flags &= ~QS_TIMEOUT;
1654 cxgb_start_locked(struct sge_qset *qs)
1656 struct mbuf *m_head = NULL;
1657 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1658 struct port_info *pi = qs->port;
1659 struct ifnet *ifp = pi->ifp;
1661 if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1662 reclaim_completed_tx(qs, 0, TXQ_ETH);
1664 if (!pi->link_config.link_ok) {
1668 TXQ_LOCK_ASSERT(qs);
1669 while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1670 pi->link_config.link_ok) {
1671 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1673 if (txq->size - txq->in_use <= TX_MAX_DESC)
1676 if ((m_head = cxgb_dequeue(qs)) == NULL)
1679 * Encapsulation can modify our pointer, and or make it
1680 * NULL on failure. In that event, we can't requeue.
1682 if (t3_encap(qs, &m_head) || m_head == NULL)
1688 if (txq->db_pending)
1689 check_ring_tx_db(pi->adapter, txq, 1);
1691 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1692 pi->link_config.link_ok)
1693 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1694 qs, txq->txq_timer.c_cpu);
1700 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1702 struct port_info *pi = qs->port;
1703 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1704 struct buf_ring *br = txq->txq_mr;
1707 avail = txq->size - txq->in_use;
1708 TXQ_LOCK_ASSERT(qs);
1711 * We can only do a direct transmit if the following are true:
1712 * - we aren't coalescing (ring < 3/4 full)
1713 * - the link is up -- checked in caller
1714 * - there are no packets enqueued already
1715 * - there is space in hardware transmit queue
1717 if (check_pkt_coalesce(qs) == 0 &&
1718 !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1719 if (t3_encap(qs, &m)) {
1721 (error = drbr_enqueue(ifp, br, m)) != 0)
1724 if (txq->db_pending)
1725 check_ring_tx_db(pi->adapter, txq, 1);
1728 * We've bypassed the buf ring so we need to update
1729 * the stats directly
1731 txq->txq_direct_packets++;
1732 txq->txq_direct_bytes += m->m_pkthdr.len;
1734 } else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1737 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1738 if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1739 (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1740 cxgb_start_locked(qs);
1741 else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1742 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1743 qs, txq->txq_timer.c_cpu);
1748 cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1750 struct sge_qset *qs;
1751 struct port_info *pi = ifp->if_softc;
1752 int error, qidx = pi->first_qset;
1754 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0
1755 ||(!pi->link_config.link_ok)) {
1760 /* check if flowid is set */
1761 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1762 qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1764 qs = &pi->adapter->sge.qs[qidx];
1766 if (TXQ_TRYLOCK(qs)) {
1768 error = cxgb_transmit_locked(ifp, qs, m);
1771 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1776 cxgb_qflush(struct ifnet *ifp)
1779 * flush any enqueued mbufs in the buf_rings
1780 * and in the transmit queues
1787 * write_imm - write a packet into a Tx descriptor as immediate data
1788 * @d: the Tx descriptor to write
1790 * @len: the length of packet data to write as immediate data
1791 * @gen: the generation bit value to write
1793 * Writes a packet as immediate data into a Tx descriptor. The packet
1794 * contains a work request at its beginning. We must write the packet
1795 * carefully so the SGE doesn't read it accidentally before it's written in its entirety.
1798 static __inline void
1799 write_imm(struct tx_desc *d, caddr_t src,
1800 unsigned int len, unsigned int gen)
1802 struct work_request_hdr *from = (struct work_request_hdr *)src;
1803 struct work_request_hdr *to = (struct work_request_hdr *)d;
1804 uint32_t wr_hi, wr_lo;
1806 KASSERT(len <= WR_LEN && len >= sizeof(*from),
1807 ("%s: invalid len %d", __func__, len));
1809 memcpy(&to[1], &from[1], len - sizeof(*from));
1810 wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1811 V_WR_BCNTLFLT(len & 7));
1812 wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
1813 set_wr_hdr(to, wr_hi, wr_lo);
1819 * check_desc_avail - check descriptor availability on a send queue
1820 * @adap: the adapter
1822 * @m: the packet needing the descriptors
1823 * @ndesc: the number of Tx descriptors needed
1824 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1826 * Checks if the requested number of Tx descriptors is available on an
1827 * SGE send queue. If the queue is already suspended or not enough
1828 * descriptors are available the packet is queued for later transmission.
1829 * Must be called with the Tx queue locked.
1831 * Returns 0 if enough descriptors are available, 1 if there aren't
1832 * enough descriptors and the packet has been queued, and 2 if the caller
1833 * needs to retry because there weren't enough descriptors at the
1834 * beginning of the call but some freed up in the mean time.
1837 check_desc_avail(adapter_t *adap, struct sge_txq *q,
1838 struct mbuf *m, unsigned int ndesc,
1842 * XXX We currently only use this for checking the control queue;
1843 * the control queue is only used for binding qsets, which happens
1844 * at init time, so we are guaranteed enough descriptors.
1846 if (__predict_false(mbufq_len(&q->sendq))) {
1847 addq_exit: (void )mbufq_enqueue(&q->sendq, m);
1850 if (__predict_false(q->size - q->in_use < ndesc)) {
1852 struct sge_qset *qs = txq_to_qset(q, qid);
1854 setbit(&qs->txq_stopped, qid);
1855 if (should_restart_tx(q) &&
1856 test_and_clear_bit(qid, &qs->txq_stopped))
1867 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1868 * @q: the SGE control Tx queue
1870 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1871 * that send only immediate data (presently just the control queues) and
1872 * thus do not have any mbufs
1874 static __inline void
1875 reclaim_completed_tx_imm(struct sge_txq *q)
1877 unsigned int reclaim = q->processed - q->cleaned;
1879 q->in_use -= reclaim;
1880 q->cleaned += reclaim;
1884 * ctrl_xmit - send a packet through an SGE control Tx queue
1885 * @adap: the adapter
1886 * @q: the control queue
1889 * Send a packet through an SGE control Tx queue. Packets sent through
1890 * a control queue must fit entirely as immediate data in a single Tx
1891 * descriptor and have no page fragments.
1894 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1897 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1898 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1900 KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1902 wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1903 wrp->wrh_lo = htonl(V_WR_TID(q->token));
1906 again: reclaim_completed_tx_imm(q);
1908 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1909 if (__predict_false(ret)) {
1916 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1919 if (++q->pidx >= q->size) {
1925 t3_write_reg(adap, A_SG_KDOORBELL,
1926 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1934 * restart_ctrlq - restart a suspended control queue
1935 * @qs: the queue set containing the control queue
1937 * Resumes transmission on a suspended Tx control queue.
1940 restart_ctrlq(void *data, int npending)
1943 struct sge_qset *qs = (struct sge_qset *)data;
1944 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1945 adapter_t *adap = qs->port->adapter;
1948 again: reclaim_completed_tx_imm(q);
1950 while (q->in_use < q->size &&
1951 (m = mbufq_dequeue(&q->sendq)) != NULL) {
1953 write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1956 if (++q->pidx >= q->size) {
1962 if (mbufq_len(&q->sendq)) {
1963 setbit(&qs->txq_stopped, TXQ_CTRL);
1965 if (should_restart_tx(q) &&
1966 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1971 t3_write_reg(adap, A_SG_KDOORBELL,
1972 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1977 * Send a management message through control queue 0
1980 t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1982 return ctrl_xmit(adap, &adap->sge.qs[0], m);
1986 * free_qset - free the resources of an SGE queue set
1987 * @sc: the controller owning the queue set
1990 * Release the HW and SW resources associated with an SGE queue set, such
1991 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
1992 * queue set must be quiesced prior to calling this.
1995 t3_free_qset(adapter_t *sc, struct sge_qset *q)
1999 reclaim_completed_tx(q, 0, TXQ_ETH);
2000 if (q->txq[TXQ_ETH].txq_mr != NULL)
2001 buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
2002 if (q->txq[TXQ_ETH].txq_ifq != NULL) {
2003 ifq_delete(q->txq[TXQ_ETH].txq_ifq);
2004 free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
2007 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2008 if (q->fl[i].desc) {
2009 mtx_lock_spin(&sc->sge.reg_lock);
2010 t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
2011 mtx_unlock_spin(&sc->sge.reg_lock);
2012 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
2013 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
2015 bus_dma_tag_destroy(q->fl[i].desc_tag);
2016 bus_dma_tag_destroy(q->fl[i].entry_tag);
2018 if (q->fl[i].sdesc) {
2019 free_rx_bufs(sc, &q->fl[i]);
2020 free(q->fl[i].sdesc, M_DEVBUF);
2024 mtx_unlock(&q->lock);
2025 MTX_DESTROY(&q->lock);
2026 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2027 if (q->txq[i].desc) {
2028 mtx_lock_spin(&sc->sge.reg_lock);
2029 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2030 mtx_unlock_spin(&sc->sge.reg_lock);
2031 bus_dmamap_unload(q->txq[i].desc_tag,
2032 q->txq[i].desc_map);
2033 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2034 q->txq[i].desc_map);
2035 bus_dma_tag_destroy(q->txq[i].desc_tag);
2036 bus_dma_tag_destroy(q->txq[i].entry_tag);
2038 if (q->txq[i].sdesc) {
2039 free(q->txq[i].sdesc, M_DEVBUF);
2044 mtx_lock_spin(&sc->sge.reg_lock);
2045 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2046 mtx_unlock_spin(&sc->sge.reg_lock);
2048 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2049 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2051 bus_dma_tag_destroy(q->rspq.desc_tag);
2052 MTX_DESTROY(&q->rspq.lock);
2055 #if defined(INET6) || defined(INET)
2056 tcp_lro_free(&q->lro.ctrl);
2059 bzero(q, sizeof(*q));
2063 * t3_free_sge_resources - free SGE resources
2064 * @sc: the adapter softc
2066 * Frees resources used by the SGE queue sets.
2069 t3_free_sge_resources(adapter_t *sc, int nqsets)
2073 for (i = 0; i < nqsets; ++i) {
2074 TXQ_LOCK(&sc->sge.qs[i]);
2075 t3_free_qset(sc, &sc->sge.qs[i]);
2080 * t3_sge_start - enable SGE
2081 * @sc: the controller softc
2083 * Enables the SGE for DMAs. This is the last step in starting packet
2087 t3_sge_start(adapter_t *sc)
2089 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2093 * t3_sge_stop - disable SGE operation
2096 * Disables the DMA engine. This can be called in emergencies (e.g.,
2097 * from error interrupts) or from normal process context. In the latter
2098 * case it also disables any pending queue restart tasklets. Note that
2099 * if it is called in interrupt context it cannot disable the restart
2100 * tasklets as it cannot wait, however the tasklets will have no effect
2101 * since the doorbells are disabled and the driver will call this again
2102 * later from process context, at which time the tasklets will be stopped
2103 * if they are still running.
2106 t3_sge_stop(adapter_t *sc)
2110 t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2115 for (nqsets = i = 0; i < (sc)->params.nports; i++)
2116 nqsets += sc->port[i].nqsets;
2122 for (i = 0; i < nqsets; ++i) {
2123 struct sge_qset *qs = &sc->sge.qs[i];
2125 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2126 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2132 * t3_free_tx_desc - reclaims Tx descriptors and their buffers
2133 * @adapter: the adapter
2134 * @q: the Tx queue to reclaim descriptors from
2135 * @reclaimable: the number of descriptors to reclaim
2136 * @m_vec_size: maximum number of buffers to reclaim
2137 * @desc_reclaimed: returns the number of descriptors reclaimed
2139 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
2140 * Tx buffers. Called with the Tx queue lock held.
2142 * Returns the number of Tx buffers reclaimed
2145 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2147 struct tx_sw_desc *txsd;
2148 unsigned int cidx, mask;
2149 struct sge_txq *q = &qs->txq[queue];
2152 T3_TRACE2(sc->tb[q->cntxt_id & 7],
2153 "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2157 txsd = &q->sdesc[cidx];
2159 mtx_assert(&qs->lock, MA_OWNED);
2160 while (reclaimable--) {
2161 prefetch(q->sdesc[(cidx + 1) & mask].m);
2162 prefetch(q->sdesc[(cidx + 2) & mask].m);
2164 if (txsd->m != NULL) {
2165 if (txsd->flags & TX_SW_DESC_MAPPED) {
2166 bus_dmamap_unload(q->entry_tag, txsd->map);
2167 txsd->flags &= ~TX_SW_DESC_MAPPED;
2169 m_freem_list(txsd->m);
2175 if (++cidx == q->size) {
2185 * is_new_response - check if a response is newly written
2186 * @r: the response descriptor
2187 * @q: the response queue
2189 * Returns true if a response descriptor contains a yet unprocessed
2193 is_new_response(const struct rsp_desc *r,
2194 const struct sge_rspq *q)
2196 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
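/*
 * The SGE flips the generation bit it writes to response descriptors each
 * time it wraps the ring, so an entry whose generation matches the queue's
 * current generation is one we have not processed yet.
 */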
2199 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2200 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2201 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2202 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2203 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2205 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2206 #define NOMEM_INTR_DELAY 2500
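/* 2500 units of 0.1us, i.e. 250us. */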
2210 * write_ofld_wr - write an offload work request
2211 * @adap: the adapter
2212 * @m: the packet to send
2214 * @pidx: index of the first Tx descriptor to write
2215 * @gen: the generation value to use
2216 * @ndesc: number of descriptors the packet will occupy
2218 * Write an offload work request to send the supplied packet. The packet
2219 * data already carry the work request with most fields populated.
2222 write_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2223 unsigned int pidx, unsigned int gen, unsigned int ndesc)
2225 unsigned int sgl_flits, flits;
2226 int i, idx, nsegs, wrlen;
2227 struct work_request_hdr *from;
2228 struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2229 struct tx_desc *d = &q->desc[pidx];
2230 struct txq_state txqs;
2231 struct sglist_seg *segs;
2232 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2235 from = (void *)(oh + 1); /* Start of WR within mbuf */
2236 wrlen = m->m_len - sizeof(*oh);
2238 if (!(oh->flags & F_HDR_SGL)) {
2239 write_imm(d, (caddr_t)from, wrlen, gen);
2242 * mbuf with "real" immediate tx data will be enqueue_wr'd by
2243 * t3_push_frames and freed in wr_ack. Others, like those sent
2244 * down by close_conn, t3_send_reset, etc. should be freed here.
2246 if (!(oh->flags & F_HDR_DF))
2251 memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2255 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2257 nsegs = sgl->sg_nseg;
2258 segs = sgl->sg_segs;
2259 for (idx = 0, i = 0; i < nsegs; i++) {
2260 KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2263 sgp->len[idx] = htobe32(segs[i].ss_len);
2264 sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2272 sgl_flits = sgl_len(nsegs);
2277 write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2278 from->wrh_hi, from->wrh_lo);
2282 * ofld_xmit - send a packet through an offload queue
2283 * @adap: the adapter
2284 * @q: the Tx offload queue
2287 * Send an offload packet through an SGE offload queue.
2290 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2294 unsigned int pidx, gen;
2295 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2296 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2298 ndesc = G_HDR_NDESC(oh->flags);
2301 again: reclaim_completed_tx(qs, 16, TXQ_OFLD);
2302 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2303 if (__predict_false(ret)) {
2315 if (q->pidx >= q->size) {
2320 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2321 check_ring_tx_db(adap, q, 1);
2328 * restart_offloadq - restart a suspended offload queue
2329 * @qs: the queue set containing the offload queue
2331 * Resumes transmission on a suspended Tx offload queue.
2334 restart_offloadq(void *data, int npending)
2337 struct sge_qset *qs = data;
2338 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2339 adapter_t *adap = qs->port->adapter;
2343 again: cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2345 while ((m = mbufq_first(&q->sendq)) != NULL) {
2346 unsigned int gen, pidx;
2347 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2348 unsigned int ndesc = G_HDR_NDESC(oh->flags);
2350 if (__predict_false(q->size - q->in_use < ndesc)) {
2351 setbit(&qs->txq_stopped, TXQ_OFLD);
2352 if (should_restart_tx(q) &&
2353 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2363 if (q->pidx >= q->size) {
2368 (void)mbufq_dequeue(&q->sendq);
2370 write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2374 set_bit(TXQ_RUNNING, &q->flags);
2375 set_bit(TXQ_LAST_PKT_DB, &q->flags);
2379 t3_write_reg(adap, A_SG_KDOORBELL,
2380 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
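/*
 * Editor's note on the stop/restart handshake above (an interpretation, not
 * from the original comments): when descriptors run out the task marks the
 * queue stopped and immediately re-checks should_restart_tx(); if enough
 * completions arrived in the meantime it clears the stopped bit itself and
 * continues, otherwise the bit stays set and restart_tx(), driven by response
 * processing, re-queues this task later.  The final doorbell write tells the
 * SGE that fresh descriptors are available on the offload egress context.
 */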
2384 * t3_offload_tx - send an offload packet
2387 * Sends an offload packet. The flags in the packet's ofld_hdr select the
2388 * appropriate Tx queue: F_HDR_CTRL chooses between the control and the
2389 * regular offload queue, and G_HDR_QSET() selects the queue set.
2392 t3_offload_tx(struct adapter *sc, struct mbuf *m)
2394 struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2395 struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2397 if (oh->flags & F_HDR_CTRL) {
2398 m_adj(m, sizeof (*oh)); /* trim ofld_hdr off */
2399 return (ctrl_xmit(sc, qs, m));
2401 return (ofld_xmit(sc, qs, m));
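/*
 * Editor's example (hypothetical values): an mbuf whose ofld_hdr has
 * F_HDR_CTRL set and a qset field of 1 is trimmed and handed to ctrl_xmit()
 * on sc->sge.qs[1]; without F_HDR_CTRL the same mbuf goes through
 * ofld_xmit() on that queue set instead.
 */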
2406 restart_tx(struct sge_qset *qs)
2408 struct adapter *sc = qs->port->adapter;
2410 if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2411 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2412 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2413 qs->txq[TXQ_OFLD].restarts++;
2414 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2417 if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2418 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2419 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2420 qs->txq[TXQ_CTRL].restarts++;
2421 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2426 * t3_sge_alloc_qset - initialize an SGE queue set
2427 * @sc: the controller softc
2428 * @id: the queue set id
2429 * @nports: how many Ethernet ports will be using this queue set
2430 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2431 * @p: configuration parameters for this queue set
2432 * @ntxq: number of Tx queues for the queue set
2433 * @pi: port info for queue set
2435 * Allocate resources and initialize an SGE queue set. A queue set
2436 * comprises a response queue, two Rx free-buffer queues, and up to 3
2437 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2438 * queue, offload queue, and control queue.
2441 t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2442 const struct qset_params *p, int ntxq, struct port_info *pi)
2444 struct sge_qset *q = &sc->sge.qs[id];
2447 MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2451 if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2452 M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2453 device_printf(sc->dev, "failed to allocate mbuf ring\n");
2456 if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2457 M_NOWAIT | M_ZERO)) == NULL) {
2458 device_printf(sc->dev, "failed to allocate ifq\n");
2461 ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2462 callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2463 callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2464 q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2465 q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2467 init_qset_cntxt(q, id);
2469 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2470 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2471 &q->fl[0].desc, &q->fl[0].sdesc,
2472 &q->fl[0].desc_tag, &q->fl[0].desc_map,
2473 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2474 printf("error %d from alloc ring fl0\n", ret);
2478 if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2479 sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2480 &q->fl[1].desc, &q->fl[1].sdesc,
2481 &q->fl[1].desc_tag, &q->fl[1].desc_map,
2482 sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2483 printf("error %d from alloc ring fl1\n", ret);
2487 if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2488 &q->rspq.phys_addr, &q->rspq.desc, NULL,
2489 &q->rspq.desc_tag, &q->rspq.desc_map,
2490 NULL, NULL)) != 0) {
2491 printf("error %d from alloc ring rspq\n", ret);
2495 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2496 device_get_unit(sc->dev), irq_vec_idx);
2497 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2499 for (i = 0; i < ntxq; ++i) {
2500 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2502 if ((ret = alloc_ring(sc, p->txq_size[i],
2503 sizeof(struct tx_desc), sz,
2504 &q->txq[i].phys_addr, &q->txq[i].desc,
2505 &q->txq[i].sdesc, &q->txq[i].desc_tag,
2506 &q->txq[i].desc_map,
2507 sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2508 printf("error %d from alloc ring tx %i\n", ret, i);
2511 mbufq_init(&q->txq[i].sendq, INT_MAX);
2513 q->txq[i].size = p->txq_size[i];
2517 TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2519 TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2520 TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2521 TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2523 q->fl[0].gen = q->fl[1].gen = 1;
2524 q->fl[0].size = p->fl_size;
2525 q->fl[1].size = p->jumbo_size;
2529 q->rspq.size = p->rspq_size;
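/*
 * Editor's note (an interpretation): the stop threshold below is the
 * worst-case descriptor count of one maximally fragmented frame (an SGL of
 * TX_MAX_SEGS + 1 entries plus a few flits of WR header), scaled by the
 * number of ports sharing this queue set, so the Ethernet txq is stopped
 * while there is still room for one more worst-case frame per port.
 */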
2531 q->txq[TXQ_ETH].stop_thres = nports *
2532 flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
2534 q->fl[0].buf_size = MCLBYTES;
2535 q->fl[0].zone = zone_pack;
2536 q->fl[0].type = EXT_PACKET;
2538 if (p->jumbo_buf_size == MJUM16BYTES) {
2539 q->fl[1].zone = zone_jumbo16;
2540 q->fl[1].type = EXT_JUMBO16;
2541 } else if (p->jumbo_buf_size == MJUM9BYTES) {
2542 q->fl[1].zone = zone_jumbo9;
2543 q->fl[1].type = EXT_JUMBO9;
2544 } else if (p->jumbo_buf_size == MJUMPAGESIZE) {
2545 q->fl[1].zone = zone_jumbop;
2546 q->fl[1].type = EXT_JUMBOP;
2548 KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2552 q->fl[1].buf_size = p->jumbo_buf_size;
2554 /* Allocate and setup the lro_ctrl structure */
2555 q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2556 #if defined(INET6) || defined(INET)
2557 ret = tcp_lro_init(&q->lro.ctrl);
2559 printf("error %d from tcp_lro_init\n", ret);
2563 q->lro.ctrl.ifp = pi->ifp;
2565 mtx_lock_spin(&sc->sge.reg_lock);
2566 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2567 q->rspq.phys_addr, q->rspq.size,
2568 q->fl[0].buf_size, 1, 0);
2570 printf("error %d from t3_sge_init_rspcntxt\n", ret);
2574 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2575 ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2576 q->fl[i].phys_addr, q->fl[i].size,
2577 q->fl[i].buf_size, p->cong_thres, 1,
2580 printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2585 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2586 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2587 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2590 printf("error %d from t3_sge_init_ecntxt\n", ret);
2595 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2596 USE_GTS, SGE_CNTXT_OFLD, id,
2597 q->txq[TXQ_OFLD].phys_addr,
2598 q->txq[TXQ_OFLD].size, 0, 1, 0);
2600 printf("error %d from t3_sge_init_ecntxt\n", ret);
2606 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2608 q->txq[TXQ_CTRL].phys_addr,
2609 q->txq[TXQ_CTRL].size,
2610 q->txq[TXQ_CTRL].token, 1, 0);
2612 printf("error %d from t3_sge_init_ecntxt\n", ret);
2617 mtx_unlock_spin(&sc->sge.reg_lock);
2618 t3_update_qset_coalesce(q, p);
2620 refill_fl(sc, &q->fl[0], q->fl[0].size);
2621 refill_fl(sc, &q->fl[1], q->fl[1].size);
2622 refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2624 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2625 V_NEWTIMER(q->rspq.holdoff_tmr));
2630 mtx_unlock_spin(&sc->sge.reg_lock);
2633 t3_free_qset(sc, q);
2639 * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
2640 * Ethernet data. Hardware assistance with various checksums and any VLAN tag
2641 * will also be taken into account here.
2644 t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
2646 struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2647 struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2648 struct ifnet *ifp = pi->ifp;
2650 if (cpl->vlan_valid) {
2651 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2652 m->m_flags |= M_VLANTAG;
2655 m->m_pkthdr.rcvif = ifp;
2657 * adjust after conversion to mbuf chain
2659 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2660 m->m_len -= (sizeof(*cpl) + ethpad);
2661 m->m_data += (sizeof(*cpl) + ethpad);
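/*
 * Editor's illustration of the adjustment above: the receive buffer starts
 * with the CPL_RX_PKT header followed by ethpad bytes of alignment padding,
 * so advancing m_data and shrinking both length fields by
 * sizeof(*cpl) + ethpad leaves the mbuf pointing at the Ethernet header.
 */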
2663 if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
2664 struct ether_header *eh = mtod(m, void *);
2667 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2668 struct ether_vlan_header *evh = mtod(m, void *);
2670 eh_type = evh->evl_proto;
2672 eh_type = eh->ether_type;
2674 if (ifp->if_capenable & IFCAP_RXCSUM &&
2675 eh_type == htons(ETHERTYPE_IP)) {
2676 m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
2677 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2678 m->m_pkthdr.csum_data = 0xffff;
2679 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2680 eh_type == htons(ETHERTYPE_IPV6)) {
2681 m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
2683 m->m_pkthdr.csum_data = 0xffff;
2689 * get_packet - return the next ingress packet buffer from a free list
2690 * @adap: the adapter that received the packet
2691 * @drop_thres: # of remaining buffers before we start dropping packets
2692 * @qs: the qset that the SGE free list holding the packet belongs to
2693 * @mh: the mbuf header, containing pointers to the head and tail of the mbuf chain
2694 * @r: response descriptor
2696 * Get the next packet from a free list and complete setup of the
2697 * mbuf. If the packet is small we make a copy and recycle the
2698 * original buffer, otherwise we use the original buffer itself. If a
2699 * positive drop threshold is supplied packets are dropped and their
2700 * buffers recycled if (a) the number of remaining buffers is under the
2701 * threshold and the packet is too big to copy, or (b) the packet should
2702 * be copied but there is no memory for the copy.
2705 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2706 struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2709 unsigned int len_cq = ntohl(r->len_cq);
2710 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2711 int mask, cidx = fl->cidx;
2712 struct rx_sw_desc *sd = &fl->sdesc[cidx];
2713 uint32_t len = G_RSPD_LEN(len_cq);
2714 uint32_t flags = M_EXT;
2715 uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
2720 mask = fl->size - 1;
2721 prefetch(fl->sdesc[(cidx + 1) & mask].m);
2722 prefetch(fl->sdesc[(cidx + 2) & mask].m);
2723 prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
2724 prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);
2727 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2729 if (recycle_enable && len <= SGE_RX_COPY_THRES &&
2730 sopeop == RSPQ_SOP_EOP) {
2731 if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
2733 cl = mtod(m, void *);
2734 memcpy(cl, sd->rxsd_cl, len);
2735 recycle_rx_buf(adap, fl, fl->cidx);
2736 m->m_pkthdr.len = m->m_len = len;
2738 mh->mh_head = mh->mh_tail = m;
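/*
 * Editor's note: below this point the packet was too large (or recycling is
 * disabled), so the receive cluster itself is detached from the free list
 * and attached to an mbuf (zero-copy); the small-packet path above instead
 * copies into a fresh mbuf and recycles the cluster back onto the free list.
 */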
2743 bus_dmamap_unload(fl->entry_tag, sd->map);
2747 if ((sopeop == RSPQ_SOP_EOP) ||
2748 (sopeop == RSPQ_SOP))
2750 m_init(m, M_NOWAIT, MT_DATA, flags);
2751 if (fl->zone == zone_pack) {
2753 * restore clobbered data pointer
2755 m->m_data = m->m_ext.ext_buf;
2757 m_cljset(m, cl, fl->type);
2766 mh->mh_head = mh->mh_tail = m;
2767 m->m_pkthdr.len = len;
2772 case RSPQ_NSOP_NEOP:
2773 if (mh->mh_tail == NULL) {
2774 log(LOG_ERR, "discarding intermediate descriptor entry\n");
2779 mh->mh_tail->m_next = m;
2781 mh->mh_head->m_pkthdr.len += len;
2784 if (cxgb_debug && m != NULL)
2785 printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2787 if (++fl->cidx == fl->size)
2794 * handle_rsp_cntrl_info - handles control information in a response
2795 * @qs: the queue set corresponding to the response
2796 * @flags: the response control flags
2798 * Handles the control information of an SGE response, such as GTS
2799 * indications and completion credits for the queue set's Tx queues.
2800 * HW coalesces credits; we don't do any extra SW coalescing.
2802 static __inline void
2803 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2805 unsigned int credits;
2808 if (flags & F_RSPD_TXQ0_GTS)
2809 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2811 credits = G_RSPD_TXQ0_CR(flags);
2813 qs->txq[TXQ_ETH].processed += credits;
2815 credits = G_RSPD_TXQ2_CR(flags);
2817 qs->txq[TXQ_CTRL].processed += credits;
2820 if (flags & F_RSPD_TXQ1_GTS)
2821 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2823 credits = G_RSPD_TXQ1_CR(flags);
2825 qs->txq[TXQ_OFLD].processed += credits;
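/*
 * Editor's summary of the mapping above: TXQ0 credits go to the Ethernet
 * queue, TXQ1 credits to the offload queue and TXQ2 credits to the control
 * queue, while the GTS bits clear TXQ_RUNNING so that check_ring_db() knows
 * the corresponding egress context has gone to sleep and must be rung again.
 */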
2830 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2831 unsigned int sleeping)
2837 * process_responses - process responses from an SGE response queue
2838 * @adap: the adapter
2839 * @qs: the queue set to which the response queue belongs
2840 * @budget: how many responses can be processed in this round
2842 * Process responses from an SGE response queue up to the supplied budget.
2843 * Responses include received packets as well as credits and other events
2844 * for the queues that belong to the response queue's queue set.
2845 * A negative budget is effectively unlimited.
2847 * Additionally choose the interrupt holdoff time for the next interrupt
2848 * on this queue. If the system is under memory shortage, use a fairly
2849 * long delay to help recovery.
2852 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2854 struct sge_rspq *rspq = &qs->rspq;
2855 struct rsp_desc *r = &rspq->desc[rspq->cidx];
2856 int budget_left = budget;
2857 unsigned int sleeping = 0;
2858 #if defined(INET6) || defined(INET)
2859 int lro_enabled = qs->lro.enabled;
2861 struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2863 struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
2865 static int last_holdoff = 0;
2866 if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2867 printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2868 last_holdoff = rspq->holdoff_tmr;
2871 rspq->next_holdoff = rspq->holdoff_tmr;
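/*
 * Editor's overview of the loop below: each new response is an async
 * notification, a response carrying immediate data, or a reference to a
 * free-list buffer.  Control flags piggy-backed on the response are handled
 * separately, and once an EOP is seen the assembled packet is handed to a
 * CPL handler, to LRO, or directly to ifp->if_input.
 */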
2873 while (__predict_true(budget_left && is_new_response(r, rspq))) {
2874 int eth, eop = 0, ethpad = 0;
2875 uint32_t flags = ntohl(r->flags);
2876 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2877 uint8_t opcode = r->rss_hdr.opcode;
2879 eth = (opcode == CPL_RX_PKT);
2881 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2885 printf("async notification\n");
2887 if (mh->mh_head == NULL) {
2888 mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
2891 m = m_gethdr(M_NOWAIT, MT_DATA);
2896 memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2897 m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2898 *mtod(m, uint8_t *) = CPL_ASYNC_NOTIF;
2899 opcode = CPL_ASYNC_NOTIF;
2901 rspq->async_notif++;
2903 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2904 struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
2908 rspq->next_holdoff = NOMEM_INTR_DELAY;
2912 if (mh->mh_head == NULL)
2915 mh->mh_tail->m_next = m;
2918 get_imm_packet(adap, r, m);
2919 mh->mh_head->m_pkthdr.len += m->m_len;
2922 } else if (r->len_cq) {
2923 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2925 eop = get_packet(adap, drop_thresh, qs, mh, r);
2927 if (r->rss_hdr.hash_type && !adap->timestamp) {
2928 M_HASHTYPE_SET(mh->mh_head,
2929 M_HASHTYPE_OPAQUE_HASH);
2930 mh->mh_head->m_pkthdr.flowid = rss_hash;
2939 if (flags & RSPD_CTRL_MASK) {
2940 sleeping |= flags & RSPD_GTS_MASK;
2941 handle_rsp_cntrl_info(qs, flags);
2945 rspq->offload_pkts++;
2947 adap->cpl_handler[opcode](qs, r, mh->mh_head);
2949 m_freem(mh->mh_head);
2952 } else if (eth && eop) {
2953 struct mbuf *m = mh->mh_head;
2955 t3_rx_eth(adap, m, ethpad);
2958 * The T304 sends incoming packets on any qset. If LRO
2959 * is also enabled, we could end up sending the packet up
2960 * lro_ctrl->ifp's input. That is incorrect.
2962 * The mbuf's rcvif was derived from the cpl header and
2963 * is accurate. Skip LRO and just use that.
2965 #if defined(INET6) || defined(INET)
2966 skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2968 if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro
2969 && (tcp_lro_rx(lro_ctrl, m, 0) == 0)
2971 /* successfully queued for LRO */
2976 * LRO not enabled, packet unsuitable for LRO,
2977 * or unable to queue. Pass it up right now in either case.
2980 struct ifnet *ifp = m->m_pkthdr.rcvif;
2981 (*ifp->if_input)(ifp, m);
2988 if (__predict_false(++rspq->cidx == rspq->size)) {
2994 if (++rspq->credits >= 64) {
2995 refill_rspq(adap, rspq, rspq->credits);
2998 __refill_fl_lt(adap, &qs->fl[0], 32);
2999 __refill_fl_lt(adap, &qs->fl[1], 32);
3003 #if defined(INET6) || defined(INET)
3005 tcp_lro_flush_all(lro_ctrl);
3009 check_ring_db(adap, qs, sleeping);
3011 mb(); /* commit Tx queue processed updates */
3012 if (__predict_false(qs->txq_stopped > 1))
3015 __refill_fl_lt(adap, &qs->fl[0], 512);
3016 __refill_fl_lt(adap, &qs->fl[1], 512);
3017 budget -= budget_left;
3022 * A helper function that processes responses and issues GTS.
3025 process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
3028 static int last_holdoff = 0;
3030 work = process_responses(adap, rspq_to_qset(rq), -1);
3032 if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
3033 printf("next_holdoff=%d\n", rq->next_holdoff);
3034 last_holdoff = rq->next_holdoff;
3036 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3037 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
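/*
 * Editor's note (an interpretation): the A_SG_GTS write above hands the
 * updated consumer index back to the hardware, programs the holdoff timer
 * chosen during processing, and re-arms interrupts for this response queue.
 */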
3044 cxgb_debugnet_poll_rx(adapter_t *adap, struct sge_qset *qs)
3047 return (process_responses_gts(adap, &qs->rspq));
3052 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3053 * Handles data events from SGE response queues as well as error and other
3054 * async events as they all use the same interrupt pin. We use one SGE
3055 * response queue per port in this mode and protect all response queues with queue 0's lock.
3059 t3b_intr(void *data)
3062 adapter_t *adap = data;
3063 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3065 t3_write_reg(adap, A_PL_CLI, 0);
3066 map = t3_read_reg(adap, A_SG_DATA_INTR);
3071 if (__predict_false(map & F_ERRINTR)) {
3072 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3073 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3074 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3077 mtx_lock(&q0->lock);
3078 for_each_port(adap, i)
3080 process_responses_gts(adap, &adap->sge.qs[i].rspq);
3081 mtx_unlock(&q0->lock);
3085 * The MSI interrupt handler. This needs to handle data events from SGE
3086 * response queues as well as error and other async events as they all use
3087 * the same MSI vector. We use one SGE response queue per port in this mode
3088 * and protect all response queues with queue 0's lock.
3091 t3_intr_msi(void *data)
3093 adapter_t *adap = data;
3094 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3095 int i, new_packets = 0;
3097 mtx_lock(&q0->lock);
3099 for_each_port(adap, i)
3100 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3102 mtx_unlock(&q0->lock);
3103 if (new_packets == 0) {
3104 t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3105 (void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3106 taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3111 t3_intr_msix(void *data)
3113 struct sge_qset *qs = data;
3114 adapter_t *adap = qs->port->adapter;
3115 struct sge_rspq *rspq = &qs->rspq;
3117 if (process_responses_gts(adap, rspq) == 0)
3118 rspq->unhandled_irqs++;
3121 #define QDUMP_SBUF_SIZE (32 * 400)
3123 t3_dump_rspq(SYSCTL_HANDLER_ARGS)
3125 struct sge_rspq *rspq;
3126 struct sge_qset *qs;
3127 int i, err, dump_end, idx;
3129 struct rsp_desc *rspd;
3133 qs = rspq_to_qset(rspq);
3134 if (rspq->rspq_dump_count == 0)
3136 if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3138 "dump count is too large %d\n", rspq->rspq_dump_count);
3139 rspq->rspq_dump_count = 0;
3142 if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3144 "dump start of %d is greater than queue size\n",
3145 rspq->rspq_dump_start);
3146 rspq->rspq_dump_start = 0;
3149 err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3152 err = sysctl_wire_old_buffer(req, 0);
3155 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3157 sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3158 (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3159 ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3160 sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3161 ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3163 sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3164 (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3166 dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3167 for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3168 idx = i & (RSPQ_Q_SIZE-1);
3170 rspd = &rspq->desc[idx];
3171 sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3172 idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3173 rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3174 sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3175 rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3176 be32toh(rspd->len_cq), rspd->intr_gen);
3179 err = sbuf_finish(sb);
3185 t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3187 struct sge_txq *txq;
3188 struct sge_qset *qs;
3189 int i, j, err, dump_end;
3191 struct tx_desc *txd;
3192 uint32_t *WR, wr_hi, wr_lo, gen;
3196 qs = txq_to_qset(txq, TXQ_ETH);
3197 if (txq->txq_dump_count == 0) {
3200 if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3202 "dump count is too large %d\n", txq->txq_dump_count);
3203 txq->txq_dump_count = 1;
3206 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3208 "dump start of %d is greater than queue size\n",
3209 txq->txq_dump_start);
3210 txq->txq_dump_start = 0;
3213 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3216 err = sysctl_wire_old_buffer(req, 0);
3219 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3221 sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3222 (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3223 (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3224 sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
3225 ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3226 ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3227 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3228 txq->txq_dump_start,
3229 (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3231 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3232 for (i = txq->txq_dump_start; i < dump_end; i++) {
3233 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3234 WR = (uint32_t *)txd->flit;
3235 wr_hi = ntohl(WR[0]);
3236 wr_lo = ntohl(WR[1]);
3237 gen = G_WR_GEN(wr_lo);
3239 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3241 for (j = 2; j < 30; j += 4)
3242 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3243 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3246 err = sbuf_finish(sb);
3252 t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3254 struct sge_txq *txq;
3255 struct sge_qset *qs;
3256 int i, j, err, dump_end;
3258 struct tx_desc *txd;
3259 uint32_t *WR, wr_hi, wr_lo, gen;
3262 qs = txq_to_qset(txq, TXQ_CTRL);
3263 if (txq->txq_dump_count == 0) {
3266 if (txq->txq_dump_count > 256) {
3268 "dump count is too large %d\n", txq->txq_dump_count);
3269 txq->txq_dump_count = 1;
3272 if (txq->txq_dump_start > 255) {
3274 "dump start of %d is greater than queue size\n",
3275 txq->txq_dump_start);
3276 txq->txq_dump_start = 0;
3280 err = sysctl_wire_old_buffer(req, 0);
3283 sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3284 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3285 txq->txq_dump_start,
3286 (txq->txq_dump_start + txq->txq_dump_count) & 255);
3288 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3289 for (i = txq->txq_dump_start; i < dump_end; i++) {
3290 txd = &txq->desc[i & (255)];
3291 WR = (uint32_t *)txd->flit;
3292 wr_hi = ntohl(WR[0]);
3293 wr_lo = ntohl(WR[1]);
3294 gen = G_WR_GEN(wr_lo);
3296 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3298 for (j = 2; j < 30; j += 4)
3299 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3300 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3303 err = sbuf_finish(sb);
3309 t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3311 adapter_t *sc = arg1;
3312 struct qset_params *qsp = &sc->params.sge.qset[0];
3314 struct sge_qset *qs;
3315 int i, j, err, nqsets = 0;
3318 if ((sc->flags & FULL_INIT_DONE) == 0)
3321 coalesce_usecs = qsp->coalesce_usecs;
3322 err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3327 if (coalesce_usecs == qsp->coalesce_usecs)
3330 for (i = 0; i < sc->params.nports; i++)
3331 for (j = 0; j < sc->port[i].nqsets; j++)
3334 coalesce_usecs = max(1, coalesce_usecs);
3336 for (i = 0; i < nqsets; i++) {
3337 qs = &sc->sge.qs[i];
3338 qsp = &sc->params.sge.qset[i];
3339 qsp->coalesce_usecs = coalesce_usecs;
3341 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3342 &sc->sge.qs[0].rspq.lock;
3345 t3_update_qset_coalesce(qs, qsp);
3346 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3347 V_NEWTIMER(qs->rspq.holdoff_tmr));
3355 t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
3357 adapter_t *sc = arg1;
3360 if ((sc->flags & FULL_INIT_DONE) == 0)
3363 timestamp = sc->timestamp;
3364 rc = sysctl_handle_int(oidp, &timestamp, arg2, req);
3369 if (timestamp != sc->timestamp) {
3370 t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
3371 timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
3372 sc->timestamp = timestamp;
3379 t3_add_attach_sysctls(adapter_t *sc)
3381 struct sysctl_ctx_list *ctx;
3382 struct sysctl_oid_list *children;
3384 ctx = device_get_sysctl_ctx(sc->dev);
3385 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3387 /* random information */
3388 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3390 CTLFLAG_RD, sc->fw_version,
3391 0, "firmware version");
3392 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3394 CTLFLAG_RD, &sc->params.rev,
3396 SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3398 CTLFLAG_RD, sc->port_types,
3399 0, "type of ports");
3400 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3402 CTLFLAG_RW, &cxgb_debug,
3403 0, "enable verbose debugging output");
3404 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
3405 CTLFLAG_RD, &sc->tunq_coalesce,
3406 "#tunneled packets freed");
3407 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3409 CTLFLAG_RD, &txq_fills,
3410 0, "#times txq overrun");
3411 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3413 CTLFLAG_RD, &sc->params.vpd.cclk,
3414 0, "core clock frequency (in KHz)");
3418 static const char *rspq_name = "rspq";
3419 static const char *txq_names[] =
3427 sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3429 struct port_info *p = arg1;
3435 cxgb_refresh_stats(p);
3436 parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3438 return (sysctl_handle_64(oidp, parg, 0, req));
3442 t3_add_configured_sysctls(adapter_t *sc)
3444 struct sysctl_ctx_list *ctx;
3445 struct sysctl_oid_list *children;
3448 ctx = device_get_sysctl_ctx(sc->dev);
3449 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3451 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3453 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc,
3454 0, t3_set_coalesce_usecs,
3455 "I", "interrupt coalescing timer (us)");
3457 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3459 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc,
3460 0, t3_pkt_timestamp,
3461 "I", "provide packet timestamp instead of connection hash");
3463 for (i = 0; i < sc->params.nports; i++) {
3464 struct port_info *pi = &sc->port[i];
3465 struct sysctl_oid *poid;
3466 struct sysctl_oid_list *poidlist;
3467 struct mac_stats *mstats = &pi->mac.stats;
3469 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3470 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3471 pi->namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3473 poidlist = SYSCTL_CHILDREN(poid);
3474 SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3475 "nqsets", CTLFLAG_RD, &pi->nqsets,
3478 for (j = 0; j < pi->nqsets; j++) {
3479 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3480 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3481 *ctrlqpoid, *lropoid;
3482 struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3483 *txqpoidlist, *ctrlqpoidlist,
3485 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3487 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3489 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3490 qs->namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3492 qspoidlist = SYSCTL_CHILDREN(qspoid);
3494 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3495 CTLFLAG_RD, &qs->fl[0].empty, 0,
3496 "freelist #0 empty");
3497 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3498 CTLFLAG_RD, &qs->fl[1].empty, 0,
3499 "freelist #1 empty");
3501 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3502 rspq_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3504 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3506 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3507 txq_names[0], CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3509 txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3511 ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3512 txq_names[2], CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3513 "ctrlq statistics");
3514 ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3516 lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3517 "lro_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3519 lropoidlist = SYSCTL_CHILDREN(lropoid);
3521 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3522 CTLFLAG_RD, &qs->rspq.size,
3523 0, "#entries in response queue");
3524 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3525 CTLFLAG_RD, &qs->rspq.cidx,
3526 0, "consumer index");
3527 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3528 CTLFLAG_RD, &qs->rspq.credits,
3530 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
3531 CTLFLAG_RD, &qs->rspq.starved,
3532 0, "#times starved");
3533 SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3534 CTLFLAG_RD, &qs->rspq.phys_addr,
3535 "physical address of the queue");
3536 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3537 CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3538 0, "start rspq dump entry");
3539 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3540 CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3541 0, "#rspq entries to dump");
3542 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3543 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3544 &qs->rspq, 0, t3_dump_rspq, "A",
3545 "dump of the response queue");
3547 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
3548 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3549 "#tunneled packets dropped");
3550 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3551 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
3552 0, "#tunneled packets waiting to be sent");
3554 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3555 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_prod_tail,
3556 0, "#tunneled packets queue producer index");
3557 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3558 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_cons_tail,
3559 0, "#tunneled packets queue consumer index");
3561 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
3562 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3563 0, "#tunneled packets processed by the card");
3564 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3565 CTLFLAG_RD, &txq->cleaned,
3566 0, "#tunneled packets cleaned");
3567 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3568 CTLFLAG_RD, &txq->in_use,
3569 0, "#tunneled packet slots in use");
3570 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "frees",
3571 CTLFLAG_RD, &txq->txq_frees,
3572 "#tunneled packets freed");
3573 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3574 CTLFLAG_RD, &txq->txq_skipped,
3575 0, "#tunneled packet descriptors skipped");
3576 SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
3577 CTLFLAG_RD, &txq->txq_coalesced,
3578 "#tunneled packets coalesced");
3579 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3580 CTLFLAG_RD, &txq->txq_enqueued,
3581 0, "#tunneled packets enqueued to hardware");
3582 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3583 CTLFLAG_RD, &qs->txq_stopped,
3584 0, "tx queues stopped");
3585 SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3586 CTLFLAG_RD, &txq->phys_addr,
3587 "physical_address_of the queue");
3588 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3589 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3590 0, "txq generation");
3591 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3592 CTLFLAG_RD, &txq->cidx,
3593 0, "hardware queue cidx");
3594 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3595 CTLFLAG_RD, &txq->pidx,
3596 0, "hardware queue pidx");
3597 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3598 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3599 0, "txq start idx for dump");
3600 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3601 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3602 0, "txq #entries to dump");
3603 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3604 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3605 &qs->txq[TXQ_ETH], 0, t3_dump_txq_eth, "A",
3606 "dump of the transmit queue");
3608 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3609 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3610 0, "ctrlq start idx for dump");
3611 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3612 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3613 0, "ctrl #entries to dump");
3614 SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3615 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3616 &qs->txq[TXQ_CTRL], 0, t3_dump_txq_ctrl, "A",
3617 "dump of the transmit queue");
3619 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_queued",
3620 CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
3621 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3622 CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
3623 SYSCTL_ADD_U64(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3624 CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
3625 SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3626 CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
3629 /* Now add a node for mac stats. */
3630 poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3631 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC statistics");
3632 poidlist = SYSCTL_CHILDREN(poid);
3635 * We (ab)use the length argument (arg2) to pass on the offset
3636 * of the data that we are interested in. This is only required
3637 * for the quad counters that are updated from the hardware (we
3638 * make sure that we return the latest value).
3639 * sysctl_handle_macstat first updates *all* the counters from
3640 * the hardware, and then returns the latest value of the
3641 * requested counter. It would be best to update only the
3642 * requested counter from hardware, but t3_mac_update_stats()
3643 * hides all the register details and we don't want to dive into that here.
3646 #define CXGB_SYSCTL_ADD_QUAD(a) SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3647 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pi, \
3648 offsetof(struct mac_stats, a), sysctl_handle_macstat, "QU", 0)
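/*
 * Editor's note: sysctl_handle_macstat() adds arg2 as a byte offset into
 * p->mac.stats, which is why the macro passes offsetof(struct mac_stats, a)
 * through the length argument.
 */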
3649 CXGB_SYSCTL_ADD_QUAD(tx_octets);
3650 CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3651 CXGB_SYSCTL_ADD_QUAD(tx_frames);
3652 CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3653 CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3654 CXGB_SYSCTL_ADD_QUAD(tx_pause);
3655 CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3656 CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3657 CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3658 CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3659 CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3660 CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3661 CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3662 CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3663 CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3664 CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3665 CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3666 CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3667 CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3668 CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3669 CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3670 CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3671 CXGB_SYSCTL_ADD_QUAD(rx_octets);
3672 CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3673 CXGB_SYSCTL_ADD_QUAD(rx_frames);
3674 CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3675 CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3676 CXGB_SYSCTL_ADD_QUAD(rx_pause);
3677 CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3678 CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3679 CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3680 CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3681 CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3682 CXGB_SYSCTL_ADD_QUAD(rx_runt);
3683 CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3684 CXGB_SYSCTL_ADD_QUAD(rx_short);
3685 CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3686 CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3687 CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3688 CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3689 CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3690 CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3691 CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3692 CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3693 CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3694 CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3695 #undef CXGB_SYSCTL_ADD_QUAD
3697 #define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3698 CTLFLAG_RD, &mstats->a, 0)
3699 CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3700 CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3701 CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3702 CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3703 CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3704 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3705 CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3706 CXGB_SYSCTL_ADD_ULONG(num_toggled);
3707 CXGB_SYSCTL_ADD_ULONG(num_resets);
3708 CXGB_SYSCTL_ADD_ULONG(link_faults);
3709 #undef CXGB_SYSCTL_ADD_ULONG
3714 * t3_get_desc - dump an SGE descriptor for debugging purposes
3715 * @qs: the queue set
3716 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
3717 * @idx: the descriptor index in the queue
3718 * @data: where to dump the descriptor contents
3720 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3721 * size of the descriptor.
3724 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3725 unsigned char *data)
3731 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3733 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3734 return sizeof(struct tx_desc);
3738 if (!qs->rspq.desc || idx >= qs->rspq.size)
3740 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3741 return sizeof(struct rsp_desc);
3745 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3747 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3748 return sizeof(struct rx_desc);