/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
extern int fl_pad;	/* XXXNM */

SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters");
/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int rx_nframes = 64;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN,
    &rx_nframes, 0, "max # of frames received before waking up netmap rx.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop);

int starve_fl = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN,
    &starve_fl, 0, "Don't ring fl db for netmap rx queues.");
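/*
 * Program a netmap ingress queue and its freelist in the hardware with a
 * FW_IQ_CMD, record the returned context ids in the driver's iq/eq maps, set
 * up the congestion manager context (T5 and later), and arm the queue with
 * the configured holdoff timer.
 */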
static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	__be32 v;
	struct adapter *sc = vi->pi->adapter;
	struct sge_params *sp = &sc->params.sge;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	MPASS(!forwarding_intr_to_fwq(sc));
	KASSERT(nm_rxq->intr_idx < sc->intr_count,
	    ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
	v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(vi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(vi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
	    (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
	    (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) |
		V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
	c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
	    sc->chip_params->sge_fl_db;

	if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, sc->sge_gts_reg,
	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

	return (rc);
}
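/*
 * Free the netmap ingress queue and its freelist in the hardware and mark the
 * software context id invalid.
 */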
static int
free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;
	return (rc);
}
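/*
 * Create the netmap egress queue in the hardware with a FW_EQ_ETH_CMD, record
 * its context id, and pick the best doorbell mechanism available to it (user
 * doorbell, write-combined write, or the kernel doorbell register).
 */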
static int
alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = vi->pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (rc);
}
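/*
 * Free the netmap egress queue in the hardware and invalidate its context id.
 */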
static int
free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
	return (rc);
}
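/*
 * Switch the interface into netmap mode: allocate hardware queues for all
 * netmap rx/tx rings that are coming on, seed each freelist with netmap
 * buffers, and point the RSS table at the netmap ingress queues.
 */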
static int
cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct netmap_kring *kring;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int rc, i, j, hwidx;
	struct hw_buf_info *hwb;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	hwb = &sc->sge.hw_buf_info[0];
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->size == NETMAP_BUF_SIZE(na))
			break;
	}
	if (i >= SGE_FLBUF_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}
	hwidx = i;

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(vi, i, nm_rxq) {
		struct irq *irq = &sc->irq[vi->first_intr + i];

		kring = na->rx_rings[nm_rxq->nid];
		if (!nm_kring_pending_on(kring) ||
		    nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID)
			continue;

		alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(j));

		atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON);
	}

	for_each_nm_txq(vi, i, nm_txq) {
		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_on(kring) ||
		    nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID)
			continue;

		alloc_nm_txq_hwq(vi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	if (vi->nm_rss == NULL) {
		vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
		    M_ZERO | M_WAITOK);
	}
	for (i = 0; i < vi->rss_size;) {
		for_each_nm_rxq(vi, j, nm_rxq) {
			vi->nm_rss[i++] = nm_rxq->iq_abs_id;
			if (i == vi->rss_size)
				break;
		}
	}
	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->nm_rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);

	return (rc);
}
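/*
 * Switch the interface out of netmap mode: restore the kernel's RSS table,
 * wait for the hardware to finish with each netmap queue, and free the
 * hardware queues.
 */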
static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	int rc, i;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0)
		return (0);

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "failed to restore RSS config: %d\n", rc);
	nm_clear_native_flags(na);

	for_each_nm_txq(vi, i, nm_txq) {
		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_off(kring) ||
		    nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
			continue;

		/* Wait for hw pidx to catch up ... */
		while (be16toh(nm_txq->pidx) != spg->pidx)
			pause("nmpidx", 1);

		/* ... and then for the cidx. */
		while (spg->pidx != spg->cidx)
			pause("nmcidx", 1);

		free_nm_txq_hwq(vi, nm_txq);
	}
	for_each_nm_rxq(vi, i, nm_rxq) {
		struct irq *irq = &sc->irq[vi->first_intr + i];

		kring = na->rx_rings[nm_rxq->nid];
		if (!nm_kring_pending_off(kring) ||
		    nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID)
			continue;

		while (!atomic_cmpset_int(&irq->nm_state, NM_ON, NM_OFF))
			pause("nmst", 1);

		free_nm_rxq_hwq(vi, nm_rxq);
	}

	return (rc);
}
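/*
 * netmap nm_register callback: turn netmap mode on or off for the interface
 * under a synchronized operation.
 */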
static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, vi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, vi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}
/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{
	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{
	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{
	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}

#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)
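/*
 * Notify the hardware of newly written tx descriptors using the best doorbell
 * mechanism available to this queue, then record how far the doorbell has
 * been rung (dbidx).
 */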
static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;
	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}
	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;
	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}
	nm_txq->dbidx = nm_txq->pidx;
}
int lazy_tx_credit_flush = 1;

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			/*
			 * netmap(4) says "netmap does not use features such as
			 * checksum offloading, TCP segmentation offloading,
			 * encryption, VLAN encapsulation/decapsulation, etc."
			 *
			 * So the ncxl interfaces have tx hardware checksumming
			 * disabled by default.  But you can override netmap by
			 * enabling IFCAP_TXCSUM on the interface manually.
			 */
			cpl->ctrl1 = txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}
/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{
	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}
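/*
 * Walk the completed work requests, up to the cidx reported in the queue's
 * status page, and return the total number of packets whose descriptors can
 * now be reused.
 */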
static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	u_int n = 0;

	hw_cidx = be16toh(hw_cidx);
	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}
static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining, txcsum;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to advance
	 * to kring->rhead.  Note that the driver's pidx moves independent of
	 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
	 * between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	return (0);
}
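/*
 * netmap rxsync: publish the freelist cidx (maintained by the interrupt
 * handler) as netmap's hwtail, and return buffers that userspace is done with
 * to the freelist 8 at a time, ringing the doorbell as credits accumulate.
 */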
static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
	u_int const head = kring->rhead;
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever. */

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) {
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved));
		nm_rxq->fl_db_saved = 0;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at an 8B boundary (fl_pidx) last time around and we
		 * must have a multiple of 8B buffers to give to the freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				MPASS(ba != 0);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				if (starve_fl)
					nm_rxq->fl_db_saved += dbinc;
				else {
					t4_write_reg(sc, sc->sge_kdoorbell_reg,
					    nm_rxq->fl_db_val | V_PIDX(dbinc));
				}
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			if (starve_fl)
				nm_rxq->fl_db_saved += dbinc;
			else {
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
			}
		}
	}

	return (0);
}
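/*
 * Register this VI with netmap.  The advertised ring sizes exclude the space
 * taken up by the status page at the end of each hardware queue.
 */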
void
cxgbe_nm_attach(struct vi_info *vi)
{
	struct port_info *pi;
	struct adapter *sc;
	struct netmap_adapter na;

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	pi = vi->pi;
	sc = pi->adapter;

	bzero(&na, sizeof(na));
	na.ifp = vi->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in the
	 * freelist, and not the number of entries in the iq.  (These two are
	 * not exactly the same due to the space taken up by the status page).
	 */
	na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = vi->nnmtxq;
	na.num_rx_rings = vi->nnmrxq;
	netmap_attach(&na);
}
void
cxgbe_nm_detach(struct vi_info *vi)
{
	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	netmap_detach(vi->ifp);
}
static inline const void *
unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
{
	MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);

	/* data[0] is RSS header */
	return (&cpl->data[1]);
}
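/*
 * Handle an egress update from the SGE: look up the netmap tx queue the
 * update is for and let netmap know that transmissions have completed.
 */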
static void
handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_sge_egr_update *egr)
{
	u_int oq;
	struct sge_nm_txq *nm_txq;

	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}
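/*
 * Interrupt handler for a netmap rx queue.  Processes response queue entries,
 * fills in rx slot lengths, returns freelist credits in multiples of 8, and
 * wakes up netmap rx when enough frames or descriptors have been processed.
 */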
void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;
	struct vi_info *vi = nm_rxq->vi;
	struct adapter *sc = vi->pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	const void *cpl;
	uint32_t lq;
	u_int work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;
	u_int ndesc = 0;	/* desc processed since last cidx update */
	u_int nframes = 0;	/* frames processed since last netmap wakeup */

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;
		cpl = &d->cpl[0];

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				cpl = unwrap_nm_fw6_msg(cpl);
				/* fall through */
			case CPL_SGE_EGR_UPDATE:
				handle_nm_sge_egr_update(sc, ifp, cpl);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
				    sc->params.sge.fl_pktshift;
				ring->slot[fl_cidx].flags = 0;
				if (!(lq & F_RSPD_NEWBUF)) {
					MPASS(black_hole == 2);
					break;
				}
				fl_credits++;
				if (__predict_false(++fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++nframes == rx_nframes) && !black_hole) {
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			netmap_rx_irq(ifp, nm_rxq->nid, &work);
			nframes = 0;
		}

		if (__predict_false(++ndesc == rx_ndesc)) {
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			}
			t4_write_reg(sc, sc->sge_gts_reg,
			    V_CIDXINC(ndesc) |
			    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc = 0;
		}
	}

	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else if (nframes > 0)
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}