/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
extern int fl_pad;	/* XXXNM */

SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters");

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop);
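/*
 * Example usage of the knobs above (standard FreeBSD tunable/sysctl
 * semantics; the values shown are illustrative).  The CTLFLAG_RDTUN knobs
 * can only be set from the loader, e.g. in /boot/loader.conf:
 *
 *	hw.cxgbe.nm_black_hole="1"
 *	hw.cxgbe.nm_cong_drop="0"
 *
 * The CTLFLAG_RWTUN knobs may also be changed on a running system:
 *
 *	# sysctl hw.cxgbe.nm_holdoff_tmr_idx=5
 *	# sysctl hw.cxgbe.nm_rx_ndesc=128
 */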
/* netmap ifnet routines */
static void cxgbe_nm_init(void *);
static int cxgbe_nm_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_nm_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_nm_qflush(struct ifnet *);

static int cxgbe_nm_init_synchronized(struct vi_info *);
static int cxgbe_nm_uninit_synchronized(struct vi_info *);

/* T4 netmap VI (ncxgbe) interface */
static int ncxgbe_probe(device_t);
static int ncxgbe_attach(device_t);
static int ncxgbe_detach(device_t);
static device_method_t ncxgbe_methods[] = {
	DEVMETHOD(device_probe,		ncxgbe_probe),
	DEVMETHOD(device_attach,	ncxgbe_attach),
	DEVMETHOD(device_detach,	ncxgbe_detach),
	DEVMETHOD_END
};
static driver_t ncxgbe_driver = {
	"ncxgbe",
	ncxgbe_methods,
	sizeof(struct vi_info)
};

/* T5 netmap VI (ncxl) interface */
static driver_t ncxl_driver = {
	"ncxl",
	ncxgbe_methods,
	sizeof(struct vi_info)
};
static void
cxgbe_nm_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nminit") != 0)
		return;
	cxgbe_nm_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}
static int
cxgbe_nm_init_synchronized(struct vi_info *vi)
{
	struct adapter *sc = vi->pi->adapter;
	struct ifnet *ifp = vi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return (0);	/* already running */

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(vi->flags & VI_INIT_DONE) &&
	    ((rc = vi_full_init(vi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		return (rc);	/* error message displayed already */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	callout_reset(&vi->tick, hz, vi_tick, vi);

	return (rc);
}
static int
cxgbe_nm_uninit_synchronized(struct vi_info *vi)
{
	struct adapter *sc = vi->pi->adapter;
	struct ifnet *ifp = vi->ifp;

	ASSERT_SYNCHRONIZED_OP(sc);

	callout_stop(&vi->tick);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	return (0);
}
static int
cxgbe_nm_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	MPASS(vi->ifp == ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MTU);
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nflg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_nm_init_synchronized(vi);
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_nm_uninit_synchronized(vi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4nmulti");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
static int
cxgbe_nm_transmit(struct ifnet *ifp, struct mbuf *m)
{

	m_freem(m);	/* netmap-only ifnet; no regular tx path */
	return (0);
}

static void
cxgbe_nm_qflush(struct ifnet *ifp)
{

	return;
}

static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	uint32_t v;
	struct adapter *sc = vi->pi->adapter;
	struct sge_params *sp = &sc->params.sge;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	if (vi->flags & INTR_RXQ) {
		KASSERT(nm_rxq->intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__,
		    nm_rxq->intr_idx));
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	} else {
		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
		    F_FW_IQ_CMD_IQANDST;
	}
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(vi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(vi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
	    (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
	    (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
	    V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
	c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
	    sc->chip_params->sge_fl_db;

	if (is_t5(sc) && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

	return (rc);
}
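/*
 * A note on 'cong' above (behavior as read from this function; the call
 * site passes tnl_cong(vi->pi, nm_cong_drop)): a negative value disables
 * congestion feedback for the queue entirely -- FL0CONGEN is not set and
 * no congestion manager context is written.  Otherwise the low bits of
 * 'cong' act as a channel map, and the loop sets the low bit of one nibble
 * in 'val' per congested channel.
 */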
static int
free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	return (rc);
}
static int
alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = vi->pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (0);
}

static int
free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	return (rc);
}
static int
cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int rc, i, j, hwidx;
	struct hw_buf_info *hwb;
	uint16_t *rss;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	hwb = &sc->sge.hw_buf_info[0];
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->size == NETMAP_BUF_SIZE(na))
			break;
	}
	if (i >= SGE_FLBUF_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}
	hwidx = i;

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(vi, i, nm_rxq) {
		alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    nm_rxq->fl_db_val | V_PIDX(j));
	}

	for_each_nm_txq(vi, i, nm_txq) {
		alloc_nm_txq_hwq(vi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO |
	    M_WAITOK);
	for (i = 0; i < vi->rss_size;) {
		for_each_nm_rxq(vi, j, nm_rxq) {
			rss[i++] = nm_rxq->iq_abs_id;
			if (i == vi->rss_size)
				break;
		}
	}
	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);
	free(rss, M_CXGBE);

	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
	if (rc != 0)
		if_printf(ifp, "netmap enable_vi failed: %d\n", rc);

	return (rc);
}
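/*
 * The nested loops above fill the VI's RSS indirection table round-robin
 * with the absolute IDs of the netmap rx queues.  For example (hypothetical
 * values): with rss_size 128 and two netmap rxqs whose iq_abs_id are 20 and
 * 21, the table becomes 20, 21, 20, 21, ... so hardware hash results spread
 * incoming flows evenly across both queues.
 */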
static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	int rc, i;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0)
		return (0);

	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
	if (rc != 0)
		if_printf(ifp, "netmap disable_vi failed: %d\n", rc);
	nm_clear_native_flags(na);

	for_each_nm_txq(vi, i, nm_txq) {
		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

		/* Wait for hw pidx to catch up ... */
		while (be16toh(nm_txq->pidx) != spg->pidx)
			pause("nmpidx", 1);

		/* ... and then for the cidx. */
		while (spg->pidx != spg->cidx)
			pause("nmcidx", 1);

		free_nm_txq_hwq(vi, nm_txq);
	}
	for_each_nm_rxq(vi, i, nm_rxq) {
		free_nm_rxq_hwq(vi, nm_rxq);
	}

	return (rc);
}
static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, vi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, vi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}
/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}
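/*
 * Worked example for the helpers above, assuming SGE_MAX_WR_NDESC is 8
 * (a 512B work request built out of 64B hardware descriptors): the WR
 * header takes 16B and each packet needs a 16B cpl_tx_pkt_core plus a 16B
 * single-entry ulptx_sgl, so n descriptors (64n bytes) carry at most
 * (64n - 16) / 32 = 2n - 1 packets.  Hence ndesc_to_npkt(8) == 15 ==
 * MAX_NPKT_IN_TYPE1_WR, npkt_to_ndesc(15) == 8, and npkt_to_len16(15) ==
 * 31, i.e. 496B, which fits within the 512B WR.
 */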
#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);	/* WCWR pushes one desc only */
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}

	nm_txq->dbidx = nm_txq->pidx;
}
int lazy_tx_credit_flush = 1;

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			/*
			 * netmap(4) says "netmap does not use features such as
			 * checksum offloading, TCP segmentation offloading,
			 * encryption, VLAN encapsulation/decapsulation, etc."
			 *
			 * So the ncxl interfaces have tx hardware checksumming
			 * disabled by default.  But you can override netmap by
			 * enabling IFCAP_TXCSUM on the interface manually.
			 */
			cpl->ctrl1 = txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}
/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}
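/*
 * Example: with sidx = 1024, pidx = 100, and cidx = 50, the free space
 * wraps around but only the 924 descriptors from pidx up to sidx are
 * contiguous, so 924 is returned (pidx may wrap to 0 on a later call
 * because cidx > 0).  With cidx = 0 the last descriptor must stay unused
 * (sidx - pidx - 1), which keeps a completely full ring distinguishable
 * from an empty one.
 */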
static u_int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	u_int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}
static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_txq + kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining, txcsum;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to advance
	 * to kring->rhead.  Note that the driver's pidx moves independent of
	 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
	 * between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	return (0);
}
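/*
 * Example of the descriptor math in txsync above, again assuming
 * SGE_MAX_WR_NDESC is 8 (so MAX_NPKT_IN_TYPE1_WR is 15): for
 * npkt_remaining = 20, d = (20 / 15) * 8 + npkt_to_ndesc(20 % 15) =
 * 8 + 3 = 11 descriptors.  If only 10 contiguous descriptors are
 * available, n = (10 / 8) * 15 + ndesc_to_npkt(10 % 8) = 15 + 3 = 18
 * packets, which cxgbe_nm_tx writes as one full 8-descriptor WR of 15
 * packets plus a 2-descriptor WR of 3 packets; the remaining 2 packets
 * go out on the next pass through the loop.
 */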
static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_rxq + kring->ring_id];
	u_int const head = kring->rhead;
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever. */

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at an 8B boundary (fl_pidx) last time around and we
		 * must have a multiple of 8B buffers to give to the freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				MPASS(ba != 0);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
			    nm_rxq->fl_db_val | V_PIDX(dbinc));
		}
	}

	return (0);
}
static int
ncxgbe_probe(device_t dev)
{
	char buf[128];
	struct vi_info *vi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d netmap vi", vi->pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

static int
ncxgbe_attach(device_t dev)
{
	struct vi_info *vi;
	struct port_info *pi;
	struct adapter *sc;
	struct netmap_adapter na;
	struct ifnet *ifp;
	int rc;

	vi = device_get_softc(dev);
	pi = vi->pi;
	sc = pi->adapter;

	/*
	 * Allocate a virtual interface exclusively for netmap use.  Give it the
	 * MAC address normally reserved for use by a TOE interface.  (The TOE
	 * driver on FreeBSD doesn't use it).
	 */
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
	    vi->hw_addr, &vi->rss_size, FW_VI_FUNC_OFLD, 0);
	if (rc < 0) {
		device_printf(dev, "unable to allocate netmap virtual "
		    "interface for port %d: %d\n", pi->port_id, -rc);
		return (-rc);
	}
	vi->viid = rc;
	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate netmap ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_nm_init;
	ifp->if_ioctl = cxgbe_nm_ioctl;
	ifp->if_transmit = cxgbe_nm_transmit;
	ifp->if_qflush = cxgbe_nm_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	/*
	 * netmap(4) says "netmap does not use features such as checksum
	 * offloading, TCP segmentation offloading, encryption, VLAN
	 * encapsulation/decapsulation, etc."
	 *
	 * By default we comply with the statement above.  But we do declare the
	 * ifnet capable of L3/L4 checksumming so that a user can override
	 * netmap and have the hardware do the L3/L4 checksums.
	 */
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU |
	    IFCAP_HWCSUM_IPV6;
	ifp->if_capenable = 0;
	ifp->if_hwassist = 0;

	/* vi->media has already been setup by the caller */

	ether_ifattach(ifp, vi->hw_addr);

	device_printf(dev, "%d txq, %d rxq (netmap)\n", vi->ntxq, vi->nrxq);

	/*
	 * Register with netmap in the kernel.
	 */
	bzero(&na, sizeof(na));

	na.ifp = ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in the
	 * freelist, and not the number of entries in the iq.  (These two are
	 * not exactly the same due to the space taken up by the status page).
	 */
	na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = vi->ntxq;
	na.num_rx_rings = vi->nrxq;
	netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */

	return (0);
}
static int
ncxgbe_detach(device_t dev)
{
	struct vi_info *vi;
	struct adapter *sc;

	vi = device_get_softc(dev);
	sc = vi->pi->adapter;

	doom_vi(sc, vi);

	netmap_detach(vi->ifp);
	ether_ifdetach(vi->ifp);
	cxgbe_nm_uninit_synchronized(vi);
	callout_drain(&vi->tick);

	ifmedia_removeall(&vi->media);
	if_free(vi->ifp);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);

	end_synchronized_op(sc, 0);

	return (0);
}
static void
handle_nm_fw6_msg(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_fw6_msg *cpl)
{
	const struct cpl_sge_egr_update *egr;
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	if (cpl->type != FW_TYPE_RSSCPL && cpl->type != FW6_TYPE_RSSCPL)
		panic("%s: FW_TYPE 0x%x on nm_rxq.", __func__, cpl->type);

	/* data[0] is RSS header */
	egr = (const void *)&cpl->data[1];
	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}
void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;
	struct vi_info *vi = nm_rxq->vi;
	struct adapter *sc = vi->pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	uint32_t lq;
	u_int n = 0, work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:
			if (black_hole != 2) {
				/* No buffer packing so new buf every time */
				MPASS(lq & F_RSPD_NEWBUF);
			}

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				handle_nm_fw6_msg(sc, ifp,
				    (const void *)&d->cpl[0]);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
				    sc->params.sge.fl_pktshift;
				ring->slot[fl_cidx].flags = kring->nkr_slot_flags;
				fl_cidx += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				fl_credits += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				if (__predict_false(fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++n == rx_ndesc)) {
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			} else if (!black_hole) {
				netmap_rx_irq(ifp, nm_rxq->nid, &work);
				MPASS(work != 0);
			}
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			n = 0;
		}
	}

	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(n) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
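/*
 * A note on the generation-bit handling in t4_nm_intr: each response
 * descriptor carries a generation bit, and the loop consumes entries only
 * while that bit matches the expected value in nm_rxq->iq_gen.  The
 * expectation is flipped (iq_gen ^= F_RSPD_GEN) every time iq_cidx wraps,
 * so descriptors left over from the previous trip around the ring are
 * never mistaken for freshly written work.
 */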
static devclass_t ncxgbe_devclass, ncxl_devclass;

DRIVER_MODULE(ncxgbe, cxgbe, ncxgbe_driver, ncxgbe_devclass, 0, 0);
MODULE_VERSION(ncxgbe, 1);

DRIVER_MODULE(ncxl, cxl, ncxl_driver, ncxl_devclass, 0, 0);
MODULE_VERSION(ncxl, 1);
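/*
 * Example usage (a sketch; the interface name is illustrative): once the
 * driver is loaded, the netmap VIs show up as additional ifnets, e.g.
 * ncxl0 alongside cxl0 on a T5 card.  Bring one up and drive it with the
 * stock netmap tools:
 *
 *	# ifconfig ncxl0 up
 *	# pkt-gen -i ncxl0 -f tx
 *
 * Any netmap(4) application works the same way, e.g. by calling
 * nm_open("netmap:ncxl0", ...) from <net/netmap_user.h>.
 */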