/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

32 #include "opt_inet6.h"
35 #include <sys/param.h>
36 #include <sys/eventhandler.h>
38 #include <sys/types.h>
40 #include <sys/selinfo.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <machine/bus.h>
44 #include <net/ethernet.h>
46 #include <net/if_media.h>
47 #include <net/if_var.h>
48 #include <net/if_clone.h>
49 #include <net/if_types.h>
50 #include <net/netmap.h>
51 #include <dev/netmap/netmap_kern.h>
53 #include "common/common.h"
54 #include "common/t4_regs.h"
55 #include "common/t4_regs_values.h"
extern int fl_pad;	/* XXXNM */
extern int spg_len;	/* XXXNM */
extern int fl_pktshift;	/* XXXNM */

/* netmap ifnet routines */
static void cxgbe_nm_init(void *);
static int cxgbe_nm_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_nm_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_nm_qflush(struct ifnet *);

static int cxgbe_nm_init_synchronized(struct port_info *);
static int cxgbe_nm_uninit_synchronized(struct port_info *);

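/*
 * Note: the configuration paths below serialize against each other via
 * begin_synchronized_op()/end_synchronized_op(); the *_synchronized()
 * helpers document that requirement with ASSERT_SYNCHRONIZED_OP().
 */
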
static void
cxgbe_nm_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nminit") != 0)
		return;
	cxgbe_nm_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_nm_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->nm_ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return (0);	/* already running */

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		return (rc);	/* error message displayed already */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return (rc);
}

static int
cxgbe_nm_uninit_synchronized(struct port_info *pi)
{
#ifdef INVARIANTS
	struct adapter *sc = pi->adapter;
#endif
	struct ifnet *ifp = pi->nm_ifp;

	ASSERT_SYNCHRONIZED_OP(sc);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	return (0);
}

static int
cxgbe_nm_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	MPASS(pi->nm_ifp == ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MTU);
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nflg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->nmif_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_nm_init_synchronized(pi);
			pi->nmif_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_nm_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4nmulti");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->nm_media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_nm_transmit(struct ifnet *ifp, struct mbuf *m)
{

	m_freem(m);
	return (0);
}

static void
cxgbe_nm_qflush(struct ifnet *ifp)
{

	return;
}

static int
alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	__be32 v;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, pi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc / 8 * EQ_ESIZE + spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	if (pi->flags & INTR_NM_RXQ) {
		KASSERT(nm_rxq->intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__,
		    nm_rxq->intr_idx));
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	} else {
		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
		    F_FW_IQ_CMD_IQANDST;
	}
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(pi->nm_viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(pi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
	    (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
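	/*
	 * The firmware expects the freelist size in units of one 64B
	 * descriptor, and each such descriptor holds 8 freelist entries
	 * (8B bus addresses each); the status page lives in the same ring
	 * and accounts for spg_len more bytes.
	 */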
	c.fl0size = htobe16(na->num_rx_desc / 8 + spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == pi->qsize_rxq - spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = F_DBPRIO | V_QID(nm_rxq->fl_cntxt_id) | V_PIDX(0);
	if (is_t5(sc))
		nm_rxq->fl_db_val |= F_DBTYPE;

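	/*
	 * fl_db_val is the constant part of the freelist doorbell; the number
	 * of new 8-entry chunks is OR'ed in via V_PIDX() when the doorbell is
	 * rung.  T5 doorbells additionally carry the DBTYPE bit.
	 */
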
	if (is_t5(sc) && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
	    V_SEINTARM(V_QINTR_TIMER_IDX(1)) |
	    V_INGRESSQID(nm_rxq->iq_cntxt_id));

	return (rc);
}

static int
free_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = pi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	return (rc);
}

static int
alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
	    V_FW_EQ_ETH_CMD_VIID(pi->nm_viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(pi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

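		/*
		 * The BAR2 user doorbell region is divided into pages, with
		 * 2^eq_s_qpp egress queues sharing each page.  The high bits
		 * of cntxt_id select the page; the low s_qpp bits are the
		 * queue's relative qid within that page.
		 */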
		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (rc);
}

static int
free_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = pi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	return (rc);
}

static int
cxgbe_netmap_on(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int rc, i, j, hwidx;
	struct hw_buf_info *hwb;
	uint16_t *rss;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((pi->flags & PORT_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	hwb = &sc->sge.hw_buf_info[0];
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->size == NETMAP_BUF_SIZE(na))
			break;
	}
	if (i >= SGE_FLBUF_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}
	hwidx = i;

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(pi, i, nm_rxq) {
		alloc_nm_rxq_hwq(pi, nm_rxq, tnl_cong(pi));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx - 8; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    nm_rxq->fl_db_val | V_PIDX(j));
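
		/*
		 * Note that the fill loop above stopped 8 short of fl_sidx:
		 * the hardware pidx advances in chunks of 8 and the freelist
		 * is never driven completely full, so that pidx == cidx can
		 * only mean an empty freelist.
		 */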
	}

	for_each_nm_txq(pi, i, nm_txq) {
		alloc_nm_txq_hwq(pi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	rss = malloc(pi->nm_rss_size * sizeof (*rss), M_CXGBE, M_ZERO |
	    M_WAITOK);
	for (i = 0; i < pi->nm_rss_size;) {
		for_each_nm_rxq(pi, j, nm_rxq) {
			rss[i++] = nm_rxq->iq_abs_id;
			if (i == pi->nm_rss_size)
				break;
		}
	}
	rc = -t4_config_rss_range(sc, sc->mbox, pi->nm_viid, 0, pi->nm_rss_size,
	    rss, pi->nm_rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);
	free(rss, M_CXGBE);

	rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, true, true);
	if (rc != 0)
		if_printf(ifp, "netmap enable_vi failed: %d\n", rc);

	return (rc);
}

static int
cxgbe_netmap_off(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	int rc, i;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);

	rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, false, false);
	if (rc != 0)
		if_printf(ifp, "netmap disable_vi failed: %d\n", rc);
	nm_clear_native_flags(na);

	for_each_nm_txq(pi, i, nm_txq) {
		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

		/* Wait for hw pidx to catch up ... */
		while (be16toh(nm_txq->pidx) != spg->pidx)
			pause("nmpidx", 1);

		/* ... and then for the cidx. */
		while (spg->pidx != spg->cidx)
			pause("nmcidx", 1);

		free_nm_txq_hwq(pi, nm_txq);
	}
	for_each_nm_rxq(pi, i, nm_rxq) {
		free_nm_rxq_hwq(pi, nm_rxq);
	}

	return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, pi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, pi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}

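/*
 * A type1 fw_eth_tx_pkts_wr is one 16B WR header followed, per frame, by a
 * 16B cpl_tx_pkt_core and a 16B single-entry ulptx_sgl (32B per frame).  A
 * 64B hardware descriptor holds four such 16B units, which is where the
 * conversions below come from: n descriptors carry floor((4n - 1) / 2) =
 * 2n - 1 frames, and n frames occupy 2n + 1 units, i.e. ceil((2n + 1) / 4)
 * = (n + 2) / 2 descriptors.
 */
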
/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}

#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}

	nm_txq->dbidx = nm_txq->pidx;
}

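/*
 * Set to 0 to have every txsync end with an immediate EQUIQ/EQUEQ request on
 * its final work request.  Left at 1 by default: credit flushes then ride on
 * the periodic requests that cxgbe_nm_tx already embeds in the WR stream.
 */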
int lazy_tx_credit_flush = 1;

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			/*
			 * netmap(4) says "netmap does not use features such as
			 * checksum offloading, TCP segmentation offloading,
			 * encryption, VLAN encapsulation/decapsulation, etc."
			 *
			 * So the ncxl interfaces have tx hardware checksumming
			 * disabled by default.  But you can override netmap by
			 * enabling IFCAP_TXCSUM on the interface manually.
			 */
			cpl->ctrl1 = txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}

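/*
 * Note: cidx == pidx is taken to mean an empty ring, so one descriptor is
 * always held in reserve; that is where the -1 in two of the cases below
 * comes from.
 */
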
/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}

static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[pi->first_nm_txq + kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining, txcsum;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to advance
	 * to kring->rhead.  Note that the driver's pidx moves independent of
	 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
	 * between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	nm_txsync_finalize(kring);

	return (0);
}

static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[pi->first_nm_rxq + kring->ring_id];
	u_int const head = nm_rxsync_prologue(kring);
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at an 8B boundary (fl_pidx) last time around and we
		 * must have a multiple of 8B buffers to give to the freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
			    nm_rxq->fl_db_val | V_PIDX(dbinc));
		}
	}

	nm_rxsync_finalize(kring);

	return (0);
}

/*
 * Create an ifnet solely for netmap use and register it with the kernel.
 */
int
create_netmap_ifnet(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct netmap_adapter na;
	struct ifnet *ifp;
	device_t dev = pi->dev;
	uint8_t mac[ETHER_ADDR_LEN];
	int rc;

	if (pi->nnmtxq <= 0 || pi->nnmrxq <= 0)
		return (0);
	MPASS(pi->nm_ifp == NULL);

	/*
	 * Allocate a virtual interface exclusively for netmap use.  Give it the
	 * MAC address normally reserved for use by a TOE interface.  (The TOE
	 * driver on FreeBSD doesn't use it).
	 */
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, &mac[0],
	    &pi->nm_rss_size, FW_VI_FUNC_OFLD, 0);
	if (rc < 0) {
		device_printf(dev, "unable to allocate netmap virtual "
		    "interface for port %d: %d\n", pi->port_id, -rc);
		return (-rc);
	}
	pi->nm_viid = rc;
	pi->nm_xact_addr_filt = -1;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate netmap ifnet\n");
		return (ENOMEM);
	}
	pi->nm_ifp = ifp;
	ifp->if_softc = pi;

	if_initname(ifp, is_t4(pi->adapter) ? "ncxgbe" : "ncxl",
	    device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_nm_init;
	ifp->if_ioctl = cxgbe_nm_ioctl;
	ifp->if_transmit = cxgbe_nm_transmit;
	ifp->if_qflush = cxgbe_nm_qflush;

	/*
	 * netmap(4) says "netmap does not use features such as checksum
	 * offloading, TCP segmentation offloading, encryption, VLAN
	 * encapsulation/decapsulation, etc."
	 *
	 * By default we comply with the statement above.  But we do declare the
	 * ifnet capable of L3/L4 checksumming so that a user can override
	 * netmap and have the hardware do the L3/L4 checksums.
	 */
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU |
	    IFCAP_HWCSUM_IPV6;
	ifp->if_capenable = 0;
	ifp->if_hwassist = 0;

	/* nm_media has already been setup by the caller */

	ether_ifattach(ifp, mac);

	/*
	 * Register with netmap in the kernel.
	 */
	bzero(&na, sizeof(na));

	na.ifp = pi->nm_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = pi->qsize_txq - spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in the
	 * freelist, and not the number of entries in the iq.  (These two are
	 * not exactly the same due to the space taken up by the status page).
	 */
	na.num_rx_desc = (pi->qsize_rxq / 8) * 8;
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = pi->nnmtxq;
	na.num_rx_rings = pi->nnmrxq;
	netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */

	return (0);
}

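/*
 * The ifnet created above appears alongside the main port as ncxgbe<N> or
 * ncxl<N>.  A netmap application opens it by name, e.g.
 * nm_open("netmap:ncxl0", ...), at which point cxgbe_netmap_reg() moves the
 * queues into netmap mode.
 */
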
int
destroy_netmap_ifnet(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;

	if (pi->nm_ifp == NULL)
		return (0);

	netmap_detach(pi->nm_ifp);
	ifmedia_removeall(&pi->nm_media);
	ether_ifdetach(pi->nm_ifp);
	if_free(pi->nm_ifp);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->nm_viid);

	return (0);
}

static void
handle_nm_fw6_msg(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_fw6_msg *cpl)
{
	const struct cpl_sge_egr_update *egr;
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	if (cpl->type != FW_TYPE_RSSCPL && cpl->type != FW6_TYPE_RSSCPL)
		panic("%s: FW_TYPE 0x%x on nm_rxq.", __func__, cpl->type);

	/* data[0] is RSS header */
	egr = (const void *)&cpl->data[1];
	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}

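/*
 * Interrupt handler for a netmap rx queue.  Received frames and firmware
 * messages (including the egress updates handled above) all arrive on this
 * iq; rx buffers are handed to netmap via netmap_rx_irq() once the loop is
 * done.
 */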
void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;
	struct port_info *pi = nm_rxq->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->nm_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	uint32_t lq;
	u_int n = 0;
	u_int processed = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:
			/* No buffer packing so new buf every time */
			MPASS(lq & F_RSPD_NEWBUF);

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				handle_nm_fw6_msg(sc, ifp,
				    (const void *)&d->cpl[0]);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - fl_pktshift;
				ring->slot[fl_cidx].flags = kring->nkr_slot_flags;
				if (__predict_false(++fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++n == 64)) {	/* XXXNM: tune */
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			n = 0;
		}
	}
	if (fl_cidx != nm_rxq->fl_cidx) {
		atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
		netmap_rx_irq(ifp, nm_rxq->nid, &processed);
	}
	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(n) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(1)));
}
#endif