/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef DEV_NETMAP
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

extern int fl_pad;	/* XXXNM */
extern int spg_len;	/* XXXNM */
extern int fl_pktshift;	/* XXXNM */

/* netmap ifnet routines */
static void cxgbe_nm_init(void *);
static int cxgbe_nm_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_nm_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_nm_qflush(struct ifnet *);

static int cxgbe_nm_init_synchronized(struct port_info *);
static int cxgbe_nm_uninit_synchronized(struct port_info *);

static void
cxgbe_nm_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nminit") != 0)
		return;
	cxgbe_nm_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_nm_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->nm_ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return (0);	/* already running */

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		return (rc);	/* error message displayed already */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return (rc);
}

static int
cxgbe_nm_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->nm_ifp;

	ASSERT_SYNCHRONIZED_OP(sc);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	return (0);
}

static int
cxgbe_nm_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	MPASS(pi->nm_ifp == ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MTU);
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nflg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->nmif_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_nm_init_synchronized(pi);
			pi->nmif_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_nm_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:	/* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4nmulti");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->nm_media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_nm_transmit(struct ifnet *ifp, struct mbuf *m)
{

	m_freem(m);
	return (0);
}

static void
cxgbe_nm_qflush(struct ifnet *ifp)
{

	return;
}

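/*
 * Note: the two routines above are deliberate stubs.  This ifnet exists only
 * for netmap, so the kernel's if_transmit path just frees the mbuf and
 * if_qflush has nothing to drain; all real transmission happens through
 * cxgbe_netmap_txsync further down.
 */
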
static int
alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
{
	int rc, cntxt_id;
	__be32 v;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, pi->qsize_rxq * RX_IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * RX_FL_ESIZE + spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	if (pi->flags & INTR_NM_RXQ) {
		KASSERT(nm_rxq->intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__,
		    nm_rxq->intr_idx));
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	} else {
		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
		    F_FW_IQ_CMD_IQANDST;
	}
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(pi->nm_viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(RX_IQ_ESIZE) - 4));
	c.iqsize = htobe16(pi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
		F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
		(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
		V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
	c.fl0size = htobe16(na->num_rx_desc + spg_len / RX_FL_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == pi->qsize_rxq - spg_len / RX_IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = F_DBPRIO | V_QID(nm_rxq->fl_cntxt_id) | V_PIDX(0);
	if (is_t5(sc))
		nm_rxq->fl_db_val |= F_DBTYPE;

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(F_QINTR_CNT_EN) |
	    V_INGRESSQID(nm_rxq->iq_cntxt_id));

	return (rc);
}

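/*
 * The final SGE_PF_GTS write above arms the freshly created iq:
 * SEINTARM(QINTR_CNT_EN) enables its counter-based interrupt and INGRESSQID
 * selects the queue, so it starts delivering interrupts right away.
 */
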
static int
free_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = pi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	return (rc);
}

static int
alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->nm_viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(pi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (0);
}

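/*
 * User-doorbell layout, for reference: each BAR2 page serves 2^eq_s_qpp
 * egress queues, and each queue gets a 128B (UDBS_SEG_SIZE) segment within
 * its page.  If the queue's segment lies within the page, udb points
 * straight at the segment and udb_qid stays 0; otherwise the doorbell write
 * must carry the queue's relative qid, and the write-combined WR push
 * (DOORBELL_WCWR) is not usable.
 */
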
static int
free_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = pi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	return (rc);
}

static int
cxgbe_netmap_on(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int rc, i, j, hwidx;
	struct hw_buf_info *hwb;
	uint16_t *rss;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((pi->flags & PORT_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	hwb = &sc->sge.hw_buf_info[0];
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->size == NETMAP_BUF_SIZE)
			break;
	}
	if (i >= SGE_FLBUF_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE);
		return (ENXIO);
	}
	hwidx = i;

	/* Must set caps before calling netmap_reset */
	na->na_flags |= (NAF_NATIVE_ON | NAF_NETMAP_ON);
	ifp->if_capenable |= IFCAP_NETMAP;

	for_each_nm_rxq(pi, i, nm_rxq) {
		alloc_nm_rxq_hwq(pi, nm_rxq);
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx - 8; j++) {
			uint64_t ba;

			PNMB(&slot[j], &ba);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		nm_rxq->fl_pidx = j;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    nm_rxq->fl_db_val | V_PIDX(j));
	}

	for_each_nm_txq(pi, i, nm_txq) {
		alloc_nm_txq_hwq(pi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	rss = malloc(pi->nm_rss_size * sizeof(*rss), M_CXGBE, M_ZERO |
	    M_WAITOK);
	for (i = 0; i < pi->nm_rss_size;) {
		for_each_nm_rxq(pi, j, nm_rxq) {
			rss[i++] = nm_rxq->iq_abs_id;
			if (i == pi->nm_rss_size)
				break;
		}
	}
	rc = -t4_config_rss_range(sc, sc->mbox, pi->nm_viid, 0, pi->nm_rss_size,
	    rss, pi->nm_rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);
	free(rss, M_CXGBE);

	rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, true, true);
	if (rc != 0)
		if_printf(ifp, "netmap enable_vi failed: %d\n", rc);

	return (rc);
}

static int
cxgbe_netmap_off(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	int rc, i;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);

	rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, false, false);
	if (rc != 0)
		if_printf(ifp, "netmap disable_vi failed: %d\n", rc);
	na->na_flags &= ~(NAF_NATIVE_ON | NAF_NETMAP_ON);
	ifp->if_capenable &= ~IFCAP_NETMAP;

	/*
	 * XXXNM: We need to make sure that the tx queues are quiet and won't
	 * request any more SGE_EGR_UPDATEs.
	 */

	for_each_nm_txq(pi, i, nm_txq) {
		free_nm_txq_hwq(pi, nm_txq);
	}
	for_each_nm_rxq(pi, i, nm_rxq) {
		free_nm_rxq_hwq(pi, nm_rxq);
	}

	return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, pi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, pi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}

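/*
 * Sizing arithmetic, for reference: a type1 FW_ETH_TX_PKTS_WR carries a 16B
 * header followed, per packet, by a 16B cpl_tx_pkt_core and a 16B
 * single-entry ulptx_sgl, i.e. 16 + 32n bytes for n packets.  In 16B units
 * that is len16 = 2n + 1, and in 64B descriptors ndesc = ceil((2n + 1) / 4)
 * = (n + 2) / 2.  Inverting: n descriptors hold 2n - 1 packets, so with
 * SGE_MAX_WR_NDESC = 8 (a 512B WR) MAX_NPKT_IN_TYPE1_WR works out to 15.
 */
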
static inline uint16_t
idxdiff(uint16_t head, uint16_t tail, uint16_t wrap)
{
	MPASS(head < wrap && tail < wrap);

	if (head >= tail)
		return (head - tail);
	else
		return (wrap - tail + head);
}
#define IDXDIFF(q, idx) idxdiff((q)->pidx, (q)->idx, (q)->sidx)

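/*
 * Example: with sidx = 1024, pidx = 10 and dbidx = 1000, IDXDIFF(q, dbidx)
 * is 1024 - 1000 + 10 = 34 descriptors written since the doorbell was last
 * rung, with the wraparound accounted for.
 */
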
static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = IDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}

	nm_txq->dbidx = nm_txq->pidx;
}

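/*
 * Doorbell selection above: ffs(db) - 1 picks the lowest-numbered doorbell
 * type still set in the local copy of the queue's doorbell mask.  WCWR,
 * which pushes the 64B descriptor itself through the write-combined
 * doorbell segment, is only good for exactly one descriptor and is cleared
 * from the copy whenever more than one needs to be published.
 */
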
int lazy_tx_credit_flush = 1;

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send
 * them on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(slot, &ba);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			/*
			 * netmap(4) says "netmap does not use features such as
			 * checksum offloading, TCP segmentation offloading,
			 * encryption, VLAN encapsulation/decapsulation, etc."
			 *
			 * XXXNM: it makes sense to enable checksum offload.
			 */
			cpl->ctrl1 = htobe64(F_TXPKT_IPCSUM_DIS |
			    F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (IDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (IDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (IDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}

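/*
 * Credit-flush policy, for reference: with lazy_tx_credit_flush set (the
 * default) a WR asks the hardware for an egress update (EQUEQ) only every
 * 64 descriptors, and for an interrupt as well (EQUIQ) once half the ring
 * is outstanding; the doorbell itself is rung every 2 * SGE_MAX_WR_NDESC
 * descriptors.  Setting lazy_tx_credit_flush to 0 requests both on the
 * final WR of every batch instead.
 */
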
/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);
		if (__predict_false(nm_txq->cidx >= nm_txq->sidx))
			nm_txq->cidx -= nm_txq->sidx;
	}

	return (n);
}

static int
cxgbe_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[pi->first_nm_txq + ring_nr];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to
	 * advance to kring->rhead.  Note that the driver's pidx moves
	 * independent of netmap's kring->nr_hwcur (pidx counts descriptors
	 * and the relation between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	nm_txsync_finalize(kring);

	return (0);
}

static int
cxgbe_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[pi->first_nm_rxq + ring_nr];
	u_int const head = nm_rxsync_prologue(kring);
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) ||
	    kring->nr_kflags & NKR_PENDINTR;

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at an 8B boundary (fl_pidx) last time around and we
		 * must have a multiple of 8B buffers to give to the freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		kring->nr_hwcur += n;
		if (kring->nr_hwcur >= kring->nkr_num_slots)
			kring->nr_hwcur -= kring->nkr_num_slots;

		nm_rxq->fl_pidx += n;
		if (nm_rxq->fl_pidx >= nm_rxq->fl_sidx)
			nm_rxq->fl_pidx -= nm_rxq->fl_sidx;

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(slot, &ba);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
			    nm_rxq->fl_db_val | V_PIDX(dbinc));
		}
	}

	nm_rxsync_finalize(kring);

	return (0);
}

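/*
 * Freelist doorbell batching above: buffers are returned to the hardware 8
 * at a time (dbinc counts groups of 8), and the doorbell is rung once 8
 * groups have accumulated, provided at least 32 more buffers are still to
 * come; any remainder is published in one final write.
 */
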
/*
 * Create an ifnet solely for netmap use and register it with the kernel.
 */
int
create_netmap_ifnet(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct netmap_adapter na;
	struct ifnet *ifp;
	device_t dev = pi->dev;
	uint8_t mac[ETHER_ADDR_LEN];
	int rc;

	if (pi->nnmtxq <= 0 || pi->nnmrxq <= 0)
		return (0);
	MPASS(pi->nm_ifp == NULL);

	/*
	 * Allocate a virtual interface exclusively for netmap use.  Give it
	 * the MAC address normally reserved for use by a TOE interface.
	 * (The TOE driver on FreeBSD doesn't use it).
	 */
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, &mac[0],
	    &pi->nm_rss_size, FW_VI_FUNC_OFLD, 0);
	if (rc < 0) {
		device_printf(dev, "unable to allocate netmap virtual "
		    "interface for port %d: %d\n", pi->port_id, -rc);
		return (-rc);
	}
	pi->nm_viid = rc;
	pi->nm_xact_addr_filt = -1;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate netmap ifnet\n");
		return (ENOMEM);
	}
	pi->nm_ifp = ifp;
	ifp->if_softc = pi;

	if_initname(ifp, is_t4(pi->adapter) ? "ncxgbe" : "ncxl",
	    device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_nm_init;
	ifp->if_ioctl = cxgbe_nm_ioctl;
	ifp->if_transmit = cxgbe_nm_transmit;
	ifp->if_qflush = cxgbe_nm_qflush;

	/*
	 * netmap(4) says "netmap does not use features such as checksum
	 * offloading, TCP segmentation offloading, encryption, VLAN
	 * encapsulation/decapsulation, etc."
	 *
	 * By default we comply with the statement above.  But we do declare
	 * the ifnet capable of L3/L4 checksumming so that a user can override
	 * netmap and have the hardware do the L3/L4 checksums.
	 */
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU |
	    IFCAP_HWCSUM_IPV6;
	ifp->if_capenable = 0;
	ifp->if_hwassist = 0;

	/* nm_media has already been setup by the caller */

	ether_ifattach(ifp, mac);

	/*
	 * Register with netmap in the kernel.
	 */
	bzero(&na, sizeof(na));

	na.ifp = pi->nm_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = pi->qsize_txq - spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in
	 * the freelist, and not the number of entries in the iq.  (These two
	 * are not exactly the same due to the space taken up by the status
	 * page).
	 */
	na.num_rx_desc = (pi->qsize_rxq / 8) * 8;
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = pi->nnmtxq;
	na.num_rx_rings = pi->nnmrxq;
	netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */

	return (0);
}

int
destroy_netmap_ifnet(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;

	if (pi->nm_ifp == NULL)
		return (0);

	netmap_detach(pi->nm_ifp);
	ifmedia_removeall(&pi->nm_media);
	ether_ifdetach(pi->nm_ifp);
	if_free(pi->nm_ifp);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->nm_viid);

	return (0);
}

static void
handle_nm_fw6_msg(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_fw6_msg *cpl)
{
	const struct cpl_sge_egr_update *egr;
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	if (cpl->type != FW_TYPE_RSSCPL && cpl->type != FW6_TYPE_RSSCPL)
		panic("%s: FW_TYPE 0x%x on nm_rxq.", __func__, cpl->type);

	/* data[0] is RSS header */
	egr = (const void *)&cpl->data[1];
	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}

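/*
 * The interrupt handler below walks the iq until the generation bit stops
 * matching (iq_gen flips on every wraparound of iq_cidx), returning credits
 * in batches of 64 via SGE_PF_GTS with the update-cidx timer index, which
 * does not re-arm the interrupt.  The final GTS write returns whatever
 * credits remain and re-arms the queue with QINTR_CNT_EN.
 */
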
void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;
	struct port_info *pi = nm_rxq->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->nm_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct nm_iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	uint32_t lq;
	u_int n = 0;
	int processed = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:
			/* No buffer packing so new buf every time */
			MPASS(lq & F_RSPD_NEWBUF);

			/* FALLTHROUGH */
		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				handle_nm_fw6_msg(sc, ifp, &d->u.fw6_msg);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - fl_pktshift;
				ring->slot[fl_cidx].flags = kring->nkr_slot_flags;
				if (__predict_false(++fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++n == 64)) {	/* XXXNM: tune */
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			n = 0;
		}
	}
	if (fl_cidx != nm_rxq->fl_cidx) {
		atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
		netmap_rx_irq(ifp, nm_rxq->nid, &processed);
	}
	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(n) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) | V_SEINTARM(F_QINTR_CNT_EN));
}
#endif