/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

extern int fl_pad;	/* XXXNM */

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RWTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int rx_nframes = 64;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN,
    &rx_nframes, 0, "max # of frames received before waking up netmap rx.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");

/*
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_cong_drop, CTLFLAG_RWTUN,
    &nm_cong_drop, 0,
    "Congestion control for netmap rx queues (0 = backpressure, 1 = drop)");

int starve_fl = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN,
    &starve_fl, 0, "Don't ring fl db for netmap rx queues.");

/*
 * Try to process tx credits in bulk.  This may cause a delay in the return of
 * tx credits and is suitable for bursty or non-stop tx only.
 */
int lazy_tx_credit_flush = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN,
    &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues.");

/*
 * Split the netmap rx queues into two groups that populate separate halves of
 * the RSS indirection table.  This allows filters with hashmask to steer to a
 * particular group of queues.
 */
static int nm_split_rss = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_split_rss, CTLFLAG_RWTUN,
    &nm_split_rss, 0, "Split the netmap rx queues into two groups.");

/*
 * netmap(4) says "netmap does not use features such as checksum offloading,
 * TCP segmentation offloading, encryption, VLAN encapsulation/decapsulation,
 * etc."  However, this knob can be used to get the hardware to checksum all
 * tx traffic anyway.
 */
static int nm_txcsum = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_txcsum, CTLFLAG_RWTUN,
    &nm_txcsum, 0, "Enable transmit checksum offloading.");

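/*
 * All of the knobs above are CTLFLAG_RWTUN, so they can be changed at runtime
 * with sysctl(8) or preset as loader tunables.  A hypothetical example (the
 * values shown are illustrative, not recommendations):
 *
 *	# /boot/loader.conf
 *	hw.cxgbe.nm_holdoff_tmr_idx="2"
 *	hw.cxgbe.nm_cong_drop="1"
 *
 *	# at runtime
 *	sysctl hw.cxgbe.lazy_tx_credit_flush=0
 */
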
static int free_nm_rxq_hwq(struct vi_info *, struct sge_nm_rxq *);
static int free_nm_txq_hwq(struct vi_info *, struct sge_nm_txq *);

int
alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
    int idx)
{
	int rc;
	size_t len;
	char name[16];
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	struct sysctl_ctx_list *ctx;
	struct adapter *sc = vi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);

	len = vi->qsize_rxq * IQ_ESIZE;
	rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map,
	    &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc);
	if (rc != 0)
		return (rc);

	len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map,
	    &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc);
	if (rc != 0)
		return (rc);

	nm_rxq->vi = vi;
	nm_rxq->nid = idx;
	nm_rxq->iq_cidx = 0;
	nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE;
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	nm_rxq->fl_sidx = na->num_rx_desc;
	nm_rxq->fl_sidx2 = nm_rxq->fl_sidx;	/* copy for rxsync cacheline */
	nm_rxq->intr_idx = intr_idx;
	nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;

	ctx = &vi->ctx;
	children = SYSCTL_CHILDREN(vi->nm_rxq_oid);

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
	    &nm_rxq->iq_abs_id, 0, "absolute id of the queue");
	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &nm_rxq->iq_cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
	    &nm_rxq->iq_cidx, 0, "consumer index");

	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &nm_rxq->fl_cntxt_id, 0, "SGE context id of the freelist");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
	    &nm_rxq->fl_cidx, 0, "consumer index");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
	    &nm_rxq->fl_pidx, 0, "producer index");

	return (rc);
}

void
free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->adapter;

	if (!(vi->flags & VI_INIT_DONE))
		return;

	if (nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID)
		free_nm_rxq_hwq(vi, nm_rxq);
	MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID);

	free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba,
	    nm_rxq->iq_desc);
	free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba,
	    nm_rxq->fl_desc);
}

int
alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx)
{
	int rc;
	size_t len;
	char name[16];
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(vi->nm_txq_oid);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
	    &nm_txq->ba, (void **)&nm_txq->desc);
	if (rc != 0)
		return (rc);

	nm_txq->pidx = nm_txq->cidx = 0;
	nm_txq->sidx = na->num_tx_desc;
	nm_txq->nid = idx;
	nm_txq->iqidx = iqidx;
	nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
	    V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
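	/*
	 * Newer firmwares (1.24.11.0 and up) understand the TX_PKTS2 work
	 * request; older ones get the original TX_PKTS.  The opcode is
	 * latched here so that the tx path doesn't have to look at the fw
	 * version for every packet.
	 */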
	if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0))
		nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
	else
		nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
	nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;

	snprintf(name, sizeof(name), "%d", idx);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queue");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
	    &nm_txq->cntxt_id, 0, "SGE context id of the queue");
	SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
	    &nm_txq->cidx, 0, "consumer index");
	SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
	    &nm_txq->pidx, 0, "producer index");

	return (rc);
}

void
free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->adapter;

	if (!(vi->flags & VI_INIT_DONE))
		return;

	if (nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID)
		free_nm_txq_hwq(vi, nm_txq);
	MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID);

	free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
	    nm_txq->desc);
}

static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	uint32_t v;
	struct adapter *sc = vi->adapter;
	struct sge_params *sp = &sc->params.sge;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_iq_cmd c;

	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_IQSTART | FW_LEN16(c));
	if (nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID)
		c.alloc_to_len16 |= htobe32(F_FW_IQ_CMD_ALLOC);
	else {
		c.iqid = htobe16(nm_rxq->iq_cntxt_id);
		c.fl0id = htobe16(nm_rxq->fl_cntxt_id);
		c.fl1id = htobe16(0xffff);
		c.physiqid = htobe16(nm_rxq->iq_abs_id);
	}
	MPASS(!forwarding_intr_to_fwq(sc));
	KASSERT(nm_rxq->intr_idx < sc->intr_count,
	    ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
	v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(vi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(vi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
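	/*
	 * cong < 0 disables congestion feedback for this queue; otherwise it
	 * appears to be a bitmap of the channels whose congestion state this
	 * queue should react to (see the conm context setup further below).
	 */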
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
	    (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
	    (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) |
	    V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
	c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.iqmap_sz) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.iqmap_sz - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	nm_rxq->fl_db_saved = 0;
	/* matches the X_FETCHBURSTMAX_512B or X_FETCHBURSTMAX_256B above. */
	nm_rxq->fl_db_threshold = chip_id(sc) <= CHELSIO_T5 ? 8 : 4;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.eqmap_sz) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.eqmap_sz - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
	    sc->chip_params->sge_fl_db;

	if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
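		/*
		 * The encoding below is an inference from how val is
		 * constructed: the field at bit 19 selects the congestion
		 * response (1 = backpressure the channel, 2 = drop for this
		 * queue) and the low nibble groups enable the channels set in
		 * the cong bitmap.
		 */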
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, sc->sge_gts_reg,
	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

	return (rc);
}

static int
free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;
	return (rc);
}

static int
alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = vi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_eq_eth_cmd c;

	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
		c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC);
	else
		c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize =
	    htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.eqmap_sz)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.eqmap_sz - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;
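
	/*
	 * Pick a doorbell mechanism.  Each egress queue is assigned a 128B
	 * segment in the user doorbell region; if that segment falls within
	 * the queue's mapped doorbell page it is addressed directly (udb_qid
	 * becomes 0), otherwise a relative qid must be used and the WCWR
	 * (write-combined work request) doorbell is not usable.
	 */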
	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	if (sc->params.fw_vers < FW_VERSION32(1, 25, 1, 0)) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) |
		    V_FW_PARAMS_PARAM_YZ(nm_txq->cntxt_id);
		val = 0xff;
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			device_printf(vi->dev,
			    "failed to bind netmap txq %d to class 0xff: %d\n",
			    nm_txq->cntxt_id, rc);
			rc = 0;
		}
	}

	return (rc);
}

static int
free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
	return (rc);
}

static int
cxgbe_netmap_simple_rss(struct adapter *sc, struct vi_info *vi,
    struct ifnet *ifp, struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	struct sge_nm_rxq *nm_rxq;
	int rc, i, j, nm_state, defq;
	uint16_t *rss;

	/*
	 * Check if there's at least one active (or about to go active) netmap
	 * rx queue.
	 */
	defq = -1;
	for_each_nm_rxq(vi, j, nm_rxq) {
		nm_state = atomic_load_int(&nm_rxq->nm_state);
		kring = na->rx_rings[nm_rxq->nid];
		if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) ||
		    (nm_state == NM_OFF && nm_kring_pending_on(kring))) {
			MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
			defq = nm_rxq->iq_abs_id;
			break;
		}
	}

	if (defq == -1) {
		/* No active netmap queues.  Switch back to NIC queues. */
		rss = vi->rss;
		defq = vi->rss[0];
	} else {
		for (i = 0; i < vi->rss_size;) {
			for_each_nm_rxq(vi, j, nm_rxq) {
				nm_state = atomic_load_int(&nm_rxq->nm_state);
				kring = na->rx_rings[nm_rxq->nid];
				if ((nm_state != NM_OFF &&
				    !nm_kring_pending_off(kring)) ||
				    (nm_state == NM_OFF &&
				    nm_kring_pending_on(kring))) {
					MPASS(nm_rxq->iq_cntxt_id !=
					    INVALID_NM_RXQ_CNTXT_ID);
					vi->nm_rss[i++] = nm_rxq->iq_abs_id;
					if (i == vi->rss_size)
						break;
				}
			}
		}
		rss = vi->nm_rss;
	}

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
	    vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);

	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0);
	if (rc != 0)
		if_printf(ifp, "netmap defaultq config failed: %d\n", rc);

	return (rc);
}

/*
 * Odd number of rx queues work best for split RSS mode as the first queue can
 * be dedicated for non-RSS traffic and the rest divided into two equal halves.
 */
static int
cxgbe_netmap_split_rss(struct adapter *sc, struct vi_info *vi,
    struct ifnet *ifp, struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	struct sge_nm_rxq *nm_rxq;
	int rc, i, j, nm_state, defq;
	int nactive[2] = {0, 0};
	int dq[2] = {-1, -1};
	bool dq_norss;		/* default queue should not be in the RSS table. */

	MPASS(nm_split_rss != 0);
	MPASS(vi->nnmrxq > 1);

	for_each_nm_rxq(vi, i, nm_rxq) {
		j = i / ((vi->nnmrxq + 1) / 2);
		nm_state = atomic_load_int(&nm_rxq->nm_state);
		kring = na->rx_rings[nm_rxq->nid];
		if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) ||
		    (nm_state == NM_OFF && nm_kring_pending_on(kring))) {
			MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
			nactive[j]++;
			if (dq[j] == -1)
				dq[j] = nm_rxq->iq_abs_id;
		}
	}

	if (nactive[0] == 0 || nactive[1] == 0)
		return (cxgbe_netmap_simple_rss(sc, vi, ifp, na));

	MPASS(dq[0] != -1 && dq[1] != -1);
	if (nactive[0] > nactive[1]) {
		defq = dq[0];
		dq_norss = true;
	} else if (nactive[0] < nactive[1]) {
		defq = dq[1];
		dq_norss = true;
	} else {
		defq = dq[0];
		dq_norss = false;
	}

	i = 0;
	nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq];
	while (i < vi->rss_size / 2) {
		for (j = 0; j < (vi->nnmrxq + 1) / 2; j++) {
			nm_state = atomic_load_int(&nm_rxq[j].nm_state);
			kring = na->rx_rings[nm_rxq[j].nid];
			if ((nm_state == NM_OFF &&
			    !nm_kring_pending_on(kring)) ||
			    (nm_state == NM_ON &&
			    nm_kring_pending_off(kring))) {
				continue;
			}
			MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
			if (dq_norss && defq == nm_rxq[j].iq_abs_id)
				continue;
			vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
			if (i == vi->rss_size / 2)
				break;
		}
	}
	while (i < vi->rss_size) {
		for (j = (vi->nnmrxq + 1) / 2; j < vi->nnmrxq; j++) {
			nm_state = atomic_load_int(&nm_rxq[j].nm_state);
			kring = na->rx_rings[nm_rxq[j].nid];
			if ((nm_state == NM_OFF &&
			    !nm_kring_pending_on(kring)) ||
			    (nm_state == NM_ON &&
			    nm_kring_pending_off(kring))) {
				continue;
			}
			MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
			if (dq_norss && defq == nm_rxq[j].iq_abs_id)
				continue;
			vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
			if (i == vi->rss_size)
				break;
		}
	}

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->nm_rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap split_rss_config failed: %d\n", rc);

	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0);
	if (rc != 0)
		if_printf(ifp, "netmap defaultq config failed: %d\n", rc);

	return (rc);
}

static int
cxgbe_netmap_rss(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{

	if (nm_split_rss == 0 || vi->nnmrxq == 1)
		return (cxgbe_netmap_simple_rss(sc, vi, ifp, na));
	else
		return (cxgbe_netmap_split_rss(sc, vi, ifp, na));
}

static int
cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct netmap_kring *kring;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int i, j, hwidx;
	struct rx_buf_info *rxb;

	ASSERT_SYNCHRONIZED_OP(sc);
	MPASS(vi->nnmrxq > 0);
	MPASS(vi->nnmtxq > 0);

	if ((vi->flags & VI_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		if_printf(ifp, "cannot enable netmap operation because "
		    "interface is not UP.\n");
		return (EAGAIN);
	}

	rxb = &sc->sge.rx_buf_info[0];
	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
		if (rxb->size1 == NETMAP_BUF_SIZE(na)) {
			hwidx = rxb->hwidx1;
			break;
		}
		if (rxb->size2 == NETMAP_BUF_SIZE(na)) {
			hwidx = rxb->hwidx2;
			break;
		}
	}
	if (i >= SW_ZONE_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}
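
	/*
	 * The hwidx found above is stamped into the low bits of every
	 * freelist descriptor alongside the buffer's bus address (see the
	 * loop below), which is how the hardware is told the size of the
	 * posted buffers.
	 */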
	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(vi, i, nm_rxq) {
		kring = na->rx_rings[nm_rxq->nid];
		if (!nm_kring_pending_on(kring))
			continue;

		alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(j));

		(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON);
	}

	for_each_nm_txq(vi, i, nm_txq) {
		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_on(kring))
			continue;

		alloc_nm_txq_hwq(vi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	if (vi->nm_rss == NULL) {
		vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
		    M_ZERO | M_WAITOK);
	}

	return (cxgbe_netmap_rss(sc, vi, ifp, na));
}

static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	int rc, i, nm_state, nactive;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);
	MPASS(vi->nnmrxq > 0);
	MPASS(vi->nnmtxq > 0);

	if (!nm_netmap_on(na))
		return (0);

	if ((vi->flags & VI_INIT_DONE) == 0)
		return (0);

	/* First remove the queues that are stopping from the RSS table. */
	rc = cxgbe_netmap_rss(sc, vi, ifp, na);
	if (rc != 0)
		return (rc);	/* error message logged already. */

	for_each_nm_txq(vi, i, nm_txq) {
		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_off(kring))
			continue;
		MPASS(nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID);

		rc = -t4_eth_eq_stop(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
		if (rc != 0) {
			device_printf(vi->dev,
			    "failed to stop nm_txq[%d]: %d.\n", i, rc);
			return (rc);
		}

		/* XXX: netmap, not the driver, should do this. */
		kring->rhead = kring->rcur = kring->nr_hwcur = 0;
		kring->rtail = kring->nr_hwtail = kring->nkr_num_slots - 1;
	}
	nactive = 0;
	for_each_nm_rxq(vi, i, nm_rxq) {
		nm_state = atomic_load_int(&nm_rxq->nm_state);
		kring = na->rx_rings[nm_rxq->nid];
		if (nm_state != NM_OFF && !nm_kring_pending_off(kring))
			nactive++;
		if (!nm_kring_pending_off(kring))
			continue;
		MPASS(nm_state != NM_OFF);
		MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);

		rc = -t4_iq_stop(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
		if (rc != 0) {
			device_printf(vi->dev,
			    "failed to stop nm_rxq[%d]: %d.\n", i, rc);
			return (rc);
		}

		while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF))
			pause("nmst", 1);

		/* XXX: netmap, not the driver, should do this. */
		kring->rhead = kring->rcur = kring->nr_hwcur = 0;
		kring->rtail = kring->nr_hwtail = 0;
	}
	netmap_krings_mode_commit(na, 0);

	nm_clear_native_flags(na);

	return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);	/* synch'ed already */
	if (on)
		rc = cxgbe_netmap_on(sc, vi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, vi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

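/*
 * The arithmetic in ndesc_to_npkt above and the two helpers below follows
 * from the layout of a type1 TX_PKTS/TX_PKTS2 work request: a 16B WR header
 * followed by a 16B CPL and a 16B SGL per packet.  A WR with n packets
 * therefore occupies 2n + 1 sixteen-byte units, which rounds up to
 * (n + 2) / 2 hardware descriptors (64B each); conversely, n descriptors
 * have room for up to 2n - 1 packets.
 */
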
/*
 * Space (in descriptors) needed for a type1 WR (TX_PKTS or TX_PKTS2) that
 * carries n packets
 */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/*
 * Space (in 16B units) needed for a type1 WR (TX_PKTS or TX_PKTS2) that
 * carries n packets
 */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}

#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}

	nm_txq->dbidx = nm_txq->pidx;
}

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = nm_txq->op_pkd;
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			cpl->ctrl1 = nm_txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}

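/*
 * The tx ring deliberately keeps one descriptor unused: pidx == cidx means
 * empty, so the producer never advances pidx all the way up to cidx.  The
 * cases below also reflect that a WR is never written across the end of the
 * ring.
 */
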
/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

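/*
 * Reclaim walks the WRs between the driver's cidx and the hardware's cidx
 * (published in the status page that lives just past the last descriptor)
 * and returns the total number of frames whose buffers can be handed back to
 * netmap.
 */
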
static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)) ||
		    wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}

static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to advance
	 * to kring->rhead.  Note that the driver's pidx moves independent of
	 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
	 * between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	return (0);
}

static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
	u_int const head = kring->rhead;
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever. */

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) {
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved));
		nm_rxq->fl_db_saved = 0;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at an 8B boundary (fl_pidx) last time around and we
		 * must have a multiple of 8B buffers to give to the freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx2);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				MPASS(ba != 0);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx2);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx2) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == nm_rxq->fl_db_threshold) {
				wmb();
				if (starve_fl)
					nm_rxq->fl_db_saved += dbinc;
				else {
					t4_write_reg(sc, sc->sge_kdoorbell_reg,
					    nm_rxq->fl_db_val | V_PIDX(dbinc));
				}
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			if (starve_fl)
				nm_rxq->fl_db_saved += dbinc;
			else {
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
			}
		}
	}

	return (0);
}

void
cxgbe_nm_attach(struct vi_info *vi)
{
	struct port_info *pi;
	struct adapter *sc;
	struct netmap_adapter na;

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	pi = vi->pi;
	sc = pi->adapter;

	bzero(&na, sizeof(na));

	na.ifp = vi->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in
	 * the freelist, and not the number of entries in the iq.  (These two
	 * are not exactly the same due to the space taken up by the status
	 * page).
	 */
	na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = vi->nnmtxq;
	na.num_rx_rings = vi->nnmrxq;
	na.rx_buf_maxsize = MAX_MTU;
	netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */
}

void
cxgbe_nm_detach(struct vi_info *vi)
{

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	netmap_detach(vi->ifp);
}

static inline const void *
unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
{

	MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);

	/* data[0] is RSS header */
	return (&cpl->data[1]);
}

static void
handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_sge_egr_update *egr)
{
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}

void
service_nm_rxq(struct sge_nm_rxq *nm_rxq)
{
	struct vi_info *vi = nm_rxq->vi;
	struct adapter *sc = vi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	const void *cpl;
	uint32_t lq;
	u_int work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;
	u_int ndesc = 0;	/* desc processed since last cidx update */
	u_int nframes = 0;	/* frames processed since last netmap wakeup */
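
	/*
	 * A response descriptor is valid only while its generation bit
	 * matches the queue's current generation.  The gen bit is flipped
	 * every time iq_cidx wraps, which is how fresh entries are told
	 * apart from stale ones without ever clearing the ring.
	 */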
	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;
		cpl = &d->cpl[0];

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:
			/* FALLTHROUGH */
		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				cpl = unwrap_nm_fw6_msg(cpl);
				/* FALLTHROUGH */
			case CPL_SGE_EGR_UPDATE:
				handle_nm_sge_egr_update(sc, ifp, cpl);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
				    sc->params.sge.fl_pktshift;
				ring->slot[fl_cidx].flags = 0;
				if (!(lq & F_RSPD_NEWBUF)) {
					MPASS(black_hole == 2);
					break;
				}
				fl_credits++;
				if (__predict_false(++fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++nframes == rx_nframes) && !black_hole) {
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			netmap_rx_irq(ifp, nm_rxq->nid, &work);
			nframes = 0;
		}

		if (__predict_false(++ndesc == rx_ndesc)) {
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			}
			t4_write_reg(sc, sc->sge_gts_reg,
			    V_CIDXINC(ndesc) |
			    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc = 0;
		}
	}

	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else if (nframes > 0)
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}