/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
extern int fl_pad;	/* XXXNM */

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RWTUN, &black_hole, 0,
    "Sink incoming packets.");
static int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

static int rx_nframes = 64;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN,
    &rx_nframes, 0, "max # of frames received before waking up netmap rx.");
int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");
/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_cong_drop, CTLFLAG_RWTUN,
    &nm_cong_drop, 0,
    "Congestion control for netmap rx queues (0 = backpressure, 1 = drop).");
static int starve_fl = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN,
    &starve_fl, 0, "Don't ring fl db for netmap rx queues.");
/*
 * Try to process tx credits in bulk.  This may cause a delay in the return of
 * tx credits and is suitable for bursty or non-stop tx only.
 */
int lazy_tx_credit_flush = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN,
    &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues.");
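/*
 * Note: when this is set to 0, cxgbe_nm_tx() requests an immediate egress
 * update (EQUEQ/EQUIQ) on the final work request of a burst so that tx
 * credits are returned right away; at the default of 1 the credits may come
 * back later, batched with subsequent traffic.
 */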
/*
 * Split the netmap rx queues into two groups that populate separate halves of
 * the RSS indirection table.  This allows filters with hashmask to steer to a
 * particular group of queues.
 */
static int nm_split_rss = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_split_rss, CTLFLAG_RWTUN,
    &nm_split_rss, 0, "Split the netmap rx queues into two groups.");
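/*
 * Example (illustrative): with 8 netmap rx queues and nm_split_rss enabled,
 * queues 0-3 populate the first half of the VI's RSS indirection table and
 * queues 4-7 the second half (see cxgbe_netmap_split_rss below).  A filter
 * whose hashmask pins the RSS hash to one half of the table therefore only
 * ever delivers to that group of queues.
 */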
/*
 * netmap(4) says "netmap does not use features such as checksum offloading, TCP
 * segmentation offloading, encryption, VLAN encapsulation/decapsulation, etc."
 * but this knob can be used to get the hardware to checksum all tx traffic
 * anyway.
 */
static int nm_txcsum = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_txcsum, CTLFLAG_RWTUN,
    &nm_txcsum, 0, "Enable transmit checksum offloading.");
static int free_nm_rxq_hwq(struct vi_info *, struct sge_nm_rxq *);
static int free_nm_txq_hwq(struct vi_info *, struct sge_nm_txq *);
int
alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
    int idx)
{
        int rc;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;
        struct sysctl_ctx_list *ctx;
        char name[16];
        size_t len;
        struct adapter *sc = vi->adapter;
        struct netmap_adapter *na = NA(vi->ifp);

        len = vi->qsize_rxq * IQ_ESIZE;
        rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map,
            &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc);
        if (rc != 0)
                return (rc);

        len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len;
        rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map,
            &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc);
        if (rc != 0)
                return (rc);

        nm_rxq->vi = vi;
        nm_rxq->nid = idx;
        nm_rxq->iq_cidx = 0;
        nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE;
        nm_rxq->iq_gen = F_RSPD_GEN;
        nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
        nm_rxq->fl_sidx = na->num_rx_desc;
        nm_rxq->fl_sidx2 = nm_rxq->fl_sidx;	/* copy for rxsync cacheline */
        nm_rxq->intr_idx = intr_idx;
        nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;

        ctx = &vi->ctx;
        children = SYSCTL_CHILDREN(vi->nm_rxq_oid);

        snprintf(name, sizeof(name), "%d", idx);
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue");
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_U16(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
            &nm_rxq->iq_abs_id, 0, "absolute id of the queue");
        SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
            &nm_rxq->iq_cntxt_id, 0, "SGE context id of the queue");
        SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
            &nm_rxq->iq_cidx, 0, "consumer index");

        children = SYSCTL_CHILDREN(oid);
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist");
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
            &nm_rxq->fl_cntxt_id, 0, "SGE context id of the freelist");
        SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
            &nm_rxq->fl_cidx, 0, "consumer index");
        SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
            &nm_rxq->fl_pidx, 0, "producer index");

        return (rc);
}

void
free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
        struct adapter *sc = vi->adapter;

        if (!(vi->flags & VI_INIT_DONE))
                return;

        if (nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID)
                free_nm_rxq_hwq(vi, nm_rxq);
        MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID);

        free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba,
            nm_rxq->iq_desc);
        free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba,
            nm_rxq->fl_desc);
}

int
alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx)
{
        int rc;
        size_t len;
        char name[16];
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct netmap_adapter *na = NA(vi->ifp);
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children = SYSCTL_CHILDREN(vi->nm_txq_oid);

        len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
        rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
            &nm_txq->ba, (void **)&nm_txq->desc);
        if (rc != 0)
                return (rc);

        nm_txq->pidx = nm_txq->cidx = 0;
        nm_txq->sidx = na->num_tx_desc;
        nm_txq->nid = idx;
        nm_txq->iqidx = iqidx;
        nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
            V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
            V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
        if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0))
                nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
        else
                nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
        nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;

        snprintf(name, sizeof(name), "%d", idx);
        oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queue");
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
            &nm_txq->cntxt_id, 0, "SGE context id of the queue");
        SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
            &nm_txq->cidx, 0, "consumer index");
        SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
            &nm_txq->pidx, 0, "producer index");

        return (rc);
}

void
free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
        struct adapter *sc = vi->adapter;

        if (!(vi->flags & VI_INIT_DONE))
                return;

        if (nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID)
                free_nm_txq_hwq(vi, nm_txq);
        MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID);

        free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
            nm_txq->desc);
}

static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
        int rc, cntxt_id;
        __be32 v;
        struct adapter *sc = vi->adapter;
        struct port_info *pi = vi->pi;
        struct sge_params *sp = &sc->params.sge;
        struct netmap_adapter *na = NA(vi->ifp);
        struct fw_iq_cmd c;
        const int cong_drop = nm_cong_drop;
        const int cong_map = pi->rx_e_chan_map;

        MPASS(nm_rxq->iq_desc != NULL);
        MPASS(nm_rxq->fl_desc != NULL);

        bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
        bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);

        bzero(&c, sizeof(c));
        c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
            F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
            V_FW_IQ_CMD_VFN(0));
        c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_IQSTART | FW_LEN16(c));
        if (nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID)
                c.alloc_to_len16 |= htobe32(F_FW_IQ_CMD_ALLOC);
        else {
                c.iqid = htobe16(nm_rxq->iq_cntxt_id);
                c.fl0id = htobe16(nm_rxq->fl_cntxt_id);
                c.fl1id = htobe16(0xffff);
                c.physiqid = htobe16(nm_rxq->iq_abs_id);
        }
        MPASS(!forwarding_intr_to_fwq(sc));
        KASSERT(nm_rxq->intr_idx < sc->intr_count,
            ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
        v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
        c.type_to_iqandstindex = htobe32(v |
            V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
            V_FW_IQ_CMD_VIID(vi->viid) |
            V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
        c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
            F_FW_IQ_CMD_IQGTSMODE |
            V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
            V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
        c.iqsize = htobe16(vi->qsize_rxq);
        c.iqaddr = htobe64(nm_rxq->iq_ba);
        if (cong_drop != -1) {
                c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
                    V_FW_IQ_CMD_FL0CNGCHMAP(cong_map) | F_FW_IQ_CMD_FL0CONGCIF |
                    F_FW_IQ_CMD_FL0CONGEN);
        }
        c.iqns_to_fl0congen |=
            htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
            V_FW_IQ_CMD_IQTYPE(FW_IQ_IQTYPE_NIC) |
            F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
            (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
            (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
        c.fl0dcaen_to_fl0cidxfthresh =
            htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
            X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) |
            V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
            X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
        c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
        c.fl0addr = htobe64(nm_rxq->fl_ba);

        rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to create netmap ingress queue: %d\n", rc);
                return (rc);
        }

        nm_rxq->iq_cidx = 0;
        MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
        nm_rxq->iq_gen = F_RSPD_GEN;
        nm_rxq->iq_cntxt_id = be16toh(c.iqid);
        nm_rxq->iq_abs_id = be16toh(c.physiqid);
        cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
        if (cntxt_id >= sc->sge.iqmap_sz) {
                panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
                    __func__, cntxt_id, sc->sge.iqmap_sz - 1);
        }
        sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

        nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
        nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
        nm_rxq->fl_db_saved = 0;
        /* matches the X_FETCHBURSTMAX_512B or X_FETCHBURSTMAX_256B above. */
        nm_rxq->fl_db_threshold = chip_id(sc) <= CHELSIO_T5 ? 8 : 4;
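        /*
         * Illustrative arithmetic: the netmap freelist doorbell is rung in
         * units of 8 descriptors of 8 bytes each, i.e. 64B per unit, so a
         * threshold of 8 units corresponds to the 512B fetch burst used on
         * T4/T5 and 4 units to the 256B burst used on T6.
         */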
        MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
        cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
        if (cntxt_id >= sc->sge.eqmap_sz) {
                panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
                    __func__, cntxt_id, sc->sge.eqmap_sz - 1);
        }
        sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

        nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
            sc->chip_params->sge_fl_db;

        if (chip_id(sc) >= CHELSIO_T5 && cong_drop != -1) {
                t4_sge_set_conm_context(sc, nm_rxq->iq_cntxt_id, cong_drop,
                    cong_map);
        }

        t4_write_reg(sc, sc->sge_gts_reg,
            V_INGRESSQID(nm_rxq->iq_cntxt_id) |
            V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

        return (rc);
}

static int
free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
        struct adapter *sc = vi->adapter;
        int rc;

        rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
            nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
        if (rc != 0)
                device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
                    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
        nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;
        return (rc);
}

static int
alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
        int rc, cntxt_id;
        size_t len;
        struct adapter *sc = vi->adapter;
        struct netmap_adapter *na = NA(vi->ifp);
        struct fw_eq_eth_cmd c;

        MPASS(nm_txq->desc != NULL);

        len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
        bzero(nm_txq->desc, len);

        bzero(&c, sizeof(c));
        c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
            F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
            V_FW_EQ_ETH_CMD_VFN(0));
        c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
        if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
                c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC);
        else
                c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id));
        c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
            F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
        c.fetchszm_to_iqid =
            htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
            V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
            V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
        c.dcaen_to_eqsize =
            htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
            X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
            V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
            V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
        c.eqaddr = htobe64(nm_txq->ba);

        rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
        if (rc != 0) {
                device_printf(vi->dev,
                    "failed to create netmap egress queue: %d\n", rc);
                return (rc);
        }

        nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
        cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
        if (cntxt_id >= sc->sge.eqmap_sz)
            panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
                cntxt_id, sc->sge.eqmap_sz - 1);
        sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

        nm_txq->pidx = nm_txq->cidx = 0;
        MPASS(nm_txq->sidx == na->num_tx_desc);
        nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

        nm_txq->doorbells = sc->doorbells;
        if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
            isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
            isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
                uint32_t s_qpp = sc->params.sge.eq_s_qpp;
                uint32_t mask = (1 << s_qpp) - 1;
                volatile uint8_t *udb;

                udb = sc->udbs_base + UDBS_DB_OFFSET;
                udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
                nm_txq->udb_qid = nm_txq->cntxt_id & mask;
                if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
                        clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
                else {
                        udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
                        nm_txq->udb_qid = 0;
                }
                nm_txq->udb = (volatile void *)udb;
        }

        if (sc->params.fw_vers < FW_VERSION32(1, 25, 1, 0)) {
                uint32_t param, val;

                param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) |
                    V_FW_PARAMS_PARAM_YZ(nm_txq->cntxt_id);
                val = 0xff;
                rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
                if (rc != 0) {
                        device_printf(vi->dev,
                            "failed to bind netmap txq %d to class 0xff: %d\n",
                            nm_txq->cntxt_id, rc);
                        rc = 0;
                }
        }

        return (rc);
}

static int
free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
        struct adapter *sc = vi->adapter;
        int rc;

        rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
        if (rc != 0)
                device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
                    nm_txq->cntxt_id, rc);
        nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
        return (rc);
}

static int
cxgbe_netmap_simple_rss(struct adapter *sc, struct vi_info *vi,
    if_t ifp, struct netmap_adapter *na)
{
        struct netmap_kring *kring;
        struct sge_nm_rxq *nm_rxq;
        int rc, i, j, nm_state, defq;
        uint16_t *rss;

        /*
         * Check if there's at least one active (or about to go active) netmap
         * rx queue.
         */
        defq = -1;
        for_each_nm_rxq(vi, j, nm_rxq) {
                nm_state = atomic_load_int(&nm_rxq->nm_state);
                kring = na->rx_rings[nm_rxq->nid];
                if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) ||
                    (nm_state == NM_OFF && nm_kring_pending_on(kring))) {
                        MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
                        defq = nm_rxq->iq_abs_id;
                        break;
                }
        }

        if (defq == -1) {
                /* No active netmap queues.  Switch back to NIC queues. */
                rss = vi->rss;
                defq = vi->rss[0];
        } else {
                for (i = 0; i < vi->rss_size;) {
                        for_each_nm_rxq(vi, j, nm_rxq) {
                                nm_state = atomic_load_int(&nm_rxq->nm_state);
                                kring = na->rx_rings[nm_rxq->nid];
                                if ((nm_state != NM_OFF &&
                                    !nm_kring_pending_off(kring)) ||
                                    (nm_state == NM_OFF &&
                                    nm_kring_pending_on(kring))) {
                                        MPASS(nm_rxq->iq_cntxt_id !=
                                            INVALID_NM_RXQ_CNTXT_ID);
                                        vi->nm_rss[i++] = nm_rxq->iq_abs_id;
                                        if (i == vi->rss_size)
                                                break;
                                }
                        }
                }
                rss = vi->nm_rss;
        }

        rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
            vi->rss_size);
        if (rc != 0)
                if_printf(ifp, "netmap rss_config failed: %d\n", rc);

        rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0);
        if (rc != 0)
                if_printf(ifp, "netmap defaultq config failed: %d\n", rc);

        return (rc);
}

/*
 * Odd number of rx queues work best for split RSS mode as the first queue can
 * be dedicated for non-RSS traffic and the rest divided into two equal halves.
 */
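/*
 * Worked example (illustrative): with 7 netmap rx queues the two groups are
 * queues 0-3 and 4-6.  Queue 0 becomes the default queue and is kept out of
 * the RSS table (dq_norss), so each half of the indirection table ends up
 * populated by three queues and queue 0 only sees non-RSS traffic.
 */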
static int
cxgbe_netmap_split_rss(struct adapter *sc, struct vi_info *vi,
    if_t ifp, struct netmap_adapter *na)
{
        struct netmap_kring *kring;
        struct sge_nm_rxq *nm_rxq;
        int rc, i, j, nm_state, defq;
        int nactive[2] = {0, 0};
        int dq[2] = {-1, -1};
        bool dq_norss;	/* default queue should not be in RSS table. */

        MPASS(nm_split_rss != 0);
        MPASS(vi->nnmrxq > 1);

        for_each_nm_rxq(vi, i, nm_rxq) {
                j = i / ((vi->nnmrxq + 1) / 2);
                nm_state = atomic_load_int(&nm_rxq->nm_state);
                kring = na->rx_rings[nm_rxq->nid];
                if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) ||
                    (nm_state == NM_OFF && nm_kring_pending_on(kring))) {
                        MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
                        nactive[j]++;
                        if (dq[j] == -1)
                                dq[j] = nm_rxq->iq_abs_id;
                }
        }

        if (nactive[0] == 0 || nactive[1] == 0)
                return (cxgbe_netmap_simple_rss(sc, vi, ifp, na));

        MPASS(dq[0] != -1 && dq[1] != -1);
        if (nactive[0] > nactive[1]) {
                defq = dq[0];
                dq_norss = true;
        } else if (nactive[0] < nactive[1]) {
                defq = dq[1];
                dq_norss = true;
        } else {
                defq = dq[0];
                dq_norss = false;
        }

        i = 0;
        nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq];
        while (i < vi->rss_size / 2) {
                for (j = 0; j < (vi->nnmrxq + 1) / 2; j++) {
                        nm_state = atomic_load_int(&nm_rxq[j].nm_state);
                        kring = na->rx_rings[nm_rxq[j].nid];
                        if ((nm_state == NM_OFF &&
                            !nm_kring_pending_on(kring)) ||
                            (nm_state == NM_ON &&
                            nm_kring_pending_off(kring)))
                                continue;
                        MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
                        if (dq_norss && defq == nm_rxq[j].iq_abs_id)
                                continue;
                        vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
                        if (i == vi->rss_size / 2)
                                break;
                }
        }
        while (i < vi->rss_size) {
                for (j = (vi->nnmrxq + 1) / 2; j < vi->nnmrxq; j++) {
                        nm_state = atomic_load_int(&nm_rxq[j].nm_state);
                        kring = na->rx_rings[nm_rxq[j].nid];
                        if ((nm_state == NM_OFF &&
                            !nm_kring_pending_on(kring)) ||
                            (nm_state == NM_ON &&
                            nm_kring_pending_off(kring)))
                                continue;
                        MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);
                        if (dq_norss && defq == nm_rxq[j].iq_abs_id)
                                continue;
                        vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
                        if (i == vi->rss_size)
                                break;
                }
        }

        rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
            vi->nm_rss, vi->rss_size);
        if (rc != 0)
                if_printf(ifp, "netmap split_rss_config failed: %d\n", rc);

        rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0);
        if (rc != 0)
                if_printf(ifp, "netmap defaultq config failed: %d\n", rc);

        return (rc);
}

static int
cxgbe_netmap_rss(struct adapter *sc, struct vi_info *vi, if_t ifp,
    struct netmap_adapter *na)
{

        if (nm_split_rss == 0 || vi->nnmrxq == 1)
                return (cxgbe_netmap_simple_rss(sc, vi, ifp, na));
        else
                return (cxgbe_netmap_split_rss(sc, vi, ifp, na));
}

static int
cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, if_t ifp,
    struct netmap_adapter *na)
{
        struct netmap_slot *slot;
        struct netmap_kring *kring;
        struct sge_nm_rxq *nm_rxq;
        struct sge_nm_txq *nm_txq;
        int i, j, hwidx;
        struct rx_buf_info *rxb;

        ASSERT_SYNCHRONIZED_OP(sc);
        MPASS(vi->nnmrxq > 0);
        MPASS(vi->nnmtxq > 0);

        if ((vi->flags & VI_INIT_DONE) == 0 ||
            (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
                if_printf(ifp, "cannot enable netmap operation because "
                    "interface is not UP.\n");
                return (EAGAIN);
        }

        rxb = &sc->sge.rx_buf_info[0];
        for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
                if (rxb->size1 == NETMAP_BUF_SIZE(na)) {
                        hwidx = rxb->hwidx1;
                        break;
                }
                if (rxb->size2 == NETMAP_BUF_SIZE(na)) {
                        hwidx = rxb->hwidx2;
                        break;
                }
        }
        if (i >= SW_ZONE_SIZES) {
                if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
                    NETMAP_BUF_SIZE(na));
                return (ENXIO);
        }

        /* Must set caps before calling netmap_reset */
        nm_set_native_flags(na);

        for_each_nm_rxq(vi, i, nm_rxq) {
                kring = na->rx_rings[nm_rxq->nid];
                if (!nm_kring_pending_on(kring))
                        continue;

                alloc_nm_rxq_hwq(vi, nm_rxq);
                nm_rxq->fl_hwidx = hwidx;
                slot = netmap_reset(na, NR_RX, i, 0);
                MPASS(slot != NULL);	/* XXXNM: error check, not assert */

                /* We deal with 8 bufs at a time */
                MPASS((na->num_rx_desc & 7) == 0);
                MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
                for (j = 0; j < nm_rxq->fl_sidx; j++) {
                        uint64_t ba;

                        PNMB(na, &slot[j], &ba);
                        MPASS(ba != 0);
                        nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
                }
                j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
                MPASS((j & 7) == 0);
                j /= 8;	/* driver pidx to hardware pidx */
                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                    nm_rxq->fl_db_val | V_PIDX(j));

                (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON);
        }

        for_each_nm_txq(vi, i, nm_txq) {
                kring = na->tx_rings[nm_txq->nid];
                if (!nm_kring_pending_on(kring))
                        continue;

                alloc_nm_txq_hwq(vi, nm_txq);
                slot = netmap_reset(na, NR_TX, i, 0);
                MPASS(slot != NULL);	/* XXXNM: error check, not assert */
        }

        if (vi->nm_rss == NULL) {
                vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
                    M_ZERO | M_WAITOK);
        }

        return (cxgbe_netmap_rss(sc, vi, ifp, na));
}

static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, if_t ifp,
    struct netmap_adapter *na)
{
        struct netmap_kring *kring;
        int rc, i, nm_state, nactive;
        struct sge_nm_txq *nm_txq;
        struct sge_nm_rxq *nm_rxq;

        ASSERT_SYNCHRONIZED_OP(sc);
        MPASS(vi->nnmrxq > 0);
        MPASS(vi->nnmtxq > 0);

        if (!nm_netmap_on(na))
                return (0);

        if ((vi->flags & VI_INIT_DONE) == 0)
                return (0);

        /* First remove the queues that are stopping from the RSS table. */
        rc = cxgbe_netmap_rss(sc, vi, ifp, na);
        if (rc != 0)
                return (rc);	/* error message logged already. */

        for_each_nm_txq(vi, i, nm_txq) {
                kring = na->tx_rings[nm_txq->nid];
                if (!nm_kring_pending_off(kring))
                        continue;
                MPASS(nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID);

                rc = -t4_eth_eq_stop(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
                if (rc != 0) {
                        device_printf(vi->dev,
                            "failed to stop nm_txq[%d]: %d.\n", i, rc);
                        return (rc);
                }

                /* XXX: netmap, not the driver, should do this. */
                kring->rhead = kring->rcur = kring->nr_hwcur = 0;
                kring->rtail = kring->nr_hwtail = kring->nkr_num_slots - 1;
        }
        nactive = 0;
        for_each_nm_rxq(vi, i, nm_rxq) {
                nm_state = atomic_load_int(&nm_rxq->nm_state);
                kring = na->rx_rings[nm_rxq->nid];
                if (nm_state != NM_OFF && !nm_kring_pending_off(kring))
                        nactive++;
                if (!nm_kring_pending_off(kring))
                        continue;
                MPASS(nm_state != NM_OFF);
                MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);

                rc = -t4_iq_stop(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
                    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
                if (rc != 0) {
                        device_printf(vi->dev,
                            "failed to stop nm_rxq[%d]: %d.\n", i, rc);
                        return (rc);
                }

                while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF))
                        pause("nmst", 1);

                /* XXX: netmap, not the driver, should do this. */
                kring->rhead = kring->rcur = kring->nr_hwcur = 0;
                kring->rtail = kring->nr_hwtail = 0;
        }
        netmap_krings_mode_commit(na, 0);
        if (nactive == 0)
                nm_clear_native_flags(na);

        return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
        if_t ifp = na->ifp;
        struct vi_info *vi = if_getsoftc(ifp);
        struct adapter *sc = vi->adapter;
        int rc;

        rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
        if (rc != 0)
                return (rc);
        if (on)
                rc = cxgbe_netmap_on(sc, vi, ifp, na);
        else
                rc = cxgbe_netmap_off(sc, vi, ifp, na);
        end_synchronized_op(sc, 0);

        return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

        MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

        return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/*
 * Space (in descriptors) needed for a type1 WR (TX_PKTS or TX_PKTS2) that
 * carries n packets
 */
static inline int
npkt_to_ndesc(const int n)
{

        MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

        return ((n + 2) / 2);
}

/*
 * Space (in 16B units) needed for a type1 WR (TX_PKTS or TX_PKTS2) that
 * carries n packets
 */
static inline int
npkt_to_len16(const int n)
{

        MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

        return (n * 2 + 1);
}
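/*
 * Worked example (illustrative): a type1 WR is a 16B header followed by a
 * 16B cpl_tx_pkt_core + 16B ulptx_sgl pair per packet, i.e. 16 * (2n + 1)
 * bytes for n packets, which is where npkt_to_len16(n) = 2n + 1 and, with
 * 64B descriptors, npkt_to_ndesc(n) = (n + 2) / 2 come from.  Inverting the
 * latter gives ndesc_to_npkt(n) = 2n - 1.  Assuming SGE_MAX_WR_NDESC is 8
 * (a 512B work request), MAX_NPKT_IN_TYPE1_WR works out to 15.
 */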
#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
        int n;
        u_int db = nm_txq->doorbells;

        MPASS(nm_txq->pidx != nm_txq->dbidx);

        n = NMIDXDIFF(nm_txq, dbidx);
        if (n > 1)
                clrbit(&db, DOORBELL_WCWR);
        wmb();

        switch (ffs(db) - 1) {
        case DOORBELL_UDB:
                *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
                break;

        case DOORBELL_WCWR: {
                volatile uint64_t *dst, *src;

                /*
                 * Queues whose 128B doorbell segment fits in the page do not
                 * use relative qid (udb_qid is always 0).  Only queues with
                 * doorbell segments can do WCWR.
                 */
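                /*
                 * Illustrative numbers (assuming 4KB doorbell pages and 128B
                 * UDB segments): only the first 32 queues of a page get their
                 * own segment, which is why alloc_nm_txq_hwq() clears
                 * DOORBELL_WCWR for udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE.
                 */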
                KASSERT(nm_txq->udb_qid == 0 && n == 1,
                    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
                    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

                dst = (volatile void *)((uintptr_t)nm_txq->udb +
                    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
                src = (void *)&nm_txq->desc[nm_txq->dbidx];
                while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
                        *dst++ = *src++;
                wmb();
                break;
        }

        case DOORBELL_UDBWC:
                *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
                wmb();
                break;

        case DOORBELL_KDB:
                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
                break;
        }

        nm_txq->dbidx = nm_txq->pidx;
}

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining)
{
        struct netmap_ring *ring = kring->ring;
        struct netmap_slot *slot;
        const u_int lim = kring->nkr_num_slots - 1;
        struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
        uint16_t len;
        uint64_t ba;
        struct cpl_tx_pkt_core *cpl;
        struct ulptx_sgl *usgl;
        int i, n;

        while (npkt) {
                n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
                len = 0;

                wr = (void *)&nm_txq->desc[nm_txq->pidx];
                wr->op_pkd = nm_txq->op_pkd;
                wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
                wr->npkt = n;
                wr->r3 = 0;
                wr->type = 1;
                cpl = (void *)(wr + 1);

                for (i = 0; i < n; i++) {
                        slot = &ring->slot[kring->nr_hwcur];
                        PNMB(kring->na, slot, &ba);
                        MPASS(ba != 0);

                        cpl->ctrl0 = nm_txq->cpl_ctrl0;
                        cpl->pack = 0;
                        cpl->len = htobe16(slot->len);
                        cpl->ctrl1 = nm_txcsum ? 0 :
                            htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

                        usgl = (void *)(cpl + 1);
                        usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
                            V_ULPTX_NSGE(1));
                        usgl->len0 = htobe32(slot->len);
                        usgl->addr0 = htobe64(ba);

                        slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
                        cpl = (void *)(usgl + 1);
                        MPASS(slot->len + len <= UINT16_MAX);
                        len += slot->len;
                        kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
                }
                wr->plen = htobe16(len);

                npkt -= n;
                nm_txq->pidx += npkt_to_ndesc(n);
                MPASS(nm_txq->pidx <= nm_txq->sidx);
                if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
                        /*
                         * This routine doesn't know how to write WRs that wrap
                         * around.  Make sure it wasn't asked to.
                         */
                        MPASS(npkt == 0);
                        nm_txq->pidx = 0;
                }

                if (npkt == 0 && npkt_remaining == 0) {
                        /* All done. */
                        if (lazy_tx_credit_flush == 0) {
                                wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
                                    F_FW_WR_EQUIQ);
                                nm_txq->equeqidx = nm_txq->pidx;
                                nm_txq->equiqidx = nm_txq->pidx;
                        }
                        ring_nm_txq_db(sc, nm_txq);
                        return;
                }

                if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
                        wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
                            F_FW_WR_EQUIQ);
                        nm_txq->equeqidx = nm_txq->pidx;
                        nm_txq->equiqidx = nm_txq->pidx;
                } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
                        wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
                        nm_txq->equeqidx = nm_txq->pidx;
                }
                if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
                        ring_nm_txq_db(sc, nm_txq);
        }

        /* Will get called again. */
        MPASS(npkt_remaining);
}

/* How many contiguous free descriptors starting at pidx (example below) */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

        if (nm_txq->cidx > nm_txq->pidx)
                return (nm_txq->cidx - nm_txq->pidx - 1);
        else if (nm_txq->cidx > 0)
                return (nm_txq->sidx - nm_txq->pidx);
        else
                return (nm_txq->sidx - nm_txq->pidx - 1);
}
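/*
 * Example (illustrative): with sidx = 1024, pidx = 1000 and cidx = 10 the
 * run from pidx to the end of the ring (24 descriptors) is usable because
 * there is room at the front to wrap into later; with cidx = 0 one
 * descriptor is held back (23 usable) so that pidx never catches up to cidx.
 */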
static inline int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
        struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
        uint16_t hw_cidx = spg->cidx;	/* snapshot */
        struct fw_eth_tx_pkts_wr *wr;
        u_int n = 0;

        hw_cidx = be16toh(hw_cidx);

        while (nm_txq->cidx != hw_cidx) {
                wr = (void *)&nm_txq->desc[nm_txq->cidx];

                MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)) ||
                    wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)));
                MPASS(wr->type == 1);
                MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

                n += wr->npkt;
                nm_txq->cidx += npkt_to_ndesc(wr->npkt);

                /*
                 * We never sent a WR that wrapped around so the credits coming
                 * back, WR by WR, should never cause the cidx to wrap around
                 * either.
                 */
                MPASS(nm_txq->cidx <= nm_txq->sidx);
                if (__predict_false(nm_txq->cidx == nm_txq->sidx))
                        nm_txq->cidx = 0;
        }

        return (n);
}

static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        if_t ifp = na->ifp;
        struct vi_info *vi = if_getsoftc(ifp);
        struct adapter *sc = vi->adapter;
        struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
        const u_int head = kring->rhead;
        u_int reclaimed = 0;
        int n, d, npkt_remaining, ndesc_remaining;

        /*
         * Tx was at kring->nr_hwcur last time around and now we need to advance
         * to kring->rhead.  Note that the driver's pidx moves independent of
         * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
         * between descriptors and frames isn't 1:1).
         */
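        /*
         * Worked example (illustrative, with MAX_NPKT_IN_TYPE1_WR = 15 and
         * SGE_MAX_WR_NDESC = 8): 20 frames need one full WR (15 frames in 8
         * descriptors) plus npkt_to_ndesc(5) = 3 descriptors for the rest,
         * so d = 11 below.  If fewer contiguous descriptors are free, n is
         * scaled back the same way and the remainder is sent on a later pass.
         */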
        npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
            kring->nkr_num_slots - kring->nr_hwcur + head;
        while (npkt_remaining) {
                reclaimed += reclaim_nm_tx_desc(nm_txq);
                ndesc_remaining = contiguous_ndesc_available(nm_txq);
                /* Can't run out of descriptors with packets still remaining */
                MPASS(ndesc_remaining > 0);

                /* # of desc needed to tx all remaining packets */
                d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
                if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
                        d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

                if (d <= ndesc_remaining)
                        n = npkt_remaining;
                else {
                        /* Can't send all, calculate how many can be sent */
                        n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
                            MAX_NPKT_IN_TYPE1_WR;
                        if (ndesc_remaining % SGE_MAX_WR_NDESC)
                                n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
                }

                /* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
                npkt_remaining -= n;
                cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining);
        }
        MPASS(npkt_remaining == 0);
        MPASS(kring->nr_hwcur == head);
        MPASS(nm_txq->dbidx == nm_txq->pidx);

        /*
         * Second part: reclaim buffers for completed transmissions.
         */
        if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
                reclaimed += reclaim_nm_tx_desc(nm_txq);
                kring->nr_hwtail += reclaimed;
                if (kring->nr_hwtail >= kring->nkr_num_slots)
                        kring->nr_hwtail -= kring->nkr_num_slots;
        }

        return (0);
}

static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct netmap_ring *ring = kring->ring;
        if_t ifp = na->ifp;
        struct vi_info *vi = if_getsoftc(ifp);
        struct adapter *sc = vi->adapter;
        struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
        u_int const head = kring->rhead;
        u_int n;
        int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

        if (black_hole)
                return (0);	/* No updates ever. */

        if (netmap_no_pendintr || force_update) {
                kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
                kring->nr_kflags &= ~NKR_PENDINTR;
        }

        if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) {
                wmb();
                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                    nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved));
                nm_rxq->fl_db_saved = 0;
        }

        /* Userspace done with buffers from kring->nr_hwcur to head */
        n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
            kring->nkr_num_slots - kring->nr_hwcur + head;
        n &= ~7U;
        if (n > 0) {
                u_int fl_pidx = nm_rxq->fl_pidx;
                struct netmap_slot *slot = &ring->slot[fl_pidx];
                uint64_t ba;
                int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

                /*
                 * We always deal with 8 buffers at a time.  We must have
                 * stopped at an 8B boundary (fl_pidx) last time around and we
                 * must have a multiple of 8B buffers to give to the freelist.
                 */
                MPASS((fl_pidx & 7) == 0);
                MPASS((n & 7) == 0);

                IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
                IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx2);

                while (n > 0) {
                        for (i = 0; i < 8; i++, fl_pidx++, slot++) {
                                PNMB(na, slot, &ba);
                                MPASS(ba != 0);
                                nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
                                slot->flags &= ~NS_BUF_CHANGED;
                                MPASS(fl_pidx <= nm_rxq->fl_sidx2);
                        }
                        n -= 8;
                        if (fl_pidx == nm_rxq->fl_sidx2) {
                                fl_pidx = 0;
                                slot = &ring->slot[0];
                        }
                        if (++dbinc == nm_rxq->fl_db_threshold) {
                                wmb();
                                if (starve_fl)
                                        nm_rxq->fl_db_saved += dbinc;
                                else {
                                        t4_write_reg(sc, sc->sge_kdoorbell_reg,
                                            nm_rxq->fl_db_val | V_PIDX(dbinc));
                                }
                                dbinc = 0;
                        }
                }
                MPASS(nm_rxq->fl_pidx == fl_pidx);

                if (dbinc > 0) {
                        wmb();
                        if (starve_fl)
                                nm_rxq->fl_db_saved += dbinc;
                        else {
                                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                                    nm_rxq->fl_db_val | V_PIDX(dbinc));
                        }
                }
        }

        return (0);
}

void
cxgbe_nm_attach(struct vi_info *vi)
{
        struct port_info *pi;
        struct adapter *sc;
        struct netmap_adapter na;

        MPASS(vi->nnmrxq > 0);
        MPASS(vi->ifp != NULL);

        pi = vi->pi;
        sc = pi->adapter;

        bzero(&na, sizeof(na));

        na.ifp = vi->ifp;
        na.na_flags = NAF_BDG_MAYSLEEP;

        /* Netmap doesn't know about the space reserved for the status page. */
        na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;

        /*
         * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
         * num_rx_desc is based on the number of buffers that can be held in the
         * freelist, and not the number of entries in the iq.  (These two are
         * not exactly the same due to the space taken up by the status page).
         */
        na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
        na.nm_txsync = cxgbe_netmap_txsync;
        na.nm_rxsync = cxgbe_netmap_rxsync;
        na.nm_register = cxgbe_netmap_reg;
        na.num_tx_rings = vi->nnmtxq;
        na.num_rx_rings = vi->nnmrxq;
        na.rx_buf_maxsize = MAX_MTU;
        netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */
}

void
cxgbe_nm_detach(struct vi_info *vi)
{

        MPASS(vi->nnmrxq > 0);
        MPASS(vi->ifp != NULL);

        netmap_detach(vi->ifp);
}

static inline const void *
unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
{

        MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);

        /* data[0] is RSS header */
        return (&cpl->data[1]);
}

static void
handle_nm_sge_egr_update(struct adapter *sc, if_t ifp,
    const struct cpl_sge_egr_update *egr)
{
        uint32_t oq;
        struct sge_nm_txq *nm_txq;

        oq = be32toh(egr->opcode_qid);
        MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
        nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

        netmap_tx_irq(ifp, nm_txq->nid);
}

void
service_nm_rxq(struct sge_nm_rxq *nm_rxq)
{
        struct vi_info *vi = nm_rxq->vi;
        struct adapter *sc = vi->adapter;
        if_t ifp = vi->ifp;
        struct netmap_adapter *na = NA(ifp);
        struct netmap_kring *kring = na->rx_rings[nm_rxq->nid];
        struct netmap_ring *ring = kring->ring;
        struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
        const void *cpl;
        uint32_t lq;
        u_int work = 0;
        uint8_t opcode;
        uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
        u_int fl_credits = fl_cidx & 7;
        u_int ndesc = 0;	/* desc processed since last cidx update */
        u_int nframes = 0;	/* frames processed since last netmap wakeup */

        while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

                rmb();

                lq = be32toh(d->rsp.pldbuflen_qid);
                opcode = d->rss.opcode;
                cpl = &d->cpl[0];

                switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
                case X_RSPD_TYPE_FLBUF:

                        /* fall through */

                case X_RSPD_TYPE_CPL:
                        MPASS(opcode < NUM_CPL_CMDS);

                        switch (opcode) {
                        case CPL_FW4_MSG:
                        case CPL_FW6_MSG:
                                cpl = unwrap_nm_fw6_msg(cpl);
                                /* fall through */
                        case CPL_SGE_EGR_UPDATE:
                                handle_nm_sge_egr_update(sc, ifp, cpl);
                                break;
                        case CPL_RX_PKT:
                                ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
                                    sc->params.sge.fl_pktshift;
                                ring->slot[fl_cidx].flags = 0;

                                if (!(lq & F_RSPD_NEWBUF)) {
                                        MPASS(black_hole == 2);
                                        break;
                                }
                                fl_credits++;
                                if (__predict_false(++fl_cidx == nm_rxq->fl_sidx))
                                        fl_cidx = 0;
                                break;
                        default:
                                panic("%s: unexpected opcode 0x%x on nm_rxq %p",
                                    __func__, opcode, nm_rxq);
                        }
                        break;

                case X_RSPD_TYPE_INTR:
                        /* Not equipped to handle forwarded interrupts. */
                        panic("%s: netmap queue received interrupt for iq %u\n",
                            __func__, lq);

                default:
                        panic("%s: illegal response type %d on nm_rxq %p",
                            __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
                }

                d++;
                if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
                        nm_rxq->iq_cidx = 0;
                        d = &nm_rxq->iq_desc[0];
                        nm_rxq->iq_gen ^= F_RSPD_GEN;
                }

                if (__predict_false(++nframes == rx_nframes) && !black_hole) {
                        atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
                        netmap_rx_irq(ifp, nm_rxq->nid, &work);
                        nframes = 0;
                }

                if (__predict_false(++ndesc == rx_ndesc)) {
                        if (black_hole && fl_credits >= 8) {
                                fl_credits /= 8;
                                IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
                                    nm_rxq->fl_sidx);
                                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                                    nm_rxq->fl_db_val | V_PIDX(fl_credits));
                                fl_credits = fl_cidx & 7;
                        }
                        t4_write_reg(sc, sc->sge_gts_reg,
                            V_CIDXINC(ndesc) |
                            V_INGRESSQID(nm_rxq->iq_cntxt_id) |
                            V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
                        ndesc = 0;
                }
        }

        atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
        if (black_hole) {
                fl_credits /= 8;
                IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                    nm_rxq->fl_db_val | V_PIDX(fl_credits));
        } else if (nframes > 0)
                netmap_rx_irq(ifp, nm_rxq->nid, &work);

        t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) |
            V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
            V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}