/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef DEV_NETMAP
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

extern int fl_pad;      /* XXXNM */

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int rx_nframes = 64;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN,
    &rx_nframes, 0, "max # of frames received before waking up netmap rx.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_cong_drop, CTLFLAG_RDTUN,
    &nm_cong_drop, 0,
    "Congestion control for netmap rx queues (-1 = off, 0 = backpressure, "
    "1 = drop).");

int starve_fl = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN,
    &starve_fl, 0, "Don't ring fl db for netmap rx queues.");

/*
 * Try to process tx credits in bulk.  This may cause a delay in the return of
 * tx credits and is suitable for bursty or non-stop tx only.
 */
int lazy_tx_credit_flush = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN,
    &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues.");
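
/*
 * Example (values here are for illustration only): all of the knobs above
 * live under hw.cxgbe.  The CTLFLAG_RDTUN ones must be set as tunables in
 * loader.conf(5), e.g.:
 *
 *   hw.cxgbe.nm_black_hole=2    # sink rx with buffer packing
 *   hw.cxgbe.nm_cong_drop=0     # backpressure instead of dropping
 *
 * The CTLFLAG_RWTUN ones (nm_rx_ndesc, nm_rx_nframes, nm_holdoff_tmr_idx,
 * starve_fl, lazy_tx_credit_flush) may also be changed at runtime with
 * sysctl(8).
 */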

static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
        int rc, cntxt_id, i;
        __be32 v;
        struct adapter *sc = vi->pi->adapter;
        struct sge_params *sp = &sc->params.sge;
        struct netmap_adapter *na = NA(vi->ifp);
        struct fw_iq_cmd c;

        MPASS(na != NULL);
        MPASS(nm_rxq->iq_desc != NULL);
        MPASS(nm_rxq->fl_desc != NULL);

        bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
        bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);

        bzero(&c, sizeof(c));
        c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
            F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
            V_FW_IQ_CMD_VFN(0));
        c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
            FW_LEN16(c));
        MPASS(!forwarding_intr_to_fwq(sc));
        KASSERT(nm_rxq->intr_idx < sc->intr_count,
            ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
        v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
        c.type_to_iqandstindex = htobe32(v |
            V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
            V_FW_IQ_CMD_VIID(vi->viid) |
            V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
        c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
            F_FW_IQ_CMD_IQGTSMODE |
            V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
            V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
        c.iqsize = htobe16(vi->qsize_rxq);
        c.iqaddr = htobe64(nm_rxq->iq_ba);
        if (cong >= 0) {
                c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
                    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
                    F_FW_IQ_CMD_FL0CONGEN);
        }
        c.iqns_to_fl0congen |=
            htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
                F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
                (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
                (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
        c.fl0dcaen_to_fl0cidxfthresh =
            htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
                X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) |
                V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
                X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
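        /*
         * The freelist is a ring of 64B hardware descriptors, each holding
         * eight 8-byte buffer addresses, so its size in descriptors is
         * num_rx_desc / 8, plus room for the status page at the end.
         */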
        c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
        c.fl0addr = htobe64(nm_rxq->fl_ba);

        rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to create netmap ingress queue: %d\n", rc);
                return (rc);
        }

        nm_rxq->iq_cidx = 0;
        MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
        nm_rxq->iq_gen = F_RSPD_GEN;
        nm_rxq->iq_cntxt_id = be16toh(c.iqid);
        nm_rxq->iq_abs_id = be16toh(c.physiqid);
        cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
        if (cntxt_id >= sc->sge.niq) {
                panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
                    __func__, cntxt_id, sc->sge.niq - 1);
        }
        sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

        nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
        nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
        MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
        cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
        if (cntxt_id >= sc->sge.neq) {
                panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
                    __func__, cntxt_id, sc->sge.neq - 1);
        }
        sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

        nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
            sc->chip_params->sge_fl_db;

        if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) {
                uint32_t param, val;

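                /*
                 * Set up the congestion manager context for this iq.  The
                 * magic numbers below appear to encode the congestion
                 * response mode in the 2-bit field at bit 19 (1 = per-queue
                 * backpressure, 2 = channel-map based drops, with bits
                 * 0/4/8/12 selecting the channels).  That reading of the
                 * CONM context layout is an assumption; this file does not
                 * define the field names.
                 */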
                param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
                    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
                if (cong == 0)
                        val = 1 << 19;
                else {
                        val = 2 << 19;
                        for (i = 0; i < 4; i++) {
                                if (cong & (1 << i))
                                        val |= 1 << (i << 2);
                        }
                }

                rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
                if (rc != 0) {
                        /* report error but carry on */
                        device_printf(sc->dev,
                            "failed to set congestion manager context for "
                            "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
                }
        }

        t4_write_reg(sc, sc->sge_gts_reg,
            V_INGRESSQID(nm_rxq->iq_cntxt_id) |
            V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

        return (rc);
}

static int
free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
        struct adapter *sc = vi->pi->adapter;
        int rc;

        rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
            nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
        if (rc != 0)
                device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
                    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
        nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;
        return (rc);
}

static int
alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
        int rc, cntxt_id;
        size_t len;
        struct adapter *sc = vi->pi->adapter;
        struct netmap_adapter *na = NA(vi->ifp);
        struct fw_eq_eth_cmd c;

        MPASS(na != NULL);
        MPASS(nm_txq->desc != NULL);

        len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
        bzero(nm_txq->desc, len);

        bzero(&c, sizeof(c));
        c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
            F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
            V_FW_EQ_ETH_CMD_VFN(0));
        c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
            F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
        c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
            F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
        c.fetchszm_to_iqid =
            htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
                V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
                V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
        c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
            V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
            V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
        c.eqaddr = htobe64(nm_txq->ba);

        rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
        if (rc != 0) {
                device_printf(vi->dev,
                    "failed to create netmap egress queue: %d\n", rc);
                return (rc);
        }

        nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
        cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
        if (cntxt_id >= sc->sge.neq)
                panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)",
                    __func__, cntxt_id, sc->sge.neq - 1);
        sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

        nm_txq->pidx = nm_txq->cidx = 0;
        MPASS(nm_txq->sidx == na->num_tx_desc);
        nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

        nm_txq->doorbells = sc->doorbells;
        if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
            isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
            isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
                uint32_t s_qpp = sc->params.sge.eq_s_qpp;
                uint32_t mask = (1 << s_qpp) - 1;
                volatile uint8_t *udb;
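                /*
                 * Locate this eq's user doorbell in the BAR2 region: the
                 * region is divided into pages with 2^s_qpp queues per page,
                 * each queue owning a 128B doorbell segment.  If the segment
                 * lies within the page it can be written directly (which is
                 * what WCWR needs); otherwise keep the relative qid and give
                 * up on WCWR below.
                 */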

                udb = sc->udbs_base + UDBS_DB_OFFSET;
                udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
                nm_txq->udb_qid = nm_txq->cntxt_id & mask;
                if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
                        clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
                else {
                        udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
                        nm_txq->udb_qid = 0;
                }
                nm_txq->udb = (volatile void *)udb;
        }

        return (rc);
}

static int
free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
        struct adapter *sc = vi->pi->adapter;
        int rc;

        rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
        if (rc != 0)
                device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
                    nm_txq->cntxt_id, rc);
        nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
        return (rc);
}

static int
cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
        struct netmap_slot *slot;
        struct netmap_kring *kring;
        struct sge_nm_rxq *nm_rxq;
        struct sge_nm_txq *nm_txq;
        int rc, i, j, hwidx;
        struct hw_buf_info *hwb;

        ASSERT_SYNCHRONIZED_OP(sc);

        if ((vi->flags & VI_INIT_DONE) == 0 ||
            (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return (EAGAIN);

        hwb = &sc->sge.hw_buf_info[0];
        for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
                if (hwb->size == NETMAP_BUF_SIZE(na))
                        break;
        }
        if (i >= SGE_FLBUF_SIZES) {
                if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
                    NETMAP_BUF_SIZE(na));
                return (ENXIO);
        }
        hwidx = i;

        /* Must set caps before calling netmap_reset */
        nm_set_native_flags(na);

        for_each_nm_rxq(vi, i, nm_rxq) {
                kring = na->rx_rings[nm_rxq->nid];
                if (!nm_kring_pending_on(kring) ||
                    nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID)
                        continue;

                alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
                nm_rxq->fl_hwidx = hwidx;
                slot = netmap_reset(na, NR_RX, i, 0);
                MPASS(slot != NULL);    /* XXXNM: error check, not assert */

                /* We deal with 8 bufs at a time */
                MPASS((na->num_rx_desc & 7) == 0);
                MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
                for (j = 0; j < nm_rxq->fl_sidx; j++) {
                        uint64_t ba;

                        PNMB(na, &slot[j], &ba);
                        MPASS(ba != 0);
                        nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
                }
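
                /*
                 * Give the hardware all but the last group of 8 buffers: a
                 * completely full freelist would be indistinguishable from
                 * an empty one (pidx == cidx).  The held-back group is
                 * replenished from rxsync.
                 */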
                j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
                MPASS((j & 7) == 0);
                j /= 8; /* driver pidx to hardware pidx */
                wmb();
                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                    nm_rxq->fl_db_val | V_PIDX(j));

                (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON);
        }

        for_each_nm_txq(vi, i, nm_txq) {
                kring = na->tx_rings[nm_txq->nid];
                if (!nm_kring_pending_on(kring) ||
                    nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID)
                        continue;

                alloc_nm_txq_hwq(vi, nm_txq);
                slot = netmap_reset(na, NR_TX, i, 0);
                MPASS(slot != NULL);    /* XXXNM: error check, not assert */
        }

        if (vi->nm_rss == NULL) {
                vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
                    M_ZERO | M_WAITOK);
        }
        for (i = 0; i < vi->rss_size;) {
                for_each_nm_rxq(vi, j, nm_rxq) {
                        vi->nm_rss[i++] = nm_rxq->iq_abs_id;
                        if (i == vi->rss_size)
                                break;
                }
        }
        rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
            vi->nm_rss, vi->rss_size);
        if (rc != 0)
                if_printf(ifp, "netmap rss_config failed: %d\n", rc);

        return (rc);
}

static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
        struct netmap_kring *kring;
        int rc, i;
        struct sge_nm_txq *nm_txq;
        struct sge_nm_rxq *nm_rxq;

        ASSERT_SYNCHRONIZED_OP(sc);

        if ((vi->flags & VI_INIT_DONE) == 0)
                return (0);

        rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
            vi->rss, vi->rss_size);
        if (rc != 0)
                if_printf(ifp, "failed to restore RSS config: %d\n", rc);
        nm_clear_native_flags(na);

        for_each_nm_txq(vi, i, nm_txq) {
                struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

                kring = na->tx_rings[nm_txq->nid];
                if (!nm_kring_pending_off(kring) ||
                    nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
                        continue;

                /* Wait for hw pidx to catch up ... */
                while (be16toh(nm_txq->pidx) != spg->pidx)
                        pause("nmpidx", 1);

                /* ... and then for the cidx. */
                while (spg->pidx != spg->cidx)
                        pause("nmcidx", 1);

                free_nm_txq_hwq(vi, nm_txq);
        }
        for_each_nm_rxq(vi, i, nm_rxq) {
                kring = na->rx_rings[nm_rxq->nid];
                if (!nm_kring_pending_off(kring) ||
                    nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID)
                        continue;

                while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF))
                        pause("nmst", 1);

                free_nm_rxq_hwq(vi, nm_rxq);
        }

        return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
        struct ifnet *ifp = na->ifp;
        struct vi_info *vi = ifp->if_softc;
        struct adapter *sc = vi->pi->adapter;
        int rc;

        rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
        if (rc != 0)
                return (rc);
        if (on)
                rc = cxgbe_netmap_on(sc, vi, ifp, na);
        else
                rc = cxgbe_netmap_off(sc, vi, ifp, na);
        end_synchronized_op(sc, 0);

        return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

        MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

        return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR    (ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{

        MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

        return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{

        MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

        return (n * 2 + 1);
}
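
/*
 * A sanity check on the arithmetic above (the struct sizes are taken from
 * the shared firmware/CPL headers, so treat the exact values as assumptions):
 * a type1 fw_eth_tx_pkts_wr has a 16B header and each packet adds a 16B
 * cpl_tx_pkt_core plus a 16B one-SGE ulptx_sgl, i.e. 32B per packet.  With
 * 64B descriptors, n descriptors carry (64n - 16) / 32 = 2n - 1 packets,
 * n packets need ceil((16 + 32n) / 64) = (n + 2) / 2 descriptors, and the
 * WR length in 16B units is (16 + 32n) / 16 = 2n + 1.
 */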

#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
        int n;
        u_int db = nm_txq->doorbells;

        MPASS(nm_txq->pidx != nm_txq->dbidx);

        n = NMIDXDIFF(nm_txq, dbidx);
        if (n > 1)
                clrbit(&db, DOORBELL_WCWR);
        wmb();
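
        /*
         * Ring a doorbell that is still set in db: WCWR was cleared above
         * for multi-descriptor updates, and ffs() picks the lowest-numbered
         * mechanism that remains.
         */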

        switch (ffs(db) - 1) {
        case DOORBELL_UDB:
                *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
                break;

        case DOORBELL_WCWR: {
                volatile uint64_t *dst, *src;

                /*
                 * Queues whose 128B doorbell segment fits in the page do not
                 * use relative qid (udb_qid is always 0).  Only queues with
                 * doorbell segments can do WCWR.
                 */
                KASSERT(nm_txq->udb_qid == 0 && n == 1,
                    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
                    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

                dst = (volatile void *)((uintptr_t)nm_txq->udb +
                    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
                src = (void *)&nm_txq->desc[nm_txq->dbidx];
                while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
                        *dst++ = *src++;
                wmb();
                break;
        }

        case DOORBELL_UDBWC:
                *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
                wmb();
                break;

        case DOORBELL_KDB:
                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
                break;
        }
        nm_txq->dbidx = nm_txq->pidx;
}

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
{
        struct netmap_ring *ring = kring->ring;
        struct netmap_slot *slot;
        const u_int lim = kring->nkr_num_slots - 1;
        struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
        uint16_t len;
        uint64_t ba;
        struct cpl_tx_pkt_core *cpl;
        struct ulptx_sgl *usgl;
        int i, n;

        while (npkt) {
                n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
                len = 0;

                wr = (void *)&nm_txq->desc[nm_txq->pidx];
                wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
                wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
                wr->npkt = n;
                wr->r3 = 0;
                wr->type = 1;
                cpl = (void *)(wr + 1);

                for (i = 0; i < n; i++) {
                        slot = &ring->slot[kring->nr_hwcur];
                        PNMB(kring->na, slot, &ba);
                        MPASS(ba != 0);

                        cpl->ctrl0 = nm_txq->cpl_ctrl0;
                        cpl->pack = 0;
                        cpl->len = htobe16(slot->len);
                        /*
                         * netmap(4) says "netmap does not use features such as
                         * checksum offloading, TCP segmentation offloading,
                         * encryption, VLAN encapsulation/decapsulation, etc."
                         *
                         * So the ncxl interfaces have tx hardware checksumming
                         * disabled by default.  But you can override netmap by
                         * enabling IFCAP_TXCSUM on the interface manually.
                         */
                        cpl->ctrl1 = txcsum ? 0 :
                            htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

                        usgl = (void *)(cpl + 1);
                        usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
                            V_ULPTX_NSGE(1));
                        usgl->len0 = htobe32(slot->len);
                        usgl->addr0 = htobe64(ba);

                        slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
                        cpl = (void *)(usgl + 1);
                        MPASS(slot->len + len <= UINT16_MAX);
                        len += slot->len;
                        kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
                }
                wr->plen = htobe16(len);

                npkt -= n;
                nm_txq->pidx += npkt_to_ndesc(n);
                MPASS(nm_txq->pidx <= nm_txq->sidx);
                if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
                        /*
                         * This routine doesn't know how to write WRs that wrap
                         * around.  Make sure it wasn't asked to.
                         */
                        MPASS(npkt == 0);
                        nm_txq->pidx = 0;
                }

                if (npkt == 0 && npkt_remaining == 0) {
                        /* All done. */
                        if (lazy_tx_credit_flush == 0) {
                                wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
                                    F_FW_WR_EQUIQ);
                                nm_txq->equeqidx = nm_txq->pidx;
                                nm_txq->equiqidx = nm_txq->pidx;
                        }
                        ring_nm_txq_db(sc, nm_txq);
                        return;
                }

                if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
                        wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
                            F_FW_WR_EQUIQ);
                        nm_txq->equeqidx = nm_txq->pidx;
                        nm_txq->equiqidx = nm_txq->pidx;
                } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
                        wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
                        nm_txq->equeqidx = nm_txq->pidx;
                }
                if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
                        ring_nm_txq_db(sc, nm_txq);
        }

        /* Will get called again. */
        MPASS(npkt_remaining);
}

/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

        if (nm_txq->cidx > nm_txq->pidx)
                return (nm_txq->cidx - nm_txq->pidx - 1);
        else if (nm_txq->cidx > 0)
                return (nm_txq->sidx - nm_txq->pidx);
        else
                return (nm_txq->sidx - nm_txq->pidx - 1);
}
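
/*
 * Example: with sidx = 1024 and cidx = pidx = 0 this returns 1023.  One
 * descriptor is always kept free so that a full ring (pidx one behind cidx)
 * remains distinguishable from an empty one (pidx == cidx).
 */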

static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
        struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
        uint16_t hw_cidx = spg->cidx;   /* snapshot */
        struct fw_eth_tx_pkts_wr *wr;
        int n = 0;

        hw_cidx = be16toh(hw_cidx);

        while (nm_txq->cidx != hw_cidx) {
                wr = (void *)&nm_txq->desc[nm_txq->cidx];

                MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
                MPASS(wr->type == 1);
                MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

                n += wr->npkt;
                nm_txq->cidx += npkt_to_ndesc(wr->npkt);

                /*
                 * We never sent a WR that wrapped around so the credits coming
                 * back, WR by WR, should never cause the cidx to wrap around
                 * either.
                 */
                MPASS(nm_txq->cidx <= nm_txq->sidx);
                if (__predict_false(nm_txq->cidx == nm_txq->sidx))
                        nm_txq->cidx = 0;
        }

        return (n);
}

static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct vi_info *vi = ifp->if_softc;
        struct adapter *sc = vi->pi->adapter;
        struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
        const u_int head = kring->rhead;
        u_int reclaimed = 0;
        int n, d, npkt_remaining, ndesc_remaining, txcsum;

        /*
         * Tx was at kring->nr_hwcur last time around and now we need to
         * advance to kring->rhead.  Note that the driver's pidx moves
         * independently of netmap's kring->nr_hwcur (pidx counts descriptors
         * and the relation between descriptors and frames isn't 1:1).
         */

        npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
            kring->nkr_num_slots - kring->nr_hwcur + head;
        txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
        while (npkt_remaining) {
                reclaimed += reclaim_nm_tx_desc(nm_txq);
                ndesc_remaining = contiguous_ndesc_available(nm_txq);
                /* Can't run out of descriptors with packets still remaining */
                MPASS(ndesc_remaining > 0);

                /* # of desc needed to tx all remaining packets */
                d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
                if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
                        d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);
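
                /*
                 * Worked example (assuming SGE_MAX_WR_NDESC == 8, so
                 * MAX_NPKT_IN_TYPE1_WR == 15): npkt_remaining = 20 needs one
                 * full WR (8 descriptors for 15 packets) plus
                 * npkt_to_ndesc(5) = 3 descriptors for the rest, i.e. d = 11.
                 */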

                if (d <= ndesc_remaining)
                        n = npkt_remaining;
                else {
                        /* Can't send all, calculate how many can be sent */
                        n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
                            MAX_NPKT_IN_TYPE1_WR;
                        if (ndesc_remaining % SGE_MAX_WR_NDESC)
                                n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
                }

                /* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
                npkt_remaining -= n;
                cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
        }
        MPASS(npkt_remaining == 0);
        MPASS(kring->nr_hwcur == head);
        MPASS(nm_txq->dbidx == nm_txq->pidx);

        /*
         * Second part: reclaim buffers for completed transmissions.
         */
        if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
                reclaimed += reclaim_nm_tx_desc(nm_txq);
                kring->nr_hwtail += reclaimed;
                if (kring->nr_hwtail >= kring->nkr_num_slots)
                        kring->nr_hwtail -= kring->nkr_num_slots;
        }

        return (0);
}

static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct netmap_ring *ring = kring->ring;
        struct ifnet *ifp = na->ifp;
        struct vi_info *vi = ifp->if_softc;
        struct adapter *sc = vi->pi->adapter;
        struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
        u_int const head = kring->rhead;
        u_int n;
        int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

        if (black_hole)
                return (0);     /* No updates ever. */

        if (netmap_no_pendintr || force_update) {
                kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
                kring->nr_kflags &= ~NKR_PENDINTR;
        }

        if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) {
                wmb();
                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                    nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved));
                nm_rxq->fl_db_saved = 0;
        }

        /* Userspace done with buffers from kring->nr_hwcur to head */
        n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
            kring->nkr_num_slots - kring->nr_hwcur + head;
        n &= ~7U;
        if (n > 0) {
                u_int fl_pidx = nm_rxq->fl_pidx;
                struct netmap_slot *slot = &ring->slot[fl_pidx];
                uint64_t ba;
                int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

                /*
                 * We always deal with 8 buffers at a time.  We must have
                 * stopped at an 8-buffer boundary (fl_pidx) last time around
                 * and we must have a multiple of 8 buffers to give to the
                 * freelist.
                 */
                MPASS((fl_pidx & 7) == 0);
                MPASS((n & 7) == 0);

                IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
                IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);

                while (n > 0) {
                        for (i = 0; i < 8; i++, fl_pidx++, slot++) {
                                PNMB(na, slot, &ba);
                                MPASS(ba != 0);
                                nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
                                slot->flags &= ~NS_BUF_CHANGED;
                                MPASS(fl_pidx <= nm_rxq->fl_sidx);
                        }
                        n -= 8;
                        if (fl_pidx == nm_rxq->fl_sidx) {
                                fl_pidx = 0;
                                slot = &ring->slot[0];
                        }
                        if (++dbinc == 8 && n >= 32) {
                                wmb();
                                if (starve_fl)
                                        nm_rxq->fl_db_saved += dbinc;
                                else {
                                        t4_write_reg(sc, sc->sge_kdoorbell_reg,
                                            nm_rxq->fl_db_val | V_PIDX(dbinc));
                                }
                                dbinc = 0;
                        }
                }
                MPASS(nm_rxq->fl_pidx == fl_pidx);

                if (dbinc > 0) {
                        wmb();
                        if (starve_fl)
                                nm_rxq->fl_db_saved += dbinc;
                        else {
                                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                                    nm_rxq->fl_db_val | V_PIDX(dbinc));
                        }
                }
        }

        return (0);
}

void
cxgbe_nm_attach(struct vi_info *vi)
{
        struct port_info *pi;
        struct adapter *sc;
        struct netmap_adapter na;

        MPASS(vi->nnmrxq > 0);
        MPASS(vi->ifp != NULL);

        pi = vi->pi;
        sc = pi->adapter;

        bzero(&na, sizeof(na));

        na.ifp = vi->ifp;
        na.na_flags = NAF_BDG_MAYSLEEP;

        /* Netmap doesn't know about the space reserved for the status page. */
        na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;

        /*
         * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
         * num_rx_desc is based on the number of buffers that can be held in the
         * freelist, and not the number of entries in the iq.  (These two are
         * not exactly the same due to the space taken up by the status page.)
         */
        na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
        na.nm_txsync = cxgbe_netmap_txsync;
        na.nm_rxsync = cxgbe_netmap_rxsync;
        na.nm_register = cxgbe_netmap_reg;
        na.num_tx_rings = vi->nnmtxq;
        na.num_rx_rings = vi->nnmrxq;
        netmap_attach(&na);     /* This adds IFCAP_NETMAP to if_capabilities */
}
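
/*
 * Example usage (interface name is hypothetical): once a netmap-capable VI
 * is attached it shows up as a separate ncxl<N> ifnet, and the stock netmap
 * tools can drive it, e.g.:
 *
 *   # ifconfig ncxl0 up
 *   # pkt-gen -i ncxl0 -f tx    # transmit a stream of test frames
 *   # pkt-gen -i ncxl0 -f rx    # sink received frames
 *
 * pkt-gen is the example generator/sink from tools/tools/netmap.
 */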

void
cxgbe_nm_detach(struct vi_info *vi)
{

        MPASS(vi->nnmrxq > 0);
        MPASS(vi->ifp != NULL);

        netmap_detach(vi->ifp);
}

static inline const void *
unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
{

        MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);

        /* data[0] is RSS header */
        return (&cpl->data[1]);
}

static void
handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_sge_egr_update *egr)
{
        uint32_t oq;
        struct sge_nm_txq *nm_txq;

        oq = be32toh(egr->opcode_qid);
        MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
        nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

        netmap_tx_irq(ifp, nm_txq->nid);
}

void
service_nm_rxq(struct sge_nm_rxq *nm_rxq)
{
        struct vi_info *vi = nm_rxq->vi;
        struct adapter *sc = vi->pi->adapter;
        struct ifnet *ifp = vi->ifp;
        struct netmap_adapter *na = NA(ifp);
        struct netmap_kring *kring = na->rx_rings[nm_rxq->nid];
        struct netmap_ring *ring = kring->ring;
        struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
        const void *cpl;
        uint32_t lq;
        u_int work = 0;
        uint8_t opcode;
        uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
        u_int fl_credits = fl_cidx & 7;
        u_int ndesc = 0;        /* desc processed since last cidx update */
        u_int nframes = 0;      /* frames processed since last netmap wakeup */

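        /*
         * The response queue uses a generation bit: iq_gen holds the value
         * that marks a descriptor as valid for the current pass around the
         * ring, and it is flipped below each time iq_cidx wraps, so stale
         * descriptors from the previous lap fail the check and end the loop.
         */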
        while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

                rmb();

                lq = be32toh(d->rsp.pldbuflen_qid);
                opcode = d->rss.opcode;
                cpl = &d->cpl[0];

                switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
                case X_RSPD_TYPE_FLBUF:

                        /* fall through */

                case X_RSPD_TYPE_CPL:
                        MPASS(opcode < NUM_CPL_CMDS);

                        switch (opcode) {
                        case CPL_FW4_MSG:
                        case CPL_FW6_MSG:
                                cpl = unwrap_nm_fw6_msg(cpl);
                                /* fall through */
                        case CPL_SGE_EGR_UPDATE:
                                handle_nm_sge_egr_update(sc, ifp, cpl);
                                break;
                        case CPL_RX_PKT:
                                ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
                                    sc->params.sge.fl_pktshift;
                                ring->slot[fl_cidx].flags = 0;
                                nframes++;
                                if (!(lq & F_RSPD_NEWBUF)) {
                                        MPASS(black_hole == 2);
                                        break;
                                }
                                fl_credits++;
                                if (__predict_false(++fl_cidx == nm_rxq->fl_sidx))
                                        fl_cidx = 0;
                                break;
                        default:
                                panic("%s: unexpected opcode 0x%x on nm_rxq %p",
                                    __func__, opcode, nm_rxq);
                        }
                        break;

                case X_RSPD_TYPE_INTR:
                        /* Not equipped to handle forwarded interrupts. */
                        panic("%s: netmap queue received interrupt for iq %u\n",
                            __func__, lq);

                default:
                        panic("%s: illegal response type %d on nm_rxq %p",
                            __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
                }

                d++;
                if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
                        nm_rxq->iq_cidx = 0;
                        d = &nm_rxq->iq_desc[0];
                        nm_rxq->iq_gen ^= F_RSPD_GEN;
                }

                if (__predict_false(nframes == rx_nframes) && !black_hole) {
                        atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
                        netmap_rx_irq(ifp, nm_rxq->nid, &work);
                        nframes = 0;
                }

                if (__predict_false(++ndesc == rx_ndesc)) {
                        if (black_hole && fl_credits >= 8) {
                                fl_credits /= 8;
                                IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
                                    nm_rxq->fl_sidx);
                                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                                    nm_rxq->fl_db_val | V_PIDX(fl_credits));
                                fl_credits = fl_cidx & 7;
                        }
                        t4_write_reg(sc, sc->sge_gts_reg,
                            V_CIDXINC(ndesc) |
                            V_INGRESSQID(nm_rxq->iq_cntxt_id) |
                            V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
                        ndesc = 0;
                }
        }

        atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
        if (black_hole) {
                fl_credits /= 8;
                IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
                t4_write_reg(sc, sc->sge_kdoorbell_reg,
                    nm_rxq->fl_db_val | V_PIDX(fl_credits));
        } else if (nframes > 0)
                netmap_rx_irq(ifp, nm_rxq->nid, &work);

        t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) |
            V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
            V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
#endif