/*
 * Copyright (C) 2011 Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * netmap support for "re"
 * For details on netmap support please see ixgbe_netmap.h
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

/*
 * Support for netmap register/unregister. We are already under core lock.
 * Only called on the first register or the last unregister.
 */
static int
re_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct rl_softc *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (na == NULL)
		return EINVAL;
	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	re_stop(adapter);

	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save if_transmit to restore it later */
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_transmit;

		re_init_locked(adapter);

		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			error = ENOMEM;
			goto fail;
		}
	} else {
fail:		/* a failed init falls through here to restore normal operation */
		/* restore if_transmit */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		re_init_locked(adapter);	/* also enables intr */
	}
	return error;
}

/*
 * Reconcile kernel and user view of the transmit ring.
 */
static int
re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
	struct rl_softc *sc = ifp->if_softc;
	struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, k, l, n, lim = kring->nkr_num_slots - 1;
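
	/*
	 * Index naming convention, as in the other netmap drivers:
	 * j is a slot index in the netmap ring, k is the user's
	 * ring->cur, and l is the matching index in the NIC ring.
	 */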

	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	/* Sync the TX descriptor list */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
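
	/*
	 * The POSTREAD sync makes the NIC's writes to the descriptor
	 * ring (cleared OWN bits and status words) visible to the CPU
	 * before we scan for completed transmissions below.
	 */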
	/* XXX move after the transmissions */
	/* record completed transmissions */
	for (n = 0, l = sc->rl_ldata.rl_tx_considx;
	    l != sc->rl_ldata.rl_tx_prodidx;
	    n++, l = RL_TX_DESC_NXT(sc, l)) {
		uint32_t cmdstat =
			le32toh(sc->rl_ldata.rl_tx_list[l].rl_cmdstat);
		if (cmdstat & RL_TDESC_STAT_OWN)
			break;
	}
	if (n > 0) {
		sc->rl_ldata.rl_tx_considx = l;
		sc->rl_ldata.rl_tx_free += n;
		kring->nr_hwavail += n;
	}

	/* update avail to what the kernel knows */
	ring->avail = kring->nr_hwavail;
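
	/*
	 * ring->avail is what userspace sees: the number of slots it
	 * may fill on the next txsync. nr_hwavail is the kernel's own
	 * count of free slots; the two are reconciled here.
	 */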

	j = kring->nr_hwcur;
	if (j != k) {	/* we have new packets to send */
		l = sc->rl_ldata.rl_tx_prodidx;
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = &ring->slot[j];
			struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[l];
			int cmd = slot->len | RL_TDESC_CMD_EOF |
				RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);
			int len = slot->len;

			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				// XXX what about prodidx ?
				return netmap_ring_reinit(kring);
			}

			if (l == lim)	/* mark end of ring */
				cmd |= RL_TDESC_CMD_EOR;

			if (slot->flags & NS_BUF_CHANGED) {
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				/* buffer has changed, unload and reload map */
				netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
				    txd[l].tx_dmamap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			slot->flags &= ~NS_REPORT;
			desc->rl_cmdstat = htole32(cmd);
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
			    txd[l].tx_dmamap, BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		sc->rl_ldata.rl_tx_prodidx = l;
		kring->nr_hwcur = k;	/* the saved ring->cur */
		ring->avail -= n;	// XXX see others
		kring->nr_hwavail = ring->avail;

		bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
		    sc->rl_ldata.rl_tx_list_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
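
		/*
		 * Finally kick the transmitter: the RealTek chips only
		 * fetch TX descriptors when prodded through the
		 * TXSTART register.
		 */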
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	}
	return 0;
}

/*
 * Reconcile kernel and user view of the receive ring.
 */
static int
re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags)
{
	struct rl_softc *sc = ifp->if_softc;
	struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, l, n, lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) ||
	    (kring->nr_kflags & NKR_PENDINTR);
	u_int k = ring->cur, resvd = ring->reserved;
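
	/*
	 * The NIC ring is scanned for new packets below only when
	 * forced (NAF_FORCE_READ), when an interrupt reported new
	 * frames (NKR_PENDINTR), or when interrupt-driven operation
	 * is disabled globally (netmap_no_pendintr).
	 */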

	if (k > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Import newly received packets into the netmap ring.
	 * j is an index in the netmap ring, l in the NIC ring.
	 *
	 * The device uses all the buffers in the ring, so we need
	 * another termination condition in addition to RL_RDESC_STAT_OWN
	 * being cleared (all buffers could have it cleared). The easiest
	 * one is to limit the number of slots reported, up to 'lim'.
	 */
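
	/*
	 * Example: with 256 descriptors, lim = 255; the scan loop below
	 * starts at n = nr_hwavail and stops at n = lim, so at most 255
	 * slots are ever reported as full and one always stays empty.
	 */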
	l = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
	j = netmap_idx_n2k(kring, l); /* the kring index */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;

		for (n = kring->nr_hwavail; n < lim; n++) {
			struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[l];
			uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
			uint32_t total_len;

			if ((rxstat & RL_RDESC_STAT_OWN) != 0)
				break;
			total_len = rxstat & sc->rl_rxlenmask;
			/* XXX subtract crc */
			total_len = (total_len < 4) ? 0 : total_len - 4;
			kring->ring->slot[j].len = total_len;
			kring->ring->slot[j].flags = slot_flags;
			/* sync was in re_newbuf() */
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
			    rxd[l].rx_dmamap, BUS_DMASYNC_POSTREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		if (n != kring->nr_hwavail) {
			sc->rl_ldata.rl_rx_prodidx = l;
			sc->rl_ifp->if_ipackets += n - kring->nr_hwavail;
			kring->nr_hwavail = n;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;	/* netmap ring index */
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
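
	/*
	 * 'reserved' slots sit immediately before ring->cur and are
	 * still held by userspace, so k is moved back by resvd
	 * (modulo the ring size) and those buffers are not yet
	 * returned to the NIC.
	 */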
	if (j != k) { /* userspace has released some packets. */
		l = netmap_idx_k2n(kring, j); /* the NIC index */
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = ring->slot + j;
			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l];
			int cmd = NETMAP_BUF_SIZE | RL_RDESC_CMD_OWN;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			if (addr == netmap_buffer_base) { /* bad buf */
				return netmap_ring_reinit(kring);
			}

			if (l == lim)	/* mark end of ring */
				cmd |= RL_RDESC_CMD_EOR;

			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
				    rxd[l].rx_dmamap, addr);
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				slot->flags &= ~NS_BUF_CHANGED;
			}
			desc->rl_cmdstat = htole32(cmd);
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
			    rxd[l].rx_dmamap, BUS_DMASYNC_PREREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;

		/* Flush the RX DMA ring */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}

	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail - resvd;
	return 0;
}

/*
 * Additional routines to init the tx and rx rings.
 * In other drivers we do that inline in the main code.
 */
static void
re_netmap_tx_init(struct rl_softc *sc)
{
	struct rl_txdesc *txd;
	struct rl_desc *desc;
	int i, n;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);

	/* slot is NULL if we are not in netmap mode */
	if (!slot)
		return;

	/* in netmap mode, overwrite addresses and maps */
	txd = sc->rl_ldata.rl_tx_desc;
	desc = sc->rl_ldata.rl_tx_list;
	n = sc->rl_ldata.rl_tx_desc_cnt;

	/* l points in the netmap ring, i points in the NIC ring */
	for (i = 0; i < n; i++) {
		uint64_t paddr;
		int l = netmap_idx_n2k(&na->tx_rings[0], i);
		void *addr = PNMB(slot + l, &paddr);

		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		netmap_load_map(sc->rl_ldata.rl_tx_mtag,
		    txd[i].tx_dmamap, addr);
	}
}

static void
re_netmap_rx_init(struct rl_softc *sc)
{
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
	uint32_t cmdstat;
	int i, n, max_avail;

	if (!slot)
		return;
	n = sc->rl_ldata.rl_rx_desc_cnt;

	/*
	 * Userspace owned hwavail packets before the reset, so those
	 * slots cannot be handed back to the NIC: only the first
	 * max_avail descriptors become NIC-owned, and one slot is
	 * always kept empty.
	 */
	max_avail = n - 1 - na->rx_rings[0].nr_hwavail;
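
	/*
	 * Example: with n = 256 descriptors and nr_hwavail = 10
	 * undelivered packets, max_avail = 245: descriptors 0..244 go
	 * back to the NIC, the rest stay with the driver/userspace.
	 */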
	for (i = 0; i < n; i++) {
		void *addr;
		uint64_t paddr;
		int l = netmap_idx_n2k(&na->rx_rings[0], i);

		addr = PNMB(slot + l, &paddr);
		netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, addr);
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, BUS_DMASYNC_PREREAD);
		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		cmdstat = NETMAP_BUF_SIZE;
		if (i == n - 1)	/* mark the end of ring */
			cmdstat |= RL_RDESC_CMD_EOR;
		if (i < max_avail)
			cmdstat |= RL_RDESC_CMD_OWN;
		desc[i].rl_cmdstat = htole32(cmdstat);
	}
}

static void
re_netmap_attach(struct rl_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->rl_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
	na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
	na.nm_txsync = re_netmap_txsync;
	na.nm_rxsync = re_netmap_rxsync;
	na.nm_register = re_netmap_reg;
	netmap_attach(&na, 1);
}
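
/*
 * re_netmap_attach() is meant to be called once from the driver's
 * attach path, and re_netmap_tx_init()/re_netmap_rx_init() from the
 * ring-initialization routines, mirroring the other netmap-enabled
 * drivers (see ixgbe_netmap.h).
 */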