2 * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * netmap support for "lem"
32 * For details on netmap support please see ixgbe_netmap.h
35 #include <net/netmap.h>
36 #include <sys/selinfo.h>
38 #include <vm/pmap.h> /* vtophys ? */
39 #include <dev/netmap/netmap_kern.h>
/*
 * lem_netmap_reg() - switch the "lem" interface into or out of netmap mode.
 *
 * 'onoff' selects the direction (nonzero = enter netmap mode).  Visible
 * steps: take the core lock, mask interrupts, mark the interface down,
 * quiesce the MSI-X taskqueues, swap if_transmit with netmap_transmit
 * (saving the stack's hook in the netmap adapter), and reprogram the
 * hardware via lem_init_locked().
 *
 * NOTE(review): the embedded line numbering in this chunk is
 * discontinuous, so the opening brace, the onoff branch structure,
 * the error path and the return statement are NOT visible here --
 * do not infer them from what is shown; consult the full source.
 */
46 lem_netmap_reg(struct ifnet *ifp, int onoff)
48 struct adapter *adapter = ifp->if_softc;
49 struct netmap_adapter *na = NA(ifp);
55 EM_CORE_LOCK(adapter);
/* mask interrupts while the rings are being reconfigured */
57 lem_disable_intr(adapter);
59 /* Tell the stack that the interface is no longer active */
60 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
62 #ifndef EM_LEGACY_IRQ // XXX do we need this ?
/* quiesce the deferred-work tasks so they cannot touch the rings */
63 taskqueue_block(adapter->tq);
64 taskqueue_drain(adapter->tq, &adapter->rxtx_task);
65 taskqueue_drain(adapter->tq, &adapter->link_task);
66 #endif /* !EM_LEGACY_IRQ */
/* enter netmap mode: intercept the transmit path */
68 ifp->if_capenable |= IFCAP_NETMAP;
70 na->if_transmit = ifp->if_transmit;	/* save the stack's tx hook */
71 ifp->if_transmit = netmap_transmit;	/* divert tx through netmap */
73 lem_init_locked(adapter);	/* reprogram the NIC for netmap rings */
74 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
/* NOTE(review): error-branch body not visible in this chunk */
80 /* return to non-netmap mode */
81 ifp->if_transmit = na->if_transmit;	/* restore the stack's tx hook */
82 ifp->if_capenable &= ~IFCAP_NETMAP;
83 lem_init_locked(adapter); /* also enable intr */
87 taskqueue_unblock(adapter->tq); // XXX do we need this ?
88 #endif /* !EM_LEGACY_IRQ */
90 EM_CORE_UNLOCK(adapter);
/*
 * lem_netmap_txsync() - reconcile kernel and user view of the tx ring.
 *
 * Two phases are visible: (1) walk the netmap slots from nr_hwcur to
 * the user's ring->cur, writing one e1000 tx descriptor per slot and
 * kicking the NIC via TDT; (2) read TDH to credit completed
 * transmissions back into nr_hwavail.  Throughout, 'j' is a netmap
 * ring index, 'l' the corresponding NIC ring index, 'k' the user's
 * cursor, 'lim' = num_slots - 1 for cheap modular wrap.
 *
 * NOTE(review): this chunk is missing interleaved lines (the 'k ='
 * snapshot of ring->cur, the bounds check that precedes the reinit
 * at line 119, several closing braces, the 'delta' declaration and
 * its sign test, and the final return) -- the visible code is not a
 * complete function body.
 */
97 * Reconcile kernel and user view of the transmit ring.
100 lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags)
102 struct adapter *adapter = ifp->if_softc;
103 struct netmap_adapter *na = NA(ifp);
104 struct netmap_kring *kring = &na->tx_rings[ring_nr];
105 struct netmap_ring *ring = kring->ring;
106 u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;
108 /* generate an interrupt approximately every half ring */
109 int report_frequency = kring->nkr_num_slots >> 1;
111 ND("%s: hwofs %d, hwcur %d hwavail %d lease %d cur %d avail %d",
113 kring->nkr_hwofs, kring->nr_hwcur, kring->nr_hwavail,
115 ring->cur, ring->avail);
116 /* take a copy of ring->cur now, and never read it again */
/* bail out (and resync) if the user handed us an inconsistent ring */
119 return netmap_ring_reinit(kring);
/* make NIC writes to the descriptor ring visible to the CPU */
121 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
122 BUS_DMASYNC_POSTREAD);
124 * Process new packets to send. j is the current index in the
125 * netmap ring, l is the corresponding index in the NIC ring.
128 if (netmap_verbose > 255)
129 RD(5, "device %s send %d->%d", ifp->if_xname, j, k);
130 if (j != k) { /* we have new packets to send */
131 l = netmap_idx_k2n(kring, j);
132 for (n = 0; j != k; n++) {
133 /* slot is the current slot in the netmap ring */
134 struct netmap_slot *slot = &ring->slot[j];
135 /* curr is the current slot in the nic ring */
136 struct e1000_tx_desc *curr = &adapter->tx_desc_base[l];
137 struct em_buffer *txbuf = &adapter->tx_buffer_area[l];
/* ask for a writeback report (RS) roughly twice per ring sweep */
138 int flags = ((slot->flags & NS_REPORT) ||
139 j == 0 || j == report_frequency) ?
140 E1000_TXD_CMD_RS : 0;
142 void *addr = PNMB(slot, &paddr);
143 u_int len = slot->len;
/* reject unbacked buffers or oversized frames */
145 if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
146 return netmap_ring_reinit(kring);
148 ND("slot %d NIC %d %s", j, l, nm_dump_buf(addr, len, 128, NULL));
150 slot->flags &= ~NS_REPORT;
/* NOTE(review): '1 ||' forces an unconditional map reload here */
151 if (1 || slot->flags & NS_BUF_CHANGED) {
152 /* buffer has changed, reload map */
153 netmap_reload_map(adapter->txtag, txbuf->map, addr);
154 curr->buffer_addr = htole64(paddr);
155 slot->flags &= ~NS_BUF_CHANGED;
156 curr->upper.data = 0;
/* NOTE(review): lvalue of this htole32() assignment (curr->lower.data)
 * is on a dropped line */
159 htole32( adapter->txd_cmd | len |
160 (E1000_TXD_CMD_EOP | flags) );
162 ND("len %d kring %d nic %d", len, j, l);
163 bus_dmamap_sync(adapter->txtag, txbuf->map,
164 BUS_DMASYNC_PREWRITE);
/* advance both indices with wraparound */
165 j = (j == lim) ? 0 : j + 1;
166 l = (l == lim) ? 0 : l + 1;
168 ND("sent %d packets from %d, TDT now %d", n, kring->nr_hwcur, l);
169 kring->nr_hwcur = k; /* the saved ring->cur */
170 kring->nr_hwavail -= n;
/* flush descriptor writes before telling the NIC about them */
172 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
173 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* kick the NIC: tail now points one past the last new descriptor */
175 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), l);
/* reclaim completed buffers when nothing was queued or ring is full */
178 if (n == 0 || kring->nr_hwavail < 1) {
181 /* record completed transmissions using TDH */
182 l = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
183 ND("tdh is now %d", l);
184 if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
186 l -= kring->nkr_num_slots;
188 delta = l - adapter->next_tx_to_clean;
190 /* some tx completed, increment hwavail. */
/* negative delta means TDH wrapped past the end of the ring */
192 delta += kring->nkr_num_slots;
193 if (netmap_verbose > 255)
194 RD(5, "%s tx recover %d bufs",
195 ifp->if_xname, delta);
196 adapter->next_tx_to_clean = l;
197 kring->nr_hwavail += delta;
200 /* update avail to what the kernel knows */
201 ring->avail = kring->nr_hwavail;
/*
 * lem_netmap_rxsync() - reconcile kernel and user view of the rx ring.
 *
 * Two phases are visible: (1) scan NIC descriptors from
 * next_rx_desc_to_check for the DD (descriptor-done) bit and publish
 * the received lengths/flags into the netmap slots; (2) walk the
 * slots the user has released (nr_hwcur up to cur - reserved), rearm
 * their NIC descriptors, and advance RDT.  'j' is a netmap ring
 * index, 'l' a NIC ring index, 'lim' = num_slots - 1.
 *
 * NOTE(review): interleaved lines are missing from this chunk (the
 * initial cur bounds check before the reinit at line 222, the rx scan
 * loop header 'for (n = 0; ; n++)', the 'break' after the DD test,
 * several closing braces, and the final return) -- the visible code
 * is not a complete function body.
 */
208 * Reconcile kernel and user view of the receive ring.
211 lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags)
213 struct adapter *adapter = ifp->if_softc;
214 struct netmap_adapter *na = NA(ifp);
215 struct netmap_kring *kring = &na->rx_rings[ring_nr];
216 struct netmap_ring *ring = kring->ring;
217 int j, l, n, lim = kring->nkr_num_slots - 1;
/* scan the NIC only on an interrupt, or when explicitly forced */
218 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
219 u_int k = ring->cur, resvd = ring->reserved;
/* bail out (and resync) if the user handed us an inconsistent ring */
222 return netmap_ring_reinit(kring);
225 /* XXX check sync modes */
226 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
227 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
230 * Import newly received packets into the netmap ring.
231 * j is an index in the netmap ring, l in the NIC ring.
233 l = adapter->next_rx_desc_to_check;
234 j = netmap_idx_n2k(kring, l);
235 ND("%s: next NIC %d kring %d (ofs %d), hwcur %d hwavail %d cur %d avail %d",
237 l, j, kring->nkr_hwofs, kring->nr_hwcur, kring->nr_hwavail,
238 ring->cur, ring->avail);
239 if (netmap_no_pendintr || force_update) {
240 uint16_t slot_flags = kring->nkr_slot_flags;
/* NOTE(review): the enclosing scan loop header is on a dropped line */
243 struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
244 uint32_t staterr = le32toh(curr->status);
/* stop at the first descriptor the NIC has not completed */
247 if ((staterr & E1000_RXD_STAT_DD) == 0)
249 len = le16toh(curr->length) - 4; // CRC
251 D("bogus pkt size at %d", j);
254 ND("\n%s", nm_dump_buf(NMB(&ring->slot[j]),
/* publish the received frame to userspace via the netmap slot */
256 ring->slot[j].len = len;
257 ring->slot[j].flags = slot_flags;
258 bus_dmamap_sync(adapter->rxtag,
259 adapter->rx_buffer_area[l].map,
260 BUS_DMASYNC_POSTREAD);
261 j = (j == lim) ? 0 : j + 1;
262 l = (l == lim) ? 0 : l + 1;
264 if (n) { /* update the state variables */
265 adapter->next_rx_desc_to_check = l;
266 kring->nr_hwavail += n;
268 kring->nr_kflags &= ~NKR_PENDINTR;
271 /* skip past packets that userspace has released */
272 j = kring->nr_hwcur; /* netmap ring index */
/* sanity-check reserved + avail before honoring the reservation */
274 if (resvd + ring->avail >= lim + 1) {
275 D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
276 ring->reserved = resvd = 0; // XXX panic...
/* back the cursor up by the reserved count, with wraparound */
278 k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
280 if (j != k) { /* userspace has released some packets. */
281 l = netmap_idx_k2n(kring, j); /* NIC ring index */
282 for (n = 0; j != k; n++) {
283 struct netmap_slot *slot = &ring->slot[j];
284 struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
285 struct em_buffer *rxbuf = &adapter->rx_buffer_area[l];
287 void *addr = PNMB(slot, &paddr);
289 if (addr == netmap_buffer_base) { /* bad buf */
290 return netmap_ring_reinit(kring);
293 if (slot->flags & NS_BUF_CHANGED) {
294 /* buffer has changed, reload map */
295 netmap_reload_map(adapter->rxtag, rxbuf->map, addr);
296 curr->buffer_addr = htole64(paddr);
297 slot->flags &= ~NS_BUF_CHANGED;
/* NOTE(review): the line clearing curr->status (rearming the
 * descriptor for the NIC) is dropped from this chunk */
301 bus_dmamap_sync(adapter->rxtag, rxbuf->map,
302 BUS_DMASYNC_PREREAD);
304 j = (j == lim) ? 0 : j + 1;
305 l = (l == lim) ? 0 : l + 1;
307 kring->nr_hwavail -= n;
/* flush rearmed descriptors before moving the tail register */
309 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
310 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
312 * IMPORTANT: we must leave one free slot in the ring,
313 * so move l back by one unit
315 l = (l == 0) ? lim : l - 1;
316 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), l);
318 /* tell userspace that there are new packets */
319 ring->avail = kring->nr_hwavail - resvd;
/*
 * lem_netmap_attach() - register this driver with the netmap subsystem.
 *
 * Fills a netmap_adapter descriptor on the stack with the interface
 * pointer, ring sizes taken from the driver's softc, and the three
 * callbacks defined above, then hands it to netmap_attach() (the
 * trailing '1' is the ring-pair count).
 *
 * NOTE(review): the function's storage class / return type line and
 * its braces are on lines dropped from this chunk.
 */
325 lem_netmap_attach(struct adapter *adapter)
327 struct netmap_adapter na;
/* zero everything so unset fields get safe defaults */
329 bzero(&na, sizeof(na));
331 na.ifp = adapter->ifp;
332 na.na_flags = NAF_BDG_MAYSLEEP;
333 na.num_tx_desc = adapter->num_tx_desc;
334 na.num_rx_desc = adapter->num_rx_desc;
335 na.nm_txsync = lem_netmap_txsync;
336 na.nm_rxsync = lem_netmap_rxsync;
337 na.nm_register = lem_netmap_reg;
338 netmap_attach(&na, 1);