/*
 * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * netmap support for em.
 *
 * For more details on netmap support please see ixgbe_netmap.h
 */
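
/*
 * Overview (a summary of what follows; see ixgbe_netmap.h for the full
 * discussion): em_netmap_attach() registers the callbacks in this file
 * with the netmap core. em_netmap_reg() switches the NIC between regular
 * and netmap mode, while em_netmap_txsync() and em_netmap_rxsync()
 * reconcile the kernel and userspace views of the TX/RX rings on each
 * sync request.
 */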

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>		/* needed by pmap.h */
#include <vm/pmap.h>		/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

static void	em_netmap_block_tasks(struct adapter *);
static void	em_netmap_unblock_tasks(struct adapter *);

static void
em_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT(queueid < adapter->num_queues);
	switch (what) {
	case NETMAP_CORE_LOCK:
		EM_CORE_LOCK(adapter);
		break;
	case NETMAP_CORE_UNLOCK:
		EM_CORE_UNLOCK(adapter);
		break;
	case NETMAP_TX_LOCK:
		EM_TX_LOCK(&adapter->tx_rings[queueid]);
		break;
	case NETMAP_TX_UNLOCK:
		EM_TX_UNLOCK(&adapter->tx_rings[queueid]);
		break;
	case NETMAP_RX_LOCK:
		EM_RX_LOCK(&adapter->rx_rings[queueid]);
		break;
	case NETMAP_RX_UNLOCK:
		EM_RX_UNLOCK(&adapter->rx_rings[queueid]);
		break;
	}
}
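
/*
 * Note: the netmap core invokes the nm_lock callback above to acquire and
 * release the driver's locks on its behalf; with na.separate_locks set
 * (see em_netmap_attach() below) it uses the per-ring TX/RX locks rather
 * than the core lock, so syncs on different rings can run concurrently.
 */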

// XXX do we need to block/unblock the tasks ?
static void
em_netmap_block_tasks(struct adapter *adapter)
{
	if (adapter->msix > 1) { /* MSIX */
		int i;
		struct tx_ring *txr = adapter->tx_rings;
		struct rx_ring *rxr = adapter->rx_rings;

		for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
			taskqueue_block(txr->tq);
			taskqueue_drain(txr->tq, &txr->tx_task);
			taskqueue_block(rxr->tq);
			taskqueue_drain(rxr->tq, &rxr->rx_task);
		}
	} else {	/* legacy */
		taskqueue_block(adapter->tq);
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->que_task);
	}
}
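
/*
 * Blocking and draining the taskqueues ensures that no deferred interrupt
 * handler is still touching the rings while em_netmap_reg() switches the
 * interface between regular and netmap mode (a plausible answer to the
 * XXX above).
 */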

static void
em_netmap_unblock_tasks(struct adapter *adapter)
{
	if (adapter->msix > 1) {
		struct tx_ring *txr = adapter->tx_rings;
		struct rx_ring *rxr = adapter->rx_rings;
		int i;

		/* advance txr/rxr so every queue is unblocked, not just the first */
		for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
			taskqueue_unblock(txr->tq);
			taskqueue_unblock(rxr->tq);
		}
	} else { /* legacy */
		taskqueue_unblock(adapter->tq);
	}
}

/*
 * Register/unregister routine
 */
static int
em_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct adapter *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (na == NULL)
		return EINVAL;	/* no netmap support here */

	em_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	em_netmap_block_tasks(adapter);

	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save the old handler, divert host-stack packets to netmap */
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_start;

		em_init_locked(adapter);
		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			error = ENOMEM;
			goto fail;
		}
	} else {
fail:
		/* return to non-netmap mode */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		em_init_locked(adapter);	/* also enable intr */
	}
	em_netmap_unblock_tasks(adapter);
	return (error);
}
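
/*
 * Conventions used in the two sync routines below (as in the other netmap
 * drivers): j is an index in the netmap ring, k is ring->cur (how far
 * userspace has advanced), l is the matching index in the NIC ring.
 * kring->nr_hwcur tracks the first slot not yet passed to the NIC, and
 * kring->nr_hwavail counts the slots the kernel knows to be available.
 */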

/*
 * Reconcile kernel and user view of the transmit ring.
 */
static int
em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = &adapter->tx_rings[ring_nr];
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;

	/* generate an interrupt approximately every half ring */
	u_int report_frequency = kring->nkr_num_slots >> 1;
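
	/*
	 * (Requesting a report (E1000_TXD_CMD_RS) only on roughly every
	 * half ring, plus on slots with NS_REPORT set, limits completion
	 * interrupts while still letting transmissions be reclaimed
	 * regularly in the block at the end of this function.)
	 */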

	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		EM_TX_LOCK(txr);
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * Process new packets to send. j is the current index in the
	 * netmap ring, l is the corresponding index in the NIC ring.
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* we have new packets to send */
		l = netmap_idx_k2n(kring, j);
		for (n = 0; j != k; n++) {
			/* slot is the current slot in the netmap ring */
			struct netmap_slot *slot = &ring->slot[j];
			/* curr is the current slot in the nic ring */
			struct e1000_tx_desc *curr = &txr->tx_base[l];
			struct em_buffer *txbuf = &txr->tx_buffers[l];
			int flags = ((slot->flags & NS_REPORT) ||
				j == 0 || j == report_frequency) ?
					E1000_TXD_CMD_RS : 0;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);
			u_int len = slot->len;

			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				if (do_lock)
					EM_TX_UNLOCK(txr);
				return netmap_ring_reinit(kring);
			}

			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				curr->buffer_addr = htole64(paddr);
				/* buffer has changed, reload map */
				netmap_reload_map(txr->txtag, txbuf->map, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			curr->upper.data = 0;
			curr->lower.data = htole32(adapter->txd_cmd | len |
				(E1000_TXD_CMD_EOP | flags) );
			bus_dmamap_sync(txr->txtag, txbuf->map,
				BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwcur = k; /* the saved ring->cur */
		kring->nr_hwavail -= n;

		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), l);
	}

	if (n == 0 || kring->nr_hwavail < 1) {
		int delta;

		/* record completed transmissions using TDH */
		l = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr));
		if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
			D("TDH wrap %d", l);
			l -= kring->nkr_num_slots;
		}
		delta = l - txr->next_to_clean;
		if (delta) {
			/* some completed, increment hwavail. */
			if (delta < 0)
				delta += kring->nkr_num_slots;
			txr->next_to_clean = l;
			kring->nr_hwavail += delta;
		}
	}
	/* update avail to what the kernel knows */
	ring->avail = kring->nr_hwavail;

	if (do_lock)
		EM_TX_UNLOCK(txr);
	return 0;
}

/*
 * Reconcile kernel and user view of the receive ring.
 */
static int
em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, l, n, lim = kring->nkr_num_slots - 1;
	int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
	u_int k = ring->cur, resvd = ring->reserved;

	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		EM_RX_LOCK(rxr);

	/* XXX check sync modes */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Import newly received packets into the netmap ring.
	 * j is an index in the netmap ring, l in the NIC ring.
	 */
	l = rxr->next_to_check;
	j = netmap_idx_n2k(kring, l);
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;

		for (n = 0; ; n++) {
			struct e1000_rx_desc *curr = &rxr->rx_base[l];
			uint32_t staterr = le32toh(curr->status);

			if ((staterr & E1000_RXD_STAT_DD) == 0)
				break;
			ring->slot[j].len = le16toh(curr->length);
			ring->slot[j].flags = slot_flags;
			bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[l].map,
				BUS_DMASYNC_POSTREAD);
			j = (j == lim) ? 0 : j + 1;
			/* make sure next_to_refresh follows next_to_check */
			rxr->next_to_refresh = l;	// XXX
			l = (l == lim) ? 0 : l + 1;
		}
		if (n) { /* update the state variables */
			rxr->next_to_check = l;
			kring->nr_hwavail += n;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
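
	/*
	 * (ring->reserved counts slots that userspace has already read but
	 * still holds on to; they cannot be recycled yet, so the release
	 * point k is moved back by resvd below.)
	 */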
	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;	/* netmap ring index */
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) { /* userspace has released some packets. */
		l = netmap_idx_k2n(kring, j); /* NIC ring index */
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = &ring->slot[j];
			struct e1000_rx_desc *curr = &rxr->rx_base[l];
			struct em_buffer *rxbuf = &rxr->rx_buffers[l];
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					EM_RX_UNLOCK(rxr);
				return netmap_ring_reinit(kring);
			}

			if (slot->flags & NS_BUF_CHANGED) {
				curr->buffer_addr = htole64(paddr);
				/* buffer has changed, reload map */
				netmap_reload_map(rxr->rxtag, rxbuf->map, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			/* clear the status so the NIC can reuse the descriptor */
			curr->status = 0;
			bus_dmamap_sync(rxr->rxtag, rxbuf->map,
			    BUS_DMASYNC_PREREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move l back by one unit (if RDT were to reach RDH
		 * the NIC could not tell a full ring from an empty one).
		 */
		l = (l == 0) ? lim : l - 1;
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), l);
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail - resvd;

	if (do_lock)
		EM_RX_UNLOCK(rxr);
	return 0;
}
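
/*
 * The attach routine, presumably invoked from the driver attach path in
 * if_em.c (under #ifdef DEV_NETMAP), fills a struct netmap_adapter with
 * the ring geometry and the callbacks above and hands it to the netmap
 * core.
 */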
static void
em_netmap_attach(struct adapter *adapter)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = adapter->ifp;
	na.separate_locks = 1;
	na.num_tx_desc = adapter->num_tx_desc;
	na.num_rx_desc = adapter->num_rx_desc;
	na.nm_txsync = em_netmap_txsync;
	na.nm_rxsync = em_netmap_rxsync;
	na.nm_lock = em_netmap_lock_wrapper;
	na.nm_register = em_netmap_reg;
	netmap_attach(&na, adapter->num_queues);
}
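
/*
 * For reference, a minimal sketch of how a userspace client ends up
 * calling into the routines above (illustrative only, not part of this
 * driver; it assumes the netmap API of this era: struct nmreq, the
 * NIOCREGIF/NIOCTXSYNC ioctls and the NETMAP_IF/NETMAP_TXRING macros):
 *
 *	fd = open("/dev/netmap", O_RDWR);
 *	bzero(&req, sizeof(req));
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);	// reaches em_netmap_reg(ifp, 1)
 *	mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	nifp = NETMAP_IF(mem, req.nr_offset);
 *	ring = NETMAP_TXRING(nifp, 0);
 *	// fill slots, advance ring->cur, decrement ring->avail, then:
 *	ioctl(fd, NIOCTXSYNC, NULL);	// reaches em_netmap_txsync()
 */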