/*
 * Copyright (C) 2011 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $Id: if_igb_netmap.h 10627 2012-02-23 19:37:15Z luigi $
 *
 * Netmap support for igb, partly contributed by Ahmed Kooli.
 * For details on netmap support please see ixgbe_netmap.h
 */
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>		/* required before <vm/pmap.h> */
#include <vm/pmap.h>		/* vtophys ? */
#include <dev/netmap/netmap_kern.h>
/* wrapper to export locks to the generic netmap code */
static void
igb_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT(queueid < adapter->num_queues);
	switch (what) {
	case NETMAP_CORE_LOCK:
		IGB_CORE_LOCK(adapter);
		break;
	case NETMAP_CORE_UNLOCK:
		IGB_CORE_UNLOCK(adapter);
		break;
	case NETMAP_TX_LOCK:
		IGB_TX_LOCK(&adapter->tx_rings[queueid]);
		break;
	case NETMAP_TX_UNLOCK:
		IGB_TX_UNLOCK(&adapter->tx_rings[queueid]);
		break;
	case NETMAP_RX_LOCK:
		IGB_RX_LOCK(&adapter->rx_rings[queueid]);
		break;
	case NETMAP_RX_UNLOCK:
		IGB_RX_UNLOCK(&adapter->rx_rings[queueid]);
		break;
	}
}
/*
 * register-unregister routine
 */
static int
igb_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct adapter *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (na == NULL)
		return EINVAL;	/* no netmap support here */

	igb_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;
		/* save the old if_transmit and divert packets to netmap */
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_start;
		igb_init_locked(adapter);
		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			error = ENOMEM;
			goto fail;
		}
	} else {
fail:
		/* restore if_transmit */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		igb_init_locked(adapter);	/* also enable intr */
	}
	return (error);
}
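/*
 * Illustrative sketch (not part of the driver): igb_netmap_reg() runs when a
 * userspace process puts an interface (e.g. "igb0") in netmap mode.
 * Assuming the 2011-era userspace API from <net/netmap_user.h> (struct
 * nmreq, NIOCREGIF, NETMAP_IF), the user-side sequence looks roughly like:
 *
 *	int fd = open("/dev/netmap", O_RDWR);
 *	struct nmreq req;
 *	bzero(&req, sizeof(req));
 *	strncpy(req.nr_name, "igb0", sizeof(req.nr_name));
 *	req.nr_version = NETMAP_API;
 *	ioctl(fd, NIOCREGIF, &req);	// ends up in igb_netmap_reg(ifp, 1)
 *	void *mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *
 * Closing fd eventually restores normal operation through
 * igb_netmap_reg(ifp, 0).
 */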
/*
 * Reconcile kernel and user view of the transmit ring.
 */
static int
igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = &adapter->tx_rings[ring_nr];
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;

	/* generate an interrupt approximately every half ring */
	u_int report_frequency = kring->nkr_num_slots >> 1;

	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		IGB_TX_LOCK(txr);
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* check for new packets to send.
	 * j indexes the netmap ring, l indexes the nic ring, and
	 *	j = kring->nr_hwcur, l = E1000_TDT (not tracked),
	 *	j == (l + kring->nkr_hwofs) % ring_size
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* we have new packets to send */
		/* 82575 needs the queue index added */
		u32 olinfo_status =
		    (adapter->hw.mac.type == e1000_82575) ? (txr->me << 4) : 0;

		l = netmap_idx_k2n(kring, j);
		for (n = 0; j != k; n++) {
			/* slot is the current slot in the netmap ring */
			struct netmap_slot *slot = &ring->slot[j];
			/* curr is the current slot in the nic ring */
			union e1000_adv_tx_desc *curr =
			    (union e1000_adv_tx_desc *)&txr->tx_base[l];
			struct igb_tx_buffer *txbuf = &txr->tx_buffers[l];
			int flags = ((slot->flags & NS_REPORT) ||
				j == 0 || j == report_frequency) ?
					E1000_ADVTXD_DCMD_RS : 0;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);
			u_int len = slot->len;

			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				if (do_lock)
					IGB_TX_UNLOCK(txr);
				return netmap_ring_reinit(kring);
			}

			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(txr->txtag, txbuf->map, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			curr->read.buffer_addr = htole64(paddr);
			// XXX check olinfo and cmd_type_len
			curr->read.olinfo_status =
			    htole32(olinfo_status |
				(len << E1000_ADVTXD_PAYLEN_SHIFT));
			curr->read.cmd_type_len =
			    htole32(len | E1000_ADVTXD_DTYP_DATA |
				    E1000_ADVTXD_DCMD_IFCS |
				    E1000_ADVTXD_DCMD_DEXT |
				    E1000_ADVTXD_DCMD_EOP | flags);

			bus_dmamap_sync(txr->txtag, txbuf->map,
				BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwcur = k; /* the saved ring->cur */
		kring->nr_hwavail -= n;

		/* Set the watchdog XXX ? */
		txr->queue_status = IGB_QUEUE_WORKING;
		txr->watchdog_time = ticks;

		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* tell the NIC about the new descriptors */
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), l);
	}

	if (n == 0 || kring->nr_hwavail < 1) {
		int delta;

		/* record completed transmissions using TDH */
		l = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr));
		if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
			D("TDH wrap %d", l);
			l -= kring->nkr_num_slots;
		}
		delta = l - txr->next_to_clean;
		if (delta) {
			/* some completed, increment hwavail. */
			if (delta < 0)
				delta += kring->nkr_num_slots;
			txr->next_to_clean = l;
			kring->nr_hwavail += delta;
		}
	}
	/* update avail to what the kernel knows */
	ring->avail = kring->nr_hwavail;

	if (do_lock)
		IGB_TX_UNLOCK(txr);
	return 0;
}
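/*
 * Illustrative sketch (not part of the driver): the user-side counterpart of
 * the txsync above.  Assuming the same 2011-era <net/netmap_user.h> macros
 * (NETMAP_TXRING, NETMAP_BUF, NETMAP_RING_NEXT) and the nifp obtained at
 * NIOCREGIF time, a sender fills free slots and then asks the kernel to
 * reconcile the two views:
 *
 *	struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);
 *	while (txring->avail > 0 && have_more_frames()) {
 *		struct netmap_slot *slot = &txring->slot[txring->cur];
 *		char *buf = NETMAP_BUF(txring, slot->buf_idx);
 *		slot->len = build_frame(buf);	// hypothetical helpers
 *		txring->cur = NETMAP_RING_NEXT(txring, txring->cur);
 *		txring->avail--;
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);	// or poll(POLLOUT); reaches igb_netmap_txsync()
 *
 * have_more_frames() and build_frame() stand for application code; on
 * return, ring->avail reflects nr_hwavail as computed above.
 */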
/*
 * Reconcile kernel and user view of the receive ring.
 */
static int
igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, l, n, lim = kring->nkr_num_slots - 1;
	int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
	u_int k = ring->cur, resvd = ring->reserved;

	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		IGB_RX_LOCK(rxr);

	/* XXX check sync modes */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * import newly received packets into the netmap ring.
	 * j is an index in the netmap ring, l in the NIC ring.
	 */
	l = rxr->next_to_check;
	j = netmap_idx_n2k(kring, l);
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;

		for (n = 0; ; n++) {
			union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
			uint32_t staterr = le32toh(curr->wb.upper.status_error);

			if ((staterr & E1000_RXD_STAT_DD) == 0)
				break;
			ring->slot[j].len = le16toh(curr->wb.upper.length);
			ring->slot[j].flags = slot_flags;
			bus_dmamap_sync(rxr->ptag,
				rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		if (n) { /* update the state variables */
			rxr->next_to_check = l;
			kring->nr_hwavail += n;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;	/* netmap ring index */
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) { /* userspace has released some packets. */
		l = netmap_idx_k2n(kring, j);
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = ring->slot + j;
			union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
			struct igb_rx_buf *rxbuf = rxr->rx_buffers + l;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					IGB_RX_UNLOCK(rxr);
				return netmap_ring_reinit(kring);
			}

			if (slot->flags & NS_BUF_CHANGED) {
				netmap_reload_map(rxr->ptag, rxbuf->pmap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			curr->read.pkt_addr = htole64(paddr);
			curr->wb.upper.status_error = 0;
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				BUS_DMASYNC_PREREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwcur = k;
		kring->nr_hwavail -= n;
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move l back by one unit
		 */
		l = (l == 0) ? lim : l - 1;
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), l);
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail - resvd;

	if (do_lock)
		IGB_RX_UNLOCK(rxr);
	return 0;
}
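/*
 * Illustrative sketch (not part of the driver): the matching user-side RX
 * loop, under the same assumptions as the TX example above.  After a
 * poll(POLLIN) or ioctl(fd, NIOCRXSYNC, NULL), the slots from cur onward
 * hold the avail frames imported by igb_netmap_rxsync():
 *
 *	struct netmap_ring *rxring = NETMAP_RXRING(nifp, 0);
 *	while (rxring->avail > 0) {
 *		struct netmap_slot *slot = &rxring->slot[rxring->cur];
 *		consume_frame(NETMAP_BUF(rxring, slot->buf_idx), slot->len);
 *		rxring->cur = NETMAP_RING_NEXT(rxring, rxring->cur);
 *		rxring->avail--;
 *	}
 *
 * consume_frame() stands for a hypothetical application callback; releasing
 * slots this way is what the "userspace has released" branch above recycles.
 */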
/*
 * The attach routine, called near the end of igb_attach(),
 * fills the parameters for netmap_attach() and calls it.
 */
static void
igb_netmap_attach(struct adapter *adapter)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = adapter->ifp;
	na.separate_locks = 1;
	na.num_tx_desc = adapter->num_tx_desc;
	na.num_rx_desc = adapter->num_rx_desc;
	na.nm_txsync = igb_netmap_txsync;
	na.nm_rxsync = igb_netmap_rxsync;
	na.nm_lock = igb_netmap_lock_wrapper;
	na.nm_register = igb_netmap_reg;
	netmap_attach(&na, adapter->num_queues);
}
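/*
 * Illustrative sketch (not part of this file): how the igb driver proper is
 * expected to hook into the routines above.  The exact placement in
 * if_igb.c may differ; this only shows the intended call points.
 *
 *	#ifdef DEV_NETMAP
 *	#include <dev/netmap/if_igb_netmap.h>
 *	#endif
 *
 *	// near the end of igb_attach(), once the rings are sized:
 *	#ifdef DEV_NETMAP
 *	igb_netmap_attach(adapter);
 *	#endif
 *
 *	// in igb_detach(), before the rings are freed:
 *	#ifdef DEV_NETMAP
 *	netmap_detach(adapter->ifp);
 *	#endif
 */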