/*
 * Copyright (C) 2015, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * netmap support for: ixl
 *
 * netmap support for a network driver.
 * This file contains code (static or inline functions only) used
 * by a single driver. To avoid replication of code we just #include
 * it near the beginning of the standard driver.
 * For ixl the file is imported in two places, hence the conditional
 * at the beginning.
 */

#include <net/netmap.h>
#include <sys/selinfo.h>

/*
 * Some drivers may need the following headers. Others
 * already include them by default.

#include <vm/vm.h>
#include <vm/pmap.h>

 */
#include <dev/netmap/netmap_kern.h>

int ixl_netmap_txsync(struct netmap_kring *kring, int flags);
int ixl_netmap_rxsync(struct netmap_kring *kring, int flags);

extern int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip;
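
/*
 * The two halves below are selected by NETMAP_IXL_MAIN, which is
 * presumably defined by the main driver source (the file that calls
 * ixl_netmap_attach()) before including this header; ixl_txrx.c
 * includes it without the define to get the txsync/rxsync routines.
 */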

#ifdef NETMAP_IXL_MAIN
/*
 * device-specific sysctl variables:
 *
 * ixl_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames whose length is not a multiple
 *	of 64 is slower, so using crcstrip=0 helps in benchmarks.
 *
 * ixl_rx_miss, ixl_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip;
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_crcstrip,
    CTLFLAG_RW, &ixl_crcstrip, 0, "strip CRC on rx frames");
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_rx_miss,
    CTLFLAG_RW, &ixl_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_rx_miss_bufs,
    CTLFLAG_RW, &ixl_rx_miss_bufs, 0, "potentially missed rx intr bufs");
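
/*
 * Example (a hypothetical shell session, assuming the module is
 * loaded and the sysctls above are registered under dev.netmap):
 *
 *	sysctl dev.netmap.ixl_crcstrip=1	# strip CRC on rx frames
 *	sysctl dev.netmap.ixl_rx_miss		# read the miss counter
 */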

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
ixl_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	IXL_PF_LOCK(pf);
	ixl_disable_intr(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	//set_crcstrip(&adapter->hw, onoff);
	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	ixl_init_locked(pf);	/* also enables intr */
	//set_crcstrip(&adapter->hw, onoff); // XXX why twice ?
	IXL_PF_UNLOCK(pf);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}

/*
 * The attach routine, called near the end of ixl_attach(),
 * fills the parameters for netmap_attach() and calls it.
 * It cannot fail: in the worst case (such as no memory)
 * netmap mode will be disabled and the driver will only
 * operate in standard mode.
 */
static void
ixl_netmap_attach(struct ixl_vsi *vsi)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = vsi->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	// XXX check that queues is set.
	printf("queues is %p\n", vsi->queues);
	if (vsi->queues) {
		na.num_tx_desc = vsi->queues[0].num_desc;
		na.num_rx_desc = vsi->queues[0].num_desc;
	}
	na.nm_txsync = ixl_netmap_txsync;
	na.nm_rxsync = ixl_netmap_rxsync;
	na.nm_register = ixl_netmap_reg;
	na.num_tx_rings = na.num_rx_rings = vsi->num_queues;
	netmap_attach(&na);
}

#else /* !NETMAP_IXL_MAIN, code for ixl_txrx.c */

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmissions.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
int
ixl_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;

	/* device-specific */
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ixl_queue *que = &vsi->queues[kring->ring_id];
	struct tx_ring *txr = &que->txr;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap ring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txr->buffers[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			/* device-specific */
			struct i40e_tx_desc *curr = &txr->base[nic_i];
			struct ixl_tx_buf *txbuf = &txr->buffers[nic_i];
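
			/*
			 * Ask the NIC for a completion report (RS bit)
			 * only when NS_REPORT is set, or on slot 0 and
			 * the half-ring slot, to keep the tx interrupt
			 * rate low (see report_frequency above).
			 */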
			u64 flags = (slot->flags & NS_REPORT ||
			    nic_i == 0 || nic_i == report_frequency) ?
			    ((u64)I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT) : 0;

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txr->buffers[nic_i + 1]);

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txr->dma.tag, txbuf->map, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* Fill the slot in the NIC ring. */
			curr->buffer_addr = htole64(paddr);
			curr->cmd_type_offset_bsz = htole64(
			    ((u64)len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			    flags |
			    ((u64)I40E_TX_DESC_CMD_EOP << I40E_TXD_QW1_CMD_SHIFT));

			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txr->dma.tag, txbuf->map,
			    BUS_DMASYNC_PREWRITE);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txr->dma.tag, txr->dma.map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		wr32(vsi->hw, txr->tail, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
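	/*
	 * The NIC reports consumed tx descriptors via "head writeback":
	 * the current head index is written, as a little-endian 32-bit
	 * value, into the slot just past the end of the descriptor
	 * ring, which is what the read of txr->base[que->num_desc]
	 * below picks up.
	 */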
	nic_i = LE32_TO_CPU(*(volatile __le32 *)&txr->base[que->num_desc]);
	if (nic_i != txr->next_to_clean) {
		/* some tx completed, increment avail */
		txr->next_to_clean = nic_i;
		kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
	}

	nm_txsync_finalize(kring);

	return 0;
}

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
int
ixl_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = nm_rxsync_prologue(kring);
	int force_update = (flags & NAF_FORCE_READ) ||
	    (kring->nr_kflags & NKR_PENDINTR);

	/* device-specific */
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ixl_queue *que = &vsi->queues[kring->ring_id];
	struct rx_ring *rxr = &que->rxr;

	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = ixl_crcstrip ? 0 : 4;
		uint16_t slot_flags = kring->nkr_slot_flags;

		nic_i = rxr->next_check; // or also k2n(kring->nr_hwtail)
		nm_i = netmap_idx_n2k(kring, nic_i);
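
		/*
		 * Walk the NIC ring and import completed descriptors,
		 * stopping at the first one whose DD (descriptor done)
		 * bit is still clear.
		 */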
		for (n = 0; ; n++) {
			union i40e_32byte_rx_desc *curr = &rxr->base[nic_i];
			uint64_t qword = le64toh(curr->wb.qword1.status_error_len);
			uint32_t staterr = (qword & I40E_RXD_QW1_STATUS_MASK)
			    >> I40E_RXD_QW1_STATUS_SHIFT;

			if ((staterr & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0)
				break;
			ring->slot[nm_i].len = ((qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
			    >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - crclen;
			ring->slot[nm_i].flags = slot_flags;
			bus_dmamap_sync(rxr->ptag,
			    rxr->buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		if (n) { /* update the state variables */
			if (netmap_no_pendintr && !force_update) {
				/* diagnostics */
				ixl_rx_miss++;
				ixl_rx_miss_bufs += n;
			}
			rxr->next_check = nic_i;
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Second part: skip past packets that userspace has released
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			union i40e_32byte_rx_desc *curr = &rxr->base[nic_i];
			struct ixl_rx_buf *rxbuf = &rxr->buffers[nic_i];

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				goto ring_reset;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, rxr->ptag, rxbuf->pmap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
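			/*
			 * Rewrite the descriptor in "read" format to
			 * hand the buffer back to the NIC (the first
			 * part above reads it back in "wb" format once
			 * the NIC has filled it).
			 */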
			curr->read.pkt_addr = htole64(paddr);
			curr->read.hdr_addr = 0; // XXX needed
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_PREREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move nic_i back by one unit
		 */
		nic_i = nm_prev(nic_i, lim);
		wr32(vsi->hw, rxr->tail, nic_i);
	}

	/* tell userspace that there might be new packets */
	nm_rxsync_finalize(kring);

	return 0;

ring_reset:
	return netmap_ring_reinit(kring);
}

#endif /* !NETMAP_IXL_MAIN */