/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * netmap support for: ixgbe (both ix and ixv)
 *
 * This file is meant to be a reference on how to implement
 * netmap support for a network driver.
 * This file contains code, but only static or inline functions used
 * by a single driver. To avoid replication of code we just #include
 * it near the beginning of the standard driver.
 */
#include <net/netmap.h>
#include <sys/selinfo.h>
/*
 * Some drivers may need the following headers. Others
 * already include them by default

#include <vm/vm.h>
#include <vm/pmap.h>

 */
#include <dev/netmap/netmap_kern.h>

void ixgbe_netmap_attach(struct adapter *adapter);

/*
 * device-specific sysctl variables:
 *
 * ix_crcstrip: 0: NIC keeps the CRC in rx frames (default), 1: NIC strips it.
 *     During regular operations the CRC is stripped, but on some
 *     hardware reception of frames whose length is not a multiple of 64
 *     is slower, so using crcstrip=0 helps in benchmarks.
 *
 * ix_rx_miss, ix_rx_miss_bufs:
 *     count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
static int ix_rx_miss, ix_rx_miss_bufs;
int ix_crcstrip;
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip,
    CTLFLAG_RW, &ix_crcstrip, 0, "NIC strips CRC on rx frames");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss,
    CTLFLAG_RW, &ix_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss_bufs,
    CTLFLAG_RW, &ix_rx_miss_bufs, 0, "potentially missed rx intr bufs");
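
/*
 * These knobs hang off the dev.netmap sysctl tree (SYSCTL_DECL(_dev_netmap)
 * with OID_AUTO), so they can typically be inspected or changed at runtime,
 * e.g.:
 *
 *     sysctl dev.netmap.ix_crcstrip=0    (keep the CRC, faster rx)
 *     sysctl dev.netmap.ix_rx_miss dev.netmap.ix_rx_miss_bufs
 */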

static void
set_crcstrip(struct ixgbe_hw *hw, int onoff)
{
        /* crc stripping is set in two places:
         * IXGBE_HLREG0 (modified on init_locked and hw reset)
         * IXGBE_RDRXCTL (set by the original driver in
         *      ixgbe_setup_hw_rsc() called in init_locked.
         *      We disable the setting when netmap is compiled in).
         * We update the values here, but also in ixgbe.c because
         * init_locked sometimes is called outside our control.
         */
        u32 hl, rxc;

        hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
        if (netmap_verbose)
                D("%s read HLREG 0x%x rxc 0x%x",
                        onoff ? "enter" : "exit", hl, rxc);
        /* hw requirements ... */
        rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
        rxc |= IXGBE_RDRXCTL_RSCACKC;
        if (onoff && !ix_crcstrip) {
                /* keep the crc. Fast rx */
                hl &= ~IXGBE_HLREG0_RXCRCSTRP;
                rxc &= ~IXGBE_RDRXCTL_CRCSTRIP;
        } else {
                /* reset default mode */
                hl |= IXGBE_HLREG0_RXCRCSTRP;
                rxc |= IXGBE_RDRXCTL_CRCSTRIP;
        }
        if (netmap_verbose)
                D("%s write HLREG 0x%x rxc 0x%x",
                        onoff ? "enter" : "exit", hl, rxc);
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
        IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
}

static void
ixgbe_netmap_intr(struct netmap_adapter *na, int onoff)
{
        struct ifnet *ifp = na->ifp;
        struct adapter *adapter = ifp->if_softc;

        IXGBE_CORE_LOCK(adapter);
        if (onoff) {
                ixgbe_enable_intr(adapter); // XXX maybe ixgbe_stop ?
        } else {
                ixgbe_disable_intr(adapter); // XXX maybe ixgbe_stop ?
        }
        IXGBE_CORE_UNLOCK(adapter);
}

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
ixgbe_netmap_reg(struct netmap_adapter *na, int onoff)
{
        struct ifnet *ifp = na->ifp;
        struct adapter *adapter = ifp->if_softc;

        IXGBE_CORE_LOCK(adapter);
        adapter->stop_locked(adapter);

        if (!IXGBE_IS_VF(adapter))
                set_crcstrip(&adapter->hw, onoff);
        /* enable or disable flags and callbacks in na and ifp */
        if (onoff) {
                nm_set_native_flags(na);
        } else {
                nm_clear_native_flags(na);
        }
        adapter->init_locked(adapter);  /* also enables intr */
        if (!IXGBE_IS_VF(adapter))
                set_crcstrip(&adapter->hw, onoff); // XXX why twice ?
        IXGBE_CORE_UNLOCK(adapter);
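        /* return 0 (success) only if the interface is now up and running */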
        return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}


/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * the kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmissions.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
ixgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct netmap_ring *ring = kring->ring;
        u_int nm_i;     /* index into the netmap ring */
        u_int nic_i;    /* index into the NIC ring */
        u_int n;
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = kring->rhead;
        /*
         * interrupts on every tx packet are expensive so request
         * them every half ring, or where NS_REPORT is set
         */
        u_int report_frequency = kring->nkr_num_slots >> 1;
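        /*
         * Example (hypothetical numbers): on a 1024-slot ring the RS bit is
         * requested on slots 0 and 512, plus any slot carrying NS_REPORT, so
         * only a couple of tx completion interrupts are raised per ring sweep.
         */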

        /* device-specific */
        struct adapter *adapter = ifp->if_softc;
        struct tx_ring *txr = &adapter->tx_rings[kring->ring_id];
        int reclaim_tx;

        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
                        BUS_DMASYNC_POSTREAD);

        /*
         * First part: process new packets to send.
         * nm_i is the current index in the netmap ring,
         * nic_i is the corresponding index in the NIC ring.
         * The two numbers differ because upon a *_init() we reset
         * the NIC ring but leave the netmap ring unchanged.
         * For the transmit ring, we have
         *
         *        nm_i = kring->nr_hwcur
         *        nic_i = IXGBE_TDT (not tracked in the driver)
         * and
         *        nm_i == (nic_i + kring->nkr_hwofs) % ring_size
         *
         * In this driver kring->nkr_hwofs >= 0, but for other
         * drivers it might be negative as well.
         *
         * If we have packets to send (kring->nr_hwcur != kring->rhead)
         * iterate over the netmap ring, fetch length and update
         * the corresponding slot in the NIC ring. Some drivers also
         * need to update the buffer's physical address in the NIC slot
         * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
         *
         * The netmap_reload_map() call is especially expensive,
         * even when (as in this case) the tag is 0, so only do it
         * when the buffer has actually changed.
         *
         * If possible do not set the report/intr bit on all slots,
         * but only a few times per ring or when NS_REPORT is set.
         *
         * Finally, on 10G and faster drivers, it might be useful
         * to prefetch the next slot and txr entry.
         */
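        /*
         * Worked example of the index mapping above (hypothetical numbers):
         * with ring_size == 512 and kring->nkr_hwofs == 3, NIC slot 509
         * corresponds to netmap slot (509 + 3) % 512 == 0.
         */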

        nm_i = kring->nr_hwcur;
        if (nm_i != head) {     /* we have new packets to send */
                nic_i = netmap_idx_k2n(kring, nm_i);

                __builtin_prefetch(&ring->slot[nm_i]);
                __builtin_prefetch(&txr->tx_buffers[nic_i]);

                for (n = 0; nm_i != head; n++) {
                        struct netmap_slot *slot = &ring->slot[nm_i];
                        u_int len = slot->len;
                        uint64_t paddr;
                        void *addr = PNMB(na, slot, &paddr);

                        /* device-specific */
                        union ixgbe_adv_tx_desc *curr = &txr->tx_base[nic_i];
                        struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[nic_i];
                        int flags = (slot->flags & NS_REPORT ||
                                nic_i == 0 || nic_i == report_frequency) ?
                                IXGBE_TXD_CMD_RS : 0;

                        /* prefetch for next round */
                        __builtin_prefetch(&ring->slot[nm_i + 1]);
                        __builtin_prefetch(&txr->tx_buffers[nic_i + 1]);

                        NM_CHECK_ADDR_LEN(na, addr, len);

                        if (slot->flags & NS_BUF_CHANGED) {
                                /* buffer has changed, reload map */
                                netmap_reload_map(na, txr->txtag, txbuf->map, addr);
                        }
                        slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

                        /* Fill the slot in the NIC ring. */
                        /* Use legacy descriptors, they are faster? */
                        curr->read.buffer_addr = htole64(paddr);
                        curr->read.olinfo_status = 0;
                        curr->read.cmd_type_len = htole32(len | flags |
                                IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP);

                        /* make sure changes to the buffer are synced */
                        bus_dmamap_sync(txr->txtag, txbuf->map,
                                BUS_DMASYNC_PREWRITE);

                        nm_i = nm_next(nm_i, lim);
                        nic_i = nm_next(nic_i, lim);
                }
                kring->nr_hwcur = head;

                /* synchronize the NIC ring */
                bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
                        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                /* (re)start the tx unit up to slot nic_i (excluded) */
                IXGBE_WRITE_REG(&adapter->hw, txr->tail, nic_i);
        }

        /*
         * Second part: reclaim buffers for completed transmissions.
         * Because this is expensive (we read a NIC register, etc.)
         * we only do it in specific cases (see below).
         */
        if (flags & NAF_FORCE_RECLAIM) {
                reclaim_tx = 1; /* forced reclaim */
        } else if (!nm_kr_txempty(kring)) {
                reclaim_tx = 0; /* have buffers, no reclaim */
        } else {
                /*
                 * No buffers available. Locate the previous slot with
                 * REPORT_STATUS set.
                 * If the slot has DD set, we can reclaim space,
                 * otherwise wait for the next interrupt.
                 * This enables interrupt moderation on the tx
                 * side though it might reduce throughput.
                 */
                struct ixgbe_legacy_tx_desc *txd =
                    (struct ixgbe_legacy_tx_desc *)txr->tx_base;

                nic_i = txr->next_to_clean + report_frequency;
                if (nic_i > lim)
                        nic_i -= lim + 1;
                // round to the closest with dd set
                nic_i = (nic_i < kring->nkr_num_slots / 4 ||
                         nic_i >= kring->nkr_num_slots*3/4) ?
                        0 : report_frequency;
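                /*
                 * Normally only slots 0 and report_frequency (plus any slot
                 * with NS_REPORT) had the RS bit requested in the first part
                 * above, so those are the only descriptors whose DD bit is
                 * worth probing here.
                 */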
                reclaim_tx = txd[nic_i].upper.fields.status & IXGBE_TXD_STAT_DD;        // XXX cpu_to_le32 ?
        }
        if (reclaim_tx) {
                /*
                 * Record completed transmissions.
                 * We (re)use the driver's txr->next_to_clean to keep
                 * track of the most recently completed transmission.
                 *
                 * The datasheet discourages the use of TDH to find
                 * out the number of sent packets, but we only set
                 * REPORT_STATUS in a few slots so TDH is the only
                 * good way.
                 */
                nic_i = IXGBE_READ_REG(&adapter->hw, IXGBE_IS_VF(adapter) ?
                                IXGBE_VFTDH(kring->ring_id) : IXGBE_TDH(kring->ring_id));
                if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */
                        D("TDH wrap %d", nic_i);
                        nic_i -= kring->nkr_num_slots;
                }
                if (nic_i != txr->next_to_clean) {
                        /* some tx completed, increment avail */
                        txr->next_to_clean = nic_i;
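                        /*
                         * TDH points at the next descriptor the NIC will
                         * process, so the new hwtail is the netmap slot just
                         * before the one corresponding to TDH.
                         */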
                        kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
                }
        }

        return (0);
}


/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
ixgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
        struct netmap_adapter *na = kring->na;
        struct ifnet *ifp = na->ifp;
        struct netmap_ring *ring = kring->ring;
        u_int nm_i;     /* index into the netmap ring */
        u_int nic_i;    /* index into the NIC ring */
        u_int n;
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = kring->rhead;
        int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

        /* device-specific */
        struct adapter *adapter = ifp->if_softc;
        struct rx_ring *rxr = &adapter->rx_rings[kring->ring_id];

        if (head > lim)
                return netmap_ring_reinit(kring);

        /* XXX check sync modes */
        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
                        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        /*
         * First part: import newly received packets.
         *
         * nm_i is the index of the next free slot in the netmap ring,
         * nic_i is the index of the next received packet in the NIC ring,
         * and they may differ in case if_init() has been called while
         * in netmap mode. For the receive ring we have
         *
         *        nic_i = rxr->next_to_check;
         *        nm_i = kring->nr_hwtail (previous)
         * and
         *        nm_i == (nic_i + kring->nkr_hwofs) % ring_size
         *
         * rxr->next_to_check is set to 0 on a ring reinit
         */
        if (netmap_no_pendintr || force_update) {
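                /*
                 * When the NIC is not stripping the Ethernet CRC (and this
                 * is not a VF, whose rx path behaves as if stripping were
                 * always on), the 4 CRC bytes are still counted in the
                 * descriptor length, so subtract them before reporting the
                 * slot length to userspace.
                 */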
                int crclen = (ix_crcstrip || IXGBE_IS_VF(adapter)) ? 0 : 4;
                uint16_t slot_flags = kring->nkr_slot_flags;

                nic_i = rxr->next_to_check; // or also k2n(kring->nr_hwtail)
                nm_i = netmap_idx_n2k(kring, nic_i);

                for (n = 0; ; n++) {
                        union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
                        uint32_t staterr = le32toh(curr->wb.upper.status_error);

                        if ((staterr & IXGBE_RXD_STAT_DD) == 0)
                                break;
                        ring->slot[nm_i].len = le16toh(curr->wb.upper.length) - crclen;
                        ring->slot[nm_i].flags = slot_flags;
                        bus_dmamap_sync(rxr->ptag,
                            rxr->rx_buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
                        nm_i = nm_next(nm_i, lim);
                        nic_i = nm_next(nic_i, lim);
                }
                if (n) { /* update the state variables */
                        if (netmap_no_pendintr && !force_update) {
                                /* diagnostics */
                                ix_rx_miss ++;
                                ix_rx_miss_bufs += n;
                        }
                        rxr->next_to_check = nic_i;
                        kring->nr_hwtail = nm_i;
                }
                kring->nr_kflags &= ~NKR_PENDINTR;
        }

        /*
         * Second part: skip past packets that userspace has released
         * (kring->nr_hwcur to kring->rhead excluded),
         * and make the buffers available for reception.
         * As usual nm_i is the index in the netmap ring,
         * nic_i is the index in the NIC ring, and
         * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
         */
        nm_i = kring->nr_hwcur;
        if (nm_i != head) {
                nic_i = netmap_idx_k2n(kring, nm_i);
                for (n = 0; nm_i != head; n++) {
                        struct netmap_slot *slot = &ring->slot[nm_i];
                        uint64_t paddr;
                        void *addr = PNMB(na, slot, &paddr);

                        union ixgbe_adv_rx_desc *curr = &rxr->rx_base[nic_i];
                        struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[nic_i];

                        if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
                                goto ring_reset;

                        if (slot->flags & NS_BUF_CHANGED) {
                                /* buffer has changed, reload map */
                                netmap_reload_map(na, rxr->ptag, rxbuf->pmap, addr);
                                slot->flags &= ~NS_BUF_CHANGED;
                        }
                        curr->wb.upper.status_error = 0;
                        curr->read.pkt_addr = htole64(paddr);
                        bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                            BUS_DMASYNC_PREREAD);
                        nm_i = nm_next(nm_i, lim);
                        nic_i = nm_next(nic_i, lim);
                }
                kring->nr_hwcur = head;

                bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                /*
                 * IMPORTANT: we must leave one free slot in the ring,
                 * so move nic_i back by one unit
                 */
                nic_i = nm_prev(nic_i, lim);
                IXGBE_WRITE_REG(&adapter->hw, rxr->tail, nic_i);
        }

        return 0;

ring_reset:
        return netmap_ring_reinit(kring);
}


/*
 * The attach routine, called near the end of ixgbe_attach(),
 * fills the parameters for netmap_attach() and calls it.
 * It cannot fail; in the worst case (such as no memory)
 * netmap mode will be disabled and the driver will only
 * operate in standard mode.
 */
void
ixgbe_netmap_attach(struct adapter *adapter)
{
        struct netmap_adapter na;

        bzero(&na, sizeof(na));

        na.ifp = adapter->ifp;
        na.na_flags = NAF_BDG_MAYSLEEP;
        na.num_tx_desc = adapter->num_tx_desc;
        na.num_rx_desc = adapter->num_rx_desc;
        na.nm_txsync = ixgbe_netmap_txsync;
        na.nm_rxsync = ixgbe_netmap_rxsync;
        na.nm_register = ixgbe_netmap_reg;
        na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
        na.nm_intr = ixgbe_netmap_intr;
        netmap_attach(&na);
}
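
/*
 * A minimal userspace sketch (assuming the netmap_user.h helpers and an
 * interface named ix0; the names below are illustrative, not part of this
 * file) that exercises the txsync/rxsync callbacks registered above:
 *
 *      struct nm_desc *d = nm_open("netmap:ix0", NULL, 0, NULL);
 *      // poll() on d->fd, then nm_nextpkt()/nm_inject(), finally nm_close(d)
 */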