/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * netmap support for: em.
 *
 * For more details on netmap support please see ixgbe_netmap.h
 */
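
/*
 * Usage sketch (illustrative, not part of the original file): the em
 * driver is expected to include this header from if_em.c under
 * #ifdef DEV_NETMAP and to call em_netmap_attach() once the adapter
 * has been set up, roughly:
 *
 *	#ifdef DEV_NETMAP
 *	#include <dev/netmap/if_em_netmap.h>
 *	#endif
 *	...
 *	em_netmap_attach(adapter);	// near the end of device attach
 *
 * The exact hook points depend on the if_em.c revision in use.
 */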

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>	/* needed by pmap.h */
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

// XXX do we need to block/unblock the tasks ?
static void
em_netmap_block_tasks(struct adapter *adapter)
{
	if (adapter->msix > 1) { /* MSIX */
		int i;
		struct tx_ring *txr = adapter->tx_rings;
		struct rx_ring *rxr = adapter->rx_rings;

		for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
			taskqueue_block(txr->tq);
			taskqueue_drain(txr->tq, &txr->tx_task);
			taskqueue_block(rxr->tq);
			taskqueue_drain(rxr->tq, &rxr->rx_task);
		}
	} else {	/* legacy */
		taskqueue_block(adapter->tq);
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->que_task);
	}
}
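
/*
 * Rationale (informal): while the rings are switched between host-stack
 * and netmap mode, the driver's interrupt taskqueues must not run, or
 * they could touch descriptors that netmap is re-initializing; blocking
 * and then draining lets any already-queued task finish first.
 */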

static void
em_netmap_unblock_tasks(struct adapter *adapter)
{
	if (adapter->msix > 1) {
		struct tx_ring *txr = adapter->tx_rings;
		struct rx_ring *rxr = adapter->rx_rings;
		int i;

		for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) {
			taskqueue_unblock(txr->tq);
			taskqueue_unblock(rxr->tq);
		}
	} else { /* legacy */
		taskqueue_unblock(adapter->tq);
	}
}

/*
 * Register/unregister. We are already under netmap lock.
 */
static int
em_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct adapter *adapter = ifp->if_softc;

	EM_CORE_LOCK(adapter);
	em_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	em_netmap_block_tasks(adapter);
	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	em_init_locked(adapter);	/* also enable intr */
	em_netmap_unblock_tasks(adapter);
	EM_CORE_UNLOCK(adapter);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}
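
/*
 * Note (informal): netmap invokes this nm_register callback when a
 * process opens or closes a netmap port on the interface (e.g. via
 * nm_open("netmap:em0", ...) in userspace); a nonzero return value is
 * propagated back to that process as a registration failure.
 */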

/*
 * Reconcile kernel and user view of the transmit ring.
 */
static int
em_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	/* generate an interrupt approximately every half ring */
	u_int report_frequency = kring->nkr_num_slots >> 1;
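
	/*
	 * Example: with a 1024-slot ring, report_frequency is 512, so the
	 * RS (report status) bit below is requested only at slots 0 and
	 * 512 (or when the user sets NS_REPORT), i.e. about twice per
	 * ring traversal instead of on every descriptor.
	 */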

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = &adapter->tx_rings[kring->ring_id];

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * First part: process new packets to send.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			/* device-specific */
			struct e1000_tx_desc *curr = &txr->tx_base[nic_i];
			struct em_txbuffer *txbuf = &txr->tx_buffers[nic_i];
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				E1000_TXD_CMD_RS : 0;

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				curr->buffer_addr = htole64(paddr);
				/* buffer has changed, reload map */
				netmap_reload_map(na, txr->txtag, txbuf->map, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* Fill the slot in the NIC ring. */
			curr->upper.data = 0;
			curr->lower.data = htole32(adapter->txd_cmd | len |
				(E1000_TXD_CMD_EOP | flags) );
			bus_dmamap_sync(txr->txtag, txbuf->map,
				BUS_DMASYNC_PREWRITE);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		/* record completed transmissions using TDH */
		nic_i = E1000_READ_REG(&adapter->hw, E1000_TDH(kring->ring_id));
		if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */
			D("TDH wrap %d", nic_i);
			nic_i -= kring->nkr_num_slots;
		}
		if (nic_i != txr->next_to_clean) {
			txr->next_to_clean = nic_i;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}

	nm_txsync_finalize(kring);

	return 0;
}
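
/*
 * Note (informal) on the nm_prev() above: nr_hwtail is deliberately
 * kept one slot short of the NIC's TDH, preserving netmap's invariant
 * that head and tail never collide, so a completely full ring remains
 * distinguishable from an empty one.
 */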

/*
 * Reconcile kernel and user view of the receive ring.
 */
static int
em_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = nm_rxsync_prologue(kring);
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* device-specific */
	struct adapter *adapter = ifp->if_softc;
	struct rx_ring *rxr = &adapter->rx_rings[kring->ring_id];

	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;

		nic_i = rxr->next_to_check;
		nm_i = netmap_idx_n2k(kring, nic_i);

		for (n = 0; ; n++) {	// XXX no need to count
			union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
			uint32_t staterr = le32toh(curr->wb.upper.status_error);

			if ((staterr & E1000_RXD_STAT_DD) == 0)
				break;
			ring->slot[nm_i].len = le16toh(curr->wb.upper.length);
			ring->slot[nm_i].flags = slot_flags;
			bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[nic_i].map,
				BUS_DMASYNC_POSTREAD);
			nm_i = nm_next(nm_i, lim);
			/* make sure next_to_refresh follows next_to_check */
			rxr->next_to_refresh = nic_i;	// XXX
			nic_i = nm_next(nic_i, lim);
		}
		if (n) { /* update the state variables */
			rxr->next_to_check = nic_i;
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
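
	/*
	 * The scan above stops at the first descriptor whose DD bit is
	 * still clear: the NIC sets DD (descriptor done) in host memory
	 * only after writing back the frame and its length, so every
	 * slot made visible to userspace is complete.
	 */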

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);

			union e1000_rx_desc_extended *curr = &rxr->rx_base[nic_i];
			struct em_rxbuffer *rxbuf = &rxr->rx_buffers[nic_i];

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				goto ring_reset;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				curr->read.buffer_addr = htole64(paddr);
				netmap_reload_map(na, rxr->rxtag, rxbuf->map, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			curr->wb.upper.status_error = 0;
			bus_dmamap_sync(rxr->rxtag, rxbuf->map,
				BUS_DMASYNC_PREREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move nic_i back by one unit
		 */
		nic_i = nm_prev(nic_i, lim);
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), nic_i);
	}
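
	/*
	 * Example of the "one free slot" rule above: if every slot were
	 * handed to the NIC, the ring's head and tail indices would
	 * become equal, which is indistinguishable from an empty ring;
	 * backing nic_i up by one slot keeps "full" and "empty" distinct.
	 */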

	/* tell userspace that there might be new packets */
	nm_rxsync_finalize(kring);

	return 0;

ring_reset:
	return netmap_ring_reinit(kring);
}

static void
em_netmap_attach(struct adapter *adapter)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = adapter->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	na.num_tx_desc = adapter->num_tx_desc;
	na.num_rx_desc = adapter->num_rx_desc;
	na.nm_txsync = em_netmap_txsync;
	na.nm_rxsync = em_netmap_rxsync;
	na.nm_register = em_netmap_reg;
	na.num_tx_rings = na.num_rx_rings = adapter->num_queues;
	netmap_attach(&na);
}
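
/*
 * netmap_attach() copies the fields of this template into a private
 * netmap_adapter, so the on-stack 'na' need not persist after the call.
 */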