]> CyberLeo.Net >> Repos - FreeBSD/releng/10.0.git/blob - sys/dev/netmap/if_igb_netmap.h
- Copy stable/10 (r259064) to releng/10.0 as part of the
[FreeBSD/releng/10.0.git] / sys / dev / netmap / if_igb_netmap.h
1 /*
2  * Copyright (C) 2011 Universita` di Pisa. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25
26 /*
27  * $FreeBSD$
28  *
29  * Netmap support for igb, partly contributed by Ahmed Kooli
30  * For details on netmap support please see ixgbe_netmap.h
31  */
32
33
34 #include <net/netmap.h>
35 #include <sys/selinfo.h>
36 #include <vm/vm.h>
37 #include <vm/pmap.h>    /* vtophys ? */
38 #include <dev/netmap/netmap_kern.h>
39
40
41 /*
42  * wrapper to export locks to the generic code
43  */
44 static void
45 igb_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
46 {
47         struct adapter *adapter = ifp->if_softc;
48
49         ASSERT(queueid < adapter->num_queues);
50         switch (what) {
51         case NETMAP_CORE_LOCK:
52                 IGB_CORE_LOCK(adapter);
53                 break;
54         case NETMAP_CORE_UNLOCK:
55                 IGB_CORE_UNLOCK(adapter);
56                 break;
57         case NETMAP_TX_LOCK:
58                 IGB_TX_LOCK(&adapter->tx_rings[queueid]);
59                 break;
60         case NETMAP_TX_UNLOCK:
61                 IGB_TX_UNLOCK(&adapter->tx_rings[queueid]);
62                 break;
63         case NETMAP_RX_LOCK:
64                 IGB_RX_LOCK(&adapter->rx_rings[queueid]);
65                 break;
66         case NETMAP_RX_UNLOCK:
67                 IGB_RX_UNLOCK(&adapter->rx_rings[queueid]);
68                 break;
69         }
70 }
71
72
73 /*
74  * register-unregister routine
75  */
76 static int
77 igb_netmap_reg(struct ifnet *ifp, int onoff)
78 {
79         struct adapter *adapter = ifp->if_softc;
80         struct netmap_adapter *na = NA(ifp);
81         int error = 0;
82
83         if (na == NULL)
84                 return EINVAL;  /* no netmap support here */
85
86         igb_disable_intr(adapter);
87
88         /* Tell the stack that the interface is no longer active */
89         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
90
91         if (onoff) {
92                 ifp->if_capenable |= IFCAP_NETMAP;
93
94                 na->if_transmit = ifp->if_transmit;
95                 ifp->if_transmit = netmap_start;
96
97                 igb_init_locked(adapter);
98                 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
99                         error = ENOMEM;
100                         goto fail;
101                 }
102         } else {
103 fail:
104                 /* restore if_transmit */
105                 ifp->if_transmit = na->if_transmit;
106                 ifp->if_capenable &= ~IFCAP_NETMAP;
107                 igb_init_locked(adapter);       /* also enable intr */
108         }
109         return (error);
110 }
111
112
113 /*
114  * Reconcile kernel and user view of the transmit ring.
115  */
/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * Pushes the slots that userspace has filled (up to ring->cur) to the
 * NIC, then reclaims buffers the NIC has finished sending by reading
 * the TDH head register.  If do_lock is set, IGB_TX_LOCK protects the
 * whole operation.  Returns 0, or reinitializes the ring on errors.
 */
static int
igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = &adapter->tx_rings[ring_nr];
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;

	/* generate an interrupt approximately every half ring */
	u_int report_frequency = kring->nkr_num_slots >> 1;

	/* cache and sanity-check the userspace cursor */
	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		IGB_TX_LOCK(txr);
	/* pick up descriptor writebacks from the NIC before reading them */
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	/* check for new packets to send.
	 * j indexes the netmap ring, l indexes the nic ring, and
	 *	j = kring->nr_hwcur, l = E1000_TDT (not tracked),
	 *	j == (l + kring->nkr_hwofs) % ring_size
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* we have new packets to send */
		/* 82575 needs the queue index added */
		u32 olinfo_status =
		    (adapter->hw.mac.type == e1000_82575) ? (txr->me << 4) : 0;

		l = netmap_idx_k2n(kring, j);
		for (n = 0; j != k; n++) {
			/* slot is the current slot in the netmap ring */
			struct netmap_slot *slot = &ring->slot[j];
			/* curr is the current slot in the nic ring */
			union e1000_adv_tx_desc *curr =
			    (union e1000_adv_tx_desc *)&txr->tx_base[l];
			struct igb_tx_buf *txbuf = &txr->tx_buffers[l];
			/* request a writeback ("report status") on explicit
			 * request or at the report_frequency interval */
			int flags = ((slot->flags & NS_REPORT) ||
				j == 0 || j == report_frequency) ?
					E1000_ADVTXD_DCMD_RS : 0;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);
			u_int len = slot->len;

			/* invalid buffer index or oversized frame: bail out */
			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				if (do_lock)
					IGB_TX_UNLOCK(txr);
				return netmap_ring_reinit(kring);
			}

			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(txr->txtag, txbuf->map, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			/* fill the advanced tx descriptor for this slot */
			curr->read.buffer_addr = htole64(paddr);
			// XXX check olinfo and cmd_type_len
			curr->read.olinfo_status =
			    htole32(olinfo_status |
				(len<< E1000_ADVTXD_PAYLEN_SHIFT));
			curr->read.cmd_type_len =
			    htole32(len | E1000_ADVTXD_DTYP_DATA |
				    E1000_ADVTXD_DCMD_IFCS |
				    E1000_ADVTXD_DCMD_DEXT |
				    E1000_ADVTXD_DCMD_EOP | flags);

			/* flush the payload buffer before the NIC reads it */
			bus_dmamap_sync(txr->txtag, txbuf->map,
				BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwcur = k; /* the saved ring->cur */
		kring->nr_hwavail -= n;

		/* Set the watchdog XXX ? */
		txr->queue_status = IGB_QUEUE_WORKING;
		txr->watchdog_time = ticks;

		/* flush the descriptor ring before kicking the NIC */
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* advance the tail pointer: tells the NIC to start sending */
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), l);
	}

	/* reclaim completed slots if we sent nothing or the ring is full */
	if (n == 0 || kring->nr_hwavail < 1) {
		int delta;

		/* record completed transmissions using TDH */
		l = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr));
		if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
			D("TDH wrap %d", l);
			l -= kring->nkr_num_slots;
		}
		delta = l - txr->next_to_clean;
		if (delta) {
			/* some completed, increment hwavail. */
			if (delta < 0)
				delta += kring->nkr_num_slots;
			txr->next_to_clean = l;
			kring->nr_hwavail += delta;
		}
	}
	/* update avail to what the kernel knows */
	ring->avail = kring->nr_hwavail;

	if (do_lock)
		IGB_TX_UNLOCK(txr);
	return 0;
}
230
231
232 /*
233  * Reconcile kernel and user view of the receive ring.
234  */
235 static int
236 igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
237 {
238         struct adapter *adapter = ifp->if_softc;
239         struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
240         struct netmap_adapter *na = NA(ifp);
241         struct netmap_kring *kring = &na->rx_rings[ring_nr];
242         struct netmap_ring *ring = kring->ring;
243         u_int j, l, n, lim = kring->nkr_num_slots - 1;
244         int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
245         u_int k = ring->cur, resvd = ring->reserved;
246
247         k = ring->cur;
248         if (k > lim)
249                 return netmap_ring_reinit(kring);
250
251         if (do_lock)
252                 IGB_RX_LOCK(rxr);
253
254         /* XXX check sync modes */
255         bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
256             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
257
258         /*
259          * import newly received packets into the netmap ring.
260          * j is an index in the netmap ring, l in the NIC ring.
261          */
262         l = rxr->next_to_check;
263         j = netmap_idx_n2k(kring, l);
264         if (netmap_no_pendintr || force_update) {
265                 uint16_t slot_flags = kring->nkr_slot_flags;
266
267                 for (n = 0; ; n++) {
268                         union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
269                         uint32_t staterr = le32toh(curr->wb.upper.status_error);
270
271                         if ((staterr & E1000_RXD_STAT_DD) == 0)
272                                 break;
273                         ring->slot[j].len = le16toh(curr->wb.upper.length);
274                         ring->slot[j].flags = slot_flags;
275                         bus_dmamap_sync(rxr->ptag,
276                                 rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
277                         j = (j == lim) ? 0 : j + 1;
278                         l = (l == lim) ? 0 : l + 1;
279                 }
280                 if (n) { /* update the state variables */
281                         rxr->next_to_check = l;
282                         kring->nr_hwavail += n;
283                 }
284                 kring->nr_kflags &= ~NKR_PENDINTR;
285         }
286
287         /* skip past packets that userspace has released */
288         j = kring->nr_hwcur;    /* netmap ring index */
289         if (resvd > 0) {
290                 if (resvd + ring->avail >= lim + 1) {
291                         D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
292                         ring->reserved = resvd = 0; // XXX panic...
293                 }
294                 k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
295         }
296         if (j != k) { /* userspace has released some packets. */
297                 l = netmap_idx_k2n(kring, j);
298                 for (n = 0; j != k; n++) {
299                         struct netmap_slot *slot = ring->slot + j;
300                         union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
301                         struct igb_rx_buf *rxbuf = rxr->rx_buffers + l;
302                         uint64_t paddr;
303                         void *addr = PNMB(slot, &paddr);
304
305                         if (addr == netmap_buffer_base) { /* bad buf */
306                                 if (do_lock)
307                                         IGB_RX_UNLOCK(rxr);
308                                 return netmap_ring_reinit(kring);
309                         }
310
311                         if (slot->flags & NS_BUF_CHANGED) {
312                                 netmap_reload_map(rxr->ptag, rxbuf->pmap, addr);
313                                 slot->flags &= ~NS_BUF_CHANGED;
314                         }
315                         curr->read.pkt_addr = htole64(paddr);
316                         curr->wb.upper.status_error = 0;
317                         bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
318                                 BUS_DMASYNC_PREREAD);
319                         j = (j == lim) ? 0 : j + 1;
320                         l = (l == lim) ? 0 : l + 1;
321                 }
322                 kring->nr_hwavail -= n;
323                 kring->nr_hwcur = k;
324                 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
325                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
326                 /*
327                  * IMPORTANT: we must leave one free slot in the ring,
328                  * so move l back by one unit
329                  */
330                 l = (l == 0) ? lim : l - 1;
331                 E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), l);
332         }
333         /* tell userspace that there are new packets */
334         ring->avail = kring->nr_hwavail - resvd;
335         if (do_lock)
336                 IGB_RX_UNLOCK(rxr);
337         return 0;
338 }
339
340
341 static void
342 igb_netmap_attach(struct adapter *adapter)
343 {
344         struct netmap_adapter na;
345
346         bzero(&na, sizeof(na));
347
348         na.ifp = adapter->ifp;
349         na.separate_locks = 1;
350         na.num_tx_desc = adapter->num_tx_desc;
351         na.num_rx_desc = adapter->num_rx_desc;
352         na.nm_txsync = igb_netmap_txsync;
353         na.nm_rxsync = igb_netmap_rxsync;
354         na.nm_lock = igb_netmap_lock_wrapper;
355         na.nm_register = igb_netmap_reg;
356         netmap_attach(&na, adapter->num_queues);
357 }       
358 /* end of file */