FreeBSD releng/10.0 — sys/dev/netmap/if_lem_netmap.h
(Copy of stable/10 r259064 into releng/10.0 as part of the 10.0 release process.)
[FreeBSD/releng/10.0.git] / sys / dev / netmap / if_lem_netmap.h
1 /*
2  * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25
26
27 /*
28  * $FreeBSD$
29  *
30  * netmap support for "lem"
31  *
32  * For details on netmap support please see ixgbe_netmap.h
33  */
34
35 #include <net/netmap.h>
36 #include <sys/selinfo.h>
37 #include <vm/vm.h>
38 #include <vm/pmap.h>    /* vtophys ? */
39 #include <dev/netmap/netmap_kern.h>
40
41
42 static void
43 lem_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int ringid)
44 {
45         struct adapter *adapter = ifp->if_softc;
46
47         /* only one ring here so ignore the ringid */
48         switch (what) {
49         case NETMAP_CORE_LOCK:
50                 EM_CORE_LOCK(adapter);
51                 break;
52         case NETMAP_CORE_UNLOCK:
53                 EM_CORE_UNLOCK(adapter);
54                 break;
55         case NETMAP_TX_LOCK:
56                 EM_TX_LOCK(adapter);
57                 break;
58         case NETMAP_TX_UNLOCK:
59                 EM_TX_UNLOCK(adapter);
60                 break;
61         case NETMAP_RX_LOCK:
62                 EM_RX_LOCK(adapter);
63                 break;
64         case NETMAP_RX_UNLOCK:
65                 EM_RX_UNLOCK(adapter);
66                 break;
67         }
68 }
69
70
/*
 * Register/unregister routine, called by netmap to switch the NIC
 * between normal mode (onoff == 0) and netmap mode (onoff != 0).
 *
 * Returns 0 on success, EINVAL if netmap is not attached to the
 * interface, ENOMEM if the reinitialization in netmap mode failed
 * (in which case the interface is rolled back to normal mode).
 */
static int
lem_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct adapter *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (na == NULL)
		return EINVAL;

	lem_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

#ifndef EM_LEGACY_IRQ // XXX do we need this ?
	/* quiesce the deferred interrupt tasks before switching modes */
	taskqueue_block(adapter->tq);
	taskqueue_drain(adapter->tq, &adapter->rxtx_task);
	taskqueue_drain(adapter->tq, &adapter->link_task);
#endif /* !EM_LEGACY_IRQ */
	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save the stack's transmit routine and divert tx to netmap */
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_start;

		lem_init_locked(adapter);
		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			/* init failed: fall through to restore normal mode */
			error = ENOMEM;
			goto fail;
		}
	} else {
fail:
		/* return to non-netmap mode */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		lem_init_locked(adapter);	/* also enable intr */
	}

#ifndef EM_LEGACY_IRQ
	taskqueue_unblock(adapter->tq); // XXX do we need this ?
#endif /* !EM_LEGACY_IRQ */

	return (error);
}
119
120
/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * Two phases: first push to the NIC the slots that userspace made
 * available (from nr_hwcur up to ring->cur), then reclaim completed
 * transmissions using the hardware head pointer (TDH) and update
 * ring->avail accordingly.
 *
 * If do_lock is set, the routine takes and releases EM_TX_LOCK itself.
 * Returns 0, or the result of netmap_ring_reinit() on an inconsistent
 * ring state.
 */
static int
lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;

	/* generate an interrupt approximately every half ring */
	int report_frequency = kring->nkr_num_slots >> 1;

	/* take a copy of ring->cur now, and never read it again */
	k = ring->cur;
	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		EM_TX_LOCK(adapter);
	/* make the tx descriptor ring coherent before reading TX status */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
			BUS_DMASYNC_POSTREAD);
	/*
	 * Process new packets to send. j is the current index in the
	 * netmap ring, l is the corresponding index in the NIC ring.
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* we have new packets to send */
		l = netmap_idx_k2n(kring, j);
		for (n = 0; j != k; n++) {
			/* slot is the current slot in the netmap ring */
			struct netmap_slot *slot = &ring->slot[j];
			/* curr is the current slot in the nic ring */
			struct e1000_tx_desc *curr = &adapter->tx_desc_base[l];
			struct em_buffer *txbuf = &adapter->tx_buffer_area[l];
			/* request a report (RS) on NS_REPORT and twice per ring */
			int flags = ((slot->flags & NS_REPORT) ||
				j == 0 || j == report_frequency) ?
					E1000_TXD_CMD_RS : 0;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);
			u_int len = slot->len;

			/* reject unusable buffers or oversized packets */
			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				if (do_lock)
					EM_TX_UNLOCK(adapter);
				return netmap_ring_reinit(kring);
			}

			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(adapter->txtag, txbuf->map, addr);
				curr->buffer_addr = htole64(paddr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			/* fill the descriptor: length, EOP and optional RS */
			curr->upper.data = 0;
			curr->lower.data =
			    htole32( adapter->txd_cmd | len |
				(E1000_TXD_CMD_EOP | flags) );

			bus_dmamap_sync(adapter->txtag, txbuf->map,
			    BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwcur = k; /* the saved ring->cur */
		kring->nr_hwavail -= n;

		bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* kick the NIC by advancing the tail pointer */
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), l);
	}

	/* reclaim completed slots if we sent nothing or the ring is full */
	if (n == 0 || kring->nr_hwavail < 1) {
		int delta;

		/* record completed transmissions using TDH */
		l = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
		if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */
			D("bad TDH %d", l);
			l -= kring->nkr_num_slots;
		}
		delta = l - adapter->next_tx_to_clean;
		if (delta) {
			/* some tx completed, increment hwavail. */
			if (delta < 0)	/* head wrapped around */
				delta += kring->nkr_num_slots;
			adapter->next_tx_to_clean = l;
			kring->nr_hwavail += delta;
		}
	}
	/* update avail to what the kernel knows */
	ring->avail = kring->nr_hwavail;

	if (do_lock)
		EM_TX_UNLOCK(adapter);
	return 0;
}
222
223
/*
 * Reconcile kernel and user view of the receive ring.
 *
 * Two phases: first import newly received packets (descriptors with
 * the DD status bit set) into the netmap ring, then return to the NIC
 * the buffers that userspace has released, advancing the hardware tail
 * pointer (RDT). ring->reserved slots are kept back from userspace.
 *
 * If do_lock is set, the routine takes and releases EM_RX_LOCK itself.
 * Returns 0, or the result of netmap_ring_reinit() on an inconsistent
 * ring state.
 */
static int
lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
	struct adapter *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, l, n, lim = kring->nkr_num_slots - 1;
	/* always scan for new packets when locked or an interrupt is pending */
	int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
	u_int k = ring->cur, resvd = ring->reserved;

	if (k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		EM_RX_LOCK(adapter);

	/* XXX check sync modes */
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Import newly received packets into the netmap ring.
	 * j is an index in the netmap ring, l in the NIC ring.
	 */
	l = adapter->next_rx_desc_to_check;
	j = netmap_idx_n2k(kring, l);
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;

		for (n = 0; ; n++) {
			struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
			uint32_t staterr = le32toh(curr->status);
			int len;

			/* stop at the first descriptor the NIC hasn't filled */
			if ((staterr & E1000_RXD_STAT_DD) == 0)
				break;
			len = le16toh(curr->length) - 4; // CRC
			if (len < 0) {
				D("bogus pkt size at %d", j);
				len = 0;
			}
			ring->slot[j].len = len;
			ring->slot[j].flags = slot_flags;
			bus_dmamap_sync(adapter->rxtag,
				adapter->rx_buffer_area[l].map,
				    BUS_DMASYNC_POSTREAD);
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		if (n) { /* update the state variables */
			adapter->next_rx_desc_to_check = l;
			kring->nr_hwavail += n;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;	/* netmap ring index */
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		/* back off cur by resvd slots, modulo the ring size */
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) { /* userspace has released some packets. */
		l = netmap_idx_k2n(kring, j); /* NIC ring index */
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = &ring->slot[j];
			struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
			struct em_buffer *rxbuf = &adapter->rx_buffer_area[l];
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					EM_RX_UNLOCK(adapter);
				return netmap_ring_reinit(kring);
			}

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(adapter->rxtag, rxbuf->map, addr);
				curr->buffer_addr = htole64(paddr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			/* clear DD so the NIC can reuse the descriptor */
			curr->status = 0;

			bus_dmamap_sync(adapter->rxtag, rxbuf->map,
			    BUS_DMASYNC_PREREAD);

			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * IMPORTANT: we must leave one free slot in the ring,
		 * so move l back by one unit
		 */
		l = (l == 0) ? lim : l - 1;
		E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), l);
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail - resvd;
	if (do_lock)
		EM_RX_UNLOCK(adapter);
	return 0;
}
339
340
341 static void
342 lem_netmap_attach(struct adapter *adapter)
343 {
344         struct netmap_adapter na;
345
346         bzero(&na, sizeof(na));
347
348         na.ifp = adapter->ifp;
349         na.separate_locks = 1;
350         na.num_tx_desc = adapter->num_tx_desc;
351         na.num_rx_desc = adapter->num_rx_desc;
352         na.nm_txsync = lem_netmap_txsync;
353         na.nm_rxsync = lem_netmap_rxsync;
354         na.nm_lock = lem_netmap_lock_wrapper;
355         na.nm_register = lem_netmap_reg;
356         netmap_attach(&na, 1);
357 }
358
359 /* end of file */