/*
 * Copyright (C) 2011 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: if_igb_netmap.h 9802 2011-12-02 18:42:37Z luigi $
 *
 * netmap modifications for igb
 * contributed by Ahmed Kooli
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>    /* vtophys ? */
#include <dev/netmap/netmap_kern.h>

static int      igb_netmap_reg(struct ifnet *, int onoff);
static int      igb_netmap_txsync(struct ifnet *, u_int, int);
static int      igb_netmap_rxsync(struct ifnet *, u_int, int);
static void     igb_netmap_lock_wrapper(struct ifnet *, int, u_int);


static void
igb_netmap_attach(struct adapter *adapter)
{
        struct netmap_adapter na;

        bzero(&na, sizeof(na));

        na.ifp = adapter->ifp;
        na.separate_locks = 1;
        na.num_tx_desc = adapter->num_tx_desc;
        na.num_rx_desc = adapter->num_rx_desc;
        na.nm_txsync = igb_netmap_txsync;
        na.nm_rxsync = igb_netmap_rxsync;
        na.nm_lock = igb_netmap_lock_wrapper;
        na.nm_register = igb_netmap_reg;
        netmap_attach(&na, adapter->num_queues);
}
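
/*
 * Where this is expected to be called from: a sketch (not verbatim
 * driver code) of the hookup at the end of the driver attach path,
 * assuming if_igb.c is built with the DEV_NETMAP option:
 *
 *      #ifdef DEV_NETMAP
 *              igb_netmap_attach(adapter);
 *      #endif
 *
 * netmap_attach() copies what it needs out of the template, which is
 * why 'na' can safely live on the stack above.
 */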


/*
 * wrapper to export locks to the generic code
 */
static void
igb_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
{
        struct adapter *adapter = ifp->if_softc;

        ASSERT(queueid < adapter->num_queues);
        switch (what) {
        case NETMAP_CORE_LOCK:
                IGB_CORE_LOCK(adapter);
                break;
        case NETMAP_CORE_UNLOCK:
                IGB_CORE_UNLOCK(adapter);
                break;
        case NETMAP_TX_LOCK:
                IGB_TX_LOCK(&adapter->tx_rings[queueid]);
                break;
        case NETMAP_TX_UNLOCK:
                IGB_TX_UNLOCK(&adapter->tx_rings[queueid]);
                break;
        case NETMAP_RX_LOCK:
                IGB_RX_LOCK(&adapter->rx_rings[queueid]);
                break;
        case NETMAP_RX_UNLOCK:
                IGB_RX_UNLOCK(&adapter->rx_rings[queueid]);
                break;
        }
}
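
/*
 * Note: because igb_netmap_attach() sets na.separate_locks, the generic
 * netmap code requests the per-ring TX/RX locks here for the sync
 * operations, and reserves the core lock for configuration paths such
 * as register/unregister.
 */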


/*
 * Support for netmap register/unregister. We are already under core
 * lock. Only called on the first register or the last unregister.
 */
static int
igb_netmap_reg(struct ifnet *ifp, int onoff)
{
        struct adapter *adapter = ifp->if_softc;
        struct netmap_adapter *na = NA(ifp);
        int error = 0;

        if (na == NULL)
                return EINVAL;

        igb_disable_intr(adapter);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        if (onoff) {
                ifp->if_capenable |= IFCAP_NETMAP;

                /* save if_transmit to restore it later */
                na->if_transmit = ifp->if_transmit;
                ifp->if_transmit = netmap_start;

                igb_init_locked(adapter);
                if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
                        error = ENOMEM;
                        goto fail;
                }
        } else {
fail:
                /* restore if_transmit */
                ifp->if_transmit = na->if_transmit;
                ifp->if_capenable &= ~IFCAP_NETMAP;
                igb_init_locked(adapter);       /* also enables intr */
        }
        return (error);
}
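
/*
 * For context, userspace reaches this callback roughly as follows (a
 * sketch against the netmap API of this era; error checks omitted and
 * "igb0" is just a placeholder interface name):
 *
 *      struct nmreq req;
 *      int fd = open("/dev/netmap", O_RDWR);
 *
 *      bzero(&req, sizeof(req));
 *      strncpy(req.nr_name, "igb0", sizeof(req.nr_name));
 *      ioctl(fd, NIOCREGIF, &req);     // -> igb_netmap_reg(ifp, 1)
 *      void *mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
 *              MAP_SHARED, fd, 0);
 *      struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *      ...
 *      close(fd);                      // last ref -> igb_netmap_reg(ifp, 0)
 */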


/*
 * Reconcile kernel and user view of the transmit ring.
 */
static int
igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
        struct adapter *adapter = ifp->if_softc;
        struct tx_ring *txr = &adapter->tx_rings[ring_nr];
        struct netmap_adapter *na = NA(adapter->ifp);
        struct netmap_kring *kring = &na->tx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        int j, k, l, n = 0, lim = kring->nkr_num_slots - 1;

        /* generate an interrupt approximately every half ring */
        int report_frequency = kring->nkr_num_slots >> 1;
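
        /*
         * The E1000_ADVTXD_DCMD_RS flag computed in the loop below is
         * set on slot 0, on slot report_frequency and on any slot
         * carrying NS_REPORT, so completion reports fire roughly twice
         * per traversal of the ring rather than once per packet.
         */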

        k = ring->cur;
        if (k > lim)
                return netmap_ring_reinit(kring);

        if (do_lock)
                IGB_TX_LOCK(txr);
        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);

        /* update avail to what the hardware knows */
        ring->avail = kring->nr_hwavail;

        j = kring->nr_hwcur; /* netmap ring index */
        if (j != k) {   /* we have new packets to send */
                u32 olinfo_status = 0;

                l = j - kring->nkr_hwofs; /* NIC ring index */
                if (l < 0)
                        l += lim + 1;
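
                /*
                 * Worked example of the translation above: with 1024
                 * slots (lim = 1023), nkr_hwofs = 100 and j = 50, the
                 * subtraction gives l = -50, and adding lim + 1 wraps
                 * it to the NIC index l = 974.
                 */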
                /* 82575 needs the queue index added */
                if (adapter->hw.mac.type == e1000_82575)
                        olinfo_status |= txr->me << 4;

                while (j != k) {
                        struct netmap_slot *slot = &ring->slot[j];
                        struct igb_tx_buffer *txbuf = &txr->tx_buffers[l];
                        union e1000_adv_tx_desc *curr =
                            (union e1000_adv_tx_desc *)&txr->tx_base[l];
                        uint64_t paddr;
                        void *addr = PNMB(slot, &paddr);
                        int flags = ((slot->flags & NS_REPORT) ||
                                j == 0 || j == report_frequency) ?
                                        E1000_ADVTXD_DCMD_RS : 0;
                        int len = slot->len;

                        if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
                                if (do_lock)
                                        IGB_TX_UNLOCK(txr);
                                return netmap_ring_reinit(kring);
                        }

                        slot->flags &= ~NS_REPORT;
                        // XXX do we need to set the address ?
                        curr->read.buffer_addr = htole64(paddr);
                        curr->read.olinfo_status =
                            htole32(olinfo_status |
                                (len << E1000_ADVTXD_PAYLEN_SHIFT));
                        curr->read.cmd_type_len =
                            htole32(len | E1000_ADVTXD_DTYP_DATA |
                                    E1000_ADVTXD_DCMD_IFCS |
                                    E1000_ADVTXD_DCMD_DEXT |
                                    E1000_ADVTXD_DCMD_EOP | flags);
                        if (slot->flags & NS_BUF_CHANGED) {
                                /* buffer has changed, reload map */
                                netmap_reload_map(txr->txtag, txbuf->map, addr);
                                slot->flags &= ~NS_BUF_CHANGED;
                        }

                        bus_dmamap_sync(txr->txtag, txbuf->map,
                                BUS_DMASYNC_PREWRITE);
                        j = (j == lim) ? 0 : j + 1;
                        l = (l == lim) ? 0 : l + 1;
                        n++;
                }
                kring->nr_hwcur = k;

                /* decrease avail by number of sent packets */
                kring->nr_hwavail -= n;
                ring->avail = kring->nr_hwavail;

                /* Set the watchdog XXX ? */
                txr->queue_status = IGB_QUEUE_WORKING;
                txr->watchdog_time = ticks;

                bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), l);
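
                /*
                 * TDT is the first slot the hardware does not own, so
                 * the single register write above hands every
                 * descriptor filled in the loop to the NIC at once.
                 */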
        }
        if (n == 0 || kring->nr_hwavail < 1) {
                int delta;

                /* record completed transmissions using TDH */
                l = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr));
                if (l >= kring->nkr_num_slots) /* XXX can it happen ? */
                        l -= kring->nkr_num_slots;
                delta = l - txr->next_to_clean;
                if (delta) {
                        /* new tx were completed */
                        if (delta < 0)
                                delta += kring->nkr_num_slots;
                        txr->next_to_clean = l;
                        kring->nr_hwavail += delta;
                        ring->avail = kring->nr_hwavail;
                }
        }
        if (do_lock)
                IGB_TX_UNLOCK(txr);
        return 0;
}
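
/*
 * The userspace half of this contract, as a sketch only (old netmap
 * API; fd and nifp as set up in the example after igb_netmap_reg()
 * above):
 *
 *      struct netmap_ring *r = NETMAP_TXRING(nifp, 0);
 *      u_int lim = r->num_slots - 1;
 *
 *      while (r->avail > 0) {
 *              struct netmap_slot *slot = &r->slot[r->cur];
 *              char *buf = NETMAP_BUF(r, slot->buf_idx);
 *              // ... copy a frame into buf, set slot->len ...
 *              r->cur = (r->cur == lim) ? 0 : r->cur + 1;
 *              r->avail--;
 *      }
 *      ioctl(fd, NIOCTXSYNC, NULL);    // ends up in igb_netmap_txsync()
 */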


/*
 * Reconcile kernel and user view of the receive ring.
 */
static int
igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
        struct adapter *adapter = ifp->if_softc;
        struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
        struct netmap_adapter *na = NA(adapter->ifp);
        struct netmap_kring *kring = &na->rx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        int j, k, l, n, lim = kring->nkr_num_slots - 1;

        k = ring->cur;
        if (k > lim)
                return netmap_ring_reinit(kring);

        if (do_lock)
                IGB_RX_LOCK(rxr);

        /* Sync the ring. */
        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        l = rxr->next_to_check;
        j = l + kring->nkr_hwofs;
        if (j > lim)
                j -= lim + 1;
        for (n = 0; ; n++) {
                union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
                uint32_t staterr = le32toh(curr->wb.upper.status_error);

                if ((staterr & E1000_RXD_STAT_DD) == 0)
                        break;
                ring->slot[j].len = le16toh(curr->wb.upper.length);

                bus_dmamap_sync(rxr->ptag,
                        rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
                j = (j == lim) ? 0 : j + 1;
                l = (l == lim) ? 0 : l + 1;
        }
        if (n) {
                rxr->next_to_check = l;
                kring->nr_hwavail += n;
        }
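
        /*
         * The loop above only harvested completed descriptors into
         * nr_hwavail; the new slots are published to userspace through
         * ring->avail at the bottom of this function.
         */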

        /* skip past packets that userspace has already processed,
         * making them available for reception.
         * advance nr_hwcur and issue a bus_dmamap_sync on the
         * buffers so it is safe to write to them.
         * Also decrease nr_hwavail accordingly.
         */
        j = kring->nr_hwcur;
        l = kring->nr_hwcur - kring->nkr_hwofs;
        if (l < 0)
                l += lim + 1;
        if (j != k) {   /* userspace has read some packets. */
                n = 0;
                while (j != k) {
                        struct netmap_slot *slot = ring->slot + j;
                        union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
                        struct igb_rx_buf *rxbuf = rxr->rx_buffers + l;
                        uint64_t paddr;
                        void *addr = PNMB(slot, &paddr);

                        if (addr == netmap_buffer_base) { /* bad buf */
                                if (do_lock)
                                        IGB_RX_UNLOCK(rxr);
                                return netmap_ring_reinit(kring);
                        }

                        curr->wb.upper.status_error = 0;
                        curr->read.pkt_addr = htole64(paddr);
                        if (slot->flags & NS_BUF_CHANGED) {
                                netmap_reload_map(rxr->ptag, rxbuf->pmap, addr);
                                slot->flags &= ~NS_BUF_CHANGED;
                        }

                        bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                                BUS_DMASYNC_PREREAD);

                        j = (j == lim) ? 0 : j + 1;
                        l = (l == lim) ? 0 : l + 1;
                        n++;
                }
                kring->nr_hwavail -= n;
                kring->nr_hwcur = ring->cur;
                bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
                        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                /* IMPORTANT: we must leave one free slot in the ring,
                 * so move l back by one unit
                 */
                l = (l == 0) ? lim : l - 1;
                E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), l);
        }
        /* tell userspace that there are new packets */
        ring->avail = kring->nr_hwavail;
        if (do_lock)
                IGB_RX_UNLOCK(rxr);
        return 0;
}
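
/*
 * And the matching userspace receive loop, again only a sketch under
 * the same assumptions as the transmit example above:
 *
 *      ioctl(fd, NIOCRXSYNC, NULL);    // or poll() with POLLIN
 *      struct netmap_ring *r = NETMAP_RXRING(nifp, 0);
 *      u_int lim = r->num_slots - 1;
 *
 *      while (r->avail > 0) {
 *              struct netmap_slot *slot = &r->slot[r->cur];
 *              // ... consume slot->len bytes at NETMAP_BUF(r, slot->buf_idx) ...
 *              r->cur = (r->cur == lim) ? 0 : r->cur + 1;
 *              r->avail--;
 *      }
 */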