/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * netmap support for: lem
 *
 * For details on netmap support please see ixgbe_netmap.h
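 *
 * This file provides the netmap callbacks for the lem(4) driver:
 * nm_register (lem_netmap_reg), nm_txsync (lem_netmap_txsync) and
 * nm_rxsync (lem_netmap_rxsync), plus lem_netmap_attach(), which
 * registers the adapter with the netmap subsystem.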
 */


#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>    /* vtophys ? */
#include <dev/netmap/netmap_kern.h>

/*
 * Register/unregister. We are already under netmap lock.
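 *
 * onoff != 0 switches the interface into netmap mode, 0 restores
 * normal operation. In both cases interrupts are disabled, the
 * interface is marked down and the taskqueues are drained, the
 * native flags on the adapter are set or cleared, and
 * lem_init_locked() then brings the interface back up (which also
 * re-enables interrupts).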
 */
static int
lem_netmap_reg(struct netmap_adapter *na, int onoff)
{
        struct ifnet *ifp = na->ifp;
        struct adapter *adapter = ifp->if_softc;

        EM_CORE_LOCK(adapter);

        lem_disable_intr(adapter);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

#ifndef EM_LEGACY_IRQ // XXX do we need this ?
        taskqueue_block(adapter->tq);
        taskqueue_drain(adapter->tq, &adapter->rxtx_task);
        taskqueue_drain(adapter->tq, &adapter->link_task);
#endif /* !EM_LEGACY_IRQ */

        /* enable or disable flags and callbacks in na and ifp */
        if (onoff) {
                nm_set_native_flags(na);
        } else {
                nm_clear_native_flags(na);
        }
        lem_init_locked(adapter);       /* also enable intr */

#ifndef EM_LEGACY_IRQ
        taskqueue_unblock(adapter->tq); // XXX do we need this ?
#endif /* !EM_LEGACY_IRQ */

        EM_CORE_UNLOCK(adapter);

        return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}


/*
 * Reconcile kernel and user view of the transmit ring.
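 *
 * Two phases: first, new slots between nr_hwcur and rhead are turned
 * into NIC descriptors and TDT is advanced so the hardware starts
 * sending them; second, descriptors the NIC has already transmitted
 * (according to TDH) are reclaimed and nr_hwtail is updated.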
 */
static int
lem_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_kring *kring = &na->tx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        u_int nm_i;     /* index into the netmap ring */
        u_int nic_i;    /* index into the NIC ring */
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = kring->rhead;
        /* generate an interrupt approximately every half ring */
        u_int report_frequency = kring->nkr_num_slots >> 1;

        /* device-specific */
        struct adapter *adapter = ifp->if_softc;

        bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
                        BUS_DMASYNC_POSTREAD);

        /*
         * First part: process new packets to send.
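         *
         * For each netmap slot from nr_hwcur up to (but not including)
         * rhead, fill the matching e1000 descriptor (buffer address,
         * length, EOP, and RS on selected slots so the NIC reports
         * completions), sync the buffer for DMA, and finally kick the
         * NIC by writing TDT.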
         */

        nm_i = kring->nr_hwcur;
        if (nm_i != head) {     /* we have new packets to send */
                nic_i = netmap_idx_k2n(kring, nm_i);
                while (nm_i != head) {
                        struct netmap_slot *slot = &ring->slot[nm_i];
                        u_int len = slot->len;
                        uint64_t paddr;
                        void *addr = PNMB(slot, &paddr);

                        /* device-specific */
                        struct e1000_tx_desc *curr = &adapter->tx_desc_base[nic_i];
                        struct em_buffer *txbuf = &adapter->tx_buffer_area[nic_i];
                        int flags = (slot->flags & NS_REPORT ||
                                nic_i == 0 || nic_i == report_frequency) ?
                                E1000_TXD_CMD_RS : 0;

                        NM_CHECK_ADDR_LEN(addr, len);

                        if (slot->flags & NS_BUF_CHANGED) {
                                /* buffer has changed, reload map */
                                curr->buffer_addr = htole64(paddr);
                                netmap_reload_map(adapter->txtag, txbuf->map, addr);
                        }
                        slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

                        /* Fill the slot in the NIC ring. */
                        curr->upper.data = 0;
                        curr->lower.data = htole32(adapter->txd_cmd | len |
                                (E1000_TXD_CMD_EOP | flags) );
                        bus_dmamap_sync(adapter->txtag, txbuf->map,
                                BUS_DMASYNC_PREWRITE);

                        nm_i = nm_next(nm_i, lim);
                        nic_i = nm_next(nic_i, lim);
                }
                kring->nr_hwcur = head;

                /* synchronize the NIC ring */
                bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
                        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                /* (re)start the tx unit up to slot nic_i (excluded) */
                E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), nic_i);
        }

        /*
         * Second part: reclaim buffers for completed transmissions.
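         *
         * Read TDH to learn which descriptors the NIC has finished
         * with, record the position in next_tx_to_clean, and move
         * nr_hwtail forward accordingly, keeping one slot of slack.
         * Reclaiming is limited to once per tick unless it is forced
         * or the ring has run out of free slots.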
         */
        if (ticks != kring->last_reclaim || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
                kring->last_reclaim = ticks;
                /* record completed transmissions using TDH */
                nic_i = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
                if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */
                        D("TDH wrap %d", nic_i);
                        nic_i -= kring->nkr_num_slots;
                }
                adapter->next_tx_to_clean = nic_i;
                kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
        }

        nm_txsync_finalize(kring);

        return 0;
}


/*
 * Reconcile kernel and user view of the receive ring.
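 *
 * Two phases: first, descriptors completed by the NIC are made
 * visible to userspace by advancing nr_hwtail; second, slots that
 * userspace has released are handed back to the NIC by clearing
 * their descriptors and moving RDT forward.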
 */
static int
lem_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_kring *kring = &na->rx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        u_int nm_i;     /* index into the netmap ring */
        u_int nic_i;    /* index into the NIC ring */
        u_int n;
        u_int const lim = kring->nkr_num_slots - 1;
        u_int const head = nm_rxsync_prologue(kring);
        int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

        /* device-specific */
        struct adapter *adapter = ifp->if_softc;

        if (head > lim)
                return netmap_ring_reinit(kring);

        /* XXX check sync modes */
        bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
                        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        /*
         * First part: import newly received packets.
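         *
         * Scan the NIC ring starting at next_rx_desc_to_check: every
         * descriptor with the DD bit set becomes a netmap slot
         * (length minus CRC, plus the standard slot flags), and
         * nr_hwtail is advanced past the last one found.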
         */
        if (netmap_no_pendintr || force_update) {
                uint16_t slot_flags = kring->nkr_slot_flags;

                nic_i = adapter->next_rx_desc_to_check;
                nm_i = netmap_idx_n2k(kring, nic_i);

                for (n = 0; ; n++) {
                        struct e1000_rx_desc *curr = &adapter->rx_desc_base[nic_i];
                        uint32_t staterr = le32toh(curr->status);
                        int len;

                        if ((staterr & E1000_RXD_STAT_DD) == 0)
                                break;
                        len = le16toh(curr->length) - 4; /* strip the 4-byte CRC */
                        if (len < 0) {
                                D("bogus pkt size %d nic idx %d", len, nic_i);
                                len = 0;
                        }
                        ring->slot[nm_i].len = len;
                        ring->slot[nm_i].flags = slot_flags;
                        bus_dmamap_sync(adapter->rxtag,
                                adapter->rx_buffer_area[nic_i].map,
                                BUS_DMASYNC_POSTREAD);
                        nm_i = nm_next(nm_i, lim);
                        nic_i = nm_next(nic_i, lim);
                }
                if (n) { /* update the state variables */
                        ND("%d new packets at nic %d nm %d tail %d",
                                n,
                                adapter->next_rx_desc_to_check,
                                netmap_idx_n2k(kring, adapter->next_rx_desc_to_check),
                                kring->nr_hwtail);
                        adapter->next_rx_desc_to_check = nic_i;
                        // ifp->if_ipackets += n;
                        kring->nr_hwtail = nm_i;
                }
                kring->nr_kflags &= ~NKR_PENDINTR;
        }

        /*
         * Second part: skip past packets that userspace has released.
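         *
         * Slots between nr_hwcur and head have been consumed by the
         * application: reset their descriptors (reloading the DMA map
         * if the buffer changed) and return them to the NIC by moving
         * RDT, always leaving one slot unused.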
         */
        nm_i = kring->nr_hwcur;
        if (nm_i != head) {
                nic_i = netmap_idx_k2n(kring, nm_i);
                for (n = 0; nm_i != head; n++) {
                        struct netmap_slot *slot = &ring->slot[nm_i];
                        uint64_t paddr;
                        void *addr = PNMB(slot, &paddr);

                        struct e1000_rx_desc *curr = &adapter->rx_desc_base[nic_i];
                        struct em_buffer *rxbuf = &adapter->rx_buffer_area[nic_i];

                        if (addr == netmap_buffer_base) /* bad buf */
                                goto ring_reset;

                        if (slot->flags & NS_BUF_CHANGED) {
                                /* buffer has changed, reload map */
                                curr->buffer_addr = htole64(paddr);
                                netmap_reload_map(adapter->rxtag, rxbuf->map, addr);
                                slot->flags &= ~NS_BUF_CHANGED;
                        }
                        curr->status = 0;
                        bus_dmamap_sync(adapter->rxtag, rxbuf->map,
                            BUS_DMASYNC_PREREAD);
                        nm_i = nm_next(nm_i, lim);
                        nic_i = nm_next(nic_i, lim);
                }
                kring->nr_hwcur = head;
                bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                /*
                 * IMPORTANT: we must leave one free slot in the ring,
                 * so move nic_i back by one unit
                 */
                nic_i = nm_prev(nic_i, lim);
                E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), nic_i);
        }

        /* tell userspace that there might be new packets */
        nm_rxsync_finalize(kring);

        return 0;

ring_reset:
        return netmap_ring_reinit(kring);
}


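/*
 * Describe the adapter to the netmap subsystem: fill a struct
 * netmap_adapter with the ring sizes and the callbacks defined
 * above, then register it with netmap_attach().
 */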
static void
lem_netmap_attach(struct adapter *adapter)
{
        struct netmap_adapter na;

        bzero(&na, sizeof(na));

        na.ifp = adapter->ifp;
        na.na_flags = NAF_BDG_MAYSLEEP;
        na.num_tx_desc = adapter->num_tx_desc;
        na.num_rx_desc = adapter->num_rx_desc;
        na.nm_txsync = lem_netmap_txsync;
        na.nm_rxsync = lem_netmap_rxsync;
        na.nm_register = lem_netmap_reg;
        na.num_tx_rings = na.num_rx_rings = 1;
        netmap_attach(&na);
}

/* end of file */