/*      $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>

MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int nfe_shutdown(device_t);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_link_task(void *, int);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int nfe_intr(void *);
static void nfe_int_task(void *, int);
static void *nfe_jalloc(struct nfe_softc *);
static void nfe_jfree(void *, void *);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int nfe_newbuf(struct nfe_softc *, int);
static int nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int);
static int  nfe_jrxeof(struct nfe_softc *, int);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_tx_task(void *, int);
static void nfe_start(struct ifnet *);
static void nfe_watchdog(struct ifnet *);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(struct ifnet *);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(struct ifnet *);
static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);

#ifdef NFE_DEBUG
static int nfedebug = 0;
#define DPRINTF(sc, ...)        do {                            \
        if (nfedebug)                                           \
                device_printf((sc)->nfe_dev, __VA_ARGS__);      \
} while (0)
#define DPRINTFN(sc, n, ...)    do {                            \
        if (nfedebug >= (n))                                    \
                device_printf((sc)->nfe_dev, __VA_ARGS__);      \
} while (0)
#else
#define DPRINTF(sc, ...)
#define DPRINTFN(sc, n, ...)
#endif

#define NFE_LOCK(_sc)           mtx_lock(&(_sc)->nfe_mtx)
#define NFE_UNLOCK(_sc)         mtx_unlock(&(_sc)->nfe_mtx)
#define NFE_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

#define NFE_JLIST_LOCK(_sc)     mtx_lock(&(_sc)->nfe_jlist_mtx)
#define NFE_JLIST_UNLOCK(_sc)   mtx_unlock(&(_sc)->nfe_jlist_mtx)

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
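/*
 * These tunables can be set from the loader, e.g. in /boot/loader.conf:
 *   hw.nfe.msi_disable=1
 *   hw.nfe.msix_disable=1
 *   hw.nfe.jumbo_disable=1
 */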

static device_method_t nfe_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         nfe_probe),
        DEVMETHOD(device_attach,        nfe_attach),
        DEVMETHOD(device_detach,        nfe_detach),
        DEVMETHOD(device_suspend,       nfe_suspend),
        DEVMETHOD(device_resume,        nfe_resume),
        DEVMETHOD(device_shutdown,      nfe_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       nfe_miibus_readreg),
        DEVMETHOD(miibus_writereg,      nfe_miibus_writereg),
        DEVMETHOD(miibus_statchg,       nfe_miibus_statchg),

        { NULL, NULL }
};

static driver_t nfe_driver = {
        "nfe",
        nfe_methods,
        sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

static struct nfe_type nfe_devs[] = {
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
            "NVIDIA nForce MCP Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
            "NVIDIA nForce2 MCP2 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
            "NVIDIA nForce2 400 MCP4 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
            "NVIDIA nForce2 400 MCP5 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
            "NVIDIA nForce3 MCP3 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
            "NVIDIA nForce3 250 MCP6 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
            "NVIDIA nForce3 MCP7 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
            "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
            "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
            "NVIDIA nForce MCP04 Networking Adapter"},          /* MCP10 */
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
            "NVIDIA nForce MCP04 Networking Adapter"},          /* MCP11 */
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
            "NVIDIA nForce 430 MCP12 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
            "NVIDIA nForce 430 MCP13 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
            "NVIDIA nForce MCP55 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
            "NVIDIA nForce MCP55 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
            "NVIDIA nForce MCP61 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
            "NVIDIA nForce MCP65 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
            "NVIDIA nForce MCP67 Networking Adapter"},
        {0, 0, NULL}
};


/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
        struct nfe_type *t;

        t = nfe_devs;
        /* Check for matching PCI device IDs */
        while (t->name != NULL) {
                if ((pci_get_vendor(dev) == t->vid_id) &&
                    (pci_get_device(dev) == t->dev_id)) {
                        device_set_desc(dev, t->name);
                        return (BUS_PROBE_DEFAULT);
                }
                t++;
        }

        return (ENXIO);
}

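/*
 * Allocate MSI-X resources.  On these controllers the MSI-X table lives
 * behind BAR(2) and the pending-bit array (PBA) behind BAR(3); both are
 * mapped before pci_alloc_msix() is attempted.  On any failure the
 * resources are released and nfe_attach() falls back to MSI or INTx.
 */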
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
        int rid;

        rid = PCIR_BAR(2);
        sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
            &rid, RF_ACTIVE);
        if (sc->nfe_msix_res == NULL) {
                device_printf(sc->nfe_dev,
                    "couldn't allocate MSIX table resource\n");
                return;
        }
        rid = PCIR_BAR(3);
        sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (sc->nfe_msix_pba_res == NULL) {
                device_printf(sc->nfe_dev,
                    "couldn't allocate MSIX PBA resource\n");
                bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
                    sc->nfe_msix_res);
                sc->nfe_msix_res = NULL;
                return;
        }

        if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
                if (count == NFE_MSI_MESSAGES) {
                        if (bootverbose)
                                device_printf(sc->nfe_dev,
                                    "Using %d MSIX messages\n", count);
                        sc->nfe_msix = 1;
                } else {
                        if (bootverbose)
                                device_printf(sc->nfe_dev,
                                    "couldn't allocate MSIX\n");
                        pci_release_msi(sc->nfe_dev);
                        bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                            PCIR_BAR(3), sc->nfe_msix_pba_res);
                        bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                            PCIR_BAR(2), sc->nfe_msix_res);
                        sc->nfe_msix_pba_res = NULL;
                        sc->nfe_msix_res = NULL;
                }
        }
}

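/*
 * Attach sequence: map the register BAR, sanity-check the PCIe link
 * width, set up MSI-X/MSI/INTx interrupt resources, identify per-chip
 * features, allocate the DMA rings, then attach the ifnet and MII bus.
 * On error we bail out through nfe_detach(), which tolerates partially
 * initialized state.
 */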
static int
nfe_attach(device_t dev)
{
        struct nfe_softc *sc;
        struct ifnet *ifp;
        bus_addr_t dma_addr_max;
        int error = 0, i, msic, reg, rid;

        sc = device_get_softc(dev);
        sc->nfe_dev = dev;

        mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        mtx_init(&sc->nfe_jlist_mtx, "nfe_jlist_mtx", NULL, MTX_DEF);
        callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
        TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc);
        SLIST_INIT(&sc->nfe_jfree_listhead);
        SLIST_INIT(&sc->nfe_jinuse_listhead);

        pci_enable_busmaster(dev);

        rid = PCIR_BAR(0);
        sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (sc->nfe_res[0] == NULL) {
                device_printf(dev, "couldn't map memory resources\n");
                mtx_destroy(&sc->nfe_jlist_mtx);
                mtx_destroy(&sc->nfe_mtx);
                return (ENXIO);
        }

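        /*
         * The raw config offsets below are relative to the PCI Express
         * capability: 0x08 is the device control register (max read
         * request size), 0x0c the link capability register and 0x12 the
         * link status register.
         */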
        if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
                uint16_t v, width;

                v = pci_read_config(dev, reg + 0x08, 2);
                /* Change max. read request size to 4096. */
                v &= ~(7 << 12);
                v |= (5 << 12);
                pci_write_config(dev, reg + 0x08, v, 2);

                v = pci_read_config(dev, reg + 0x0c, 2);
                /* link capability */
                v = (v >> 4) & 0x0f;
                width = pci_read_config(dev, reg + 0x12, 2);
                /* negotiated link width */
                width = (width >> 4) & 0x3f;
                if (v != width)
                        device_printf(sc->nfe_dev,
                            "warning, negotiated width of link(x%d) != "
                            "max. width of link(x%d)\n", width, v);
        }

        /* Allocate interrupt */
        if (msix_disable == 0 || msi_disable == 0) {
                if (msix_disable == 0 &&
                    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
                        nfe_alloc_msix(sc, msic);
                if (msi_disable == 0 && sc->nfe_msix == 0 &&
                    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
                    pci_alloc_msi(dev, &msic) == 0) {
                        if (msic == NFE_MSI_MESSAGES) {
                                if (bootverbose)
                                        device_printf(dev,
                                            "Using %d MSI messages\n", msic);
                                sc->nfe_msi = 1;
                        } else
                                pci_release_msi(dev);
                }
        }

        if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
                rid = 0;
                sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (sc->nfe_irq[0] == NULL) {
                        device_printf(dev, "couldn't allocate IRQ resources\n");
                        error = ENXIO;
                        goto fail;
                }
        } else {
                for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
                        sc->nfe_irq[i] = bus_alloc_resource_any(dev,
                            SYS_RES_IRQ, &rid, RF_ACTIVE);
                        if (sc->nfe_irq[i] == NULL) {
                                device_printf(dev,
                                    "couldn't allocate IRQ resources for "
                                    "message %d\n", rid);
                                error = ENXIO;
                                goto fail;
                        }
                }
                /* Map interrupts to vector 0. */
                if (sc->nfe_msix != 0) {
                        NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
                        NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
                } else if (sc->nfe_msi != 0) {
                        NFE_WRITE(sc, NFE_MSI_MAP0, 0);
                        NFE_WRITE(sc, NFE_MSI_MAP1, 0);
                }
        }

        /* Set IRQ status/mask register. */
        sc->nfe_irq_status = NFE_IRQ_STATUS;
        sc->nfe_irq_mask = NFE_IRQ_MASK;
        sc->nfe_intrs = NFE_IRQ_WANTED;
        sc->nfe_nointrs = 0;
        if (sc->nfe_msix != 0) {
                sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
                sc->nfe_nointrs = NFE_IRQ_WANTED;
        } else if (sc->nfe_msi != 0) {
                sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
                sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
        }

        sc->nfe_devid = pci_get_device(dev);
        sc->nfe_revid = pci_get_revid(dev);
        sc->nfe_flags = 0;

        switch (sc->nfe_devid) {
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
                break;
        case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
                sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
                break;
        case PCI_PRODUCT_NVIDIA_CK804_LAN1:
        case PCI_PRODUCT_NVIDIA_CK804_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
                break;
        case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
                    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL;
                break;
        case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
                sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
                    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL;
                break;
        case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
        case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
        case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
        case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
                sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
                    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL;
                break;
        }

        nfe_power(sc);
        /* Check for reversed ethernet address */
        if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
                sc->nfe_flags |= NFE_CORRECT_MACADDR;
        nfe_get_macaddr(sc, sc->eaddr);
        /*
         * Allocate the parent bus DMA tag appropriate for PCI.
         */
        dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
        if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
                dma_addr_max = NFE_DMA_MAXADDR;
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->nfe_dev),       /* parent */
            1, 0,                               /* alignment, boundary */
            dma_addr_max,                       /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT, 0,         /* maxsize, nsegments */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->nfe_parent_tag);
        if (error)
                goto fail;

        ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "can not if_alloc()\n");
                error = ENOSPC;
                goto fail;
        }
        TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);

        /*
         * Allocate Tx and Rx rings.
         */
        if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
                goto fail;

        if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
                goto fail;

        nfe_alloc_jrx_ring(sc, &sc->jrxq);

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
            &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
            "max number of Rx events to process");

        sc->nfe_process_limit = NFE_PROC_DEFAULT;
        error = resource_int_value(device_get_name(dev), device_get_unit(dev),
            "process_limit", &sc->nfe_process_limit);
        if (error == 0) {
                if (sc->nfe_process_limit < NFE_PROC_MIN ||
                    sc->nfe_process_limit > NFE_PROC_MAX) {
                        device_printf(dev, "process_limit value out of range; "
                            "using default: %d\n", NFE_PROC_DEFAULT);
                        sc->nfe_process_limit = NFE_PROC_DEFAULT;
                }
        }
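        /*
         * The limit may also be changed at runtime, e.g.:
         *   sysctl dev.nfe.0.process_limit=128
         * where the sysctl_hw_nfe_proc_limit() handler is expected to
         * reject values outside [NFE_PROC_MIN, NFE_PROC_MAX].
         */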

        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = nfe_ioctl;
        ifp->if_start = nfe_start;
        ifp->if_hwassist = 0;
        ifp->if_capabilities = 0;
        ifp->if_watchdog = NULL;
        ifp->if_init = nfe_init;
        IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
        ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
        IFQ_SET_READY(&ifp->if_snd);

        if (sc->nfe_flags & NFE_HW_CSUM) {
                ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
                ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
        }
        ifp->if_capenable = ifp->if_capabilities;

        sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
        /* VLAN capability setup. */
        ifp->if_capabilities |= IFCAP_VLAN_MTU;
        if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
                ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
                if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
                        ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
        }
        ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
        ifp->if_capabilities |= IFCAP_POLLING;
#endif

        /* Do MII setup */
        if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
            nfe_ifmedia_sts)) {
                device_printf(dev, "MII without any phy!\n");
                error = ENXIO;
                goto fail;
        }
        ether_ifattach(ifp, sc->eaddr);

        /*
         * Tell the upper layer(s) we support long frames.
         * Must appear after the call to ether_ifattach() because
         * ether_ifattach() sets ifi_hdrlen to the default value.
         */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

        TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
        sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->nfe_tq);
        taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->nfe_dev));
        error = 0;
        if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
                error = bus_setup_intr(dev, sc->nfe_irq[0],
                    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
                    &sc->nfe_intrhand[0]);
        } else {
                for (i = 0; i < NFE_MSI_MESSAGES; i++) {
                        error = bus_setup_intr(dev, sc->nfe_irq[i],
                            INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
                            &sc->nfe_intrhand[i]);
                        if (error != 0)
                                break;
                }
        }
        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                taskqueue_free(sc->nfe_tq);
                sc->nfe_tq = NULL;
                ether_ifdetach(ifp);
                goto fail;
        }

fail:
        if (error)
                nfe_detach(dev);

        return (error);
}

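/*
 * Detach is also used as the error path from nfe_attach(), so every
 * teardown step below checks whether its resource was actually set up.
 */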
static int
nfe_detach(device_t dev)
{
        struct nfe_softc *sc;
        struct ifnet *ifp;
        uint8_t eaddr[ETHER_ADDR_LEN];
        int i, rid;

        sc = device_get_softc(dev);
        KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
        ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
        if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
                ether_poll_deregister(ifp);
#endif
        if (device_is_attached(dev)) {
                NFE_LOCK(sc);
                nfe_stop(ifp);
                ifp->if_flags &= ~IFF_UP;
                NFE_UNLOCK(sc);
                callout_drain(&sc->nfe_stat_ch);
                taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
                taskqueue_drain(taskqueue_swi, &sc->nfe_link_task);
                ether_ifdetach(ifp);
        }

        if (ifp) {
                /* restore ethernet address */
                if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
                        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                                eaddr[i] = sc->eaddr[5 - i];
                        }
                } else
                        bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
                nfe_set_macaddr(sc, eaddr);
                if_free(ifp);
        }
        if (sc->nfe_miibus)
                device_delete_child(dev, sc->nfe_miibus);
        bus_generic_detach(dev);
        if (sc->nfe_tq != NULL) {
                taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
                taskqueue_free(sc->nfe_tq);
                sc->nfe_tq = NULL;
        }

        for (i = 0; i < NFE_MSI_MESSAGES; i++) {
                if (sc->nfe_intrhand[i] != NULL) {
                        bus_teardown_intr(dev, sc->nfe_irq[i],
                            sc->nfe_intrhand[i]);
                        sc->nfe_intrhand[i] = NULL;
                }
        }

        if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
                if (sc->nfe_irq[0] != NULL)
                        bus_release_resource(dev, SYS_RES_IRQ, 0,
                            sc->nfe_irq[0]);
        } else {
                for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
                        if (sc->nfe_irq[i] != NULL) {
                                bus_release_resource(dev, SYS_RES_IRQ, rid,
                                    sc->nfe_irq[i]);
                                sc->nfe_irq[i] = NULL;
                        }
                }
                pci_release_msi(dev);
        }
        if (sc->nfe_msix_pba_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
                    sc->nfe_msix_pba_res);
                sc->nfe_msix_pba_res = NULL;
        }
        if (sc->nfe_msix_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
                    sc->nfe_msix_res);
                sc->nfe_msix_res = NULL;
        }
        if (sc->nfe_res[0] != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
                    sc->nfe_res[0]);
                sc->nfe_res[0] = NULL;
        }

        nfe_free_tx_ring(sc, &sc->txq);
        nfe_free_rx_ring(sc, &sc->rxq);
        nfe_free_jrx_ring(sc, &sc->jrxq);

        if (sc->nfe_parent_tag) {
                bus_dma_tag_destroy(sc->nfe_parent_tag);
                sc->nfe_parent_tag = NULL;
        }

        mtx_destroy(&sc->nfe_jlist_mtx);
        mtx_destroy(&sc->nfe_mtx);

        return (0);
}


static int
nfe_suspend(device_t dev)
{
        struct nfe_softc *sc;

        sc = device_get_softc(dev);

        NFE_LOCK(sc);
        nfe_stop(sc->nfe_ifp);
        sc->nfe_suspended = 1;
        NFE_UNLOCK(sc);

        return (0);
}


static int
nfe_resume(device_t dev)
{
        struct nfe_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);

        NFE_LOCK(sc);
        ifp = sc->nfe_ifp;
        if (ifp->if_flags & IFF_UP)
                nfe_init_locked(sc);
        sc->nfe_suspended = 0;
        NFE_UNLOCK(sc);

        return (0);
}


/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
        uint32_t pwr;

        if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
                return;
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
        NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
        DELAY(100);
        NFE_WRITE(sc, NFE_MAC_RESET, 0);
        DELAY(100);
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
        pwr = NFE_READ(sc, NFE_PWR2_CTL);
        pwr &= ~NFE_PWR2_WAKEUP_MASK;
        if (sc->nfe_revid >= 0xa3 &&
            (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
            sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
                pwr |= NFE_PWR2_REVA3;
        NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}


static void
nfe_miibus_statchg(device_t dev)
{
        struct nfe_softc *sc;

        sc = device_get_softc(dev);
        taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task);
}


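/*
 * nfe_miibus_statchg() defers its work here via taskqueue_swi; this task
 * reprograms the MAC (duplex, speed, pause frames) and starts or stops
 * the Tx/Rx engines to match the current link state.
 */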
static void
nfe_link_task(void *arg, int pending)
{
        struct nfe_softc *sc;
        struct mii_data *mii;
        struct ifnet *ifp;
        uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
        uint32_t gmask, rxctl, txctl, val;

        sc = (struct nfe_softc *)arg;

        NFE_LOCK(sc);

        mii = device_get_softc(sc->nfe_miibus);
        ifp = sc->nfe_ifp;
        if (mii == NULL || ifp == NULL) {
                NFE_UNLOCK(sc);
                return;
        }

        if (mii->mii_media_status & IFM_ACTIVE) {
                if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
                        sc->nfe_link = 1;
        } else
                sc->nfe_link = 0;

        phy = NFE_READ(sc, NFE_PHY_IFACE);
        phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

        seed = NFE_READ(sc, NFE_RNDSEED);
        seed &= ~NFE_SEED_MASK;

        if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) {
                phy  |= NFE_PHY_HDX;    /* half-duplex */
                misc |= NFE_MISC1_HDX;
        }

        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_1000_T:        /* full-duplex only */
                link |= NFE_MEDIA_1000T;
                seed |= NFE_SEED_1000T;
                phy  |= NFE_PHY_1000T;
                break;
        case IFM_100_TX:
                link |= NFE_MEDIA_100TX;
                seed |= NFE_SEED_100TX;
                phy  |= NFE_PHY_100TX;
                break;
        case IFM_10_T:
                link |= NFE_MEDIA_10T;
                seed |= NFE_SEED_10T;
                break;
        }

        if ((phy & 0x10000000) != 0) {
                if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
                        val = NFE_R1_MAGIC_1000;
                else
                        val = NFE_R1_MAGIC_10_100;
        } else
                val = NFE_R1_MAGIC_DEFAULT;
        NFE_WRITE(sc, NFE_SETUP_R1, val);

        NFE_WRITE(sc, NFE_RNDSEED, seed);       /* XXX: gigabit NICs only? */

        NFE_WRITE(sc, NFE_PHY_IFACE, phy);
        NFE_WRITE(sc, NFE_MISC1, misc);
        NFE_WRITE(sc, NFE_LINKSPEED, link);

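        /*
         * With full-duplex media, IFM_FLAG0/IFM_FLAG1 in the resolved
         * media word indicate that Rx/Tx pause frames, respectively,
         * were negotiated by the PHY.
         */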
        gmask = mii->mii_media_active & IFM_GMASK;
        if ((gmask & IFM_FDX) != 0) {
                /* It seems all hardware supports Rx pause frames. */
                val = NFE_READ(sc, NFE_RXFILTER);
                if ((gmask & IFM_FLAG0) != 0)
                        val |= NFE_PFF_RX_PAUSE;
                else
                        val &= ~NFE_PFF_RX_PAUSE;
                NFE_WRITE(sc, NFE_RXFILTER, val);
                if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
                        val = NFE_READ(sc, NFE_MISC1);
                        if ((gmask & IFM_FLAG1) != 0) {
                                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                                    NFE_TX_PAUSE_FRAME_ENABLE);
                                val |= NFE_MISC1_TX_PAUSE;
                        } else {
                                val &= ~NFE_MISC1_TX_PAUSE;
                                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                                    NFE_TX_PAUSE_FRAME_DISABLE);
                        }
                        NFE_WRITE(sc, NFE_MISC1, val);
                }
        } else {
                /* disable rx/tx pause frames */
                val = NFE_READ(sc, NFE_RXFILTER);
                val &= ~NFE_PFF_RX_PAUSE;
                NFE_WRITE(sc, NFE_RXFILTER, val);
                if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
                        NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                            NFE_TX_PAUSE_FRAME_DISABLE);
                        val = NFE_READ(sc, NFE_MISC1);
                        val &= ~NFE_MISC1_TX_PAUSE;
                        NFE_WRITE(sc, NFE_MISC1, val);
                }
        }

        txctl = NFE_READ(sc, NFE_TX_CTL);
        rxctl = NFE_READ(sc, NFE_RX_CTL);
        if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                txctl |= NFE_TX_START;
                rxctl |= NFE_RX_START;
        } else {
                txctl &= ~NFE_TX_START;
                rxctl &= ~NFE_RX_START;
        }
        NFE_WRITE(sc, NFE_TX_CTL, txctl);
        NFE_WRITE(sc, NFE_RX_CTL, rxctl);

        NFE_UNLOCK(sc);
}


static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
        struct nfe_softc *sc = device_get_softc(dev);
        uint32_t val;
        int ntries;

        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

        if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
                NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
                DELAY(100);
        }

        NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

        for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
                DELAY(100);
                if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
                        break;
        }
        if (ntries == NFE_TIMEOUT) {
                DPRINTFN(sc, 2, "timeout waiting for PHY\n");
                return (0);
        }

        if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
                DPRINTFN(sc, 2, "could not read PHY\n");
                return (0);
        }

        val = NFE_READ(sc, NFE_PHY_DATA);
        if (val != 0xffffffff && val != 0)
                sc->mii_phyaddr = phy;

        DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

        return (val);
}


static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct nfe_softc *sc = device_get_softc(dev);
        uint32_t ctl;
        int ntries;

        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

        if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
                NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
                DELAY(100);
        }

        NFE_WRITE(sc, NFE_PHY_DATA, val);
        ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
        NFE_WRITE(sc, NFE_PHY_CTL, ctl);

        for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
                DELAY(100);
                if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
                        break;
        }
#ifdef NFE_DEBUG
        if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
                device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
        return (0);
}

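/*
 * Jumbo buffer management: nfe_alloc_jrx_ring() carves one contiguous
 * DMA'able block (the "jpool") into NFE_JSLOTS buffers of NFE_JLEN bytes
 * and threads them onto a free list.  nfe_jalloc()/nfe_jfree() move
 * slots between the free and in-use lists under the jlist mutex.
 */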
/*
 * Allocate a jumbo buffer.
 */
static void *
nfe_jalloc(struct nfe_softc *sc)
{
        struct nfe_jpool_entry *entry;

        NFE_JLIST_LOCK(sc);

        entry = SLIST_FIRST(&sc->nfe_jfree_listhead);

        if (entry == NULL) {
                NFE_JLIST_UNLOCK(sc);
                return (NULL);
        }

        SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries);
        SLIST_INSERT_HEAD(&sc->nfe_jinuse_listhead, entry, jpool_entries);

        NFE_JLIST_UNLOCK(sc);

        return (sc->jrxq.jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
nfe_jfree(void *buf, void *args)
{
        struct nfe_softc *sc;
        struct nfe_jpool_entry *entry;
        int i;

        /* Extract the softc struct pointer. */
        sc = (struct nfe_softc *)args;
        KASSERT(sc != NULL, ("%s: can't find softc pointer!", __func__));

        NFE_JLIST_LOCK(sc);
        /* Calculate the slot this buffer belongs to. */
        i = ((vm_offset_t)buf - (vm_offset_t)sc->jrxq.jpool) / NFE_JLEN;
        KASSERT(i >= 0 && i < NFE_JSLOTS,
            ("%s: asked to free buffer that we don't manage!", __func__));

        entry = SLIST_FIRST(&sc->nfe_jinuse_listhead);
        KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
        entry->slot = i;
        SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries);
        SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, jpool_entries);
        if (SLIST_EMPTY(&sc->nfe_jinuse_listhead))
                wakeup(sc);

        NFE_JLIST_UNLOCK(sc);
}
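
/*
 * nfe_jalloc() and nfe_jfree() are paired through external mbuf storage.
 * A minimal sketch of the expected use in nfe_jnewbuf(), assuming the
 * two-argument free callback form of MEXTADD() (both pointers are passed
 * through to nfe_jfree() as buf/args):
 *
 *      if ((buf = nfe_jalloc(sc)) == NULL)
 *              return (ENOBUFS);
 *      MEXTADD(m, buf, NFE_JLEN, nfe_jfree, buf, sc, 0, EXT_NET_DRV);
 */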

struct nfe_dmamap_arg {
        bus_addr_t nfe_busaddr;
};

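/*
 * Ring allocation uses two bus_dma tags per ring: one for the descriptor
 * array itself (loaded once, its bus address handed to the chip) and one
 * for the per-slot Rx buffers.  A spare map is kept so a replacement mbuf
 * can be loaded before the old one is torn down in the Rx path.
 */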
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
        struct nfe_dmamap_arg ctx;
        struct nfe_rx_data *data;
        void *desc;
        int i, error, descsize;

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->desc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->desc32;
                descsize = sizeof (struct nfe_desc32);
        }

        ring->cur = ring->next = 0;

        error = bus_dma_tag_create(sc->nfe_parent_tag,
            NFE_RING_ALIGN, 0,                  /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            NFE_RX_RING_COUNT * descsize, 1,    /* maxsize, nsegments */
            NFE_RX_RING_COUNT * descsize,       /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->rx_desc_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
                goto fail;
        }

        /* Allocate memory for the descriptors. */
        error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not allocate desc DMA memory\n");
                goto fail;
        }
        if (sc->nfe_flags & NFE_40BIT_ADDR)
                ring->desc64 = desc;
        else
                ring->desc32 = desc;

        /* Map the descriptors into device visible address space. */
        ctx.nfe_busaddr = 0;
        error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
            NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not load desc DMA map\n");
                goto fail;
        }
        ring->physaddr = ctx.nfe_busaddr;

        error = bus_dma_tag_create(sc->nfe_parent_tag,
            1, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            MCLBYTES, 1,                /* maxsize, nsegments */
            MCLBYTES,                   /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &ring->rx_data_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
                goto fail;
        }

        error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create Rx DMA spare map\n");
                goto fail;
        }

        /*
         * Pre-allocate Rx buffers and populate Rx ring.
         */
        for (i = 0; i < NFE_RX_RING_COUNT; i++) {
                data = &sc->rxq.data[i];
                data->rx_data_map = NULL;
                data->m = NULL;
                error = bus_dmamap_create(ring->rx_data_tag, 0,
                    &data->rx_data_map);
                if (error != 0) {
                        device_printf(sc->nfe_dev,
                            "could not create Rx DMA map\n");
                        goto fail;
                }
        }

fail:
        return (error);
}


static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
        struct nfe_dmamap_arg ctx;
        struct nfe_rx_data *data;
        void *desc;
        struct nfe_jpool_entry *entry;
        uint8_t *ptr;
        int i, error, descsize;

        if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
                return;
        if (jumbo_disable != 0) {
                device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
                sc->nfe_jumbo_disable = 1;
                return;
        }

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->jdesc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->jdesc32;
                descsize = sizeof (struct nfe_desc32);
        }

        ring->jcur = ring->jnext = 0;

        /* Create DMA tag for jumbo Rx ring. */
        error = bus_dma_tag_create(sc->nfe_parent_tag,
            NFE_RING_ALIGN, 0,                  /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
            1,                                  /* nsegments */
            NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->jrx_desc_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo ring DMA tag\n");
                goto fail;
        }

        /* Create DMA tag for jumbo buffer blocks. */
        error = bus_dma_tag_create(sc->nfe_parent_tag,
            PAGE_SIZE, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            NFE_JMEM,                           /* maxsize */
            1,                                  /* nsegments */
            NFE_JMEM,                           /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->jrx_jumbo_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo Rx buffer block DMA tag\n");
                goto fail;
        }

        /* Create DMA tag for jumbo Rx buffers. */
        error = bus_dma_tag_create(sc->nfe_parent_tag,
            PAGE_SIZE, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            NFE_JLEN,                           /* maxsize */
            1,                                  /* nsegments */
            NFE_JLEN,                           /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &ring->jrx_data_tag);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo Rx buffer DMA tag\n");
                goto fail;
        }

        /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
        error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not allocate DMA'able memory for jumbo Rx ring\n");
                goto fail;
        }
        if (sc->nfe_flags & NFE_40BIT_ADDR)
                ring->jdesc64 = desc;
        else
                ring->jdesc32 = desc;

        ctx.nfe_busaddr = 0;
        error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
            NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not load DMA'able memory for jumbo Rx ring\n");
                goto fail;
        }
        ring->jphysaddr = ctx.nfe_busaddr;

        /* Create DMA maps for jumbo Rx buffers. */
        error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not create jumbo Rx DMA spare map\n");
                goto fail;
        }

        for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
                data = &sc->jrxq.jdata[i];
                data->rx_data_map = NULL;
                data->m = NULL;
                error = bus_dmamap_create(ring->jrx_data_tag, 0,
                    &data->rx_data_map);
                if (error != 0) {
                        device_printf(sc->nfe_dev,
                            "could not create jumbo Rx DMA map\n");
                        goto fail;
                }
        }

        /* Allocate DMA'able memory and load the DMA map for jumbo buf. */
        error = bus_dmamem_alloc(ring->jrx_jumbo_tag, (void **)&ring->jpool,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &ring->jrx_jumbo_map);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not allocate DMA'able memory for jumbo pool\n");
                goto fail;
        }

        ctx.nfe_busaddr = 0;
        error = bus_dmamap_load(ring->jrx_jumbo_tag, ring->jrx_jumbo_map,
            ring->jpool, NFE_JMEM, nfe_dma_map_segs, &ctx, 0);
        if (error != 0) {
                device_printf(sc->nfe_dev,
                    "could not load DMA'able memory for jumbo pool\n");
                goto fail;
        }

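        /*
         * NFE_JMEM is assumed to be NFE_JSLOTS * NFE_JLEN bytes, so the
         * loop below consumes the pool exactly.
         */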
        /*
         * Now divide it up into 9K pieces and save the addresses
         * in an array.
         */
        ptr = ring->jpool;
        for (i = 0; i < NFE_JSLOTS; i++) {
                ring->jslots[i] = ptr;
                ptr += NFE_JLEN;
                entry = malloc(sizeof(struct nfe_jpool_entry), M_DEVBUF,
                    M_WAITOK);
                if (entry == NULL) {
                        device_printf(sc->nfe_dev,
                            "no memory for jumbo buffers!\n");
                        error = ENOMEM;
                        goto fail;
                }
                entry->slot = i;
                SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry,
                    jpool_entries);
        }

        return;

fail:
        /*
         * Running without jumbo frame support is ok for most cases
         * so don't fail on creating dma tag/map for jumbo frame.
         */
        nfe_free_jrx_ring(sc, ring);
        device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
            "resource shortage\n");
        sc->nfe_jumbo_disable = 1;
}


static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
        void *desc;
        size_t descsize;
        int i;

        ring->cur = ring->next = 0;
        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->desc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->desc32;
                descsize = sizeof (struct nfe_desc32);
        }
        bzero(desc, descsize * NFE_RX_RING_COUNT);
        for (i = 0; i < NFE_RX_RING_COUNT; i++) {
                if (nfe_newbuf(sc, i) != 0)
                        return (ENOBUFS);
        }

        bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);
}


static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
        void *desc;
        size_t descsize;
        int i;

        ring->jcur = ring->jnext = 0;
        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->jdesc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->jdesc32;
                descsize = sizeof (struct nfe_desc32);
        }
        bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
        for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
                if (nfe_jnewbuf(sc, i) != 0)
                        return (ENOBUFS);
        }

        bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);
}


static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
        struct nfe_rx_data *data;
        void *desc;
        int i, descsize;

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->desc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->desc32;
                descsize = sizeof (struct nfe_desc32);
        }

        for (i = 0; i < NFE_RX_RING_COUNT; i++) {
                data = &ring->data[i];
                if (data->rx_data_map != NULL) {
                        bus_dmamap_destroy(ring->rx_data_tag,
                            data->rx_data_map);
                        data->rx_data_map = NULL;
                }
                if (data->m != NULL) {
                        m_freem(data->m);
                        data->m = NULL;
                }
        }
        if (ring->rx_data_tag != NULL) {
                if (ring->rx_spare_map != NULL) {
                        bus_dmamap_destroy(ring->rx_data_tag,
                            ring->rx_spare_map);
                        ring->rx_spare_map = NULL;
                }
                bus_dma_tag_destroy(ring->rx_data_tag);
                ring->rx_data_tag = NULL;
        }

        if (desc != NULL) {
                bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
                bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
                ring->desc64 = NULL;
                ring->desc32 = NULL;
                ring->rx_desc_map = NULL;
        }
        if (ring->rx_desc_tag != NULL) {
                bus_dma_tag_destroy(ring->rx_desc_tag);
                ring->rx_desc_tag = NULL;
        }
}


static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
        struct nfe_jpool_entry *entry;
        struct nfe_rx_data *data;
        void *desc;
        int i, descsize;

        if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
                return;

        NFE_JLIST_LOCK(sc);
        while ((entry = SLIST_FIRST(&sc->nfe_jinuse_listhead))) {
                device_printf(sc->nfe_dev,
                    "asked to free buffer that is in use!\n");
                SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries);
                SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry,
                    jpool_entries);
        }

        while (!SLIST_EMPTY(&sc->nfe_jfree_listhead)) {
                entry = SLIST_FIRST(&sc->nfe_jfree_listhead);
                SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries);
                free(entry, M_DEVBUF);
        }
        NFE_JLIST_UNLOCK(sc);

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
                desc = ring->jdesc64;
                descsize = sizeof (struct nfe_desc64);
        } else {
                desc = ring->jdesc32;
                descsize = sizeof (struct nfe_desc32);
        }

        for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
                data = &ring->jdata[i];
                if (data->rx_data_map != NULL) {
                        bus_dmamap_destroy(ring->jrx_data_tag,
                            data->rx_data_map);
                        data->rx_data_map = NULL;
                }
                if (data->m != NULL) {
                        m_freem(data->m);
                        data->m = NULL;
                }
        }
        if (ring->jrx_data_tag != NULL) {
                if (ring->jrx_spare_map != NULL) {
                        bus_dmamap_destroy(ring->jrx_data_tag,
                            ring->jrx_spare_map);
                        ring->jrx_spare_map = NULL;
                }
                bus_dma_tag_destroy(ring->jrx_data_tag);
                ring->jrx_data_tag = NULL;
        }

        if (desc != NULL) {
1488                 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1489                 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1490                 ring->jdesc64 = NULL;
1491                 ring->jdesc32 = NULL;
1492                 ring->jrx_desc_map = NULL;
1493         }
1494         /* Destroy jumbo buffer block. */
1495         if (ring->jrx_jumbo_map != NULL) {
1496                 bus_dmamap_unload(ring->jrx_jumbo_tag, ring->jrx_jumbo_map);
1497                 bus_dmamem_free(ring->jrx_jumbo_tag, ring->jpool,
1498                     ring->jrx_jumbo_map);
1499                 ring->jpool = NULL;
1500                 ring->jrx_jumbo_map = NULL;
1501         }
1503         if (ring->jrx_desc_tag != NULL) {
1504                 bus_dma_tag_destroy(ring->jrx_desc_tag);
1505                 ring->jrx_desc_tag = NULL;
1506         }
1507 }
1508
1509
1510 static int
1511 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1512 {
1513         struct nfe_dmamap_arg ctx;
1514         int i, error;
1515         void *desc;
1516         int descsize;
1517
1518         if (sc->nfe_flags & NFE_40BIT_ADDR) {
1519                 desc = ring->desc64;
1520                 descsize = sizeof (struct nfe_desc64);
1521         } else {
1522                 desc = ring->desc32;
1523                 descsize = sizeof (struct nfe_desc32);
1524         }
1525
1526         ring->queued = 0;
1527         ring->cur = ring->next = 0;
1528
1529         error = bus_dma_tag_create(sc->nfe_parent_tag,
1530             NFE_RING_ALIGN, 0,                  /* alignment, boundary */
1531             BUS_SPACE_MAXADDR,                  /* lowaddr */
1532             BUS_SPACE_MAXADDR,                  /* highaddr */
1533             NULL, NULL,                         /* filter, filterarg */
1534             NFE_TX_RING_COUNT * descsize, 1,    /* maxsize, nsegments */
1535             NFE_TX_RING_COUNT * descsize,       /* maxsegsize */
1536             0,                                  /* flags */
1537             NULL, NULL,                         /* lockfunc, lockarg */
1538             &ring->tx_desc_tag);
1539         if (error != 0) {
1540                 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1541                 goto fail;
1542         }
1543
1544         error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1545             BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1546         if (error != 0) {
1547                 device_printf(sc->nfe_dev, "could not allocate desc DMA memory\n");
1548                 goto fail;
1549         }
1550         if (sc->nfe_flags & NFE_40BIT_ADDR)
1551                 ring->desc64 = desc;
1552         else
1553                 ring->desc32 = desc;
1554
1555         ctx.nfe_busaddr = 0;
1556         error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1557             NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1558         if (error != 0) {
1559                 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1560                 goto fail;
1561         }
1562         ring->physaddr = ctx.nfe_busaddr;
1563
1564         error = bus_dma_tag_create(sc->nfe_parent_tag,
1565             1, 0,
1566             BUS_SPACE_MAXADDR,
1567             BUS_SPACE_MAXADDR,
1568             NULL, NULL,
1569             NFE_TSO_MAXSIZE,
1570             NFE_MAX_SCATTER,
1571             NFE_TSO_MAXSGSIZE,
1572             0,
1573             NULL, NULL,
1574             &ring->tx_data_tag);
1575         if (error != 0) {
1576                 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1577                 goto fail;
1578         }
1579
1580         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1581                 error = bus_dmamap_create(ring->tx_data_tag, 0,
1582                     &ring->data[i].tx_data_map);
1583                 if (error != 0) {
1584                         device_printf(sc->nfe_dev,
1585                             "could not create Tx DMA map\n");
1586                         goto fail;
1587                 }
1588         }
1589
1590 fail:
1591         return (error);
1592 }
1593
1594
1595 static void
1596 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1597 {
1598         void *desc;
1599         size_t descsize;
1600
1601         sc->nfe_force_tx = 0;
1602         ring->queued = 0;
1603         ring->cur = ring->next = 0;
1604         if (sc->nfe_flags & NFE_40BIT_ADDR) {
1605                 desc = ring->desc64;
1606                 descsize = sizeof (struct nfe_desc64);
1607         } else {
1608                 desc = ring->desc32;
1609                 descsize = sizeof (struct nfe_desc32);
1610         }
1611         bzero(desc, descsize * NFE_TX_RING_COUNT);
1612
1613         bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1614             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1615 }
1616
1617
1618 static void
1619 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1620 {
1621         struct nfe_tx_data *data;
1622         void *desc;
1623         int i, descsize;
1624
1625         if (sc->nfe_flags & NFE_40BIT_ADDR) {
1626                 desc = ring->desc64;
1627                 descsize = sizeof (struct nfe_desc64);
1628         } else {
1629                 desc = ring->desc32;
1630                 descsize = sizeof (struct nfe_desc32);
1631         }
1632
1633         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1634                 data = &ring->data[i];
1635
1636                 if (data->m != NULL) {
1637                         bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1638                             BUS_DMASYNC_POSTWRITE);
1639                         bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1640                         m_freem(data->m);
1641                         data->m = NULL;
1642                 }
1643                 if (data->tx_data_map != NULL) {
1644                         bus_dmamap_destroy(ring->tx_data_tag,
1645                             data->tx_data_map);
1646                         data->tx_data_map = NULL;
1647                 }
1648         }
1649
1650         if (ring->tx_data_tag != NULL) {
1651                 bus_dma_tag_destroy(ring->tx_data_tag);
1652                 ring->tx_data_tag = NULL;
1653         }
1654
1655         if (desc != NULL) {
1656                 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1657                     BUS_DMASYNC_POSTWRITE);
1658                 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1659                 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1660                 ring->desc64 = NULL;
1661                 ring->desc32 = NULL;
1662                 ring->tx_desc_map = NULL;
1663                 bus_dma_tag_destroy(ring->tx_desc_tag);
1664                 ring->tx_desc_tag = NULL;
1665         }
1666 }
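/*
 * Editor's note: the teardown above follows the usual busdma order, the
 * reverse of setup: bus_dmamap_sync(...POSTWRITE) -> bus_dmamap_unload()
 * -> m_freem()/bus_dmamem_free() -> bus_dma_tag_destroy().  Freeing a
 * buffer while its map is still loaded can leak whatever resources
 * (e.g. bounce pages) the map holds, so the unload must come first.
 */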
1667
1668 #ifdef DEVICE_POLLING
1669 static poll_handler_t nfe_poll;
1670
1671
1672 static void
1673 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1674 {
1675         struct nfe_softc *sc = ifp->if_softc;
1676         uint32_t r;
1677
1678         NFE_LOCK(sc);
1679
1680         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1681                 NFE_UNLOCK(sc);
1682                 return;
1683         }
1684
1685         if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1686                 nfe_jrxeof(sc, count);
1687         else
1688                 nfe_rxeof(sc, count);
1689         nfe_txeof(sc);
1690         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1691                 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1692
1693         if (cmd == POLL_AND_CHECK_STATUS) {
1694                 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1695                         NFE_UNLOCK(sc);
1696                         return;
1697                 }
1698                 NFE_WRITE(sc, sc->nfe_irq_status, r);
1699
1700                 if (r & NFE_IRQ_LINK) {
1701                         NFE_READ(sc, NFE_PHY_STATUS);
1702                         NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1703                         DPRINTF(sc, "link state changed\n");
1704                 }
1705         }
1706         NFE_UNLOCK(sc);
1707 }
1708 #endif /* DEVICE_POLLING */
1709
1710 static void
1711 nfe_set_intr(struct nfe_softc *sc)
1712 {
1713
1714         if (sc->nfe_msi != 0)
1715                 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1716 }
1717
1718
1719 /* In MSI-X, a write to the mask registers behaves as an XOR. */
1720 static __inline void
1721 nfe_enable_intr(struct nfe_softc *sc)
1722 {
1723
1724         if (sc->nfe_msix != 0) {
1725                 /* XXX Should have a better way to enable interrupts! */
1726                 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1727                         NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1728         } else
1729                 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1730 }
1731
1732
1733 static __inline void
1734 nfe_disable_intr(struct nfe_softc *sc)
1735 {
1736
1737         if (sc->nfe_msix != 0) {
1738                 /* XXX Should have a better way to disable interrupts! */
1739                 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1740                         NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1741         } else
1742                 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1743 }
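/*
 * Editor's note: per the comment above, writing the MSI-X mask register
 * toggles bits rather than setting them, so an unconditional write while
 * the mask is already in the desired state would flip it right back.
 * The sketch below is a minimal userland model of that XOR semantics and
 * of the read-before-write guard used above; the register and all names
 * are hypothetical stand-ins.
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdint.h>
#include <stdio.h>

static uint32_t irq_mask;	/* models the hardware mask register */

/* Models the MSI-X behavior: a write toggles bits instead of setting them. */
static void
reg_write(uint32_t v)
{

	irq_mask ^= v;
}

static void
enable_intr(uint32_t wanted)
{

	/* Only write when the mask is clear, or the XOR would undo it. */
	if (irq_mask == 0)
		reg_write(wanted);
}

int
main(void)
{

	enable_intr(0xff);	/* mask becomes 0xff */
	enable_intr(0xff);	/* the guard prevents toggling back to 0 */
	printf("mask = 0x%x\n", irq_mask);	/* prints 0xff */
	return (0);
}
#endif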
1744
1745
1746 static int
1747 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1748 {
1749         struct nfe_softc *sc;
1750         struct ifreq *ifr;
1751         struct mii_data *mii;
1752         int error, init, mask;
1753
1754         sc = ifp->if_softc;
1755         ifr = (struct ifreq *) data;
1756         error = 0;
1757         init = 0;
1758         switch (cmd) {
1759         case SIOCSIFMTU:
1760                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1761                         error = EINVAL;
1762                 else if (ifp->if_mtu != ifr->ifr_mtu) {
1763                         if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1764                             (sc->nfe_jumbo_disable != 0)) &&
1765                             ifr->ifr_mtu > ETHERMTU)
1766                                 error = EINVAL;
1767                         else {
1768                                 NFE_LOCK(sc);
1769                                 ifp->if_mtu = ifr->ifr_mtu;
1770                                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1771                                         nfe_init_locked(sc);
1772                                 NFE_UNLOCK(sc);
1773                         }
1774                 }
1775                 break;
1776         case SIOCSIFFLAGS:
1777                 NFE_LOCK(sc);
1778                 if (ifp->if_flags & IFF_UP) {
1779                         /*
1780                          * If only the PROMISC or ALLMULTI flag changes, then
1781                          * don't do a full re-init of the chip, just update
1782                          * the Rx filter.
1783                          */
1784                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1785                             ((ifp->if_flags ^ sc->nfe_if_flags) &
1786                              (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1787                                 nfe_setmulti(sc);
1788                         else
1789                                 nfe_init_locked(sc);
1790                 } else {
1791                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1792                                 nfe_stop(ifp);
1793                 }
1794                 sc->nfe_if_flags = ifp->if_flags;
1795                 NFE_UNLOCK(sc);
1796                 error = 0;
1797                 break;
1798         case SIOCADDMULTI:
1799         case SIOCDELMULTI:
1800                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1801                         NFE_LOCK(sc);
1802                         nfe_setmulti(sc);
1803                         NFE_UNLOCK(sc);
1804                         error = 0;
1805                 }
1806                 break;
1807         case SIOCSIFMEDIA:
1808         case SIOCGIFMEDIA:
1809                 mii = device_get_softc(sc->nfe_miibus);
1810                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1811                 break;
1812         case SIOCSIFCAP:
1813                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1814 #ifdef DEVICE_POLLING
1815                 if ((mask & IFCAP_POLLING) != 0) {
1816                         if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1817                                 error = ether_poll_register(nfe_poll, ifp);
1818                                 if (error)
1819                                         break;
1820                                 NFE_LOCK(sc);
1821                                 nfe_disable_intr(sc);
1822                                 ifp->if_capenable |= IFCAP_POLLING;
1823                                 NFE_UNLOCK(sc);
1824                         } else {
1825                                 error = ether_poll_deregister(ifp);
1826                                 /* Enable interrupts even in the error case. */
1827                                 NFE_LOCK(sc);
1828                                 nfe_enable_intr(sc);
1829                                 ifp->if_capenable &= ~IFCAP_POLLING;
1830                                 NFE_UNLOCK(sc);
1831                         }
1832                 }
1833 #endif /* DEVICE_POLLING */
1834                 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1835                     (mask & IFCAP_HWCSUM) != 0) {
1836                         ifp->if_capenable ^= IFCAP_HWCSUM;
1837                         if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
1838                             (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
1839                                 ifp->if_hwassist |= NFE_CSUM_FEATURES;
1840                         else
1841                                 ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1842                         init++;
1843                 }
1844                 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
1845                     (mask & IFCAP_VLAN_HWTAGGING) != 0) {
1846                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1847                         init++;
1848                 }
1849                 /*
1850                  * XXX
1851                  * It seems that VLAN stripping requires Rx checksum offload.
1852                  * Unfortunately FreeBSD has no way to disable only Rx-side
1853                  * VLAN stripping, so when Rx checksum offload is known to
1854                  * be disabled, turn the entire hardware VLAN assist off.
1855                  */
1856                 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
1857                     (NFE_HW_CSUM | NFE_HW_VLAN)) {
1858                         if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
1859                                 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
1860                 }
1861
1862                 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1863                     (mask & IFCAP_TSO4) != 0) {
1864                         ifp->if_capenable ^= IFCAP_TSO4;
1865                         if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
1866                             (IFCAP_TSO4 & ifp->if_capabilities) != 0)
1867                                 ifp->if_hwassist |= CSUM_TSO;
1868                         else
1869                                 ifp->if_hwassist &= ~CSUM_TSO;
1870                 }
1871
1872                 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1873                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1874                         nfe_init(sc);
1875                 }
1876                 if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
1877                         VLAN_CAPABILITIES(ifp);
1878                 break;
1879         default:
1880                 error = ether_ioctl(ifp, cmd, data);
1881                 break;
1882         }
1883
1884         return (error);
1885 }
1886
1887
1888 static int
1889 nfe_intr(void *arg)
1890 {
1891         struct nfe_softc *sc;
1892         uint32_t status;
1893
1894         sc = (struct nfe_softc *)arg;
1895
1896         status = NFE_READ(sc, sc->nfe_irq_status);
1897         if (status == 0 || status == 0xffffffff)
1898                 return (FILTER_STRAY);
1899         nfe_disable_intr(sc);
1900         taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1901
1902         return (FILTER_HANDLED);
1903 }
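/*
 * Editor's note: nfe_intr() is an interrupt filter: it runs in interrupt
 * context, does the minimum (check that the device really interrupted,
 * mask further interrupts), and defers the heavy lifting to nfe_int_task()
 * via the taskqueue.  Returning FILTER_STRAY for a zero status (or an
 * all-ones status, i.e. device gone) lets other handlers on a shared line
 * run.  The sketch below shows the generic shape of that split; the
 * constants and names are stand-ins, not the sys/bus.h definitions.
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdint.h>

#define MY_FILTER_STRAY		1	/* stand-in return values */
#define MY_FILTER_HANDLED	2

static uint32_t pending_status;		/* models the irq status register */
static int task_queued;			/* models taskqueue_enqueue_fast() */

static int
my_filter(void)
{

	if (pending_status == 0 || pending_status == 0xffffffff)
		return (MY_FILTER_STRAY);	/* not ours, or device gone */
	/* Mask the source and hand off; the task re-enables when done. */
	task_queued = 1;
	return (MY_FILTER_HANDLED);
}
#endif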
1904
1905
1906 static void
1907 nfe_int_task(void *arg, int pending)
1908 {
1909         struct nfe_softc *sc = arg;
1910         struct ifnet *ifp = sc->nfe_ifp;
1911         uint32_t r;
1912         int domore;
1913
1914         NFE_LOCK(sc);
1915
1916         if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1917                 nfe_enable_intr(sc);
1918                 NFE_UNLOCK(sc);
1919                 return; /* not for us */
1920         }
1921         NFE_WRITE(sc, sc->nfe_irq_status, r);
1922
1923         DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1924
1925 #ifdef DEVICE_POLLING
1926         if (ifp->if_capenable & IFCAP_POLLING) {
1927                 NFE_UNLOCK(sc);
1928                 return;
1929         }
1930 #endif
1931
1932         if (r & NFE_IRQ_LINK) {
1933                 NFE_READ(sc, NFE_PHY_STATUS);
1934                 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1935                 DPRINTF(sc, "link state changed\n");
1936         }
1937
1938         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1939                 NFE_UNLOCK(sc);
1940                 nfe_enable_intr(sc);
1941                 return;
1942         }
1943
1944         domore = 0;
1945         /* check Rx ring */
1946         if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1947                 domore = nfe_jrxeof(sc, sc->nfe_process_limit);
1948         else
1949                 domore = nfe_rxeof(sc, sc->nfe_process_limit);
1950         /* check Tx ring */
1951         nfe_txeof(sc);
1952
1953         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1954                 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1955
1956         NFE_UNLOCK(sc);
1957
1958         if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1959                 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1960                 return;
1961         }
1962
1963         /* Reenable interrupts. */
1964         nfe_enable_intr(sc);
1965 }
1966
1967
1968 static __inline void
1969 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1970 {
1971         struct nfe_desc32 *desc32;
1972         struct nfe_desc64 *desc64;
1973         struct nfe_rx_data *data;
1974         struct mbuf *m;
1975
1976         data = &sc->rxq.data[idx];
1977         m = data->m;
1978
1979         if (sc->nfe_flags & NFE_40BIT_ADDR) {
1980                 desc64 = &sc->rxq.desc64[idx];
1981                 /* VLAN packet may have overwritten it. */
1982                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1983                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1984                 desc64->length = htole16(m->m_len);
1985                 desc64->flags = htole16(NFE_RX_READY);
1986         } else {
1987                 desc32 = &sc->rxq.desc32[idx];
1988                 desc32->length = htole16(m->m_len);
1989                 desc32->flags = htole16(NFE_RX_READY);
1990         }
1991 }
1992
1993
1994 static __inline void
1995 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1996 {
1997         struct nfe_desc32 *desc32;
1998         struct nfe_desc64 *desc64;
1999         struct nfe_rx_data *data;
2000         struct mbuf *m;
2001
2002         data = &sc->jrxq.jdata[idx];
2003         m = data->m;
2004
2005         if (sc->nfe_flags & NFE_40BIT_ADDR) {
2006                 desc64 = &sc->jrxq.jdesc64[idx];
2007                 /* VLAN packet may have overwritten it. */
2008                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
2009                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
2010                 desc64->length = htole16(m->m_len);
2011                 desc64->flags = htole16(NFE_RX_READY);
2012         } else {
2013                 desc32 = &sc->jrxq.jdesc32[idx];
2014                 desc32->length = htole16(m->m_len);
2015                 desc32->flags = htole16(NFE_RX_READY);
2016         }
2017 }
2018
2019
2020 static int
2021 nfe_newbuf(struct nfe_softc *sc, int idx)
2022 {
2023         struct nfe_rx_data *data;
2024         struct nfe_desc32 *desc32;
2025         struct nfe_desc64 *desc64;
2026         struct mbuf *m;
2027         bus_dma_segment_t segs[1];
2028         bus_dmamap_t map;
2029         int nsegs;
2030
2031         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2032         if (m == NULL)
2033                 return (ENOBUFS);
2034
2035         m->m_len = m->m_pkthdr.len = MCLBYTES;
2036         m_adj(m, ETHER_ALIGN);
2037
2038         if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
2039             m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2040                 m_freem(m);
2041                 return (ENOBUFS);
2042         }
2043         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2044
2045         data = &sc->rxq.data[idx];
2046         if (data->m != NULL) {
2047                 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2048                     BUS_DMASYNC_POSTREAD);
2049                 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
2050         }
2051         map = data->rx_data_map;
2052         data->rx_data_map = sc->rxq.rx_spare_map;
2053         sc->rxq.rx_spare_map = map;
2054         bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2055             BUS_DMASYNC_PREREAD);
2056         data->paddr = segs[0].ds_addr;
2057         data->m = m;
2058         /* update mapping address in h/w descriptor */
2059         if (sc->nfe_flags & NFE_40BIT_ADDR) {
2060                 desc64 = &sc->rxq.desc64[idx];
2061                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2062                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2063                 desc64->length = htole16(segs[0].ds_len);
2064                 desc64->flags = htole16(NFE_RX_READY);
2065         } else {
2066                 desc32 = &sc->rxq.desc32[idx];
2067                 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2068                 desc32->length = htole16(segs[0].ds_len);
2069                 desc32->flags = htole16(NFE_RX_READY);
2070         }
2071
2072         return (0);
2073 }
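/*
 * Editor's note: nfe_newbuf() loads the replacement mbuf into the
 * preallocated spare map *first* and only then unloads the old buffer
 * and swaps the maps, so a failed allocation or load leaves the ring
 * slot's current mbuf mapped and usable.  A minimal sketch of that swap,
 * with pseudotypes standing in for bus_dmamap_t and the ring slot:
 */
#if 0	/* illustrative sketch, not driver code */
struct map { int id; };
struct slot { struct map *map; void *buf; };

static int
refill(struct slot *s, struct map **sparep, void *newbuf,
    int (*load)(struct map *, void *))
{
	struct map *tmp;

	if (load(*sparep, newbuf) != 0)
		return (-1);	/* old buffer and mapping stay intact */
	/*
	 * Success: the slot takes the loaded spare; its old map
	 * becomes the spare for the next refill.
	 */
	tmp = s->map;
	s->map = *sparep;
	*sparep = tmp;
	s->buf = newbuf;
	return (0);
}
#endif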
2074
2075
2076 static int
2077 nfe_jnewbuf(struct nfe_softc *sc, int idx)
2078 {
2079         struct nfe_rx_data *data;
2080         struct nfe_desc32 *desc32;
2081         struct nfe_desc64 *desc64;
2082         struct mbuf *m;
2083         bus_dma_segment_t segs[1];
2084         bus_dmamap_t map;
2085         int nsegs;
2086         void *buf;
2087
2088         MGETHDR(m, M_DONTWAIT, MT_DATA);
2089         if (m == NULL)
2090                 return (ENOBUFS);
2091         buf = nfe_jalloc(sc);
2092         if (buf == NULL) {
2093                 m_freem(m);
2094                 return (ENOBUFS);
2095         }
2096         /* Attach the buffer to the mbuf. */
2097         MEXTADD(m, buf, NFE_JLEN, nfe_jfree, buf, (struct nfe_softc *)sc, 0,
2098             EXT_NET_DRV);
2099         if ((m->m_flags & M_EXT) == 0) {
2100                 m_freem(m);
2101                 return (ENOBUFS);
2102         }
2103         m->m_pkthdr.len = m->m_len = NFE_JLEN;
2104         m_adj(m, ETHER_ALIGN);
2105
2106         if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2107             sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2108                 m_freem(m);
2109                 return (ENOBUFS);
2110         }
2111         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2112
2113         data = &sc->jrxq.jdata[idx];
2114         if (data->m != NULL) {
2115                 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2116                     BUS_DMASYNC_POSTREAD);
2117                 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2118         }
2119         map = data->rx_data_map;
2120         data->rx_data_map = sc->jrxq.jrx_spare_map;
2121         sc->jrxq.jrx_spare_map = map;
2122         bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2123             BUS_DMASYNC_PREREAD);
2124         data->paddr = segs[0].ds_addr;
2125         data->m = m;
2126         /* update mapping address in h/w descriptor */
2127         if (sc->nfe_flags & NFE_40BIT_ADDR) {
2128                 desc64 = &sc->jrxq.jdesc64[idx];
2129                 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2130                 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2131                 desc64->length = htole16(segs[0].ds_len);
2132                 desc64->flags = htole16(NFE_RX_READY);
2133         } else {
2134                 desc32 = &sc->jrxq.jdesc32[idx];
2135                 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2136                 desc32->length = htole16(segs[0].ds_len);
2137                 desc32->flags = htole16(NFE_RX_READY);
2138         }
2139
2140         return (0);
2141 }
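/*
 * Editor's note: in 40-bit mode the descriptors carry the bus address as
 * two little-endian 32-bit words via NFE_ADDR_HI()/NFE_ADDR_LO().  The
 * sketch below assumes the obvious shift/mask shape for those macros
 * (the real definitions live in if_nfereg.h) and checks the round trip:
 */
#if 0	/* illustrative sketch, not driver code */
#include <assert.h>
#include <stdint.h>

/* Assumed shape of the NFE_ADDR_HI/NFE_ADDR_LO macros. */
#define ADDR_HI(x)	((uint32_t)((uint64_t)(x) >> 32))
#define ADDR_LO(x)	((uint32_t)((uint64_t)(x) & 0xffffffffU))

int
main(void)
{
	uint64_t busaddr = 0xff12345678ULL;	/* a 40-bit bus address */
	uint64_t rejoined;

	rejoined = ((uint64_t)ADDR_HI(busaddr) << 32) | ADDR_LO(busaddr);
	assert(rejoined == busaddr);
	return (0);
}
#endif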
2142
2143
2144 static int
2145 nfe_rxeof(struct nfe_softc *sc, int count)
2146 {
2147         struct ifnet *ifp = sc->nfe_ifp;
2148         struct nfe_desc32 *desc32;
2149         struct nfe_desc64 *desc64;
2150         struct nfe_rx_data *data;
2151         struct mbuf *m;
2152         uint16_t flags;
2153         int len, prog;
2154         uint32_t vtag = 0;
2155
2156         NFE_LOCK_ASSERT(sc);
2157
2158         bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2159             BUS_DMASYNC_POSTREAD);
2160
2161         for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2162                 if (count <= 0)
2163                         break;
2164                 count--;
2165
2166                 data = &sc->rxq.data[sc->rxq.cur];
2167
2168                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2169                         desc64 = &sc->rxq.desc64[sc->rxq.cur];
2170                         vtag = le32toh(desc64->physaddr[1]);
2171                         flags = le16toh(desc64->flags);
2172                         len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2173                 } else {
2174                         desc32 = &sc->rxq.desc32[sc->rxq.cur];
2175                         flags = le16toh(desc32->flags);
2176                         len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2177                 }
2178
2179                 if (flags & NFE_RX_READY)
2180                         break;
2181                 prog++;
2182                 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2183                         if (!(flags & NFE_RX_VALID_V1)) {
2184                                 ifp->if_ierrors++;
2185                                 nfe_discard_rxbuf(sc, sc->rxq.cur);
2186                                 continue;
2187                         }
2188                         if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2189                                 flags &= ~NFE_RX_ERROR;
2190                                 len--;  /* fix buffer length */
2191                         }
2192                 } else {
2193                         if (!(flags & NFE_RX_VALID_V2)) {
2194                                 ifp->if_ierrors++;
2195                                 nfe_discard_rxbuf(sc, sc->rxq.cur);
2196                                 continue;
2197                         }
2198
2199                         if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2200                                 flags &= ~NFE_RX_ERROR;
2201                                 len--;  /* fix buffer length */
2202                         }
2203                 }
2204
2205                 if (flags & NFE_RX_ERROR) {
2206                         ifp->if_ierrors++;
2207                         nfe_discard_rxbuf(sc, sc->rxq.cur);
2208                         continue;
2209                 }
2210
2211                 m = data->m;
2212                 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2213                         ifp->if_iqdrops++;
2214                         nfe_discard_rxbuf(sc, sc->rxq.cur);
2215                         continue;
2216                 }
2217
2218                 if ((vtag & NFE_RX_VTAG) != 0 &&
2219                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2220                         m->m_pkthdr.ether_vtag = vtag & 0xffff;
2221                         m->m_flags |= M_VLANTAG;
2222                 }
2223
2224                 m->m_pkthdr.len = m->m_len = len;
2225                 m->m_pkthdr.rcvif = ifp;
2226
2227                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2228                         if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2229                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2230                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2231                                 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2232                                     (flags & NFE_RX_UDP_CSUMOK) != 0) {
2233                                         m->m_pkthdr.csum_flags |=
2234                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2235                                         m->m_pkthdr.csum_data = 0xffff;
2236                                 }
2237                         }
2238                 }
2239
2240                 ifp->if_ipackets++;
2241
2242                 NFE_UNLOCK(sc);
2243                 (*ifp->if_input)(ifp, m);
2244                 NFE_LOCK(sc);
2245         }
2246
2247         if (prog > 0)
2248                 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2249                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2250
2251         return (count > 0 ? 0 : EAGAIN);
2252 }
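/*
 * Editor's note: the loop above walks the ring with NFE_INC (presumably a
 * modular "(x) = ((x) + 1) % count" increment) and stops at the first
 * descriptor that still has NFE_RX_READY set, i.e. one the chip has not
 * written back yet.  A standalone model of that consumer walk, with
 * stand-in names throughout:
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdint.h>
#include <stdio.h>

#define RING_COUNT	8
#define INC(x, y)	(x) = (((x) + 1) % (y))	/* assumed NFE_INC shape */
#define OWNED_BY_HW	0x8000	/* stand-in for NFE_RX_READY */

int
main(void)
{
	/* Descriptors 0-2 completed; 3 still owned by the hardware. */
	uint16_t flags[RING_COUNT] = { 0, 0, 0, OWNED_BY_HW };
	int budget = RING_COUNT, cur = 0;

	while (budget-- > 0 && (flags[cur] & OWNED_BY_HW) == 0) {
		printf("completed descriptor %d\n", cur);
		INC(cur, RING_COUNT);
	}
	return (0);
}
#endif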
2253
2254
2255 static int
2256 nfe_jrxeof(struct nfe_softc *sc, int count)
2257 {
2258         struct ifnet *ifp = sc->nfe_ifp;
2259         struct nfe_desc32 *desc32;
2260         struct nfe_desc64 *desc64;
2261         struct nfe_rx_data *data;
2262         struct mbuf *m;
2263         uint16_t flags;
2264         int len, prog;
2265         uint32_t vtag = 0;
2266
2267         NFE_LOCK_ASSERT(sc);
2268
2269         bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2270             BUS_DMASYNC_POSTREAD);
2271
2272         for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2273             vtag = 0) {
2274                 if (count <= 0)
2275                         break;
2276                 count--;
2277
2278                 data = &sc->jrxq.jdata[sc->jrxq.jcur];
2279
2280                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2281                         desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2282                         vtag = le32toh(desc64->physaddr[1]);
2283                         flags = le16toh(desc64->flags);
2284                         len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2285                 } else {
2286                         desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2287                         flags = le16toh(desc32->flags);
2288                         len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2289                 }
2290
2291                 if (flags & NFE_RX_READY)
2292                         break;
2293                 prog++;
2294                 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2295                         if (!(flags & NFE_RX_VALID_V1)) {
2296                                 ifp->if_ierrors++;
2297                                 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2298                                 continue;
2299                         }
2300                         if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2301                                 flags &= ~NFE_RX_ERROR;
2302                                 len--;  /* fix buffer length */
2303                         }
2304                 } else {
2305                         if (!(flags & NFE_RX_VALID_V2)) {
2306                                 ifp->if_ierrors++;
2307                                 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2308                                 continue;
2309                         }
2310
2311                         if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2312                                 flags &= ~NFE_RX_ERROR;
2313                                 len--;  /* fix buffer length */
2314                         }
2315                 }
2316
2317                 if (flags & NFE_RX_ERROR) {
2318                         ifp->if_ierrors++;
2319                         nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2320                         continue;
2321                 }
2322
2323                 m = data->m;
2324                 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2325                         ifp->if_iqdrops++;
2326                         nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2327                         continue;
2328                 }
2329
2330                 if ((vtag & NFE_RX_VTAG) != 0 &&
2331                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2332                         m->m_pkthdr.ether_vtag = vtag & 0xffff;
2333                         m->m_flags |= M_VLANTAG;
2334                 }
2335
2336                 m->m_pkthdr.len = m->m_len = len;
2337                 m->m_pkthdr.rcvif = ifp;
2338
2339                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2340                         if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2341                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2342                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2343                                 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2344                                     (flags & NFE_RX_UDP_CSUMOK) != 0) {
2345                                         m->m_pkthdr.csum_flags |=
2346                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2347                                         m->m_pkthdr.csum_data = 0xffff;
2348                                 }
2349                         }
2350                 }
2351
2352                 ifp->if_ipackets++;
2353
2354                 NFE_UNLOCK(sc);
2355                 (*ifp->if_input)(ifp, m);
2356                 NFE_LOCK(sc);
2357         }
2358
2359         if (prog > 0)
2360                 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2361                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2362
2363         return (count > 0 ? 0 : EAGAIN);
2364 }
2365
2366
2367 static void
2368 nfe_txeof(struct nfe_softc *sc)
2369 {
2370         struct ifnet *ifp = sc->nfe_ifp;
2371         struct nfe_desc32 *desc32;
2372         struct nfe_desc64 *desc64;
2373         struct nfe_tx_data *data = NULL;
2374         uint16_t flags;
2375         int cons, prog;
2376
2377         NFE_LOCK_ASSERT(sc);
2378
2379         bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2380             BUS_DMASYNC_POSTREAD);
2381
2382         prog = 0;
2383         for (cons = sc->txq.next; cons != sc->txq.cur;
2384             NFE_INC(cons, NFE_TX_RING_COUNT)) {
2385                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2386                         desc64 = &sc->txq.desc64[cons];
2387                         flags = le16toh(desc64->flags);
2388                 } else {
2389                         desc32 = &sc->txq.desc32[cons];
2390                         flags = le16toh(desc32->flags);
2391                 }
2392
2393                 if (flags & NFE_TX_VALID)
2394                         break;
2395
2396                 prog++;
2397                 sc->txq.queued--;
2398                 data = &sc->txq.data[cons];
2399
2400                 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2401                         if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2402                                 continue;
2403                         if ((flags & NFE_TX_ERROR_V1) != 0) {
2404                                 device_printf(sc->nfe_dev,
2405                                     "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2406
2407                                 ifp->if_oerrors++;
2408                         } else
2409                                 ifp->if_opackets++;
2410                 } else {
2411                         if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2412                                 continue;
2413                         if ((flags & NFE_TX_ERROR_V2) != 0) {
2414                                 device_printf(sc->nfe_dev,
2415                                     "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2416                                 ifp->if_oerrors++;
2417                         } else
2418                                 ifp->if_opackets++;
2419                 }
2420
2421                 /* last fragment of the mbuf chain transmitted */
2422                 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2423                 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2424                     BUS_DMASYNC_POSTWRITE);
2425                 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2426                 m_freem(data->m);
2427                 data->m = NULL;
2428         }
2429
2430         if (prog > 0) {
2431                 sc->nfe_force_tx = 0;
2432                 sc->txq.next = cons;
2433                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2434                 if (sc->txq.queued == 0)
2435                         sc->nfe_watchdog_timer = 0;
2436         }
2437 }
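/*
 * Editor's note: the %b conversions above use the kernel printf(9)
 * bit-field decoder: the value is printed and then each set bit is
 * spelled out using the description string passed as the extra argument
 * (NFE_V1_TXERR/NFE_V2_TXERR, expected to be defined in if_nfereg.h).
 */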
2438
2439 static int
2440 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2441 {
2442         struct nfe_desc32 *desc32 = NULL;
2443         struct nfe_desc64 *desc64 = NULL;
2444         bus_dmamap_t map;
2445         bus_dma_segment_t segs[NFE_MAX_SCATTER];
2446         int error, i, nsegs, prod, si;
2447         uint32_t tso_segsz;
2448         uint16_t cflags, flags;
2449         struct mbuf *m;
2450
2451         prod = si = sc->txq.cur;
2452         map = sc->txq.data[prod].tx_data_map;
2453
2454         error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2455             &nsegs, BUS_DMA_NOWAIT);
2456         if (error == EFBIG) {
2457                 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2458                 if (m == NULL) {
2459                         m_freem(*m_head);
2460                         *m_head = NULL;
2461                         return (ENOBUFS);
2462                 }
2463                 *m_head = m;
2464                 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2465                     *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2466                 if (error != 0) {
2467                         m_freem(*m_head);
2468                         *m_head = NULL;
2469                         return (ENOBUFS);
2470                 }
2471         } else if (error != 0)
2472                 return (error);
2473         if (nsegs == 0) {
2474                 m_freem(*m_head);
2475                 *m_head = NULL;
2476                 return (EIO);
2477         }
2478
2479         if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2480                 bus_dmamap_unload(sc->txq.tx_data_tag, map);
2481                 return (ENOBUFS);
2482         }
2483
2484         m = *m_head;
2485         cflags = flags = 0;
2486         tso_segsz = 0;
2487         if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2488                 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2489                         cflags |= NFE_TX_IP_CSUM;
2490                 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2491                         cflags |= NFE_TX_TCP_UDP_CSUM;
2492                 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2493                         cflags |= NFE_TX_TCP_UDP_CSUM;
2494         }
2495         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2496                 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2497                     NFE_TX_TSO_SHIFT;
2498                 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2499                 cflags |= NFE_TX_TSO;
2500         }
2501
2502         for (i = 0; i < nsegs; i++) {
2503                 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2504                         desc64 = &sc->txq.desc64[prod];
2505                         desc64->physaddr[0] =
2506                             htole32(NFE_ADDR_HI(segs[i].ds_addr));
2507                         desc64->physaddr[1] =
2508                             htole32(NFE_ADDR_LO(segs[i].ds_addr));
2509                         desc64->vtag = 0;
2510                         desc64->length = htole16(segs[i].ds_len - 1);
2511                         desc64->flags = htole16(flags);
2512                 } else {
2513                         desc32 = &sc->txq.desc32[prod];
2514                         desc32->physaddr =
2515                             htole32(NFE_ADDR_LO(segs[i].ds_addr));
2516                         desc32->length = htole16(segs[i].ds_len - 1);
2517                         desc32->flags = htole16(flags);
2518                 }
2519
2520                 /*
2521                  * Setting of the valid bit in the first descriptor is
2522                  * deferred until the whole chain is fully setup.
2523                  */
2524                 flags |= NFE_TX_VALID;
2525
2526                 sc->txq.queued++;
2527                 NFE_INC(prod, NFE_TX_RING_COUNT);
2528         }
2529
2530         /*
2531          * The whole mbuf chain is now DMA mapped; fix up the last/first
2532          * descriptors.  Csum flags, vtag and TSO apply to the first fragment only.
2533          */
2534         if (sc->nfe_flags & NFE_40BIT_ADDR) {
2535                 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2536                 desc64 = &sc->txq.desc64[si];
2537                 if ((m->m_flags & M_VLANTAG) != 0)
2538                         desc64->vtag = htole32(NFE_TX_VTAG |
2539                             m->m_pkthdr.ether_vtag);
2540                 if (tso_segsz != 0) {
2541                         /*
2542                          * XXX
2543                          * The following indicates the descriptor element
2544                          * The length and flags fields together hold the
2545                          * 32-bit tso_segsz value here.
2546                         desc64->length |= htole16((uint16_t)tso_segsz);
2547                         desc64->flags |= htole16(tso_segsz >> 16);
2548                 }
2549                 /*
2550                  * finally, set the valid/checksum/TSO bit in the first
2551                  * descriptor.
2552                  */
2553                 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2554         } else {
2555                 if (sc->nfe_flags & NFE_JUMBO_SUP)
2556                         desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2557                 else
2558                         desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2559                 desc32 = &sc->txq.desc32[si];
2560                 if (tso_segsz != 0) {
2561                         /*
2562                          * XXX
2563                          * The length and flags fields together hold the
2564                          * 32-bit tso_segsz value here.
2565                          */
2566                         desc32->length |= htole16((uint16_t)tso_segsz);
2567                         desc32->flags |= htole16(tso_segsz >> 16);
2568                 }
2569                 /*
2570                  * finally, set the valid/checksum/TSO bit in the first
2571                  * descriptor.
2572                  */
2573                 desc32->flags |= htole16(NFE_TX_VALID | cflags);
2574         }
2575
2576         sc->txq.cur = prod;
2577         prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2578         sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2579         sc->txq.data[prod].tx_data_map = map;
2580         sc->txq.data[prod].m = m;
2581
2582         bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2583
2584         return (0);
2585 }
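/*
 * Editor's note: note the ordering above.  Inside the segment loop,
 * NFE_TX_VALID is OR'ed into "flags" only after the first descriptor has
 * been written, so every descriptor except the first is marked valid as
 * it is built; the first one is made valid (together with the checksum/
 * TSO bits) only once the whole chain is in place, so the chip can never
 * start on a half-built chain.  A compact model of that publish step:
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdint.h>

#define TX_VALID	0x8000	/* stand-in for NFE_TX_VALID */

struct desc { uint16_t flags; };

static void
enqueue_chain(struct desc *ring, int first, int nsegs, int count)
{
	uint16_t flags = 0;
	int i, idx;

	for (i = 0; i < nsegs; i++) {
		idx = (first + i) % count;
		ring[idx].flags = flags;
		/* From the second descriptor on, valid is set immediately. */
		flags |= TX_VALID;
	}
	/* Publish: the first descriptor's valid bit is flipped last. */
	ring[first].flags |= TX_VALID;
}
#endif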
2586
2587
2588 static void
2589 nfe_setmulti(struct nfe_softc *sc)
2590 {
2591         struct ifnet *ifp = sc->nfe_ifp;
2592         struct ifmultiaddr *ifma;
2593         int i;
2594         uint32_t filter;
2595         uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2596         uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2597                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2598         };
2599
2600         NFE_LOCK_ASSERT(sc);
2601
2602         if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2603                 bzero(addr, ETHER_ADDR_LEN);
2604                 bzero(mask, ETHER_ADDR_LEN);
2605                 goto done;
2606         }
2607
2608         bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2609         bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2610
2611         IF_ADDR_LOCK(ifp);
2612         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2613                 u_char *addrp;
2614
2615                 if (ifma->ifma_addr->sa_family != AF_LINK)
2616                         continue;
2617
2618                 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2619                 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2620                         u_int8_t mcaddr = addrp[i];
2621                         addr[i] &= mcaddr;
2622                         mask[i] &= ~mcaddr;
2623                 }
2624         }
2625         IF_ADDR_UNLOCK(ifp);
2626
2627         for (i = 0; i < ETHER_ADDR_LEN; i++) {
2628                 mask[i] |= addr[i];
2629         }
2630
2631 done:
2632         addr[0] |= 0x01;        /* make sure multicast bit is set */
2633
2634         NFE_WRITE(sc, NFE_MULTIADDR_HI,
2635             addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2636         NFE_WRITE(sc, NFE_MULTIADDR_LO,
2637             addr[5] <<  8 | addr[4]);
2638         NFE_WRITE(sc, NFE_MULTIMASK_HI,
2639             mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2640         NFE_WRITE(sc, NFE_MULTIMASK_LO,
2641             mask[5] <<  8 | mask[4]);
2642
2643         filter = NFE_READ(sc, NFE_RXFILTER);
2644         filter &= NFE_PFF_RX_PAUSE;
2645         filter |= NFE_RXFILTER_MAGIC;
2646         filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2647         NFE_WRITE(sc, NFE_RXFILTER, filter);
2648 }
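/*
 * Editor's note: the loop above folds all multicast addresses into an
 * (addr, mask) pair: addr keeps the bits that are 1 in every address,
 * the "mask &= ~mcaddr" step keeps the bits that are 0 in every address,
 * and "mask |= addr" combines the two, so mask ends up with a 1 exactly
 * where all addresses agree.  Presumably the hardware then accepts any
 * destination that matches addr on the bits set in mask.  A worked
 * single-byte example:
 */
#if 0	/* illustrative sketch, not driver code */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t mc[2] = { 0x33, 0x31 };	/* two addresses differing in bit 1 */
	uint8_t addr = 0xff, mask = 0xff;
	int i;

	for (i = 0; i < 2; i++) {
		addr &= mc[i];		/* bits set in every address */
		mask &= ~mc[i];		/* bits clear in every address */
	}
	mask |= addr;			/* bits on which all addresses agree */
	/* Prints addr=0x31 mask=0xfd: bit 1, where they differ, is masked. */
	printf("addr=0x%02x mask=0x%02x\n", addr, mask);
	return (0);
}
#endif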
2649
2650
2651 static void
2652 nfe_tx_task(void *arg, int pending)
2653 {
2654         struct ifnet *ifp;
2655
2656         ifp = (struct ifnet *)arg;
2657         nfe_start(ifp);
2658 }
2659
2660
2661 static void
2662 nfe_start(struct ifnet *ifp)
2663 {
2664         struct nfe_softc *sc = ifp->if_softc;
2665         struct mbuf *m0;
2666         int enq;
2667
2668         NFE_LOCK(sc);
2669
2670         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2671             IFF_DRV_RUNNING || sc->nfe_link == 0) {
2672                 NFE_UNLOCK(sc);
2673                 return;
2674         }
2675
2676         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2677                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2678                 if (m0 == NULL)
2679                         break;
2680
2681                 if (nfe_encap(sc, &m0) != 0) {
2682                         if (m0 == NULL)
2683                                 break;
2684                         IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2685                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2686                         break;
2687                 }
2688                 enq++;
2689                 ETHER_BPF_MTAP(ifp, m0);
2690         }
2691
2692         if (enq > 0) {
2693                 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2694                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2695
2696                 /* kick Tx */
2697                 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2698
2699                 /*
2700                  * Set a timeout in case the chip goes out to lunch.
2701                  */
2702                 sc->nfe_watchdog_timer = 5;
2703         }
2704
2705         NFE_UNLOCK(sc);
2706 }
2707
2708
2709 static void
2710 nfe_watchdog(struct ifnet *ifp)
2711 {
2712         struct nfe_softc *sc = ifp->if_softc;
2713
2714         if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2715                 return;
2716
2717         /* Check if we've lost Tx completion interrupt. */
2718         nfe_txeof(sc);
2719         if (sc->txq.queued == 0) {
2720                 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2721                     "-- recovering\n");
2722                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2723                         taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
2724                 return;
2725         }
2726         /* Check if we've lost start Tx command. */
2727         sc->nfe_force_tx++;
2728         if (sc->nfe_force_tx <= 3) {
2729                 /*
2730                  * If a lost kick turns out to be the real cause of these
2731                  * watchdog timeouts, this code should move into nfe_txeof().
2732                  */
2733                 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2734                 return;
2735         }
2736         sc->nfe_force_tx = 0;
2737
2738         if_printf(ifp, "watchdog timeout\n");
2739
2740         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2741         ifp->if_oerrors++;
2742         nfe_init_locked(sc);
2743 }
2744
2745
2746 static void
2747 nfe_init(void *xsc)
2748 {
2749         struct nfe_softc *sc = xsc;
2750
2751         NFE_LOCK(sc);
2752         nfe_init_locked(sc);
2753         NFE_UNLOCK(sc);
2754 }
2755
2756
2757 static void
2758 nfe_init_locked(void *xsc)
2759 {
2760         struct nfe_softc *sc = xsc;
2761         struct ifnet *ifp = sc->nfe_ifp;
2762         struct mii_data *mii;
2763         uint32_t val;
2764         int error;
2765
2766         NFE_LOCK_ASSERT(sc);
2767
2768         mii = device_get_softc(sc->nfe_miibus);
2769
2770         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2771                 return;
2772
2773         nfe_stop(ifp);
2774
2775         sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2776
2777         nfe_init_tx_ring(sc, &sc->txq);
2778         if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2779                 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2780         else
2781                 error = nfe_init_rx_ring(sc, &sc->rxq);
2782         if (error != 0) {
2783                 device_printf(sc->nfe_dev,
2784                     "initialization failed: no memory for rx buffers\n");
2785                 nfe_stop(ifp);
2786                 return;
2787         }
2788
2789         val = 0;
2790         if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2791                 val |= NFE_MAC_ADDR_INORDER;
2792         NFE_WRITE(sc, NFE_TX_UNK, val);
2793         NFE_WRITE(sc, NFE_STATUS, 0);
2794
2795         if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2796                 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2797
2798         sc->rxtxctl = NFE_RXTX_BIT2;
2799         if (sc->nfe_flags & NFE_40BIT_ADDR)
2800                 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2801         else if (sc->nfe_flags & NFE_JUMBO_SUP)
2802                 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2803
2804         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2805                 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2806         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2807                 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2808
2809         NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2810         DELAY(10);
2811         NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2812
2813         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2814                 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2815         else
2816                 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2817
2818         NFE_WRITE(sc, NFE_SETUP_R6, 0);
2819
2820         /* set MAC address */
2821         nfe_set_macaddr(sc, IF_LLADDR(ifp));
2822
2823         /* tell MAC where rings are in memory */
2824         if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2825                 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2826                     NFE_ADDR_HI(sc->jrxq.jphysaddr));
2827                 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2828                     NFE_ADDR_LO(sc->jrxq.jphysaddr));
2829         } else {
2830                 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2831                     NFE_ADDR_HI(sc->rxq.physaddr));
2832                 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2833                     NFE_ADDR_LO(sc->rxq.physaddr));
2834         }
2835         NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2836         NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2837
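             /*
              * Ring sizes are programmed as count - 1, Rx in the upper
              * 16 bits and Tx in the lower 16 bits.
              */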
2838         NFE_WRITE(sc, NFE_RING_SIZE,
2839             (NFE_RX_RING_COUNT - 1) << 16 |
2840             (NFE_TX_RING_COUNT - 1));
2841
2842         NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2843
2844         /* force MAC to wakeup */
2845         val = NFE_READ(sc, NFE_PWR_STATE);
2846         if ((val & NFE_PWR_WAKEUP) == 0)
2847                 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2848         DELAY(10);
2849         val = NFE_READ(sc, NFE_PWR_STATE);
2850         NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2851
2852 #if 1
2853         /* configure interrupt coalescing/mitigation */
2854         NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2855 #else
2856         /* no interrupt mitigation: one interrupt per packet */
2857         NFE_WRITE(sc, NFE_IMTIMER, 970);
2858 #endif
2859
2860         NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2861         NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2862         NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2863
2864         /* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
2865         NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2866
2867         NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2868         NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
2869
2870         sc->rxtxctl &= ~NFE_RXTX_BIT2;
2871         NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2872         DELAY(10);
2873         NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2874
2875         /* set Rx filter */
2876         nfe_setmulti(sc);
2877
2878         /* enable Rx */
2879         NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2880
2881         /* enable Tx */
2882         NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2883
2884         NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2885
2886 #ifdef DEVICE_POLLING
2887         if (ifp->if_capenable & IFCAP_POLLING)
2888                 nfe_disable_intr(sc);
2889         else
2890 #endif
2891         nfe_set_intr(sc);
2892         nfe_enable_intr(sc); /* enable interrupts */
2893
2894         ifp->if_drv_flags |= IFF_DRV_RUNNING;
2895         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2896
2897         sc->nfe_link = 0;
2898         mii_mediachg(mii);
2899
2900         callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2901 }
2902
2903
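     /*
      * Stop the adapter with the lock held: halt Tx/Rx, mask interrupts,
      * stop the stat callout and free any mbufs still owned by the rings.
      */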
2904 static void
2905 nfe_stop(struct ifnet *ifp)
2906 {
2907         struct nfe_softc *sc = ifp->if_softc;
2908         struct nfe_rx_ring *rx_ring;
2909         struct nfe_jrx_ring *jrx_ring;
2910         struct nfe_tx_ring *tx_ring;
2911         struct nfe_rx_data *rdata;
2912         struct nfe_tx_data *tdata;
2913         int i;
2914
2915         NFE_LOCK_ASSERT(sc);
2916
2917         sc->nfe_watchdog_timer = 0;
2918         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2919
2920         callout_stop(&sc->nfe_stat_ch);
2921
2922         /* abort Tx */
2923         NFE_WRITE(sc, NFE_TX_CTL, 0);
2924
2925         /* disable Rx */
2926         NFE_WRITE(sc, NFE_RX_CTL, 0);
2927
2928         /* disable interrupts */
2929         nfe_disable_intr(sc);
2930
2931         sc->nfe_link = 0;
2932
2933         /* free Rx and Tx mbufs still in the queues. */
2934         rx_ring = &sc->rxq;
2935         for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2936                 rdata = &rx_ring->data[i];
2937                 if (rdata->m != NULL) {
2938                         bus_dmamap_sync(rx_ring->rx_data_tag,
2939                             rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2940                         bus_dmamap_unload(rx_ring->rx_data_tag,
2941                             rdata->rx_data_map);
2942                         m_freem(rdata->m);
2943                         rdata->m = NULL;
2944                 }
2945         }
2946
2947         if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2948                 jrx_ring = &sc->jrxq;
2949                 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2950                         rdata = &jrx_ring->jdata[i];
2951                         if (rdata->m != NULL) {
2952                                 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2953                                     rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2954                                 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2955                                     rdata->rx_data_map);
2956                                 m_freem(rdata->m);
2957                                 rdata->m = NULL;
2958                         }
2959                 }
2960         }
2961
2962         tx_ring = &sc->txq;
2963         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2964                 tdata = &tx_ring->data[i];
2965                 if (tdata->m != NULL) {
2966                         bus_dmamap_sync(tx_ring->tx_data_tag,
2967                             tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2968                         bus_dmamap_unload(tx_ring->tx_data_tag,
2969                             tdata->tx_data_map);
2970                         m_freem(tdata->m);
2971                         tdata->m = NULL;
2972                 }
2973         }
2974 }
2975
2976
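     /*
      * ifmedia callback: renegotiate the media selection through the PHY.
      */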
2977 static int
2978 nfe_ifmedia_upd(struct ifnet *ifp)
2979 {
2980         struct nfe_softc *sc = ifp->if_softc;
2981         struct mii_data *mii;
2982
2983         NFE_LOCK(sc);
2984         mii = device_get_softc(sc->nfe_miibus);
2985         mii_mediachg(mii);
2986         NFE_UNLOCK(sc);
2987
2988         return (0);
2989 }
2990
2991
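     /*
      * ifmedia callback: report the current media and link status.
      */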
2992 static void
2993 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2994 {
2995         struct nfe_softc *sc;
2996         struct mii_data *mii;
2997
2998         sc = ifp->if_softc;
2999
3000         NFE_LOCK(sc);
3001         mii = device_get_softc(sc->nfe_miibus);
3002         mii_pollstat(mii);
3003         /* copy out the media status while still holding the lock */
3004         ifmr->ifm_active = mii->mii_media_active;
3005         ifmr->ifm_status = mii->mii_media_status;
3006         NFE_UNLOCK(sc);
3007 }
3008
3009
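     /*
      * Once-a-second callout, run with the lock held: drive the MII state
      * machine and the Tx watchdog, then reschedule itself.
      */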
3010 static void
3011 nfe_tick(void *xsc)
3012 {
3013         struct nfe_softc *sc;
3014         struct mii_data *mii;
3015         struct ifnet *ifp;
3016
3017         sc = (struct nfe_softc *)xsc;
3018
3019         NFE_LOCK_ASSERT(sc);
3020
3021         ifp = sc->nfe_ifp;
3022
3023         mii = device_get_softc(sc->nfe_miibus);
3024         mii_tick(mii);
3025         nfe_watchdog(ifp);
3026         callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
3027 }
3028
3029
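     /*
      * Device shutdown method: quiesce Rx/Tx so the chip stops DMA before
      * the system goes down.
      */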
3030 static int
3031 nfe_shutdown(device_t dev)
3032 {
3033         struct nfe_softc *sc;
3034         struct ifnet *ifp;
3035
3036         sc = device_get_softc(dev);
3037
3038         NFE_LOCK(sc);
3039         ifp = sc->nfe_ifp;
3040         nfe_stop(ifp);
3041         /* nfe_reset(sc); */
3042         NFE_UNLOCK(sc);
3043
3044         return (0);
3045 }
3046
3047
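     /*
      * Read the station address from the MACADDR registers.  Older chips
      * store it byte-reversed; NFE_CORRECT_MACADDR marks chips that already
      * hold it in network byte order (see NFE_MAC_ADDR_INORDER above).
      */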
3048 static void
3049 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
3050 {
3051         uint32_t val;
3052
3053         if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
3054                 val = NFE_READ(sc, NFE_MACADDR_LO);
3055                 addr[0] = (val >> 8) & 0xff;
3056                 addr[1] = (val & 0xff);
3057
3058                 val = NFE_READ(sc, NFE_MACADDR_HI);
3059                 addr[2] = (val >> 24) & 0xff;
3060                 addr[3] = (val >> 16) & 0xff;
3061                 addr[4] = (val >>  8) & 0xff;
3062                 addr[5] = (val & 0xff);
3063         } else {
3064                 val = NFE_READ(sc, NFE_MACADDR_LO);
3065                 addr[5] = (val >> 8) & 0xff;
3066                 addr[4] = (val & 0xff);
3067
3068                 val = NFE_READ(sc, NFE_MACADDR_HI);
3069                 addr[3] = (val >> 24) & 0xff;
3070                 addr[2] = (val >> 16) & 0xff;
3071                 addr[1] = (val >>  8) & 0xff;
3072                 addr[0] = (val & 0xff);
3073         }
3074 }
3075
3076
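     /*
      * Program the station address; the registers take the byte-reversed
      * layout, matching the non-NFE_CORRECT_MACADDR read order above.
      */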
3077 static void
3078 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
3079 {
3080
3081         NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
3082         NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
3083             addr[1] << 8 | addr[0]);
3084 }
3085
3086
3087 /*
3088  * bus_dmamap_load() callback: record the bus address of a single-segment mapping.
3089  */
3090
3091 static void
3092 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3093 {
3094         struct nfe_dmamap_arg *ctx;
3095
3096         if (error != 0)
3097                 return;
3098
3099         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3100
3101         ctx = (struct nfe_dmamap_arg *)arg;
3102         ctx->nfe_busaddr = segs[0].ds_addr;
3103 }
3104
3105
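     /*
      * Helper for sysctl handlers that accept an int restricted to
      * [low, high]; out-of-range values are rejected with EINVAL.
      */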
3106 static int
3107 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3108 {
3109         int error, value;
3110
3111         if (!arg1)
3112                 return (EINVAL);
3113         value = *(int *)arg1;
3114         error = sysctl_handle_int(oidp, &value, 0, req);
3115         if (error || !req->newptr)
3116                 return (error);
3117         if (value < low || value > high)
3118                 return (EINVAL);
3119         *(int *)arg1 = value;
3120
3121         return (0);
3122 }
3123
3124
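     /*
      * Handler for the driver's Rx process-limit sysctl: bounds the value
      * to [NFE_PROC_MIN, NFE_PROC_MAX].
      */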
3125 static int
3126 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3127 {
3128
3129         return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3130             NFE_PROC_MAX));
3131 }