/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>
MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"
static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int  nfe_shutdown(device_t);
static int  nfe_can_use_msix(struct nfe_softc *);
static int  nfe_detect_msik9(struct nfe_softc *);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int  nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int  nfe_newbuf(struct nfe_softc *, int);
static int  nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int, int *);
static int  nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_start(struct ifnet *);
static void nfe_start_locked(struct ifnet *);
static void nfe_watchdog(struct ifnet *);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(struct ifnet *);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(struct ifnet *);
static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);
static void nfe_set_linkspeed(struct nfe_softc *);
static void nfe_set_wol(struct nfe_softc *);
#ifdef NFE_DEBUG
static int nfedebug = 0;
#define	DPRINTF(sc, ...)	do {				\
	if (nfedebug)						\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#define	DPRINTFN(sc, n, ...)	do {				\
	if (nfedebug >= (n))					\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#else
#define	DPRINTF(sc, ...)
#define	DPRINTFN(sc, n, ...)
#endif
#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
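/*
 * The knobs above are boot-time loader tunables; for example, adding
 * hw.nfe.msi_disable="1" to /boot/loader.conf makes the driver skip MSI
 * allocation entirely.  See nfe_attach() below for how the MSI-X/MSI/INTx
 * fallback order is applied.
 */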
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nfe_probe),
	DEVMETHOD(device_attach, nfe_attach),
	DEVMETHOD(device_detach, nfe_detach),
	DEVMETHOD(device_suspend, nfe_suspend),
	DEVMETHOD(device_resume, nfe_resume),
	DEVMETHOD(device_shutdown, nfe_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg, nfe_miibus_statchg),

	DEVMETHOD_END
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
static struct nfe_type nfe_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	    "NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	    "NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	    "NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	    "NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{0, 0, NULL}
};
/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
	struct nfe_type *t;

	t = nfe_devs;
	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}
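/*
 * nfe_alloc_msix() reserves the MSI-X table (BAR 2) and pending-bit array
 * (BAR 3) before asking the PCI layer for vectors; on any failure it
 * releases everything it took so nfe_attach() can fall back to MSI or INTx.
 */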
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
	int rid;

	rid = PCIR_BAR(2);
	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->nfe_msix_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX table resource\n");
		return;
	}
	rid = PCIR_BAR(3);
	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->nfe_msix_pba_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX PBA resource\n");
		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
		return;
	}

	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
		if (count == NFE_MSI_MESSAGES) {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "Using %d MSIX messages\n", count);
			sc->nfe_msix = 1;
		} else {
			device_printf(sc->nfe_dev,
			    "couldn't allocate MSIX\n");
			pci_release_msi(sc->nfe_dev);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(3), sc->nfe_msix_pba_res);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(2), sc->nfe_msix_res);
			sc->nfe_msix_pba_res = NULL;
			sc->nfe_msix_res = NULL;
		}
	}
}
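/*
 * The MSI K9N6PGM2-V2 board reportedly provides no usable PHY hint for its
 * MCP61 MAC, so when it is detected nfe_attach() forces the MII probe to
 * PHY address 0 instead of scanning (see the phyloc handling there).
 */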
static int
nfe_detect_msik9(struct nfe_softc *sc)
{
	static const char *maker = "MSI";
	static const char *product = "K9N6PGM2-V2 (MS-7309)";
	char *m, *p;
	int found;

	found = 0;
	m = getenv("smbios.planar.maker");
	p = getenv("smbios.planar.product");
	if (m != NULL && p != NULL) {
		if (strcmp(m, maker) == 0 && strcmp(p, product) == 0)
			found = 1;
	}
	if (m != NULL)
		freeenv(m);
	if (p != NULL)
		freeenv(p);

	return (found);
}
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	bus_addr_t dma_addr_max;
	int error = 0, i, msic, phyloc, reg, rid;

	sc = device_get_softc(dev);
	sc->nfe_dev = dev;

	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->nfe_res[0] == NULL) {
		device_printf(dev, "couldn't map memory resources\n");
		mtx_destroy(&sc->nfe_mtx);
		return (ENXIO);
	}
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t v, width;

		v = pci_read_config(dev, reg + 0x08, 2);
		/* Change max. read request size to 4096. */
		v &= ~(7 << 12);
		v |= (5 << 12);
		pci_write_config(dev, reg + 0x08, v, 2);

		v = pci_read_config(dev, reg + 0x0c, 2);
		/* link capability */
		v = (v >> 4) & 0x0f;
		width = pci_read_config(dev, reg + 0x12, 2);
		/* negotiated link width */
		width = (width >> 4) & 0x3f;
		if (v != width)
			device_printf(sc->nfe_dev,
			    "warning, negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}
	if (nfe_can_use_msix(sc) == 0) {
		device_printf(sc->nfe_dev,
		    "MSI/MSI-X capability black-listed, will use INTx\n");
		msix_disable = 1;
		msi_disable = 1;
	}

	/* Allocate interrupt */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 &&
		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
			nfe_alloc_msix(sc, msic);
		if (msi_disable == 0 && sc->nfe_msix == 0 &&
		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == NFE_MSI_MESSAGES) {
				if (bootverbose)
					device_printf(dev,
					    "Using %d MSI messages\n", msic);
				sc->nfe_msi = 1;
			} else
				pci_release_msi(dev);
		}
	}
	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
		rid = 0;
		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->nfe_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->nfe_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
		/* Map interrupts to vector 0. */
		if (sc->nfe_msix != 0) {
			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
		} else if (sc->nfe_msi != 0) {
			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
		}
	}
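	/*
	 * All interrupt sources were steered to vector 0 above because the
	 * driver services the whole chip from a single filter/taskqueue
	 * pair, even when multiple MSI/MSI-X messages were allocated.
	 */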
	/* Set IRQ status/mask register. */
	sc->nfe_irq_status = NFE_IRQ_STATUS;
	sc->nfe_irq_mask = NFE_IRQ_MASK;
	sc->nfe_intrs = NFE_IRQ_WANTED;
	sc->nfe_nointrs = 0;
	if (sc->nfe_msix != 0) {
		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
		sc->nfe_nointrs = NFE_IRQ_WANTED;
	} else if (sc->nfe_msi != 0) {
		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
	}

	sc->nfe_devid = pci_get_device(dev);
	sc->nfe_revid = pci_get_revid(dev);
	sc->nfe_flags = 0;
	switch (sc->nfe_devid) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
		    NFE_MIB_V2;
		break;
	}

	nfe_power(sc);
	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->nfe_flags |= NFE_CORRECT_MACADDR;
	nfe_get_macaddr(sc, sc->eaddr);
	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
		dma_addr_max = NFE_DMA_MAXADDR;
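	/*
	 * Controllers with NFE_40BIT_ADDR can DMA above 4GB, so the parent
	 * tag's lowaddr is raised to NFE_DMA_MAXADDR; everything else is
	 * restricted to 32-bit bus addresses.
	 */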
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    dma_addr_max,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nfe_parent_tag);
	if (error)
		goto fail;
	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
		goto fail;

	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
		goto fail;

	nfe_alloc_jrx_ring(sc, &sc->jrxq);
	/* Create sysctl node. */
	nfe_sysctl_node(sc);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_hwassist = 0;
	ifp->if_capabilities = 0;
	ifp->if_init = nfe_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
	if (sc->nfe_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
	}
	ifp->if_capenable = ifp->if_capabilities;

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM |
			    IFCAP_VLAN_HWTSO;
	}

	if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Do MII setup */
	phyloc = MII_PHY_ANY;
	if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) {
		if (nfe_detect_msik9(sc) != 0)
			phyloc = 0;
	}
	error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
	    nfe_ifmedia_sts, BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}
	ether_ifattach(ifp, sc->eaddr);

	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->nfe_tq);
	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->nfe_dev));

	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		error = bus_setup_intr(dev, sc->nfe_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
		    &sc->nfe_intrhand[0]);
	} else {
		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, sc->nfe_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
			    &sc->nfe_intrhand[i]);
			if (error != 0)
				break;
		}
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		nfe_detach(dev);

	return (error);
}
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int i, rid;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
	ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		NFE_LOCK(sc);
		nfe_stop(ifp);
		ifp->if_flags &= ~IFF_UP;
		NFE_UNLOCK(sc);
		callout_drain(&sc->nfe_stat_ch);
		ether_ifdetach(ifp);
	}

	if (ifp) {
		/* restore ethernet address */
		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				eaddr[i] = sc->eaddr[5 - i];
			}
		} else
			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
		nfe_set_macaddr(sc, eaddr);
		if_free(ifp);
	}
	if (sc->nfe_miibus)
		device_delete_child(dev, sc->nfe_miibus);
	bus_generic_detach(dev);
	if (sc->nfe_tq != NULL) {
		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
	}

	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
		if (sc->nfe_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->nfe_irq[i],
			    sc->nfe_intrhand[i]);
			sc->nfe_intrhand[i] = NULL;
		}
	}

	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		if (sc->nfe_irq[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->nfe_irq[0]);
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			if (sc->nfe_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->nfe_irq[i]);
				sc->nfe_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->nfe_msix_pba_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
		    sc->nfe_msix_pba_res);
		sc->nfe_msix_pba_res = NULL;
	}
	if (sc->nfe_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
	}
	if (sc->nfe_res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->nfe_res[0]);
		sc->nfe_res[0] = NULL;
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	nfe_free_jrx_ring(sc, &sc->jrxq);

	if (sc->nfe_parent_tag) {
		bus_dma_tag_destroy(sc->nfe_parent_tag);
		sc->nfe_parent_tag = NULL;
	}

	mtx_destroy(&sc->nfe_mtx);

	return (0);
}
static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_stop(sc->nfe_ifp);
	nfe_set_wol(sc);
	sc->nfe_suspended = 1;
	NFE_UNLOCK(sc);

	return (0);
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_power(sc);
	ifp = sc->nfe_ifp;
	if (ifp->if_flags & IFF_UP)
		nfe_init_locked(sc);
	sc->nfe_suspended = 0;
	NFE_UNLOCK(sc);

	return (0);
}
static int
nfe_can_use_msix(struct nfe_softc *sc)
{
	static struct msix_blacklist {
		char	*maker;
		char	*product;
	} msix_blacklists[] = {
		{ "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
	};

	struct msix_blacklist *mblp;
	char *maker, *product;
	int count, n, use_msix;

	/*
	 * Search the base board manufacturer and product name table
	 * to see if this system has a known MSI/MSI-X issue.
	 */
	maker = getenv("smbios.planar.maker");
	product = getenv("smbios.planar.product");
	use_msix = 1;
	if (maker != NULL && product != NULL) {
		count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
		mblp = msix_blacklists;
		for (n = 0; n < count; n++) {
			if (strcmp(maker, mblp->maker) == 0 &&
			    strcmp(product, mblp->product) == 0) {
				use_msix = 0;
				break;
			}
			mblp++;
		}
	}
	if (maker != NULL)
		freeenv(maker);
	if (product != NULL)
		freeenv(product);

	return (use_msix);
}
/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
	uint32_t pwr;

	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
		return;
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
	DELAY(100);
	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
	pwr = NFE_READ(sc, NFE_PWR2_CTL);
	pwr &= ~NFE_PWR2_WAKEUP_MASK;
	if (sc->nfe_revid >= 0xa3 &&
	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
		pwr |= NFE_PWR2_REVA3;
	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t rxctl, txctl;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->nfe_miibus);
	ifp = sc->nfe_ifp;

	sc->nfe_link = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->nfe_link = 1;
			break;
		default:
			break;
		}
	}

	nfe_mac_config(sc, mii);
	txctl = NFE_READ(sc, NFE_TX_CTL);
	rxctl = NFE_READ(sc, NFE_RX_CTL);
	if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		txctl |= NFE_TX_START;
		rxctl |= NFE_RX_START;
	} else {
		txctl &= ~NFE_TX_START;
		rxctl &= ~NFE_RX_START;
	}
	NFE_WRITE(sc, NFE_TX_CTL, txctl);
	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
}
static void
nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
{
	uint32_t link, misc, phy, seed;
	uint32_t val;

	NFE_LOCK_ASSERT(sc);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	misc = NFE_MISC1_MAGIC;
	link = NFE_MEDIA_SET;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	if ((phy & 0x10000000) != 0) {
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			val = NFE_R1_MAGIC_1000;
		else
			val = NFE_R1_MAGIC_10_100;
	} else
		val = NFE_R1_MAGIC_DEFAULT;
	NFE_WRITE(sc, NFE_SETUP_R1, val);

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* It seems all hardware supports Rx pause frames. */
		val = NFE_READ(sc, NFE_RXFILTER);
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			val |= NFE_PFF_RX_PAUSE;
		else
			val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			val = NFE_READ(sc, NFE_MISC1);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_ENABLE);
				val |= NFE_MISC1_TX_PAUSE;
			} else {
				val &= ~NFE_MISC1_TX_PAUSE;
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_DISABLE);
			}
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	} else {
		/* disable rx/tx pause frames */
		val = NFE_READ(sc, NFE_RXFILTER);
		val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
			    NFE_TX_PAUSE_FRAME_DISABLE);
			val = NFE_READ(sc, NFE_MISC1);
			val &= ~NFE_MISC1_TX_PAUSE;
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	}
}
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == NFE_TIMEOUT) {
		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
		return (0);
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY\n");
		return (0);
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return (val);
}
static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
		device_printf(sc->nfe_dev, "could not write to PHY\n");

	return (0);
}
struct nfe_dmamap_arg {
	bus_addr_t nfe_busaddr;
};
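/*
 * bus_dmamap_load() reports the bus address through a callback.  The
 * nfe_dma_map_segs() callback (defined later in the file) is expected to
 * stash the single segment's address into nfe_busaddr, roughly along these
 * lines (a sketch inferred from how ctx.nfe_busaddr is consumed below, not
 * the verbatim definition):
 *
 *	static void
 *	nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		struct nfe_dmamap_arg *ctx;
 *
 *		if (error != 0)
 *			return;
 *		ctx = (struct nfe_dmamap_arg *)arg;
 *		ctx->nfe_busaddr = segs[0].ds_addr;
 *	}
 */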
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	/* allocate memory for descriptors */
	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	/* map desc to device visible address space */
	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
		goto fail;
	}

	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create Rx DMA spare map\n");
		goto fail;
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->rx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Rx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;
	if (jumbo_disable != 0) {
		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
		sc->nfe_jumbo_disable = 1;
		return;
	}

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->jcur = ring->jnext = 0;

	/* Create DMA tag for jumbo Rx ring. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
	    1,					/* nsegments */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo ring DMA tag\n");
		goto fail;
	}

	/* Create DMA tag for jumbo Rx buffers. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MJUM9BYTES,				/* maxsize */
	    1,					/* nsegments */
	    MJUM9BYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx buffer DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not allocate DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->jdesc64 = desc;
	else
		ring->jdesc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not load DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	ring->jphysaddr = ctx.nfe_busaddr;

	/* Create DMA maps for jumbo Rx buffers. */
	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx DMA spare map\n");
		goto fail;
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &sc->jrxq.jdata[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->jrx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create jumbo Rx DMA map\n");
			goto fail;
		}
	}

	return;

fail:
	/*
	 * Running without jumbo frame support is ok for most cases
	 * so don't fail on creating dma tag/map for jumbo frames.
	 */
	nfe_free_jrx_ring(sc, ring);
	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
	    "resource shortage\n");
	sc->nfe_jumbo_disable = 1;
}
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	void *desc;
	size_t descsize;
	int i;

	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_RX_RING_COUNT);
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (nfe_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	void *desc;
	size_t descsize;
	int i;

	ring->jcur = ring->jnext = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		if (nfe_jnewbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i;

	if (sc->nfe_flags & NFE_40BIT_ADDR)
		desc = ring->desc64;
	else
		desc = ring->desc32;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->rx_data_tag != NULL) {
		if (ring->rx_spare_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    ring->rx_spare_map);
			ring->rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->rx_data_tag);
		ring->rx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		ring->rx_desc_map = NULL;
	}
	if (ring->rx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->rx_desc_tag);
		ring->rx_desc_tag = NULL;
	}
}
static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &ring->jdata[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->jrx_data_tag != NULL) {
		if (ring->jrx_spare_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    ring->jrx_spare_map);
			ring->jrx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->jrx_data_tag);
		ring->jrx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
		ring->jdesc64 = NULL;
		ring->jdesc32 = NULL;
		ring->jrx_desc_map = NULL;
	}

	if (ring->jrx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->jrx_desc_tag);
		ring->jrx_desc_tag = NULL;
	}
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TSO_MAXSIZE,			/* maxsize */
	    NFE_MAX_SCATTER,			/* nsegments */
	    NFE_TSO_MAXSGSIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
		goto fail;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->tx_data_tag, 0,
		    &ring->data[i].tx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Tx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	size_t descsize;

	sc->nfe_force_tx = 0;
	ring->queued = 0;
	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_TX_RING_COUNT);

	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->tx_data_map != NULL) {
			bus_dmamap_destroy(ring->tx_data_tag,
			    data->tx_data_map);
			data->tx_data_map = NULL;
		}
	}

	if (ring->tx_data_tag != NULL) {
		bus_dma_tag_destroy(ring->tx_data_tag);
		ring->tx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		ring->tx_desc_map = NULL;
		bus_dma_tag_destroy(ring->tx_desc_tag);
		ring->tx_desc_tag = NULL;
	}
}
#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;

static int
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t r;
	int rx_npkts = 0;

	NFE_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		NFE_UNLOCK(sc);
		return (rx_npkts);
	}

	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
	else
		rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
	nfe_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nfe_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
			NFE_UNLOCK(sc);
			return (rx_npkts);
		}
		NFE_WRITE(sc, sc->nfe_irq_status, r);

		if (r & NFE_IRQ_LINK) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(sc, "link state changed\n");
		}
	}
	NFE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
static void
nfe_set_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msi != 0)
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}

/* In MSIX, a write to mask registers behaves as XOR. */
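/*
 * A consequence of the XOR semantics: writing sc->nfe_intrs while the mask
 * already holds those bits would clear them and mask interrupts again, so
 * the helpers below read the register first and only write when it is in
 * the opposite state.
 */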
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to enable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}

static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to disable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, init, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;
	init = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
			    (sc->nfe_jumbo_disable != 0)) &&
			    ifr->ifr_mtu > ETHERMTU)
				error = EINVAL;
			else {
				NFE_LOCK(sc);
				ifp->if_mtu = ifr->ifr_mtu;
				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					nfe_init_locked(sc);
				}
				NFE_UNLOCK(sc);
			}
		}
		break;
	case SIOCSIFFLAGS:
		NFE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
			    ((ifp->if_flags ^ sc->nfe_if_flags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				nfe_stop(ifp);
		}
		sc->nfe_if_flags = ifp->if_flags;
		NFE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			NFE_LOCK(sc);
			nfe_setmulti(sc);
			NFE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->nfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(nfe_poll, ifp);
				if (error)
					break;
				NFE_LOCK(sc);
				nfe_disable_intr(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				NFE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts even in error case. */
				NFE_LOCK(sc);
				nfe_enable_intr(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NFE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			init++;
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
			init++;
		}
		/*
		 * It seems that VLAN stripping requires Rx checksum offload.
		 * Unfortunately FreeBSD has no way to disable only Rx side
		 * VLAN stripping. So when we know Rx checksum offload is
		 * disabled turn entire hardware VLAN assist off.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
				init++;
			ifp->if_capenable &= ~(IFCAP_VLAN_HWTAGGING |
			    IFCAP_VLAN_HWTSO);
		}
		if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nfe_init(sc);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static int
nfe_intr(void *arg)
{
	struct nfe_softc *sc;
	uint32_t status;

	sc = (struct nfe_softc *)arg;

	status = NFE_READ(sc, sc->nfe_irq_status);
	if (status == 0 || status == 0xffffffff)
		return (FILTER_STRAY);
	nfe_disable_intr(sc);
	taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);

	return (FILTER_HANDLED);
}
static void
nfe_int_task(void *arg, int pending)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = sc->nfe_ifp;
	uint32_t r;
	int domore;

	NFE_LOCK(sc);

	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
		nfe_enable_intr(sc);
		NFE_UNLOCK(sc);
		return;	/* not for us */
	}
	NFE_WRITE(sc, sc->nfe_irq_status, r);

	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		NFE_UNLOCK(sc);
		return;
	}
#endif

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed\n");
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		NFE_UNLOCK(sc);
		nfe_disable_intr(sc);
		return;
	}

	domore = 0;
	/* check Rx ring */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
	else
		domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
	/* check Tx ring */
	nfe_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nfe_start_locked(ifp);

	NFE_UNLOCK(sc);

	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
		return;
	}

	/* Reenable interrupts. */
	nfe_enable_intr(sc);
}
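/*
 * When an Rx packet is dropped (error frame, mbuf shortage), its descriptor
 * is re-armed with the existing mbuf instead of being left empty: the buffer
 * address is rewritten (a VLAN-tagged packet may have clobbered the second
 * address word on 64-bit descriptors) and NFE_RX_READY is set again.
 */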
static __inline void
nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->rxq.data[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->rxq.desc64[idx];
		/* VLAN packet may have overwritten it. */
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
		desc64->length = htole16(m->m_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->rxq.desc32[idx];
		desc32->length = htole16(m->m_len);
		desc32->flags = htole16(NFE_RX_READY);
	}
}

static __inline void
nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->jrxq.jdata[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		/* VLAN packet may have overwritten it. */
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
		desc64->length = htole16(m->m_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->jrxq.jdesc32[idx];
		desc32->length = htole16(m->m_len);
		desc32->flags = htole16(NFE_RX_READY);
	}
}
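/*
 * Rx buffer replacement uses a spare DMA map: the new mbuf is loaded into
 * rx_spare_map first, and only after that succeeds is the old buffer torn
 * down and the two maps swapped.  A load failure therefore leaves the ring
 * slot intact and the caller simply recycles the old mbuf.
 */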
static int
nfe_newbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	data = &sc->rxq.data[idx];
	if (data->m != NULL) {
		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
	}
	map = data->rx_data_map;
	data->rx_data_map = sc->rxq.rx_spare_map;
	sc->rxq.rx_spare_map = map;
	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
	    BUS_DMASYNC_PREREAD);
	data->paddr = segs[0].ds_addr;
	data->m = m;
	/* update mapping address in h/w descriptor */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->rxq.desc64[idx];
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc64->length = htole16(segs[0].ds_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->rxq.desc32[idx];
		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc32->length = htole16(segs[0].ds_len);
		desc32->flags = htole16(NFE_RX_READY);
	}

	return (0);
}
static int
nfe_jnewbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	data = &sc->jrxq.jdata[idx];
	if (data->m != NULL) {
		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
	}
	map = data->rx_data_map;
	data->rx_data_map = sc->jrxq.jrx_spare_map;
	sc->jrxq.jrx_spare_map = map;
	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
	    BUS_DMASYNC_PREREAD);
	data->paddr = segs[0].ds_addr;
	data->m = m;
	/* update mapping address in h/w descriptor */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc64->length = htole16(segs[0].ds_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->jrxq.jdesc32[idx];
		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc32->length = htole16(segs[0].ds_len);
		desc32->flags = htole16(NFE_RX_READY);
	}

	return (0);
}
static int
nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				ifp->if_ierrors++;
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				ifp->if_ierrors++;
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		m = data->m;
		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
			ifp->if_iqdrops++;
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		ifp->if_ipackets++;

		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	return (count > 0 ? 0 : EAGAIN);
}
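/*
 * nfe_jrxeof() is the jumbo-ring twin of nfe_rxeof() above: identical
 * descriptor parsing and checksum/VLAN handling, but walking jrxq with
 * 9k clusters supplied by nfe_jnewbuf().
 */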
static int
nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
	    vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->jrxq.jdata[sc->jrxq.jcur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		m = data->m;
		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
			ifp->if_iqdrops++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		ifp->if_ipackets++;

		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	return (count > 0 ? 0 : EAGAIN);
}
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;
	int cons, prog;

	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->txq.next; cons != sc->txq.cur;
	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[cons];
			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[cons];
			flags = le16toh(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		prog++;
		sc->txq.queued--;
		data = &sc->txq.data[cons];

		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V1) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V2) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		/* last fragment of the mbuf chain transmitted */
		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
		m_freem(data->m);
		data->m = NULL;
	}

	if (prog > 0) {
		sc->nfe_force_tx = 0;
		sc->txq.next = cons;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (sc->txq.queued == 0)
			sc->nfe_watchdog_timer = 0;
	}
}

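/*
 * Map an outgoing mbuf chain onto free Tx descriptors.  Chains with
 * more than NFE_MAX_SCATTER fragments are compacted with m_collapse()
 * first.  The valid bit of the first descriptor is written last, so
 * the chip can never start on a partially built chain.
 */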
static int
nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
{
	struct nfe_desc32 *desc32 = NULL;
	struct nfe_desc64 *desc64 = NULL;
	bus_dmamap_t map;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	int error, i, nsegs, prod, si;
	uint32_t tso_segsz;
	uint16_t cflags, flags;
	struct mbuf *m;

	prod = si = sc->txq.cur;
	map = sc->txq.data[prod].tx_data_map;

	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
	} else if (error != 0)
		return (error);

	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
		bus_dmamap_unload(sc->txq.tx_data_tag, map);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = flags = 0;
	tso_segsz = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    NFE_TX_TSO_SHIFT;
		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
		cflags |= NFE_TX_TSO;
	} else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= NFE_TX_IP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < nsegs; i++) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[prod];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->vtag = 0;
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &sc->txq.desc32[prod];
			desc32->physaddr =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		NFE_INC(prod, NFE_TX_RING_COUNT);
	}

	/*
	 * The whole mbuf chain has been DMA mapped, fix last/first descriptor.
	 * csum flags, vtag and TSO belong to the first fragment only.
	 */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
		desc64 = &sc->txq.desc64[si];
		if ((m->m_flags & M_VLANTAG) != 0)
			desc64->vtag = htole32(NFE_TX_VTAG |
			    m->m_pkthdr.ether_vtag);
		if (tso_segsz != 0) {
			/*
			 * The following treats the length and flags
			 * fields as one 32bit quantity holding the
			 * shifted TSO segment size.
			 */
			desc64->length |= htole16((uint16_t)tso_segsz);
			desc64->flags |= htole16(tso_segsz >> 16);
		}
		/*
		 * Finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc64->flags |= htole16(NFE_TX_VALID | cflags);
	} else {
		if (sc->nfe_flags & NFE_JUMBO_SUP)
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
		else
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
		desc32 = &sc->txq.desc32[si];
		if (tso_segsz != 0) {
			/*
			 * The following treats the length and flags
			 * fields as one 32bit quantity holding the
			 * shifted TSO segment size.
			 */
			desc32->length |= htole16((uint16_t)tso_segsz);
			desc32->flags |= htole16(tso_segsz >> 16);
		}
		/*
		 * Finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc32->flags |= htole16(NFE_TX_VALID | cflags);
	}

	sc->txq.cur = prod;
	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
	sc->txq.data[prod].tx_data_map = map;
	sc->txq.data[prod].m = m;

	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);

	return (0);
}

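/*
 * Program the hardware multicast filter.  The chip appears to accept
 * a frame when its destination address matches 'addr' on every bit
 * position set in 'mask', so 'addr' is folded to the AND of all
 * subscribed group addresses and 'mask' keeps only the bits on which
 * every group agrees.  For example, groups 01:00:5e:00:00:01 and
 * 01:00:5e:00:00:02 fold to addr 01:00:5e:00:00:00 with the two low
 * bits of the last mask octet cleared, which accepts a small superset
 * of the two groups.
 */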
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct ifmultiaddr *ifma;
	int i;
	uint32_t filter;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	NFE_LOCK_ASSERT(sc);

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		u_char *addrp;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			u_int8_t mcaddr = addrp[i];
			addr[i] &= mcaddr;
			mask[i] &= ~mcaddr;
		}
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mask[i] |= addr[i];
	}

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter = NFE_READ(sc, NFE_RXFILTER);
	filter &= NFE_PFF_RX_PAUSE;
	filter |= NFE_RXFILTER_MAGIC;
	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

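/*
 * if_start entry point; a lock wrapper around nfe_start_locked() so
 * the Tx path can also be kicked from contexts that already hold the
 * softc lock, such as the interrupt task and the watchdog.
 */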
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	NFE_LOCK(sc);
	nfe_start_locked(ifp);
	NFE_UNLOCK(sc);
}

static void
nfe_start_locked(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	int enq;

	NFE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->nfe_link == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->nfe_watchdog_timer = 5;
	}
}

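/*
 * Tx watchdog, driven while the timer armed by nfe_start_locked()
 * counts down.  Recovery escalates: first assume a lost Tx completion
 * interrupt and reclaim descriptors, then re-issue the Tx kick up to
 * three times, and only then reset the whole interface.
 */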
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
		return;

	/* Check if we've lost Tx completion interrupt. */
	nfe_txeof(sc);
	if (sc->txq.queued == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			nfe_start_locked(ifp);
		return;
	}
	/* Check if we've lost start Tx command. */
	sc->nfe_force_tx++;
	if (sc->nfe_force_tx <= 3) {
		/*
		 * If this turns out to be the common cause of watchdog
		 * timeouts, the following kick should move into nfe_txeof().
		 */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
		return;
	}
	sc->nfe_force_tx = 0;

	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_oerrors++;
	nfe_init_locked(sc);
}

static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;

	NFE_LOCK(sc);
	nfe_init_locked(sc);
	NFE_UNLOCK(sc);
}

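/*
 * Bring the interface up with the softc lock held: initialize the
 * rings (the jumbo Rx ring only when the MTU requires it), reset the
 * chip, program the MAC address and ring bus addresses, and finally
 * start the receiver and transmitter.
 */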
static void
nfe_init_locked(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = sc->nfe_ifp;
	struct mii_data *mii;
	uint32_t val;
	int error;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	nfe_stop(ifp);

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;

	nfe_init_tx_ring(sc, &sc->txq);
	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
		error = nfe_init_jrx_ring(sc, &sc->jrxq);
	else
		error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "initialization failed: no memory for rx buffers\n");
		nfe_stop(ifp);
		return;
	}

	val = 0;
	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
		val |= NFE_MAC_ADDR_INORDER;
	NFE_WRITE(sc, NFE_TX_UNK, val);
	NFE_WRITE(sc, NFE_STATUS, 0);

	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->nfe_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, IF_LLADDR(ifp));

	/* tell MAC where rings are in memory */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
	} else {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->rxq.physaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);

	/* force MAC to wakeup */
	val = NFE_READ(sc, NFE_PWR_STATE);
	if ((val & NFE_PWR_WAKEUP) == 0)
		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
	DELAY(10);
	val = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* Clear hardware stats. */
	nfe_stats_clear(sc);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		nfe_disable_intr(sc);
	else
#endif
	nfe_enable_intr(sc); /* enable interrupts */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->nfe_link = 0;
	mii_mediachg(mii);

	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}

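/*
 * Stop the interface: halt the transmitter and receiver, mask
 * interrupts, and unload and free every mbuf still sitting in the Rx,
 * jumbo Rx and Tx rings so a later nfe_init_locked() starts from a
 * clean slate.
 */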
static void
nfe_stop(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_rx_ring *rx_ring;
	struct nfe_jrx_ring *jrx_ring;
	struct nfe_tx_ring *tx_ring;
	struct nfe_rx_data *rdata;
	struct nfe_tx_data *tdata;
	int i;

	NFE_LOCK_ASSERT(sc);

	sc->nfe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->nfe_stat_ch);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	nfe_disable_intr(sc);

	sc->nfe_link = 0;

	/* free Rx and Tx mbufs still in the queues. */
	rx_ring = &sc->rxq;
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		rdata = &rx_ring->data[i];
		if (rdata->m != NULL) {
			bus_dmamap_sync(rx_ring->rx_data_tag,
			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rx_ring->rx_data_tag,
			    rdata->rx_data_map);
			m_freem(rdata->m);
			rdata->m = NULL;
		}
	}

	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
		jrx_ring = &sc->jrxq;
		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
			rdata = &jrx_ring->jdata[i];
			if (rdata->m != NULL) {
				bus_dmamap_sync(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map);
				m_freem(rdata->m);
				rdata->m = NULL;
			}
		}
	}

	tx_ring = &sc->txq;
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		tdata = &tx_ring->data[i];
		if (tdata->m != NULL) {
			bus_dmamap_sync(tx_ring->tx_data_tag,
			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->tx_data_tag,
			    tdata->tx_data_map);
			m_freem(tdata->m);
			tdata->m = NULL;
		}
	}

	/* Update hardware stats. */
	nfe_stats_update(sc);
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_mediachg(mii);
	NFE_UNLOCK(sc);

	return (0);
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NFE_UNLOCK(sc);
}

static void
nfe_tick(void *xsc)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = (struct nfe_softc *)xsc;

	NFE_LOCK_ASSERT(sc);

	ifp = sc->nfe_ifp;

	mii = device_get_softc(sc->nfe_miibus);
	mii_tick(mii);
	nfe_stats_update(sc);
	nfe_watchdog(ifp);
	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}

static int
nfe_shutdown(device_t dev)
{

	return (nfe_suspend(dev));
}

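/*
 * Read back the factory MAC address.  At run time the MAC wants the
 * address reversed (addr[5] in the high byte of the NFE_MACADDR_LO
 * word, as programmed by nfe_set_macaddr() below), but only chips
 * flagged NFE_CORRECT_MACADDR store it in that order; on the others
 * the factory bytes come out the opposite way, hence the two
 * branches.
 */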
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t val;

	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (val >> 8) & 0xff;
		addr[1] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (val >> 24) & 0xff;
		addr[3] = (val >> 16) & 0xff;
		addr[4] = (val >> 8) & 0xff;
		addr[5] = (val & 0xff);
	} else {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[5] = (val >> 8) & 0xff;
		addr[4] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[3] = (val >> 24) & 0xff;
		addr[2] = (val >> 16) & 0xff;
		addr[1] = (val >> 8) & 0xff;
		addr[0] = (val & 0xff);
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
{

	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
	    addr[1] << 8 | addr[0]);
}

/*
 * Map a single buffer address.
 */
static void
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nfe_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));

	ctx = (struct nfe_dmamap_arg *)arg;
	ctx->nfe_busaddr = segs[0].ds_addr;
}

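/*
 * nfe_dma_map_segs() is the callback handed to bus_dmamap_load() when
 * ring memory is loaded; the bus address of the single segment comes
 * back through the nfe_dmamap_arg cookie.  A typical call (an
 * illustrative sketch only; the real callers are the ring setup
 * routines elsewhere in this driver) looks like:
 *
 *	struct nfe_dmamap_arg ctx;
 *
 *	ctx.nfe_busaddr = 0;
 *	error = bus_dmamap_load(tag, map, ring, size,
 *	    nfe_dma_map_segs, &ctx, 0);
 *	if (error == 0)
 *		physaddr = ctx.nfe_busaddr;
 */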
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
	    NFE_PROC_MAX));
}

#define	NFE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	NFE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

static void
nfe_sysctl_node(struct nfe_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct nfe_hw_stats *stats;
	int error;

	stats = &sc->nfe_stats;
	ctx = device_get_sysctl_ctx(sc->nfe_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
	SYSCTL_ADD_PROC(ctx, child,
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
	    "max number of Rx events to process");

	sc->nfe_process_limit = NFE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->nfe_dev),
	    device_get_unit(sc->nfe_dev), "process_limit",
	    &sc->nfe_process_limit);
	if (error == 0) {
		if (sc->nfe_process_limit < NFE_PROC_MIN ||
		    sc->nfe_process_limit > NFE_PROC_MAX) {
			device_printf(sc->nfe_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", NFE_PROC_DEFAULT);
			sc->nfe_process_limit = NFE_PROC_DEFAULT;
		}
	}

	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
		return;

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "NFE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);

	NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
	    &stats->rx_frame_errors, "Framing Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
	    &stats->rx_extra_bytes, "Extra Bytes");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
	    &stats->rx_late_cols, "Late Collisions");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Runts");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
	    &stats->rx_jumbos, "Jumbos");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
	    &stats->rx_fifo_overuns, "FIFO Overruns");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
	    &stats->rx_crc_errors, "CRC Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
	    &stats->rx_fae, "Frame Alignment Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
	    &stats->rx_len_errors, "Length Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
	    &stats->rx_unicast, "Unicast Frames");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
	    &stats->rx_multicast, "Multicast Frames");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
	    &stats->rx_broadcast, "Broadcast Frames");
	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
		    &stats->rx_octets, "Octets");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
		    &stats->rx_pause, "Pause frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
		    &stats->rx_drops, "Drop frames");
	}

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
	    &stats->tx_octets, "Octets");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
	    &stats->tx_zero_rexmits, "Zero Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
	    &stats->tx_one_rexmits, "One Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
	    &stats->tx_multi_rexmits, "Multiple Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
	    &stats->tx_late_cols, "Late Collisions");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
	    &stats->tx_fifo_underuns, "FIFO Underruns");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
	    &stats->tx_carrier_losts, "Carrier Losts");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
	    &stats->tx_excess_deferals, "Excess Deferrals");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
	    &stats->tx_retry_errors, "Retry Errors");
	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
		    &stats->tx_deferals, "Deferrals");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
		    &stats->tx_frames, "Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
		    &stats->tx_pause, "Pause Frames");
	}
	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
		    &stats->tx_unicast, "Unicast Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
		    &stats->tx_multicast, "Multicast Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
		    &stats->tx_broadcast, "Broadcast Frames");
	}
}

#undef NFE_SYSCTL_STAT_ADD32
#undef NFE_SYSCTL_STAT_ADD64

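/*
 * The hardware MIB counters clear on read, so resetting the
 * statistics block is just a matter of reading every counter once and
 * discarding the values.
 */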
static void
nfe_stats_clear(struct nfe_softc *sc)
{
	int i, mib_cnt;

	if ((sc->nfe_flags & NFE_MIB_V1) != 0)
		mib_cnt = NFE_NUM_MIB_STATV1;
	else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
		mib_cnt = NFE_NUM_MIB_STATV2;
	else
		return;

	for (i = 0; i < mib_cnt; i++)
		NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t));

	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		NFE_READ(sc, NFE_TX_UNICAST);
		NFE_READ(sc, NFE_TX_MULTICAST);
		NFE_READ(sc, NFE_TX_BROADCAST);
	}
}

static void
nfe_stats_update(struct nfe_softc *sc)
{
	struct nfe_hw_stats *stats;

	NFE_LOCK_ASSERT(sc);

	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
		return;

	stats = &sc->nfe_stats;
	stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
	stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
	stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
	stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
	stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
	stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
	stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
	stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
	stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
	stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
	stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
	stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
	stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
	stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
	stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
	stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
	stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
	stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
	stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
	stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
	stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);

	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
		stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
		stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
		stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
		stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
		stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
	}

	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
		stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
	}
}

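/*
 * Force the PHY down to a 10/100Mbps link before suspend.
 * Wake-on-LAN runs from auxiliary power, which presumably cannot
 * sustain a 1000baseT link, so gigabit advertisement is withdrawn
 * (MII_100T2CR cleared) and autonegotiation restarted, falling back
 * to a forced 100/full MAC configuration if no link comes up.
 */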
static void
nfe_set_linkspeed(struct nfe_softc *sc)
{
	struct mii_softc *miisc;
	struct mii_data *mii;
	int aneg, i, phyno;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			break;
		default:
			break;
		}
	}
	miisc = LIST_FIRST(&mii->mii_phys);
	phyno = miisc->mii_phy;
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
	nfe_miibus_writereg(sc->nfe_dev, phyno,
	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	nfe_miibus_writereg(sc->nfe_dev, phyno,
	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/*
		 * Poll link state until nfe(4) gets a 10/100Mbps link.
		 */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					nfe_mac_config(sc, mii);
					return;
				default:
					break;
				}
			}
			NFE_UNLOCK(sc);
			pause("nfelnk", hz);
			NFE_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->nfe_dev,
			    "establishing a link failed, WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	nfe_mac_config(sc, mii);
}

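/*
 * Arm Wake-on-LAN on the way down: program NFE_WOL_CTL for magic
 * packets, keep the receiver running (with zeroed ring addresses) so
 * the chip can inspect incoming frames, and set the PME enable bit in
 * the PCI power management registers.
 */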
static void
nfe_set_wol(struct nfe_softc *sc)
{
	struct ifnet *ifp;
	uint32_t wolctl;
	int pmc;
	uint16_t pmstat;

	NFE_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
		return;
	ifp = sc->nfe_ifp;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		wolctl = NFE_WOL_MAGIC;
	else
		wolctl = 0;
	NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		nfe_set_linkspeed(sc);
		if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
			NFE_WRITE(sc, NFE_PWR2_CTL,
			    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
		/* Enable RX. */
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
		NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
		    NFE_RX_START);
	}
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}