1 /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
26 #ifdef HAVE_KERNEL_OPTION_HEADERS
27 #include "opt_device_polling.h"
30 #include <sys/param.h>
31 #include <sys/endian.h>
32 #include <sys/systm.h>
33 #include <sys/sockio.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/kernel.h>
38 #include <sys/queue.h>
39 #include <sys/socket.h>
40 #include <sys/sysctl.h>
41 #include <sys/taskqueue.h>
44 #include <net/if_arp.h>
45 #include <net/ethernet.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 #include <net/if_types.h>
49 #include <net/if_vlan_var.h>
53 #include <machine/bus.h>
54 #include <machine/resource.h>
58 #include <dev/mii/mii.h>
59 #include <dev/mii/miivar.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
64 #include <dev/nfe/if_nfereg.h>
65 #include <dev/nfe/if_nfevar.h>
67 MODULE_DEPEND(nfe, pci, 1, 1, 1);
68 MODULE_DEPEND(nfe, ether, 1, 1, 1);
69 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
71 /* "device miibus" required. See GENERIC if you get errors here. */
72 #include "miibus_if.h"
74 static int nfe_probe(device_t);
75 static int nfe_attach(device_t);
76 static int nfe_detach(device_t);
77 static int nfe_suspend(device_t);
78 static int nfe_resume(device_t);
79 static int nfe_shutdown(device_t);
80 static int nfe_can_use_msix(struct nfe_softc *);
81 static int nfe_detect_msik9(struct nfe_softc *);
82 static void nfe_power(struct nfe_softc *);
83 static int nfe_miibus_readreg(device_t, int, int);
84 static int nfe_miibus_writereg(device_t, int, int, int);
85 static void nfe_miibus_statchg(device_t);
86 static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
87 static void nfe_set_intr(struct nfe_softc *);
88 static __inline void nfe_enable_intr(struct nfe_softc *);
89 static __inline void nfe_disable_intr(struct nfe_softc *);
90 static int nfe_ioctl(struct ifnet *, u_long, caddr_t);
91 static void nfe_alloc_msix(struct nfe_softc *, int);
92 static int nfe_intr(void *);
93 static void nfe_int_task(void *, int);
94 static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
95 static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
96 static int nfe_newbuf(struct nfe_softc *, int);
97 static int nfe_jnewbuf(struct nfe_softc *, int);
98 static int nfe_rxeof(struct nfe_softc *, int, int *);
99 static int nfe_jrxeof(struct nfe_softc *, int, int *);
100 static void nfe_txeof(struct nfe_softc *);
101 static int nfe_encap(struct nfe_softc *, struct mbuf **);
102 static void nfe_setmulti(struct nfe_softc *);
103 static void nfe_start(struct ifnet *);
104 static void nfe_start_locked(struct ifnet *);
105 static void nfe_watchdog(struct ifnet *);
106 static void nfe_init(void *);
107 static void nfe_init_locked(void *);
108 static void nfe_stop(struct ifnet *);
109 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
110 static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
111 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
112 static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
113 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
114 static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
115 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116 static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
117 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
118 static int nfe_ifmedia_upd(struct ifnet *);
119 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
120 static void nfe_tick(void *);
121 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
122 static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
123 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
125 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
126 static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
127 static void nfe_sysctl_node(struct nfe_softc *);
128 static void nfe_stats_clear(struct nfe_softc *);
129 static void nfe_stats_update(struct nfe_softc *);
130 static void nfe_set_linkspeed(struct nfe_softc *);
131 static void nfe_set_wol(struct nfe_softc *);
134 static int nfedebug = 0;
135 #define DPRINTF(sc, ...) do { \
137 device_printf((sc)->nfe_dev, __VA_ARGS__); \
139 #define DPRINTFN(sc, n, ...) do { \
140 if (nfedebug >= (n)) \
141 device_printf((sc)->nfe_dev, __VA_ARGS__); \
144 #define DPRINTF(sc, ...)
145 #define DPRINTFN(sc, n, ...)
148 #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx)
149 #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx)
150 #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
153 static int msi_disable = 0;
154 static int msix_disable = 0;
155 static int jumbo_disable = 0;
156 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
157 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
158 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
/*
 * newbus glue: device and miibus method tables, driver declaration and
 * module registration.  NOTE(review): this extract is missing interior
 * lines (the embedded original line numbers are discontinuous), e.g. the
 * DEVMETHOD_END/"nfe" name fields are not visible here.
 */
160 static device_method_t nfe_methods[] = {
161 /* Device interface */
162 DEVMETHOD(device_probe, nfe_probe),
163 DEVMETHOD(device_attach, nfe_attach),
164 DEVMETHOD(device_detach, nfe_detach),
165 DEVMETHOD(device_suspend, nfe_suspend),
166 DEVMETHOD(device_resume, nfe_resume),
167 DEVMETHOD(device_shutdown, nfe_shutdown),
/* MII bus interface, served on behalf of the attached PHY. */
170 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
171 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
172 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
177 static driver_t nfe_driver = {
180 sizeof(struct nfe_softc)
183 static devclass_t nfe_devclass;
/* Attach nfe to the PCI bus and hang a miibus instance off each nfe. */
185 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
186 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
/*
 * Table of supported NVIDIA PCI vendor/device IDs with the description
 * string that nfe_probe() installs via device_set_desc().
 * NOTE(review): nfe_probe() iterates until t->name == NULL, so the table
 * must end with a NULL-name sentinel entry; the sentinel is not visible
 * in this extract (lines were dropped) -- confirm it exists in the file.
 */
188 static struct nfe_type nfe_devs[] = {
189 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
190 "NVIDIA nForce MCP Networking Adapter"},
191 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
192 "NVIDIA nForce2 MCP2 Networking Adapter"},
193 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
194 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
195 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
196 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
197 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
198 "NVIDIA nForce3 MCP3 Networking Adapter"},
199 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
200 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
201 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
202 "NVIDIA nForce3 MCP7 Networking Adapter"},
203 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
204 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
205 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
206 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
207 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
208 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */
209 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
210 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */
211 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
212 "NVIDIA nForce 430 MCP12 Networking Adapter"},
213 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
214 "NVIDIA nForce 430 MCP13 Networking Adapter"},
215 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
216 "NVIDIA nForce MCP55 Networking Adapter"},
217 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
218 "NVIDIA nForce MCP55 Networking Adapter"},
219 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
220 "NVIDIA nForce MCP61 Networking Adapter"},
221 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
222 "NVIDIA nForce MCP61 Networking Adapter"},
223 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
224 "NVIDIA nForce MCP61 Networking Adapter"},
225 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
226 "NVIDIA nForce MCP61 Networking Adapter"},
227 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
228 "NVIDIA nForce MCP65 Networking Adapter"},
229 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
230 "NVIDIA nForce MCP65 Networking Adapter"},
231 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
232 "NVIDIA nForce MCP65 Networking Adapter"},
233 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
234 "NVIDIA nForce MCP65 Networking Adapter"},
235 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
236 "NVIDIA nForce MCP67 Networking Adapter"},
237 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
238 "NVIDIA nForce MCP67 Networking Adapter"},
239 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
240 "NVIDIA nForce MCP67 Networking Adapter"},
241 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
242 "NVIDIA nForce MCP67 Networking Adapter"},
243 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
244 "NVIDIA nForce MCP73 Networking Adapter"},
245 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
246 "NVIDIA nForce MCP73 Networking Adapter"},
247 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
248 "NVIDIA nForce MCP73 Networking Adapter"},
249 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
250 "NVIDIA nForce MCP73 Networking Adapter"},
251 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
252 "NVIDIA nForce MCP77 Networking Adapter"},
253 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
254 "NVIDIA nForce MCP77 Networking Adapter"},
255 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
256 "NVIDIA nForce MCP77 Networking Adapter"},
257 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
258 "NVIDIA nForce MCP77 Networking Adapter"},
259 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
260 "NVIDIA nForce MCP79 Networking Adapter"},
261 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
262 "NVIDIA nForce MCP79 Networking Adapter"},
263 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
264 "NVIDIA nForce MCP79 Networking Adapter"},
265 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
266 "NVIDIA nForce MCP79 Networking Adapter"},
271 /* Probe for supported hardware ID's */
/*
 * Walk nfe_devs[] comparing each entry's vendor/device ID against the
 * probed PCI device; on a match install the description and claim the
 * device with default priority.  NOTE(review): the function header,
 * the declaration/initialization of the iterator `t`, the loop's
 * increment, closing braces and the no-match return are missing from
 * this extract (line numbering jumps 273->278 and ends at 283).
 */
273 nfe_probe(device_t dev)
278 /* Check for matching PCI DEVICE ID's */
279 while (t->name != NULL) {
280 if ((pci_get_vendor(dev) == t->vid_id) &&
281 (pci_get_device(dev) == t->dev_id)) {
282 device_set_desc(dev, t->name);
283 return (BUS_PROBE_DEFAULT);
/*
 * Try to switch the device to MSI-X: map the MSI-X table (BAR 2) and
 * PBA (BAR 3) memory resources, then allocate `count` vectors.  On any
 * failure every resource acquired so far is released and the softc
 * pointers are reset to NULL so the caller can fall back to MSI/INTx.
 * NOTE(review): interior lines are missing from this extract (e.g. the
 * `rid` assignments before each bus_alloc_resource_any() call, the
 * early `return`s, and the success path that sets sc->nfe_msix).
 */
292 nfe_alloc_msix(struct nfe_softc *sc, int count)
297 sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
299 if (sc->nfe_msix_res == NULL) {
300 device_printf(sc->nfe_dev,
301 "couldn't allocate MSIX table resource\n");
305 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
306 SYS_RES_MEMORY, &rid, RF_ACTIVE);
307 if (sc->nfe_msix_pba_res == NULL) {
308 device_printf(sc->nfe_dev,
309 "couldn't allocate MSIX PBA resource\n");
/* Unwind: give back the table resource mapped above. */
310 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
312 sc->nfe_msix_res = NULL;
316 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
317 if (count == NFE_MSI_MESSAGES) {
319 device_printf(sc->nfe_dev,
320 "Using %d MSIX messages\n", count);
/* Allocation failed or returned the wrong vector count: undo all. */
324 device_printf(sc->nfe_dev,
325 "couldn't allocate MSIX\n");
326 pci_release_msi(sc->nfe_dev);
327 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
328 PCIR_BAR(3), sc->nfe_msix_pba_res);
329 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
330 PCIR_BAR(2), sc->nfe_msix_res);
331 sc->nfe_msix_pba_res = NULL;
332 sc->nfe_msix_res = NULL;
/*
 * Detect the MSI K9N6PGM2-V2 (MS-7309) mainboard by comparing the
 * SMBIOS planar maker/product kernel environment strings against the
 * literals below.  Used by nfe_attach() to pick a fixed PHY location on
 * MCP61 boards.  NOTE(review): the declarations of `m`/`p`, the return
 * statements and the freeenv/cleanup lines are missing from this
 * extract -- presumably it returns nonzero on a match; confirm.
 */
339 nfe_detect_msik9(struct nfe_softc *sc)
341 static const char *maker = "MSI";
342 static const char *product = "K9N6PGM2-V2 (MS-7309)";
347 m = getenv("smbios.planar.maker");
348 p = getenv("smbios.planar.product");
349 if (m != NULL && p != NULL) {
350 if (strcmp(m, maker) == 0 && strcmp(p, product) == 0)
/*
 * Device attach: set up the softc mutex and tick callout, map the
 * register BAR, tune PCIe max-read-request, probe MSI-X/MSI/INTx
 * interrupt resources, derive per-chip feature flags from the PCI
 * device ID, read the station address, create the parent bus_dma tag,
 * allocate Tx/Rx/jumbo-Rx rings, configure ifnet capabilities, attach
 * the PHY via mii_attach(), ether_ifattach(), and finally hook up the
 * interrupt handler(s) serviced through a fast taskqueue.
 * NOTE(review): many interior lines (error-path gotos, blank lines,
 * closing braces, `break` statements between switch cases) are missing
 * from this extract; the code below is left byte-identical.
 */
363 nfe_attach(device_t dev)
365 struct nfe_softc *sc;
367 bus_addr_t dma_addr_max;
368 int error = 0, i, msic, phyloc, reg, rid;
370 sc = device_get_softc(dev);
373 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
375 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
377 pci_enable_busmaster(dev);
/* Map the device register window (memory BAR). */
380 sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
382 if (sc->nfe_res[0] == NULL) {
383 device_printf(dev, "couldn't map memory resources\n");
384 mtx_destroy(&sc->nfe_mtx);
/*
 * NOTE(review): "®" on the next line is an HTML-entity mojibake of
 * "&reg" introduced by the extraction -- restore before compiling.
 */
388 if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) {
391 v = pci_read_config(dev, reg + 0x08, 2);
392 /* Change max. read request size to 4096. */
395 pci_write_config(dev, reg + 0x08, v, 2);
397 v = pci_read_config(dev, reg + 0x0c, 2);
398 /* link capability */
400 width = pci_read_config(dev, reg + 0x12, 2);
401 /* negotiated link width */
402 width = (width >> 4) & 0x3f;
/* Warn if the link trained narrower than the slot's maximum. */
404 device_printf(sc->nfe_dev,
405 "warning, negotiated width of link(x%d) != "
406 "max. width of link(x%d)\n", width, v);
408 if (nfe_can_use_msix(sc) == 0) {
/* wait -- keep original text below byte-identical */
410 device_printf(sc->nfe_dev,
411 "MSI/MSI-X capability black-listed, will use INTx\n");
416 /* Allocate interrupt */
417 if (msix_disable == 0 || msi_disable == 0) {
418 if (msix_disable == 0 &&
419 (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
420 nfe_alloc_msix(sc, msic);
421 if (msi_disable == 0 && sc->nfe_msix == 0 &&
422 (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
423 pci_alloc_msi(dev, &msic) == 0) {
424 if (msic == NFE_MSI_MESSAGES) {
427 "Using %d MSI messages\n", msic);
430 pci_release_msi(dev);
/* Legacy INTx fallback when neither MSI-X nor MSI was obtained. */
434 if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
436 sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
437 RF_SHAREABLE | RF_ACTIVE);
438 if (sc->nfe_irq[0] == NULL) {
439 device_printf(dev, "couldn't allocate IRQ resources\n");
444 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
445 sc->nfe_irq[i] = bus_alloc_resource_any(dev,
446 SYS_RES_IRQ, &rid, RF_ACTIVE);
447 if (sc->nfe_irq[i] == NULL) {
449 "couldn't allocate IRQ resources for "
450 "message %d\n", rid);
455 /* Map interrupts to vector 0. */
456 if (sc->nfe_msix != 0) {
457 NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
458 NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
459 } else if (sc->nfe_msi != 0) {
460 NFE_WRITE(sc, NFE_MSI_MAP0, 0);
461 NFE_WRITE(sc, NFE_MSI_MAP1, 0);
465 /* Set IRQ status/mask register. */
466 sc->nfe_irq_status = NFE_IRQ_STATUS;
467 sc->nfe_irq_mask = NFE_IRQ_MASK;
468 sc->nfe_intrs = NFE_IRQ_WANTED;
/* Override the defaults for the MSI-X / MSI register layouts. */
470 if (sc->nfe_msix != 0) {
471 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
472 sc->nfe_nointrs = NFE_IRQ_WANTED;
473 } else if (sc->nfe_msi != 0) {
474 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
475 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
478 sc->nfe_devid = pci_get_device(dev);
479 sc->nfe_revid = pci_get_revid(dev);
/*
 * Per-chip feature flags (jumbo frames, 40-bit DMA, hardware
 * checksum/VLAN, power management, MIB revision) keyed on device ID.
 * NOTE(review): the `break` statements between the case groups are
 * among the lines lost in extraction.
 */
482 switch (sc->nfe_devid) {
483 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
484 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
485 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
486 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
487 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
489 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
490 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
491 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
493 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
494 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
495 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
496 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
497 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
500 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
501 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
502 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
503 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
506 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
507 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
508 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
509 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
510 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
511 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
512 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
513 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
514 case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
515 case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
516 case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
517 case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
518 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
519 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
521 case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
522 case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
523 case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
524 case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
525 /* XXX flow control */
526 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
527 NFE_CORRECT_MACADDR | NFE_MIB_V3;
529 case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
530 case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
531 case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
532 case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
533 /* XXX flow control */
534 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
535 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
537 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
538 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
539 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
540 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
541 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
542 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
548 /* Check for reversed ethernet address */
549 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
550 sc->nfe_flags |= NFE_CORRECT_MACADDR;
551 nfe_get_macaddr(sc, sc->eaddr);
553 * Allocate the parent bus DMA tag appropriate for PCI.
/* 40-bit DMA addressing when the chip supports it, else 32-bit. */
555 dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
556 if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
557 dma_addr_max = NFE_DMA_MAXADDR;
558 error = bus_dma_tag_create(
559 bus_get_dma_tag(sc->nfe_dev), /* parent */
560 1, 0, /* alignment, boundary */
561 dma_addr_max, /* lowaddr */
562 BUS_SPACE_MAXADDR, /* highaddr */
563 NULL, NULL, /* filter, filterarg */
564 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
565 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
567 NULL, NULL, /* lockfunc, lockarg */
568 &sc->nfe_parent_tag);
572 ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
574 device_printf(dev, "can not if_alloc()\n");
580 * Allocate Tx and Rx rings.
582 if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
585 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
/* Jumbo ring allocation is best-effort; it clears NFE_JUMBO_SUP on failure. */
588 nfe_alloc_jrx_ring(sc, &sc->jrxq);
589 /* Create sysctl node. */
593 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
594 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
595 ifp->if_ioctl = nfe_ioctl;
596 ifp->if_start = nfe_start;
597 ifp->if_hwassist = 0;
598 ifp->if_capabilities = 0;
599 ifp->if_init = nfe_init;
600 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
601 ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
602 IFQ_SET_READY(&ifp->if_snd);
604 if (sc->nfe_flags & NFE_HW_CSUM) {
605 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
606 ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
608 ifp->if_capenable = ifp->if_capabilities;
610 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
611 /* VLAN capability setup. */
612 ifp->if_capabilities |= IFCAP_VLAN_MTU;
613 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
614 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
615 if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
616 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM |
/*
 * NOTE(review): "®" below is the same "&reg" mojibake as above --
 * restore before compiling.  Advertise WoL magic-packet support only
 * when the device exposes a PCI power-management capability.
 */
620 if (pci_find_cap(dev, PCIY_PMG, ®) == 0)
621 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
622 ifp->if_capenable = ifp->if_capabilities;
625 * Tell the upper layer(s) we support long frames.
626 * Must appear after the call to ether_ifattach() because
627 * ether_ifattach() sets ifi_hdrlen to the default value.
629 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
631 #ifdef DEVICE_POLLING
632 ifp->if_capabilities |= IFCAP_POLLING;
/* On the MSI K9N6PGM2-V2 board the MCP61 PHY must be probed at a fixed
 * location (set inside the if-body, lost from this extract). */
636 phyloc = MII_PHY_ANY;
637 if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 ||
638 sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 ||
639 sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 ||
640 sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) {
641 if (nfe_detect_msik9(sc) != 0)
644 error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
645 nfe_ifmedia_sts, BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY,
648 device_printf(dev, "attaching PHYs failed\n");
651 ether_ifattach(ifp, sc->eaddr);
/* Interrupt bottom half runs from a dedicated fast taskqueue thread. */
653 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
654 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
655 taskqueue_thread_enqueue, &sc->nfe_tq);
656 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
657 device_get_nameunit(sc->nfe_dev));
659 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
660 error = bus_setup_intr(dev, sc->nfe_irq[0],
661 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
662 &sc->nfe_intrhand[0]);
664 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
665 error = bus_setup_intr(dev, sc->nfe_irq[i],
666 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
667 &sc->nfe_intrhand[i]);
673 device_printf(dev, "couldn't set up irq\n");
674 taskqueue_free(sc->nfe_tq);
/*
 * Device detach: stop the interface, restore the factory byte order of
 * the station address in hardware, detach the miibus child, drain the
 * interrupt taskqueue, tear down interrupt handlers, and release IRQ,
 * MSI-X (BAR 2/3) and register (BAR 0) resources, the DMA rings, the
 * parent DMA tag and finally the softc mutex.  NOTE(review): interior
 * lines (declarations of `ifp`/`i`/`rid`, nfe_stop() call, if_free(),
 * closing braces and the final return) are missing from this extract.
 */
689 nfe_detach(device_t dev)
691 struct nfe_softc *sc;
693 uint8_t eaddr[ETHER_ADDR_LEN];
696 sc = device_get_softc(dev);
697 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
700 #ifdef DEVICE_POLLING
701 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
702 ether_poll_deregister(ifp);
704 if (device_is_attached(dev)) {
707 ifp->if_flags &= ~IFF_UP;
709 callout_drain(&sc->nfe_stat_ch)
714 /* restore ethernet address */
715 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
/* Hardware stores the MAC reversed on these chips; write it back that way. */
716 for (i = 0; i < ETHER_ADDR_LEN; i++) {
717 eaddr[i] = sc->eaddr[5 - i];
720 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
721 nfe_set_macaddr(sc, eaddr);
725 device_delete_child(dev, sc->nfe_miibus);
726 bus_generic_detach(dev);
727 if (sc->nfe_tq != NULL) {
728 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
729 taskqueue_free(sc->nfe_tq);
733 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
734 if (sc->nfe_intrhand[i] != NULL) {
735 bus_teardown_intr(dev, sc->nfe_irq[i],
736 sc->nfe_intrhand[i]);
737 sc->nfe_intrhand[i] = NULL;
/* INTx uses rid 0; MSI/MSI-X vectors were allocated with rid 1.. */
741 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
742 if (sc->nfe_irq[0] != NULL)
743 bus_release_resource(dev, SYS_RES_IRQ, 0,
746 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
747 if (sc->nfe_irq[i] != NULL) {
748 bus_release_resource(dev, SYS_RES_IRQ, rid,
750 sc->nfe_irq[i] = NULL;
753 pci_release_msi(dev);
755 if (sc->nfe_msix_pba_res != NULL) {
756 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
757 sc->nfe_msix_pba_res);
758 sc->nfe_msix_pba_res = NULL;
760 if (sc->nfe_msix_res != NULL) {
761 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
763 sc->nfe_msix_res = NULL;
765 if (sc->nfe_res[0] != NULL) {
766 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
768 sc->nfe_res[0] = NULL;
771 nfe_free_tx_ring(sc, &sc->txq);
772 nfe_free_rx_ring(sc, &sc->rxq);
773 nfe_free_jrx_ring(sc, &sc->jrxq);
775 if (sc->nfe_parent_tag) {
776 bus_dma_tag_destroy(sc->nfe_parent_tag);
777 sc->nfe_parent_tag = NULL;
780 mtx_destroy(&sc->nfe_mtx);
/*
 * Suspend handler: stop the interface and mark the softc suspended so
 * nfe_resume() knows to bring it back up.  NOTE(review): the lock
 * acquire/release around the body and the return statement are among
 * the lines missing from this extract.
 */
787 nfe_suspend(device_t dev)
789 struct nfe_softc *sc;
791 sc = device_get_softc(dev);
794 nfe_stop(sc->nfe_ifp);
796 sc->nfe_suspended = 1;
/*
 * Resume handler: if the interface was up before suspend, reinitialize
 * it (the nfe_init_locked() call sits in the missing if-body), then
 * clear the suspended flag.  NOTE(review): the `ifp` declaration,
 * locking and return are among the lines missing from this extract.
 */
804 nfe_resume(device_t dev)
806 struct nfe_softc *sc;
809 sc = device_get_softc(dev);
814 if (ifp->if_flags & IFF_UP)
816 sc->nfe_suspended = 0;
/*
 * Return whether MSI/MSI-X may be used on this system.  Compares the
 * SMBIOS planar maker/product strings from the kernel environment
 * against a static blacklist of boards with known MSI/MSI-X problems.
 * NOTE(review): the struct member declarations, the loop's `mblp++`
 * advance, the `use_msix` result handling, freeenv() cleanup and the
 * return statement are missing from this extract.
 */
824 nfe_can_use_msix(struct nfe_softc *sc)
826 static struct msix_blacklist {
829 } msix_blacklists[] = {
830 { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
833 struct msix_blacklist *mblp;
834 char *maker, *product;
835 int count, n, use_msix;
838 * Search base board manufacturer and product name table
839 * to see this system has a known MSI/MSI-X issue.
841 maker = getenv("smbios.planar.maker");
842 product = getenv("smbios.planar.product");
844 if (maker != NULL && product != NULL) {
845 count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
846 mblp = msix_blacklists;
847 for (n = 0; n < count; n++) {
848 if (strcmp(maker, mblp->maker) == 0 &&
849 strcmp(product, mblp->product) == 0) {
865 /* Take PHY/NIC out of powerdown, from Linux */
/*
 * No-op unless the chip advertises NFE_PWR_MGMT.  Pulses the MAC reset
 * magic, clears the wakeup bits in PWR2_CTL, and sets the rev-A3
 * workaround bit on nForce 430 parts at revision >= 0xa3.
 * NOTE(review): the `pwr` declaration, the early return and the DELAY()
 * calls between register writes are missing from this extract.
 */
867 nfe_power(struct nfe_softc *sc)
871 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
873 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
874 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
876 NFE_WRITE(sc, NFE_MAC_RESET, 0);
878 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
879 pwr = NFE_READ(sc, NFE_PWR2_CTL);
880 pwr &= ~NFE_PWR2_WAKEUP_MASK;
881 if (sc->nfe_revid >= 0xa3 &&
882 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
883 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
884 pwr |= NFE_PWR2_REVA3;
885 NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
/*
 * miibus link-state change callback: records the new link state (the
 * sc->nfe_link assignment lives in the missing switch body), reprograms
 * the MAC via nfe_mac_config(), and gates the Tx/Rx engines on link-up
 * while the interface is running.  NOTE(review): the `ifp` declaration,
 * sc->nfe_link updates and switch case bodies are missing from this
 * extract.
 */
890 nfe_miibus_statchg(device_t dev)
892 struct nfe_softc *sc;
893 struct mii_data *mii;
895 uint32_t rxctl, txctl;
897 sc = device_get_softc(dev);
899 mii = device_get_softc(sc->nfe_miibus);
/* Only treat the link as usable when it is both valid and active. */
903 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
904 (IFM_ACTIVE | IFM_AVALID)) {
905 switch (IFM_SUBTYPE(mii->mii_media_active)) {
916 nfe_mac_config(sc, mii);
917 txctl = NFE_READ(sc, NFE_TX_CTL);
918 rxctl = NFE_READ(sc, NFE_RX_CTL);
919 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
920 txctl |= NFE_TX_START;
921 rxctl |= NFE_RX_START;
923 txctl &= ~NFE_TX_START;
924 rxctl &= ~NFE_RX_START;
926 NFE_WRITE(sc, NFE_TX_CTL, txctl);
927 NFE_WRITE(sc, NFE_RX_CTL, rxctl);
/*
 * Program the MAC for the media currently negotiated by the PHY:
 * PHY-interface mode, random seed, MISC1 magic, link-speed register,
 * SETUP_R1 magic, and Rx/Tx pause-frame handling according to the
 * IFM_ETH_RXPAUSE/IFM_ETH_TXPAUSE options and the NFE_TX_FLOW_CTRL
 * chip flag.  NOTE(review): the `val` declaration, lock assertion,
 * switch `break`s and various `else` lines are missing from this
 * extract.
 */
932 nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
934 uint32_t link, misc, phy, seed;
939 phy = NFE_READ(sc, NFE_PHY_IFACE);
940 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
942 seed = NFE_READ(sc, NFE_RNDSEED);
943 seed &= ~NFE_SEED_MASK;
945 misc = NFE_MISC1_MAGIC;
946 link = NFE_MEDIA_SET;
948 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
949 phy |= NFE_PHY_HDX; /* half-duplex */
950 misc |= NFE_MISC1_HDX;
/* Pick speed-specific media/seed/phy bits from the active subtype. */
953 switch (IFM_SUBTYPE(mii->mii_media_active)) {
954 case IFM_1000_T: /* full-duplex only */
955 link |= NFE_MEDIA_1000T;
956 seed |= NFE_SEED_1000T;
957 phy |= NFE_PHY_1000T;
960 link |= NFE_MEDIA_100TX;
961 seed |= NFE_SEED_100TX;
962 phy |= NFE_PHY_100TX;
965 link |= NFE_MEDIA_10T;
966 seed |= NFE_SEED_10T;
/* Magic SETUP_R1 value depends on an undocumented PHY-interface bit. */
970 if ((phy & 0x10000000) != 0) {
971 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
972 val = NFE_R1_MAGIC_1000;
974 val = NFE_R1_MAGIC_10_100;
976 val = NFE_R1_MAGIC_DEFAULT;
977 NFE_WRITE(sc, NFE_SETUP_R1, val);
979 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
981 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
982 NFE_WRITE(sc, NFE_MISC1, misc);
983 NFE_WRITE(sc, NFE_LINKSPEED, link);
985 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
986 /* It seems all hardwares supports Rx pause frames. */
987 val = NFE_READ(sc, NFE_RXFILTER);
988 if ((IFM_OPTIONS(mii->mii_media_active) &
989 IFM_ETH_RXPAUSE) != 0)
990 val |= NFE_PFF_RX_PAUSE;
992 val &= ~NFE_PFF_RX_PAUSE;
993 NFE_WRITE(sc, NFE_RXFILTER, val);
994 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
995 val = NFE_READ(sc, NFE_MISC1);
996 if ((IFM_OPTIONS(mii->mii_media_active) &
997 IFM_ETH_TXPAUSE) != 0) {
998 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
999 NFE_TX_PAUSE_FRAME_ENABLE);
1000 val |= NFE_MISC1_TX_PAUSE;
1002 val &= ~NFE_MISC1_TX_PAUSE;
1003 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
1004 NFE_TX_PAUSE_FRAME_DISABLE);
1006 NFE_WRITE(sc, NFE_MISC1, val);
/* Half-duplex (the matching `else` line is missing): pause is invalid. */
1009 /* disable rx/tx pause frames */
1010 val = NFE_READ(sc, NFE_RXFILTER);
1011 val &= ~NFE_PFF_RX_PAUSE;
1012 NFE_WRITE(sc, NFE_RXFILTER, val);
1013 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
1014 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
1015 NFE_TX_PAUSE_FRAME_DISABLE);
1016 val = NFE_READ(sc, NFE_MISC1);
1017 val &= ~NFE_MISC1_TX_PAUSE;
1018 NFE_WRITE(sc, NFE_MISC1, val);
/*
 * miibus read callback: issue a PHY register read through the NFE_PHY_CTL
 * mailbox, poll for BUSY to clear (up to NFE_TIMEOUT iterations), check
 * NFE_PHY_STATUS for an error, and return the data register.  A nonzero,
 * non-0xffffffff read also latches the responding PHY address into
 * sc->mii_phyaddr.  NOTE(review): the `val`/`ntries` declarations,
 * DELAY() calls and the failure-path `return (0)` lines are missing
 * from this extract.
 */
1025 nfe_miibus_readreg(device_t dev, int phy, int reg)
1027 struct nfe_softc *sc = device_get_softc(dev);
1031 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
/* If a previous transaction is still busy, writing BUSY cancels it. */
1033 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1034 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1038 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
1040 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1042 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1045 if (ntries == NFE_TIMEOUT) {
1046 DPRINTFN(sc, 2, "timeout waiting for PHY\n");
1050 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
1051 DPRINTFN(sc, 2, "could not read PHY\n");
1055 val = NFE_READ(sc, NFE_PHY_DATA);
1056 if (val != 0xffffffff && val != 0)
1057 sc->mii_phyaddr = phy;
1059 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
/*
 * miibus write callback: cancel any in-flight PHY transaction, load the
 * data register, kick off the write through NFE_PHY_CTL, and poll for
 * BUSY to clear.  A timeout is only reported at debug level; the write
 * result is not otherwise checked.  NOTE(review): the `ctl`/`ntries`
 * declarations, DELAY() calls and the return statement are missing from
 * this extract.
 */
1066 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
1068 struct nfe_softc *sc = device_get_softc(dev);
1072 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1074 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1075 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1079 NFE_WRITE(sc, NFE_PHY_DATA, val);
1080 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1081 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1083 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1085 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1089 if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1090 device_printf(sc->nfe_dev, "could not write to PHY\n");
/*
 * Callback argument for nfe_dma_map_segs(): receives the bus address of
 * a loaded DMA segment.  NOTE(review): the closing brace of this struct
 * is among the lines missing from this extract.
 */
1095 struct nfe_dmamap_arg {
1096 bus_addr_t nfe_busaddr;
/*
 * Allocate the standard Rx ring: a coherent descriptor area (64-bit or
 * 32-bit descriptor format depending on NFE_40BIT_ADDR), loaded so its
 * bus address lands in ring->physaddr via nfe_dma_map_segs(), plus a
 * per-buffer DMA tag (single MCLBYTES segment), a spare map, and one
 * map per ring slot.  NOTE(review): the `desc` declaration, error
 * returns between steps, closing braces and the final return are
 * missing from this extract.
 */
1100 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1102 struct nfe_dmamap_arg ctx;
1103 struct nfe_rx_data *data;
1105 int i, error, descsize;
1107 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1108 desc = ring->desc64;
1109 descsize = sizeof (struct nfe_desc64);
1111 desc = ring->desc32;
1112 descsize = sizeof (struct nfe_desc32);
1115 ring->cur = ring->next = 0;
/* Tag for the descriptor ring itself: one contiguous, aligned segment. */
1117 error = bus_dma_tag_create(sc->nfe_parent_tag,
1118 NFE_RING_ALIGN, 0, /* alignment, boundary */
1119 BUS_SPACE_MAXADDR, /* lowaddr */
1120 BUS_SPACE_MAXADDR, /* highaddr */
1121 NULL, NULL, /* filter, filterarg */
1122 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1123 NFE_RX_RING_COUNT * descsize, /* maxsegsize */
1125 NULL, NULL, /* lockfunc, lockarg */
1126 &ring->rx_desc_tag);
1128 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1132 /* allocate memory to desc */
1133 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1134 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1136 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1139 if (sc->nfe_flags & NFE_40BIT_ADDR)
1140 ring->desc64 = desc;
1142 ring->desc32 = desc;
1144 /* map desc to device visible address space */
1145 ctx.nfe_busaddr = 0;
1146 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1147 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1149 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1152 ring->physaddr = ctx.nfe_busaddr;
/* Tag for individual Rx mbuf cluster buffers (one MCLBYTES segment each). */
1154 error = bus_dma_tag_create(sc->nfe_parent_tag,
1155 1, 0, /* alignment, boundary */
1156 BUS_SPACE_MAXADDR, /* lowaddr */
1157 BUS_SPACE_MAXADDR, /* highaddr */
1158 NULL, NULL, /* filter, filterarg */
1159 MCLBYTES, 1, /* maxsize, nsegments */
1160 MCLBYTES, /* maxsegsize */
1162 NULL, NULL, /* lockfunc, lockarg */
1163 &ring->rx_data_tag);
1165 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1169 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1171 device_printf(sc->nfe_dev,
1172 "could not create Rx DMA spare map\n");
1177 * Pre-allocate Rx buffers and populate Rx ring.
1179 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1180 data = &sc->rxq.data[i];
1181 data->rx_data_map = NULL;
1183 error = bus_dmamap_create(ring->rx_data_tag, 0,
1184 &data->rx_data_map);
1186 device_printf(sc->nfe_dev,
1187 "could not create Rx DMA map\n");
/*
 * Allocate DMA resources for the jumbo Rx ring: descriptor-ring DMA tag,
 * jumbo buffer DMA tag (MJUM9BYTES clusters), the descriptor memory itself,
 * a spare buffer map, and one map per ring slot.  On resource shortage the
 * driver falls back to non-jumbo operation instead of failing attach.
 * NOTE(review): this listing is elided — the error checks/goto-fail paths
 * after each bus_dma call are not visible here.
 */
1198 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1200 struct nfe_dmamap_arg ctx;
1201 struct nfe_rx_data *data;
1203 int i, error, descsize;
/* Nothing to do if the chip has no jumbo frame support. */
1205 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
/* Administrative disable via the jumbo_disable tunable. */
1207 if (jumbo_disable != 0) {
1208 device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1209 sc->nfe_jumbo_disable = 1;
/* 40-bit chips use 64-bit descriptors, others the 32-bit layout. */
1213 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1214 desc = ring->jdesc64;
1215 descsize = sizeof (struct nfe_desc64);
1217 desc = ring->jdesc32;
1218 descsize = sizeof (struct nfe_desc32);
1221 ring->jcur = ring->jnext = 0;
1223 /* Create DMA tag for jumbo Rx ring. */
1224 error = bus_dma_tag_create(sc->nfe_parent_tag,
1225 NFE_RING_ALIGN, 0, /* alignment, boundary */
1226 BUS_SPACE_MAXADDR, /* lowaddr */
1227 BUS_SPACE_MAXADDR, /* highaddr */
1228 NULL, NULL, /* filter, filterarg */
1229 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
1231 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
1233 NULL, NULL, /* lockfunc, lockarg */
1234 &ring->jrx_desc_tag);
1236 device_printf(sc->nfe_dev,
1237 "could not create jumbo ring DMA tag\n");
1241 /* Create DMA tag for jumbo Rx buffers. */
1242 error = bus_dma_tag_create(sc->nfe_parent_tag,
1243 1, 0, /* alignment, boundary */
1244 BUS_SPACE_MAXADDR, /* lowaddr */
1245 BUS_SPACE_MAXADDR, /* highaddr */
1246 NULL, NULL, /* filter, filterarg */
1247 MJUM9BYTES, /* maxsize */
1249 MJUM9BYTES, /* maxsegsize */
1251 NULL, NULL, /* lockfunc, lockarg */
1252 &ring->jrx_data_tag);
1254 device_printf(sc->nfe_dev,
1255 "could not create jumbo Rx buffer DMA tag\n");
1259 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1260 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1261 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1263 device_printf(sc->nfe_dev,
1264 "could not allocate DMA'able memory for jumbo Rx ring\n");
1267 if (sc->nfe_flags & NFE_40BIT_ADDR)
1268 ring->jdesc64 = desc;
1270 ring->jdesc32 = desc;
/* Load the descriptor memory; nfe_dma_map_segs records the bus address. */
1272 ctx.nfe_busaddr = 0;
1273 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1274 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1276 device_printf(sc->nfe_dev,
1277 "could not load DMA'able memory for jumbo Rx ring\n");
1280 ring->jphysaddr = ctx.nfe_busaddr;
1282 /* Create DMA maps for jumbo Rx buffers. */
1283 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1285 device_printf(sc->nfe_dev,
1286 "could not create jumbo Rx DMA spare map\n");
1290 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1291 data = &sc->jrxq.jdata[i];
1292 data->rx_data_map = NULL;
1294 error = bus_dmamap_create(ring->jrx_data_tag, 0,
1295 &data->rx_data_map);
1297 device_printf(sc->nfe_dev,
1298 "could not create jumbo Rx DMA map\n");
1307 * Running without jumbo frame support is ok for most cases
1308 * so don't fail on creating dma tag/map for jumbo frame.
/* Failure path: tear down whatever was built and run non-jumbo. */
1310 nfe_free_jrx_ring(sc, ring);
1311 device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1312 "resource shortage\n");
1313 sc->nfe_jumbo_disable = 1;
/*
 * Initialize the standard Rx ring for use: reset ring indices, zero the
 * descriptor array, populate every slot with a fresh mbuf cluster via
 * nfe_newbuf(), then sync descriptors for device access.
 * NOTE(review): elided listing — the error return on nfe_newbuf() failure
 * is not visible here.
 */
1318 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1324 ring->cur = ring->next = 0;
/* Pick descriptor layout matching the chip's addressing mode. */
1325 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1326 desc = ring->desc64;
1327 descsize = sizeof (struct nfe_desc64);
1329 desc = ring->desc32;
1330 descsize = sizeof (struct nfe_desc32);
1332 bzero(desc, descsize * NFE_RX_RING_COUNT);
1333 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1334 if (nfe_newbuf(sc, i) != 0)
/* Make the freshly written descriptors visible to the hardware. */
1338 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1339 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * Initialize the jumbo Rx ring: reset indices, zero the descriptor array,
 * fill every slot with a 9k jumbo cluster via nfe_jnewbuf(), then sync the
 * descriptor memory for the device.  Jumbo counterpart of nfe_init_rx_ring().
 */
1346 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1352 ring->jcur = ring->jnext = 0;
/* Descriptor layout depends on 40-bit addressing support. */
1353 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1354 desc = ring->jdesc64;
1355 descsize = sizeof (struct nfe_desc64);
1357 desc = ring->jdesc32;
1358 descsize = sizeof (struct nfe_desc32);
1360 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1361 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1362 if (nfe_jnewbuf(sc, i) != 0)
1366 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1367 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * Release all DMA resources held by the standard Rx ring: per-slot buffer
 * maps and mbufs, the spare map, the buffer DMA tag, the descriptor memory
 * and its map, and finally the descriptor DMA tag.  Safe to call on a
 * partially constructed ring (every teardown is NULL-guarded).
 */
1374 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1376 struct nfe_rx_data *data;
1380 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1381 desc = ring->desc64;
1382 descsize = sizeof (struct nfe_desc64);
1384 desc = ring->desc32;
1385 descsize = sizeof (struct nfe_desc32);
/* Tear down per-slot buffer maps and any attached mbufs. */
1388 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1389 data = &ring->data[i];
1390 if (data->rx_data_map != NULL) {
1391 bus_dmamap_destroy(ring->rx_data_tag,
1393 data->rx_data_map = NULL;
1395 if (data->m != NULL) {
/* Destroy the spare map and the buffer tag. */
1400 if (ring->rx_data_tag != NULL) {
1401 if (ring->rx_spare_map != NULL) {
1402 bus_dmamap_destroy(ring->rx_data_tag,
1403 ring->rx_spare_map);
1404 ring->rx_spare_map = NULL;
1406 bus_dma_tag_destroy(ring->rx_data_tag);
1407 ring->rx_data_tag = NULL;
/* Unload and free the descriptor memory, then destroy its tag. */
1411 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1412 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1413 ring->desc64 = NULL;
1414 ring->desc32 = NULL;
1415 ring->rx_desc_map = NULL;
1417 if (ring->rx_desc_tag != NULL) {
1418 bus_dma_tag_destroy(ring->rx_desc_tag);
1419 ring->rx_desc_tag = NULL;
/*
 * Release all DMA resources held by the jumbo Rx ring.  Mirror image of
 * nfe_free_rx_ring() for the jumbo descriptor/buffer tags and maps; a no-op
 * when the chip lacks jumbo support.  NULL-guarded, so it is safe to call
 * from the nfe_alloc_jrx_ring() failure path.
 */
1425 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1427 struct nfe_rx_data *data;
1431 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1434 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1435 desc = ring->jdesc64;
1436 descsize = sizeof (struct nfe_desc64);
1438 desc = ring->jdesc32;
1439 descsize = sizeof (struct nfe_desc32);
/* Tear down per-slot jumbo buffer maps and mbufs. */
1442 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1443 data = &ring->jdata[i];
1444 if (data->rx_data_map != NULL) {
1445 bus_dmamap_destroy(ring->jrx_data_tag,
1447 data->rx_data_map = NULL;
1449 if (data->m != NULL) {
1454 if (ring->jrx_data_tag != NULL) {
1455 if (ring->jrx_spare_map != NULL) {
1456 bus_dmamap_destroy(ring->jrx_data_tag,
1457 ring->jrx_spare_map);
1458 ring->jrx_spare_map = NULL;
1460 bus_dma_tag_destroy(ring->jrx_data_tag);
1461 ring->jrx_data_tag = NULL;
/* Unload/free jumbo descriptor memory and destroy its tag. */
1465 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1466 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1467 ring->jdesc64 = NULL;
1468 ring->jdesc32 = NULL;
1469 ring->jrx_desc_map = NULL;
1472 if (ring->jrx_desc_tag != NULL) {
1473 bus_dma_tag_destroy(ring->jrx_desc_tag);
1474 ring->jrx_desc_tag = NULL;
/*
 * Allocate DMA resources for the Tx ring: a descriptor-ring tag, the
 * descriptor memory (zeroed, coherent), a buffer tag for mbuf chains, and
 * one DMA map per Tx slot.  The loaded bus address is saved in
 * ring->physaddr for programming the chip.
 * NOTE(review): elided listing — error checks after each bus_dma call and
 * the Tx buffer tag parameters (lines 1534-1542) are not visible here.
 */
1480 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1482 struct nfe_dmamap_arg ctx;
1487 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1488 desc = ring->desc64;
1489 descsize = sizeof (struct nfe_desc64);
1491 desc = ring->desc32;
1492 descsize = sizeof (struct nfe_desc32);
1496 ring->cur = ring->next = 0;
/* Descriptor ring: one contiguous segment, ring-aligned. */
1498 error = bus_dma_tag_create(sc->nfe_parent_tag,
1499 NFE_RING_ALIGN, 0, /* alignment, boundary */
1500 BUS_SPACE_MAXADDR, /* lowaddr */
1501 BUS_SPACE_MAXADDR, /* highaddr */
1502 NULL, NULL, /* filter, filterarg */
1503 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1504 NFE_TX_RING_COUNT * descsize, /* maxsegsize */
1506 NULL, NULL, /* lockfunc, lockarg */
1507 &ring->tx_desc_tag);
1509 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1513 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1514 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1516 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1519 if (sc->nfe_flags & NFE_40BIT_ADDR)
1520 ring->desc64 = desc;
1522 ring->desc32 = desc;
/* Load descriptor memory; nfe_dma_map_segs captures the bus address. */
1524 ctx.nfe_busaddr = 0;
1525 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1526 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1528 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1531 ring->physaddr = ctx.nfe_busaddr;
/* Buffer tag for mapping outgoing mbuf chains (parameters elided). */
1533 error = bus_dma_tag_create(sc->nfe_parent_tag,
1543 &ring->tx_data_tag);
1545 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
/* One map per Tx slot for the per-packet mbuf mappings. */
1549 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1550 error = bus_dmamap_create(ring->tx_data_tag, 0,
1551 &ring->data[i].tx_data_map);
1553 device_printf(sc->nfe_dev,
1554 "could not create Tx DMA map\n");
/*
 * Reset the Tx ring to its empty state: clear the forced-transmit counter
 * and ring indices, zero every descriptor, and sync the descriptor memory
 * so the device sees a clean ring.
 */
1565 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1570 sc->nfe_force_tx = 0;
1572 ring->cur = ring->next = 0;
1573 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1574 desc = ring->desc64;
1575 descsize = sizeof (struct nfe_desc64);
1577 desc = ring->desc32;
1578 descsize = sizeof (struct nfe_desc32);
1580 bzero(desc, descsize * NFE_TX_RING_COUNT);
1582 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1583 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * Release all DMA resources held by the Tx ring: unload and free any
 * in-flight mbuf mappings, destroy the per-slot maps and buffer tag, then
 * unload/free the descriptor memory and destroy the descriptor tag.
 */
1588 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1590 struct nfe_tx_data *data;
1594 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1595 desc = ring->desc64;
1596 descsize = sizeof (struct nfe_desc64);
1598 desc = ring->desc32;
1599 descsize = sizeof (struct nfe_desc32);
1602 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1603 data = &ring->data[i];
/* Unmap mbufs still queued at teardown time before destroying maps. */
1605 if (data->m != NULL) {
1606 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1607 BUS_DMASYNC_POSTWRITE);
1608 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1612 if (data->tx_data_map != NULL) {
1613 bus_dmamap_destroy(ring->tx_data_tag,
1615 data->tx_data_map = NULL;
1619 if (ring->tx_data_tag != NULL) {
1620 bus_dma_tag_destroy(ring->tx_data_tag);
1621 ring->tx_data_tag = NULL;
/* Release the descriptor ring itself. */
1625 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1626 BUS_DMASYNC_POSTWRITE);
1627 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1628 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1629 ring->desc64 = NULL;
1630 ring->desc32 = NULL;
1631 ring->tx_desc_map = NULL;
1632 bus_dma_tag_destroy(ring->tx_desc_tag);
1633 ring->tx_desc_tag = NULL;
1637 #ifdef DEVICE_POLLING
1638 static poll_handler_t nfe_poll;
/*
 * polling(4) handler: service up to 'count' Rx packets (jumbo or standard
 * ring depending on configured frame size), kick the transmit path if
 * packets are queued, and on POLL_AND_CHECK_STATUS ack the interrupt
 * status register and handle link-state changes.
 * NOTE(review): elided listing — locking and the rx_npkts return are not
 * fully visible here.
 */
1642 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1644 struct nfe_softc *sc = ifp->if_softc;
1650 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Frames larger than a standard cluster arrive on the jumbo ring. */
1655 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1656 rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
1658 rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
1660 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1661 nfe_start_locked(ifp);
1663 if (cmd == POLL_AND_CHECK_STATUS) {
1664 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
/* Ack all pending interrupt causes. */
1668 NFE_WRITE(sc, sc->nfe_irq_status, r);
1670 if (r & NFE_IRQ_LINK) {
1671 NFE_READ(sc, NFE_PHY_STATUS);
1672 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1673 DPRINTF(sc, "link state changed\n");
1679 #endif /* DEVICE_POLLING */
/*
 * Program the wanted interrupt set when running with MSI; other interrupt
 * modes are handled elsewhere (listing elided).
 */
1682 nfe_set_intr(struct nfe_softc *sc)
1685 if (sc->nfe_msi != 0)
1686 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
/*
 * Enable device interrupts.  Under MSI-X the mask register acts as an XOR
 * on write, so only write when the mask currently reads zero to avoid
 * toggling interrupts off.  (Typo "reegisters" preserved from original.)
 */
1690 /* In MSIX, a write to mask reegisters behaves as XOR. */
1691 static __inline void
1692 nfe_enable_intr(struct nfe_softc *sc)
1695 if (sc->nfe_msix != 0) {
1696 /* XXX Should have a better way to enable interrupts! */
1697 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1698 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs)
1700 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
/*
 * Disable device interrupts; counterpart of nfe_enable_intr().  Under
 * MSI-X the XOR-on-write mask semantics require checking that the mask is
 * currently non-zero before writing the no-interrupts value.
 */
1704 static __inline void
1705 nfe_disable_intr(struct nfe_softc *sc)
1708 if (sc->nfe_msix != 0) {
1709 /* XXX Should have a better way to disable interrupts! */
1710 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1711 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1713 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
/*
 * Interface ioctl handler: MTU changes (with jumbo-capability checks),
 * IFF_UP / promiscuous / allmulti transitions, media ioctls via miibus,
 * and capability toggles (polling, WOL, Tx/Rx checksum, TSO, VLAN
 * tagging/TSO).  Falls through to ether_ioctl() for everything else.
 * NOTE(review): elided listing — switch/case labels, locking, and several
 * error assignments are not visible here.
 */
1718 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1720 struct nfe_softc *sc;
1722 struct mii_data *mii;
1723 int error, init, mask;
1726 ifr = (struct ifreq *) data;
/* SIOCSIFMTU: validate range, reject jumbo MTU when unsupported/disabled. */
1731 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1733 else if (ifp->if_mtu != ifr->ifr_mtu) {
1734 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1735 (sc->nfe_jumbo_disable != 0)) &&
1736 ifr->ifr_mtu > ETHERMTU)
1740 ifp->if_mtu = ifr->ifr_mtu;
/* MTU change requires a full re-init of the running interface. */
1741 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1742 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1743 nfe_init_locked(sc);
/* SIOCSIFFLAGS: bring interface up/down, fast-path filter-only changes. */
1751 if (ifp->if_flags & IFF_UP) {
1753 * If only the PROMISC or ALLMULTI flag changes, then
1754 * don't do a full re-init of the chip, just update
1757 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1758 ((ifp->if_flags ^ sc->nfe_if_flags) &
1759 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1762 nfe_init_locked(sc);
1764 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1767 sc->nfe_if_flags = ifp->if_flags;
1773 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
/* Media ioctls are delegated to the MII layer. */
1782 mii = device_get_softc(sc->nfe_miibus);
1783 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
/* SIOCSIFCAP: toggle only the capabilities that actually changed. */
1786 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1787 #ifdef DEVICE_POLLING
1788 if ((mask & IFCAP_POLLING) != 0) {
1789 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1790 error = ether_poll_register(nfe_poll, ifp);
1794 nfe_disable_intr(sc);
1795 ifp->if_capenable |= IFCAP_POLLING;
1798 error = ether_poll_deregister(ifp);
1799 /* Enable interrupt even in error case */
1801 nfe_enable_intr(sc);
1802 ifp->if_capenable &= ~IFCAP_POLLING;
1806 #endif /* DEVICE_POLLING */
1807 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1808 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1809 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
/* Tx checksum toggles the hwassist feature bits as well. */
1810 if ((mask & IFCAP_TXCSUM) != 0 &&
1811 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1812 ifp->if_capenable ^= IFCAP_TXCSUM;
1813 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1814 ifp->if_hwassist |= NFE_CSUM_FEATURES;
1816 ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1818 if ((mask & IFCAP_RXCSUM) != 0 &&
1819 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
1820 ifp->if_capenable ^= IFCAP_RXCSUM;
1823 if ((mask & IFCAP_TSO4) != 0 &&
1824 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
1825 ifp->if_capenable ^= IFCAP_TSO4;
1826 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1827 ifp->if_hwassist |= CSUM_TSO;
1829 ifp->if_hwassist &= ~CSUM_TSO;
1831 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1832 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
1833 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1834 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1835 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
1836 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
/* VLAN hardware TSO requires hardware tagging. */
1837 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1838 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
1843 * It seems that VLAN stripping requires Rx checksum offload.
1844 * Unfortunately FreeBSD has no way to disable only Rx side
1845 * VLAN stripping. So when we know Rx checksum offload is
1846 * disabled turn entire hardware VLAN assist off.
1848 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
1849 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
1851 ifp->if_capenable &= ~(IFCAP_VLAN_HWTAGGING |
/* Capability changes that need it trigger a re-init. */
1854 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1855 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1858 VLAN_CAPABILITIES(ifp);
1861 error = ether_ioctl(ifp, cmd, data);
/*
 * Fast interrupt filter (function header elided from this listing).
 * Reads the interrupt status; 0 or all-ones (hardware gone) means the
 * interrupt was not ours.  Otherwise mask interrupts and defer the real
 * work to nfe_int_task() on the driver taskqueue.
 */
1872 struct nfe_softc *sc;
1875 sc = (struct nfe_softc *)arg;
1877 status = NFE_READ(sc, sc->nfe_irq_status);
1878 if (status == 0 || status == 0xffffffff)
1879 return (FILTER_STRAY);
1880 nfe_disable_intr(sc);
1881 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1883 return (FILTER_HANDLED);
/*
 * Taskqueue handler doing the deferred interrupt work: ack the status
 * register, handle link changes, run Rx (jumbo or standard ring) and
 * restart Tx.  Re-queues itself while work remains, otherwise re-enables
 * interrupts.  Skips processing entirely while polling(4) owns the device.
 */
1888 nfe_int_task(void *arg, int pending)
1890 struct nfe_softc *sc = arg;
1891 struct ifnet *ifp = sc->nfe_ifp;
1897 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1898 nfe_enable_intr(sc);
1900 return; /* not for us */
/* Ack all causes we are about to service. */
1902 NFE_WRITE(sc, sc->nfe_irq_status, r);
1904 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1906 #ifdef DEVICE_POLLING
1907 if (ifp->if_capenable & IFCAP_POLLING) {
1913 if (r & NFE_IRQ_LINK) {
1914 NFE_READ(sc, NFE_PHY_STATUS);
1915 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1916 DPRINTF(sc, "link state changed\n");
1919 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1921 nfe_disable_intr(sc);
/* Choose Rx ring by configured frame size. */
1927 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1928 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1930 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1934 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1935 nfe_start_locked(ifp);
/* More work pending: re-arm the task instead of enabling interrupts. */
1939 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1940 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1944 /* Reenable interrupts. */
1945 nfe_enable_intr(sc);
/*
 * Return Rx slot 'idx' to the hardware reusing its current mbuf (used when
 * a packet is dropped or a replacement buffer could not be allocated).
 * Rewrites the physical address on 64-bit descriptors because a received
 * VLAN packet may have overwritten that field, then marks the slot ready.
 */
1949 static __inline void
1950 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1952 struct nfe_desc32 *desc32;
1953 struct nfe_desc64 *desc64;
1954 struct nfe_rx_data *data;
1957 data = &sc->rxq.data[idx];
1960 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1961 desc64 = &sc->rxq.desc64[idx];
1962 /* VLAN packet may have overwritten it. */
1963 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1964 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1965 desc64->length = htole16(m->m_len);
1966 desc64->flags = htole16(NFE_RX_READY);
1968 desc32 = &sc->rxq.desc32[idx];
1969 desc32->length = htole16(m->m_len);
1970 desc32->flags = htole16(NFE_RX_READY);
/*
 * Jumbo-ring counterpart of nfe_discard_rxbuf(): hand jumbo Rx slot 'idx'
 * back to the hardware with its existing mbuf, restoring the physical
 * address fields on 64-bit descriptors and setting NFE_RX_READY.
 */
1975 static __inline void
1976 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1978 struct nfe_desc32 *desc32;
1979 struct nfe_desc64 *desc64;
1980 struct nfe_rx_data *data;
1983 data = &sc->jrxq.jdata[idx];
1986 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1987 desc64 = &sc->jrxq.jdesc64[idx];
1988 /* VLAN packet may have overwritten it. */
1989 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1990 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1991 desc64->length = htole16(m->m_len);
1992 desc64->flags = htole16(NFE_RX_READY);
1994 desc32 = &sc->jrxq.jdesc32[idx];
1995 desc32->length = htole16(m->m_len);
1996 desc32->flags = htole16(NFE_RX_READY);
/*
 * Attach a fresh mbuf cluster to Rx slot 'idx'.  The new mbuf is mapped
 * through the spare map first; only after a successful load is the old
 * buffer unloaded and the maps swapped, so the ring never loses a buffer
 * on allocation/mapping failure.  Finally the descriptor is rewritten
 * with the new bus address and marked NFE_RX_READY.
 */
2002 nfe_newbuf(struct nfe_softc *sc, int idx)
2004 struct nfe_rx_data *data;
2005 struct nfe_desc32 *desc32;
2006 struct nfe_desc64 *desc64;
2008 bus_dma_segment_t segs[1];
2012 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2016 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* ETHER_ALIGN keeps the IP header 32-bit aligned after the 14-byte MAC header. */
2017 m_adj(m, ETHER_ALIGN);
2019 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
2020 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2024 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2026 data = &sc->rxq.data[idx];
2027 if (data->m != NULL) {
2028 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2029 BUS_DMASYNC_POSTREAD);
2030 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
/* Swap slot map with the (now loaded) spare map. */
2032 map = data->rx_data_map;
2033 data->rx_data_map = sc->rxq.rx_spare_map;
2034 sc->rxq.rx_spare_map = map;
2035 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2036 BUS_DMASYNC_PREREAD);
2037 data->paddr = segs[0].ds_addr;
2039 /* update mapping address in h/w descriptor */
2040 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2041 desc64 = &sc->rxq.desc64[idx];
2042 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2043 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2044 desc64->length = htole16(segs[0].ds_len);
2045 desc64->flags = htole16(NFE_RX_READY);
2047 desc32 = &sc->rxq.desc32[idx];
2048 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2049 desc32->length = htole16(segs[0].ds_len);
2050 desc32->flags = htole16(NFE_RX_READY);
/*
 * Jumbo-ring counterpart of nfe_newbuf(): attach a fresh 9k jumbo cluster
 * to jumbo Rx slot 'idx', using the spare-map swap trick so the slot keeps
 * its old buffer on failure, then rewrite the descriptor and mark it ready.
 */
2058 nfe_jnewbuf(struct nfe_softc *sc, int idx)
2060 struct nfe_rx_data *data;
2061 struct nfe_desc32 *desc32;
2062 struct nfe_desc64 *desc64;
2064 bus_dma_segment_t segs[1];
2068 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2071 if ((m->m_flags & M_EXT) == 0) {
2075 m->m_pkthdr.len = m->m_len = MJUM9BYTES;
/* Keep the IP header 32-bit aligned. */
2076 m_adj(m, ETHER_ALIGN);
2078 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2079 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2083 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2085 data = &sc->jrxq.jdata[idx];
2086 if (data->m != NULL) {
2087 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2088 BUS_DMASYNC_POSTREAD);
2089 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
/* Swap slot map with the loaded spare map. */
2091 map = data->rx_data_map;
2092 data->rx_data_map = sc->jrxq.jrx_spare_map;
2093 sc->jrxq.jrx_spare_map = map;
2094 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2095 BUS_DMASYNC_PREREAD);
2096 data->paddr = segs[0].ds_addr;
2098 /* update mapping address in h/w descriptor */
2099 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2100 desc64 = &sc->jrxq.jdesc64[idx];
2101 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2102 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2103 desc64->length = htole16(segs[0].ds_len);
2104 desc64->flags = htole16(NFE_RX_READY);
2106 desc32 = &sc->jrxq.jdesc32[idx];
2107 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2108 desc32->length = htole16(segs[0].ds_len);
2109 desc32->flags = htole16(NFE_RX_READY);
/*
 * Standard-ring Rx completion: walk descriptors from rxq.cur until one is
 * still owned by the hardware (NFE_RX_READY) or 'count' is exhausted.
 * Validates per-chip-generation flags (V1 vs V2 layouts), replaces the
 * buffer via nfe_newbuf() (recycling the slot on failure), applies VLAN
 * tags and hardware checksum results, and passes frames to if_input.
 * Returns 0 if the budget was not exhausted, EAGAIN otherwise; stores the
 * packet count through rx_npktsp when non-NULL.
 */
2117 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2119 struct ifnet *ifp = sc->nfe_ifp;
2120 struct nfe_desc32 *desc32;
2121 struct nfe_desc64 *desc64;
2122 struct nfe_rx_data *data;
2125 int len, prog, rx_npkts;
2129 NFE_LOCK_ASSERT(sc);
/* Pick up descriptor writes done by the device. */
2131 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2132 BUS_DMASYNC_POSTREAD);
2134 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2139 data = &sc->rxq.data[sc->rxq.cur];
2141 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2142 desc64 = &sc->rxq.desc64[sc->rxq.cur];
/* On 64-bit descriptors, physaddr[1] is reused to carry the VLAN tag. */
2143 vtag = le32toh(desc64->physaddr[1]);
2144 flags = le16toh(desc64->flags);
2145 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2147 desc32 = &sc->rxq.desc32[sc->rxq.cur];
2148 flags = le16toh(desc32->flags);
2149 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
/* NFE_RX_READY set means the hardware still owns this slot. */
2152 if (flags & NFE_RX_READY)
/* V1 (no jumbo, no 40-bit) vs V2 descriptor flag validation. */
2155 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2156 if (!(flags & NFE_RX_VALID_V1)) {
2158 nfe_discard_rxbuf(sc, sc->rxq.cur);
2161 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2162 flags &= ~NFE_RX_ERROR;
2163 len--; /* fix buffer length */
2166 if (!(flags & NFE_RX_VALID_V2)) {
2168 nfe_discard_rxbuf(sc, sc->rxq.cur);
2172 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2173 flags &= ~NFE_RX_ERROR;
2174 len--; /* fix buffer length */
2178 if (flags & NFE_RX_ERROR) {
2180 nfe_discard_rxbuf(sc, sc->rxq.cur);
/* Replace the buffer; on failure recycle the slot and drop the frame. */
2185 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2187 nfe_discard_rxbuf(sc, sc->rxq.cur);
2191 if ((vtag & NFE_RX_VTAG) != 0 &&
2192 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2193 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2194 m->m_flags |= M_VLANTAG;
2197 m->m_pkthdr.len = m->m_len = len;
2198 m->m_pkthdr.rcvif = ifp;
/* Translate hardware checksum status into mbuf csum flags. */
2200 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2201 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2202 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2203 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2204 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2205 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2206 m->m_pkthdr.csum_flags |=
2207 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2208 m->m_pkthdr.csum_data = 0xffff;
2216 (*ifp->if_input)(ifp, m);
/* Hand the refilled descriptors back to the device. */
2222 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2223 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2225 if (rx_npktsp != NULL)
2226 *rx_npktsp = rx_npkts;
2227 return (count > 0 ? 0 : EAGAIN);
/*
 * Jumbo-ring Rx completion; identical in structure to nfe_rxeof() but
 * walking jrxq with NFE_JUMBO_RX_RING_COUNT slots and refilling via
 * nfe_jnewbuf().  Returns 0 if the 'count' budget was not exhausted,
 * EAGAIN otherwise; stores the packet count through rx_npktsp if non-NULL.
 */
2232 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2234 struct ifnet *ifp = sc->nfe_ifp;
2235 struct nfe_desc32 *desc32;
2236 struct nfe_desc64 *desc64;
2237 struct nfe_rx_data *data;
2240 int len, prog, rx_npkts;
2244 NFE_LOCK_ASSERT(sc);
2246 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2247 BUS_DMASYNC_POSTREAD);
2249 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2255 data = &sc->jrxq.jdata[sc->jrxq.jcur];
2257 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2258 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
/* physaddr[1] doubles as the VLAN tag field on completion. */
2259 vtag = le32toh(desc64->physaddr[1]);
2260 flags = le16toh(desc64->flags);
2261 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2263 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2264 flags = le16toh(desc32->flags);
2265 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2268 if (flags & NFE_RX_READY)
/* V1 vs V2 descriptor flag validation, as in nfe_rxeof(). */
2271 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2272 if (!(flags & NFE_RX_VALID_V1)) {
2274 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2277 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2278 flags &= ~NFE_RX_ERROR;
2279 len--; /* fix buffer length */
2282 if (!(flags & NFE_RX_VALID_V2)) {
2284 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2288 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2289 flags &= ~NFE_RX_ERROR;
2290 len--; /* fix buffer length */
2294 if (flags & NFE_RX_ERROR) {
2296 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2301 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2303 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2307 if ((vtag & NFE_RX_VTAG) != 0 &&
2308 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2309 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2310 m->m_flags |= M_VLANTAG;
2313 m->m_pkthdr.len = m->m_len = len;
2314 m->m_pkthdr.rcvif = ifp;
2316 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2317 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2318 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2319 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2320 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2321 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2322 m->m_pkthdr.csum_flags |=
2323 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2324 m->m_pkthdr.csum_data = 0xffff;
2332 (*ifp->if_input)(ifp, m);
2338 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2339 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2341 if (rx_npktsp != NULL)
2342 *rx_npktsp = rx_npkts;
2343 return (count > 0 ? 0 : EAGAIN);
/*
 * Tx completion: walk descriptors from txq.next up to txq.cur until one is
 * still owned by the hardware (NFE_TX_VALID).  For each completed last
 * fragment, report V1/V2 errors, unload the mbuf mapping and free the
 * chain (freeing elided from this listing).  Clears OACTIVE and stops the
 * watchdog when the queue drains.
 */
2348 nfe_txeof(struct nfe_softc *sc)
2350 struct ifnet *ifp = sc->nfe_ifp;
2351 struct nfe_desc32 *desc32;
2352 struct nfe_desc64 *desc64;
2353 struct nfe_tx_data *data = NULL;
2357 NFE_LOCK_ASSERT(sc);
2359 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2360 BUS_DMASYNC_POSTREAD);
2363 for (cons = sc->txq.next; cons != sc->txq.cur;
2364 NFE_INC(cons, NFE_TX_RING_COUNT)) {
2365 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2366 desc64 = &sc->txq.desc64[cons];
2367 flags = le16toh(desc64->flags);
2369 desc32 = &sc->txq.desc32[cons];
2370 flags = le16toh(desc32->flags);
/* NFE_TX_VALID still set: hardware has not finished this slot. */
2373 if (flags & NFE_TX_VALID)
2378 data = &sc->txq.data[cons];
/* Error reporting differs between V1 and V2 descriptor layouts. */
2380 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2381 if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2383 if ((flags & NFE_TX_ERROR_V1) != 0) {
2384 device_printf(sc->nfe_dev,
2385 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2391 if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2393 if ((flags & NFE_TX_ERROR_V2) != 0) {
2394 device_printf(sc->nfe_dev,
2395 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2401 /* last fragment of the mbuf chain transmitted */
2402 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2403 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2404 BUS_DMASYNC_POSTWRITE);
2405 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2411 sc->nfe_force_tx = 0;
2412 sc->txq.next = cons;
2413 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2414 if (sc->txq.queued == 0)
2415 sc->nfe_watchdog_timer = 0;
/*
 * Map an outgoing mbuf chain into Tx descriptors.  Collapses the chain and
 * retries on EFBIG, refuses to fill the ring completely (keeps 2 slots
 * free), computes TSO/checksum flags, writes one descriptor per DMA
 * segment, then sets last-fragment and (last) the valid/csum/TSO bits on
 * the FIRST descriptor so the hardware never sees a half-built chain.
 * The slot maps for the first and last descriptor are swapped so the map
 * that holds the mapping ends up on the slot that records the mbuf.
 * NOTE(review): elided listing — error returns and queued-count updates
 * are not visible here.
 */
2420 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2422 struct nfe_desc32 *desc32 = NULL;
2423 struct nfe_desc64 *desc64 = NULL;
2425 bus_dma_segment_t segs[NFE_MAX_SCATTER];
2426 int error, i, nsegs, prod, si;
2428 uint16_t cflags, flags;
2431 prod = si = sc->txq.cur;
2432 map = sc->txq.data[prod].tx_data_map;
2434 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2435 &nsegs, BUS_DMA_NOWAIT);
/* Too many segments: defragment the chain and retry once. */
2436 if (error == EFBIG) {
2437 m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
2444 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2445 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2451 } else if (error != 0)
/* Leave headroom in the ring; back out the mapping if it would overfill. */
2459 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2460 bus_dmamap_unload(sc->txq.tx_data_tag, map);
/* Offload flags: TSO takes precedence over plain checksum offload. */
2467 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2468 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2470 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2471 cflags |= NFE_TX_TSO;
2472 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2473 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2474 cflags |= NFE_TX_IP_CSUM;
2475 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2476 cflags |= NFE_TX_TCP_UDP_CSUM;
2477 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2478 cflags |= NFE_TX_TCP_UDP_CSUM;
2481 for (i = 0; i < nsegs; i++) {
2482 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2483 desc64 = &sc->txq.desc64[prod];
2484 desc64->physaddr[0] =
2485 htole32(NFE_ADDR_HI(segs[i].ds_addr));
2486 desc64->physaddr[1] =
2487 htole32(NFE_ADDR_LO(segs[i].ds_addr));
/* Hardware length field is (segment length - 1). */
2489 desc64->length = htole16(segs[i].ds_len - 1);
2490 desc64->flags = htole16(flags);
2492 desc32 = &sc->txq.desc32[prod];
2494 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2495 desc32->length = htole16(segs[i].ds_len - 1);
2496 desc32->flags = htole16(flags);
2500 * Setting of the valid bit in the first descriptor is
2501 * deferred until the whole chain is fully setup.
2503 flags |= NFE_TX_VALID;
2506 NFE_INC(prod, NFE_TX_RING_COUNT);
2510 * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
2511 * csum flags, vtag and TSO belong to the first fragment only.
2513 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2514 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2515 desc64 = &sc->txq.desc64[si];
2516 if ((m->m_flags & M_VLANTAG) != 0)
2517 desc64->vtag = htole32(NFE_TX_VTAG |
2518 m->m_pkthdr.ether_vtag);
2519 if (tsosegsz != 0) {
2522 * The following indicates the descriptor element
2523 * is a 32bit quantity.
2525 desc64->length |= htole16((uint16_t)tsosegsz);
2526 desc64->flags |= htole16(tsosegsz >> 16);
2529 * finally, set the valid/checksum/TSO bit in the first
2532 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2534 if (sc->nfe_flags & NFE_JUMBO_SUP)
2535 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2537 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2538 desc32 = &sc->txq.desc32[si];
2539 if (tsosegsz != 0) {
2542 * The following indicates the descriptor element
2543 * is a 32bit quantity.
2545 desc32->length |= htole16((uint16_t)tsosegsz);
2546 desc32->flags |= htole16(tsosegsz >> 16);
2549 * finally, set the valid/checksum/TSO bit in the first
2552 desc32->flags |= htole16(NFE_TX_VALID | cflags);
/* Record the mbuf on the LAST slot and swap maps with the first slot. */
2556 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2557 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2558 sc->txq.data[prod].tx_data_map = map;
2559 sc->txq.data[prod].m = m;
2561 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
/*
 * Program the hardware multicast filter.  The chip matches on an
 * (addr, mask) pair rather than a hash table: for PROMISC/ALLMULTI both
 * are zeroed (match everything); otherwise addr/mask are folded from the
 * interface's multicast list (folding loop partially elided in this
 * listing).  Finally the Rx filter register is set to promiscuous or
 * unicast+multicast mode while preserving the pause-frame bit.
 */
2568 nfe_setmulti(struct nfe_softc *sc)
2570 struct ifnet *ifp = sc->nfe_ifp;
2571 struct ifmultiaddr *ifma;
2574 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2575 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2576 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2579 NFE_LOCK_ASSERT(sc);
/* Promisc/allmulti: zero addr and mask so every address matches. */
2581 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2582 bzero(addr, ETHER_ADDR_LEN);
2583 bzero(mask, ETHER_ADDR_LEN);
2587 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2588 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
/* Fold every subscribed multicast address into addr/mask. */
2590 if_maddr_rlock(ifp);
2591 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2594 if (ifma->ifma_addr->sa_family != AF_LINK)
2597 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2598 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2599 u_int8_t mcaddr = addrp[i];
2604 if_maddr_runlock(ifp);
2606 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2611 addr[0] |= 0x01; /* make sure multicast bit is set */
2613 NFE_WRITE(sc, NFE_MULTIADDR_HI,
2614 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2615 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2616 addr[5] << 8 | addr[4]);
2617 NFE_WRITE(sc, NFE_MULTIMASK_HI,
2618 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2619 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2620 mask[5] << 8 | mask[4]);
/* Preserve the pause bit, then select promisc or U2M filtering. */
2622 filter = NFE_READ(sc, NFE_RXFILTER);
2623 filter &= NFE_PFF_RX_PAUSE;
2624 filter |= NFE_RXFILTER_MAGIC;
2625 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2626 NFE_WRITE(sc, NFE_RXFILTER, filter);
/*
 * if_start entry point: locking wrapper around nfe_start_locked()
 * (lock/unlock calls elided in this listing).
 */
2631 nfe_start(struct ifnet *ifp)
2633 struct nfe_softc *sc = ifp->if_softc;
2636 nfe_start_locked(ifp);
/*
 * Dequeue packets from if_snd and hand them to nfe_encap() while the
 * interface is RUNNING, not OACTIVE, and link is up.  On encap failure the
 * packet is prepended back and OACTIVE is set.  If anything was enqueued,
 * sync the descriptors, kick the transmitter, and arm the watchdog.
 * Caller must hold the softc lock (asserted).
 */
2641 nfe_start_locked(struct ifnet *ifp)
2643 struct nfe_softc *sc = ifp->if_softc;
2647 NFE_LOCK_ASSERT(sc);
2649 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2650 IFF_DRV_RUNNING || sc->nfe_link == 0)
2653 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2654 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2658 if (nfe_encap(sc, &m0) != 0) {
/* Ring full or mapping failed: requeue and pause transmission. */
2661 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2662 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2666 ETHER_BPF_MTAP(ifp, m0);
2670 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2671 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Tell the chip new Tx descriptors are available. */
2674 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2677 * Set a timeout in case the chip goes out to lunch.
2679 sc->nfe_watchdog_timer = 5;
/*
 * Transmit watchdog, driven by the stat timer.  On expiry it first checks
 * for a missed Tx-completion interrupt (queue already drained), then for a
 * lost KICKTX (retried up to 3 times via nfe_force_tx) before declaring a
 * real timeout and re-initializing the interface.
 */
2685 nfe_watchdog(struct ifnet *ifp)
2687 struct nfe_softc *sc = ifp->if_softc;
/* Timer disarmed, or still counting down: nothing to do yet. */
2689 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2692 /* Check if we've lost Tx completion interrupt. */
2694 if (sc->txq.queued == 0) {
2695 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2697 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2698 nfe_start_locked(ifp);
2701 /* Check if we've lost start Tx command. */
2703 if (sc->nfe_force_tx <= 3) {
2705 * If this is the case for watchdog timeout, the following
2706 * code should go to nfe_txeof().
2708 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2711 sc->nfe_force_tx = 0;
2713 if_printf(ifp, "watchdog timeout\n");
/* Genuine hang: force a full re-initialization. */
2715 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2717 nfe_init_locked(sc);
/*
 * if_init callback (the function header line is elided in this view):
 * forwards to nfe_init_locked().  NOTE(review): the surrounding
 * NFE_LOCK/NFE_UNLOCK calls are not visible here — confirm.
 */
2724 struct nfe_softc *sc = xsc;
2727 nfe_init_locked(sc);
/*
 * Bring the interface up: allocate/initialize the Tx and (jumbo) Rx
 * rings, program the MAC (address, VLAN, checksum offload, ring
 * addresses, interrupt moderation), start the receiver/transmitter,
 * and arm the stat/watchdog callout.  Caller holds the softc lock.
 */
2733 nfe_init_locked(void *xsc)
2735 struct nfe_softc *sc = xsc;
2736 struct ifnet *ifp = sc->nfe_ifp;
2737 struct mii_data *mii;
2741 NFE_LOCK_ASSERT(sc);
2743 mii = device_get_softc(sc->nfe_miibus);
/* Already running: nothing to do. */
2745 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
/* Frame size drives the choice between standard and jumbo Rx ring. */
2750 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2752 nfe_init_tx_ring(sc, &sc->txq);
2753 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2754 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2756 error = nfe_init_rx_ring(sc, &sc->rxq);
2758 device_printf(sc->nfe_dev,
2759     "initialization failed: no memory for rx buffers\n");
/* Chips with NFE_CORRECT_MACADDR store the MAC address in order. */
2765 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2766 val |= NFE_MAC_ADDR_INORDER;
2767 NFE_WRITE(sc, NFE_TX_UNK, val);
2768 NFE_WRITE(sc, NFE_STATUS, 0);
2770 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2771 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
/* Select descriptor format by chip generation, then offload bits. */
2773 sc->rxtxctl = NFE_RXTX_BIT2;
2774 if (sc->nfe_flags & NFE_40BIT_ADDR)
2775 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2776 else if (sc->nfe_flags & NFE_JUMBO_SUP)
2777 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2779 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2780 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2781 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2782 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
/* Pulse reset with the chosen control bits, then latch them. */
2784 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2786 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2788 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2789 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2791 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2793 NFE_WRITE(sc, NFE_SETUP_R6, 0);
2795 /* set MAC address */
2796 nfe_set_macaddr(sc, IF_LLADDR(ifp));
2798 /* tell MAC where rings are in memory */
2799 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2800 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2801     NFE_ADDR_HI(sc->jrxq.jphysaddr));
2802 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2803     NFE_ADDR_LO(sc->jrxq.jphysaddr));
2805 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2806     NFE_ADDR_HI(sc->rxq.physaddr));
2807 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2808     NFE_ADDR_LO(sc->rxq.physaddr));
2810 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2811 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
/* Ring sizes are encoded as count-1, Rx in the high 16 bits. */
2813 NFE_WRITE(sc, NFE_RING_SIZE,
2814     (NFE_RX_RING_COUNT - 1) << 16 |
2815     (NFE_TX_RING_COUNT - 1));
2817 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2819 /* force MAC to wakeup */
2820 val = NFE_READ(sc, NFE_PWR_STATE);
2821 if ((val & NFE_PWR_WAKEUP) == 0)
2822 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2824 val = NFE_READ(sc, NFE_PWR_STATE);
2825 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2828 /* configure interrupts coalescing/mitigation */
2829 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2831 /* no interrupt mitigation: one interrupt per packet */
2832 NFE_WRITE(sc, NFE_IMTIMER, 970);
2835 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2836 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2837 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2839 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2840 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2842 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2844 NFE_WRITE(sc, NFE_WOL_CTL, 0);
2846 sc->rxtxctl &= ~NFE_RXTX_BIT2;
2847 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2849 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
/* Enable receiver and transmitter. */
2855 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2858 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2860 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2862 /* Clear hardware stats. */
2863 nfe_stats_clear(sc);
2865 #ifdef DEVICE_POLLING
/* Under polling(4) the interrupt line stays masked. */
2866 if (ifp->if_capenable & IFCAP_POLLING)
2867 nfe_disable_intr(sc);
2871 nfe_enable_intr(sc); /* enable interrupts */
2873 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2874 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* One-second periodic tick: MII polling, stats, watchdog countdown. */
2879 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
/*
 * Take the interface down: stop the callout, halt Rx/Tx DMA, mask
 * interrupts, and unload/free every mbuf still owned by the standard
 * Rx ring, the jumbo Rx ring (if supported), and the Tx ring.
 * Caller holds the softc lock.
 */
2884 nfe_stop(struct ifnet *ifp)
2886 struct nfe_softc *sc = ifp->if_softc;
2887 struct nfe_rx_ring *rx_ring;
2888 struct nfe_jrx_ring *jrx_ring;
2889 struct nfe_tx_ring *tx_ring;
2890 struct nfe_rx_data *rdata;
2891 struct nfe_tx_data *tdata;
2894 NFE_LOCK_ASSERT(sc);
/* Disarm the watchdog and mark the interface not-running. */
2896 sc->nfe_watchdog_timer = 0;
2897 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2899 callout_stop(&sc->nfe_stat_ch);
/* Halt the transmitter and receiver. */
2902 NFE_WRITE(sc, NFE_TX_CTL, 0);
2905 NFE_WRITE(sc, NFE_RX_CTL, 0);
2907 /* disable interrupts */
2908 nfe_disable_intr(sc);
2912 /* free Rx and Tx mbufs still in the queues. */
2914 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2915 rdata = &rx_ring->data[i];
2916 if (rdata->m != NULL) {
2917 bus_dmamap_sync(rx_ring->rx_data_tag,
2918     rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2919 bus_dmamap_unload(rx_ring->rx_data_tag,
2920     rdata->rx_data_map);
/* Jumbo ring only exists on chips that advertise jumbo support. */
2926 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2927 jrx_ring = &sc->jrxq;
2928 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2929 rdata = &jrx_ring->jdata[i];
2930 if (rdata->m != NULL) {
2931 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2932     rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2933 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2934     rdata->rx_data_map);
/* NOTE(review): Tx loop bounds use NFE_RX_RING_COUNT; the two ring
 * counts are presumably equal on this hardware — confirm against
 * if_nfereg.h (NFE_TX_RING_COUNT would be the natural bound). */
2942 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2943 tdata = &tx_ring->data[i];
2944 if (tdata->m != NULL) {
2945 bus_dmamap_sync(tx_ring->tx_data_tag,
2946     tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2947 bus_dmamap_unload(tx_ring->tx_data_tag,
2948     tdata->tx_data_map);
2953 /* Update hardware stats. */
2954 nfe_stats_update(sc);
/*
 * ifmedia "set media" callback: hands the request to the MII layer.
 * (Body is truncated in this view; only the setup lines survive.)
 */
2959 nfe_ifmedia_upd(struct ifnet *ifp)
2961 struct nfe_softc *sc = ifp->if_softc;
2962 struct mii_data *mii;
2965 mii = device_get_softc(sc->nfe_miibus);
/*
 * ifmedia "report status" callback: copy the MII layer's current
 * active media and status into the caller's ifmediareq.
 */
2974 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2976 struct nfe_softc *sc;
2977 struct mii_data *mii;
2982 mii = device_get_softc(sc->nfe_miibus);
2985 ifmr->ifm_active = mii->mii_media_active;
2986 ifmr->ifm_status = mii->mii_media_status;
/*
 * One-second periodic callout (header line elided in this view):
 * refreshes hardware statistics and re-arms itself.  Runs with the
 * softc lock held (asserted below).
 */
2994 struct nfe_softc *sc;
2995 struct mii_data *mii;
2998 sc = (struct nfe_softc *)xsc;
3000 NFE_LOCK_ASSERT(sc);
3004 mii = device_get_softc(sc->nfe_miibus);
3006 nfe_stats_update(sc);
/* Reschedule ourselves one second out. */
3008 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
/* System-shutdown hook: identical to suspend (stop + arm WOL). */
3013 nfe_shutdown(device_t dev)
3016 return (nfe_suspend(dev));
/*
 * Read the factory MAC address out of the NFE_MACADDR_{LO,HI}
 * registers into addr[0..5].  Chips without NFE_CORRECT_MACADDR store
 * the address in reverse byte order, so the two branches extract the
 * same register bytes into opposite ends of the array.
 */
3021 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
3025 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
/* Reversed layout: LO holds bytes 0-1, HI holds bytes 2-5. */
3026 val = NFE_READ(sc, NFE_MACADDR_LO);
3027 addr[0] = (val >> 8) & 0xff;
3028 addr[1] = (val & 0xff);
3030 val = NFE_READ(sc, NFE_MACADDR_HI);
3031 addr[2] = (val >> 24) & 0xff;
3032 addr[3] = (val >> 16) & 0xff;
3033 addr[4] = (val >> 8) & 0xff;
3034 addr[5] = (val & 0xff);
/* In-order layout: LO holds bytes 4-5, HI holds bytes 0-3. */
3036 val = NFE_READ(sc, NFE_MACADDR_LO);
3037 addr[5] = (val >> 8) & 0xff;
3038 addr[4] = (val & 0xff);
3040 val = NFE_READ(sc, NFE_MACADDR_HI);
3041 addr[3] = (val >> 24) & 0xff;
3042 addr[2] = (val >> 16) & 0xff;
3043 addr[1] = (val >> 8) & 0xff;
3044 addr[0] = (val & 0xff);
/*
 * Program the station address: bytes 4-5 go to the LO register,
 * bytes 0-3 to the HI register (reversed-register layout).
 */
3050 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
3053 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
3054 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
3055     addr[1] << 8 | addr[0]);
3060  * Map a single buffer address.
/*
 * bus_dmamap_load() callback: stores the single DMA segment's bus
 * address into the caller's nfe_dmamap_arg.  Asserts exactly one
 * segment; errors appear to be handled by an elided early return.
 */
3064 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3066 struct nfe_dmamap_arg *ctx;
3071 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3073 ctx = (struct nfe_dmamap_arg *)arg;
3074 ctx->nfe_busaddr = segs[0].ds_addr;
/*
 * Generic sysctl handler for an int bounded to [low, high]: reject
 * out-of-range values, otherwise write the new value back through
 * arg1.  Read-only accesses (no newptr) return after the handle_int.
 */
3079 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3085 value = *(int *)arg1;
3086 error = sysctl_handle_int(oidp, &value, 0, req);
3087 if (error || !req->newptr)
3089 if (value < low || value > high)
3091 *(int *)arg1 = value;
/* Range-checked sysctl for the Rx process limit: [NFE_PROC_MIN, ...]. */
3098 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3101 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
/*
 * Shorthand for registering read-only 32/64-bit statistics sysctls in
 * nfe_sysctl_node(); #undef'd immediately after that function.
 */
3106 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
3107 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3108 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
3109 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
/*
 * Create the per-device sysctl tree: the tunable Rx process limit
 * (with range validation against a loader.conf/resource hint) and,
 * on chips with a MIB, the "stats.rx" and "stats.tx" counter nodes.
 *
 * Fix: the MIB_V3 Tx "unicast"/"multicast"/"broadcast" sysctls were
 * wired to &stats->tx_deferals/tx_frames/tx_pause (copy-paste from
 * the MIB_V2 branch above), so they exported the wrong counters.
 * nfe_stats_update() accumulates these values into tx_unicast,
 * tx_multicast and tx_broadcast; point the sysctls at those fields.
 */
3112 nfe_sysctl_node(struct nfe_softc *sc)
3114 struct sysctl_ctx_list *ctx;
3115 struct sysctl_oid_list *child, *parent;
3116 struct sysctl_oid *tree;
3117 struct nfe_hw_stats *stats;
3120 stats = &sc->nfe_stats;
3121 ctx = device_get_sysctl_ctx(sc->nfe_dev);
3122 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3123 SYSCTL_ADD_PROC(ctx, child,
3124     OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3125     &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3126     "max number of Rx events to process");
/* Seed the default, then let a device hint override it. */
3128 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3129 error = resource_int_value(device_get_name(sc->nfe_dev),
3130     device_get_unit(sc->nfe_dev), "process_limit",
3131     &sc->nfe_process_limit);
/* Clamp a bogus hint back to the default rather than honoring it. */
3133 if (sc->nfe_process_limit < NFE_PROC_MIN ||
3134     sc->nfe_process_limit > NFE_PROC_MAX) {
3135 device_printf(sc->nfe_dev,
3136     "process_limit value out of range; "
3137     "using default: %d\n", NFE_PROC_DEFAULT);
3138 sc->nfe_process_limit = NFE_PROC_DEFAULT;
/* No MIB on this chip: skip the statistics nodes entirely. */
3142 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3145 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3146     NULL, "NFE statistics");
3147 parent = SYSCTL_CHILDREN(tree);
3149 /* Rx statistics. */
3150 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3151     NULL, "Rx MAC statistics");
3152 child = SYSCTL_CHILDREN(tree);
3154 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3155     &stats->rx_frame_errors, "Framing Errors");
3156 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3157     &stats->rx_extra_bytes, "Extra Bytes");
3158 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3159     &stats->rx_late_cols, "Late Collisions");
3160 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3161     &stats->rx_runts, "Runts");
3162 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3163     &stats->rx_jumbos, "Jumbos");
3164 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3165     &stats->rx_fifo_overuns, "FIFO Overruns");
3166 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3167     &stats->rx_crc_errors, "CRC Errors");
3168 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3169     &stats->rx_fae, "Frame Alignment Errors");
3170 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3171     &stats->rx_len_errors, "Length Errors");
3172 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3173     &stats->rx_unicast, "Unicast Frames");
3174 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3175     &stats->rx_multicast, "Multicast Frames");
3176 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3177     &stats->rx_broadcast, "Broadcast Frames");
3178 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3179 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3180     &stats->rx_octets, "Octets");
3181 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3182     &stats->rx_pause, "Pause frames");
3183 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3184     &stats->rx_drops, "Drop frames");
3187 /* Tx statistics. */
3188 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3189     NULL, "Tx MAC statistics");
3190 child = SYSCTL_CHILDREN(tree);
3191 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3192     &stats->tx_octets, "Octets");
3193 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3194     &stats->tx_zero_rexmits, "Zero Retransmits");
3195 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3196     &stats->tx_one_rexmits, "One Retransmits");
3197 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3198     &stats->tx_multi_rexmits, "Multiple Retransmits");
3199 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3200     &stats->tx_late_cols, "Late Collisions");
3201 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3202     &stats->tx_fifo_underuns, "FIFO Underruns");
3203 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3204     &stats->tx_carrier_losts, "Carrier Losts");
3205 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3206     &stats->tx_excess_deferals, "Excess Deferrals");
3207 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3208     &stats->tx_retry_errors, "Retry Errors");
3209 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3210 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3211     &stats->tx_deferals, "Deferrals");
3212 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3213     &stats->tx_frames, "Frames");
3214 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3215     &stats->tx_pause, "Pause Frames");
3217 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3218 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3219     &stats->tx_unicast, "Unicast Frames");
3220 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3221     &stats->tx_multicast, "Multicast Frames");
3222 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3223     &stats->tx_broadcast, "Broadcast Frames");
3227 #undef NFE_SYSCTL_STAT_ADD32
3228 #undef NFE_SYSCTL_STAT_ADD64
/*
 * Reset the hardware MIB counters by reading every counter register
 * and discarding the value (reads appear to clear the counters —
 * values are read and thrown away).  V1 and V2/V3 chips expose
 * different numbers of registers; V3 adds three Tx frame counters.
 */
3231 nfe_stats_clear(struct nfe_softc *sc)
3235 if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3236 mib_cnt = NFE_NUM_MIB_STATV1;
3237 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3238 mib_cnt = NFE_NUM_MIB_STATV2;
/* Counter registers are contiguous 32-bit words from NFE_TX_OCTET. */
3242 for (i = 0; i < mib_cnt; i++)
3243 NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t));
3245 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3246 NFE_READ(sc, NFE_TX_UNICAST);
3247 NFE_READ(sc, NFE_TX_MULTICAST);
3248 NFE_READ(sc, NFE_TX_BROADCAST);
/*
 * Accumulate the hardware MIB counters into the softc's soft copy.
 * No-op on chips without a MIB.  Caller holds the softc lock; the
 * register reads also clear the hardware counters (same registers
 * nfe_stats_clear() drains), so each delta is added exactly once.
 */
3253 nfe_stats_update(struct nfe_softc *sc)
3255 struct nfe_hw_stats *stats;
3257 NFE_LOCK_ASSERT(sc);
3259 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3262 stats = &sc->nfe_stats;
/* Counters common to all MIB versions. */
3263 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3264 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3265 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3266 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3267 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3268 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3269 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3270 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3271 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3272 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3273 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3274 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3275 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3276 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3277 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3278 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3279 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3280 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3281 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3282 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3283 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
/* Additional counters on MIB V2 (and later) parts. */
3285 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3286 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3287 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3288 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3289 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3290 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3291 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
/* Tx frame-type breakdown exists only on MIB V3 parts. */
3294 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3295 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3296 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3297 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
/*
 * Downshift the link for Wake-on-LAN: force the PHY to renegotiate
 * without gigabit (MII_100T2CR cleared, ANAR limited to 10/100),
 * poll until a 10/100 link comes up, and program the MAC to match.
 * If no link is established, fall back to a forced 100TX/FDX config.
 * Caller holds the softc lock.
 */
3303 nfe_set_linkspeed(struct nfe_softc *sc)
3305 struct mii_softc *miisc;
3306 struct mii_data *mii;
3309 NFE_LOCK_ASSERT(sc);
3311 mii = device_get_softc(sc->nfe_miibus);
/* Already have a usable link?  (Handled in the elided switch body.) */
3314 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3315     (IFM_ACTIVE | IFM_AVALID)) {
/* NOTE(review): `switch IFM_SUBTYPE(...)` has no explicit parens; it
 * compiles only because the macro expansion is itself parenthesized. */
3316 switch IFM_SUBTYPE(mii->mii_media_active) {
3327 miisc = LIST_FIRST(&mii->mii_phys);
3328 phyno = miisc->mii_phy;
3329 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
/* Advertise 10/100 only and restart autonegotiation. */
3331 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3332 nfe_miibus_writereg(sc->nfe_dev, phyno,
3333     MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3334 nfe_miibus_writereg(sc->nfe_dev, phyno,
3335     MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3339  * Poll link state until nfe(4) get a 10/100Mbps link.
3341 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3343 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3344     == (IFM_ACTIVE | IFM_AVALID)) {
3345 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3348 nfe_mac_config(sc, mii);
/* Sleep one second between polls (interruptible wait). */
3355 pause("nfelnk", hz);
3358 if (i == MII_ANEGTICKS_GIGE)
3359 device_printf(sc->nfe_dev,
3360     "establishing a link failed, WOL may not work!");
3363  * No link, force MAC to have 100Mbps, full-duplex link.
3364  * This is the last resort and may/may not work.
3366 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3367 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3368 nfe_mac_config(sc, mii);
3373 nfe_set_wol(struct nfe_softc *sc)
3380 NFE_LOCK_ASSERT(sc);
3382 if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3385 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3386 wolctl = NFE_WOL_MAGIC;
3389 NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3390 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
3391 nfe_set_linkspeed(sc);
3392 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3393 NFE_WRITE(sc, NFE_PWR2_CTL,
3394 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3396 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3397 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3398 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3401 /* Request PME if WOL is requested. */
3402 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3403 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3404 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3405 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3406 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);