1 /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
26 #ifdef HAVE_KERNEL_OPTION_HEADERS
27 #include "opt_device_polling.h"
30 #include <sys/param.h>
31 #include <sys/endian.h>
32 #include <sys/systm.h>
33 #include <sys/sockio.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/kernel.h>
38 #include <sys/queue.h>
39 #include <sys/socket.h>
40 #include <sys/sysctl.h>
41 #include <sys/taskqueue.h>
44 #include <net/if_arp.h>
45 #include <net/ethernet.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 #include <net/if_types.h>
49 #include <net/if_vlan_var.h>
53 #include <machine/bus.h>
54 #include <machine/resource.h>
58 #include <dev/mii/mii.h>
59 #include <dev/mii/miivar.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
64 #include <dev/nfe/if_nfereg.h>
65 #include <dev/nfe/if_nfevar.h>
67 MODULE_DEPEND(nfe, pci, 1, 1, 1);
68 MODULE_DEPEND(nfe, ether, 1, 1, 1);
69 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
71 /* "device miibus" required. See GENERIC if you get errors here. */
72 #include "miibus_if.h"
74 static int nfe_probe(device_t);
75 static int nfe_attach(device_t);
76 static int nfe_detach(device_t);
77 static int nfe_suspend(device_t);
78 static int nfe_resume(device_t);
79 static int nfe_shutdown(device_t);
80 static int nfe_can_use_msix(struct nfe_softc *);
81 static void nfe_power(struct nfe_softc *);
82 static int nfe_miibus_readreg(device_t, int, int);
83 static int nfe_miibus_writereg(device_t, int, int, int);
84 static void nfe_miibus_statchg(device_t);
85 static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
86 static void nfe_set_intr(struct nfe_softc *);
87 static __inline void nfe_enable_intr(struct nfe_softc *);
88 static __inline void nfe_disable_intr(struct nfe_softc *);
89 static int nfe_ioctl(struct ifnet *, u_long, caddr_t);
90 static void nfe_alloc_msix(struct nfe_softc *, int);
91 static int nfe_intr(void *);
92 static void nfe_int_task(void *, int);
93 static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
94 static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
95 static int nfe_newbuf(struct nfe_softc *, int);
96 static int nfe_jnewbuf(struct nfe_softc *, int);
97 static int nfe_rxeof(struct nfe_softc *, int, int *);
98 static int nfe_jrxeof(struct nfe_softc *, int, int *);
99 static void nfe_txeof(struct nfe_softc *);
100 static int nfe_encap(struct nfe_softc *, struct mbuf **);
101 static void nfe_setmulti(struct nfe_softc *);
102 static void nfe_tx_task(void *, int);
103 static void nfe_start(struct ifnet *);
104 static void nfe_watchdog(struct ifnet *);
105 static void nfe_init(void *);
106 static void nfe_init_locked(void *);
107 static void nfe_stop(struct ifnet *);
108 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
109 static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
110 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
111 static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
112 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
113 static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
114 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
115 static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
117 static int nfe_ifmedia_upd(struct ifnet *);
118 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
119 static void nfe_tick(void *);
120 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
121 static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
122 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
124 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
125 static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
126 static void nfe_sysctl_node(struct nfe_softc *);
127 static void nfe_stats_clear(struct nfe_softc *);
128 static void nfe_stats_update(struct nfe_softc *);
129 static void nfe_set_linkspeed(struct nfe_softc *);
130 static void nfe_set_wol(struct nfe_softc *);
133 static int nfedebug = 0;
134 #define DPRINTF(sc, ...) do { \
136 device_printf((sc)->nfe_dev, __VA_ARGS__); \
138 #define DPRINTFN(sc, n, ...) do { \
139 if (nfedebug >= (n)) \
140 device_printf((sc)->nfe_dev, __VA_ARGS__); \
143 #define DPRINTF(sc, ...)
144 #define DPRINTFN(sc, n, ...)
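/*
 * Debug output: DPRINTF() fires when the runtime nfedebug knob is
 * nonzero, while DPRINTFN() is additionally gated on a verbosity
 * level; e.g. DPRINTFN(sc, 2, "timeout waiting for PHY\n") prints
 * only when nfedebug >= 2.  The empty definitions above are the
 * non-debug variants, so the macros cost nothing in production builds.
 */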
147 #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx)
148 #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx)
149 #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
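/*
 * All softc state and hardware access is serialized by the single
 * nfe_mtx mutex.  The usual pattern is, e.g.:
 *
 *	NFE_LOCK(sc);
 *	nfe_init_locked(sc);
 *	NFE_UNLOCK(sc);
 *
 * Functions with a _locked suffix expect the mutex to be held and
 * check this with NFE_LOCK_ASSERT().
 */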
152 static int msi_disable = 0;
153 static int msix_disable = 0;
154 static int jumbo_disable = 0;
155 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
156 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
157 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
159 static device_method_t nfe_methods[] = {
160 /* Device interface */
161 DEVMETHOD(device_probe, nfe_probe),
162 DEVMETHOD(device_attach, nfe_attach),
163 DEVMETHOD(device_detach, nfe_detach),
164 DEVMETHOD(device_suspend, nfe_suspend),
165 DEVMETHOD(device_resume, nfe_resume),
166 DEVMETHOD(device_shutdown, nfe_shutdown),
169 DEVMETHOD(bus_print_child, bus_generic_print_child),
170 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
173 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
174 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
175 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
180 static driver_t nfe_driver = {
183 sizeof(struct nfe_softc)
186 static devclass_t nfe_devclass;
188 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
189 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
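/*
 * The second DRIVER_MODULE() declaration attaches miibus as a child
 * of nfe, so PHYs on the MII bus are enumerated through the
 * miibus_readreg/miibus_writereg/miibus_statchg methods declared in
 * nfe_methods[] above.
 */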
191 static struct nfe_type nfe_devs[] = {
192 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
193 "NVIDIA nForce MCP Networking Adapter"},
194 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
195 "NVIDIA nForce2 MCP2 Networking Adapter"},
196 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
197 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
198 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
199 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
200 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
201 "NVIDIA nForce3 MCP3 Networking Adapter"},
202 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
203 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
204 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
205 "NVIDIA nForce3 MCP7 Networking Adapter"},
206 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
207 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
208 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
209 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
210 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
211 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */
212 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
213 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */
214 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
215 "NVIDIA nForce 430 MCP12 Networking Adapter"},
216 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
217 "NVIDIA nForce 430 MCP13 Networking Adapter"},
218 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
219 "NVIDIA nForce MCP55 Networking Adapter"},
220 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
221 "NVIDIA nForce MCP55 Networking Adapter"},
222 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
223 "NVIDIA nForce MCP61 Networking Adapter"},
224 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
225 "NVIDIA nForce MCP61 Networking Adapter"},
226 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
227 "NVIDIA nForce MCP61 Networking Adapter"},
228 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
229 "NVIDIA nForce MCP61 Networking Adapter"},
230 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
231 "NVIDIA nForce MCP65 Networking Adapter"},
232 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
233 "NVIDIA nForce MCP65 Networking Adapter"},
234 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
235 "NVIDIA nForce MCP65 Networking Adapter"},
236 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
237 "NVIDIA nForce MCP65 Networking Adapter"},
238 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
239 "NVIDIA nForce MCP67 Networking Adapter"},
240 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
241 "NVIDIA nForce MCP67 Networking Adapter"},
242 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
243 "NVIDIA nForce MCP67 Networking Adapter"},
244 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
245 "NVIDIA nForce MCP67 Networking Adapter"},
246 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
247 "NVIDIA nForce MCP73 Networking Adapter"},
248 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
249 "NVIDIA nForce MCP73 Networking Adapter"},
250 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
251 "NVIDIA nForce MCP73 Networking Adapter"},
252 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
253 "NVIDIA nForce MCP73 Networking Adapter"},
254 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
255 "NVIDIA nForce MCP77 Networking Adapter"},
256 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
257 "NVIDIA nForce MCP77 Networking Adapter"},
258 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
259 "NVIDIA nForce MCP77 Networking Adapter"},
260 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
261 "NVIDIA nForce MCP77 Networking Adapter"},
262 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
263 "NVIDIA nForce MCP79 Networking Adapter"},
264 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
265 "NVIDIA nForce MCP79 Networking Adapter"},
266 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
267 "NVIDIA nForce MCP79 Networking Adapter"},
268 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
269 "NVIDIA nForce MCP79 Networking Adapter"},
274 /* Probe for supported hardware IDs. */
276 nfe_probe(device_t dev)
281 /* Check for matching PCI vendor/device IDs. */
282 while (t->name != NULL) {
283 if ((pci_get_vendor(dev) == t->vid_id) &&
284 (pci_get_device(dev) == t->dev_id)) {
285 device_set_desc(dev, t->name);
286 return (BUS_PROBE_DEFAULT);
295 nfe_alloc_msix(struct nfe_softc *sc, int count)
300 sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
302 if (sc->nfe_msix_res == NULL) {
303 device_printf(sc->nfe_dev,
304 "couldn't allocate MSIX table resource\n");
308 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
309 SYS_RES_MEMORY, &rid, RF_ACTIVE);
310 if (sc->nfe_msix_pba_res == NULL) {
311 device_printf(sc->nfe_dev,
312 "couldn't allocate MSIX PBA resource\n");
313 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
315 sc->nfe_msix_res = NULL;
319 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
320 if (count == NFE_MSI_MESSAGES) {
322 device_printf(sc->nfe_dev,
323 "Using %d MSIX messages\n", count);
327 device_printf(sc->nfe_dev,
328 "couldn't allocate MSIX\n");
329 pci_release_msi(sc->nfe_dev);
330 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
331 PCIR_BAR(3), sc->nfe_msix_pba_res);
332 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
333 PCIR_BAR(2), sc->nfe_msix_res);
334 sc->nfe_msix_pba_res = NULL;
335 sc->nfe_msix_res = NULL;
341 nfe_attach(device_t dev)
343 struct nfe_softc *sc;
345 bus_addr_t dma_addr_max;
346 int error = 0, i, msic, reg, rid;
348 sc = device_get_softc(dev);
351 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
353 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
355 pci_enable_busmaster(dev);
358 sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
360 if (sc->nfe_res[0] == NULL) {
361 device_printf(dev, "couldn't map memory resources\n");
362 mtx_destroy(&sc->nfe_mtx);
366 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
369 v = pci_read_config(dev, reg + 0x08, 2);
370 /* Change max. read request size to 4096. */
373 pci_write_config(dev, reg + 0x08, v, 2);
375 v = pci_read_config(dev, reg + 0x0c, 2);
376 /* link capability */
378 width = pci_read_config(dev, reg + 0x12, 2);
379 /* negotiated link width */
380 width = (width >> 4) & 0x3f;
382 device_printf(sc->nfe_dev,
383 "warning: negotiated link width (x%d) != "
384 "max link width (x%d)\n", width, v);
387 if (nfe_can_use_msix(sc) == 0) {
388 device_printf(sc->nfe_dev,
389 "MSI/MSI-X capability black-listed, will use INTx\n");
394 /* Allocate interrupt */
395 if (msix_disable == 0 || msi_disable == 0) {
396 if (msix_disable == 0 &&
397 (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
398 nfe_alloc_msix(sc, msic);
399 if (msi_disable == 0 && sc->nfe_msix == 0 &&
400 (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
401 pci_alloc_msi(dev, &msic) == 0) {
402 if (msic == NFE_MSI_MESSAGES) {
405 "Using %d MSI messages\n", msic);
408 pci_release_msi(dev);
412 if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
414 sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
415 RF_SHAREABLE | RF_ACTIVE);
416 if (sc->nfe_irq[0] == NULL) {
417 device_printf(dev, "couldn't allocate IRQ resources\n");
422 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
423 sc->nfe_irq[i] = bus_alloc_resource_any(dev,
424 SYS_RES_IRQ, &rid, RF_ACTIVE);
425 if (sc->nfe_irq[i] == NULL) {
427 "couldn't allocate IRQ resources for "
428 "message %d\n", rid);
433 /* Map interrupts to vector 0. */
434 if (sc->nfe_msix != 0) {
435 NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
436 NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
437 } else if (sc->nfe_msi != 0) {
438 NFE_WRITE(sc, NFE_MSI_MAP0, 0);
439 NFE_WRITE(sc, NFE_MSI_MAP1, 0);
443 /* Set IRQ status/mask register. */
444 sc->nfe_irq_status = NFE_IRQ_STATUS;
445 sc->nfe_irq_mask = NFE_IRQ_MASK;
446 sc->nfe_intrs = NFE_IRQ_WANTED;
448 if (sc->nfe_msix != 0) {
449 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
450 sc->nfe_nointrs = NFE_IRQ_WANTED;
451 } else if (sc->nfe_msi != 0) {
452 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
453 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
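/*
 * MSI and MSI-X each have their own status/mask register banks.
 * Caching the offsets in nfe_irq_status/nfe_irq_mask here lets
 * nfe_intr(), nfe_enable_intr() and nfe_disable_intr() stay agnostic
 * of the interrupt mode in use.
 */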
456 sc->nfe_devid = pci_get_device(dev);
457 sc->nfe_revid = pci_get_revid(dev);
460 switch (sc->nfe_devid) {
461 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
462 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
463 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
464 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
465 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
467 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
468 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
469 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
471 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
472 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
473 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
474 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
475 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
478 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
479 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
480 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
481 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
484 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
485 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
486 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
487 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
488 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
489 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
490 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
491 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
492 case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
493 case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
494 case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
495 case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
496 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
497 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
499 case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
500 case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
501 case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
502 case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
503 /* XXX flow control */
504 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
505 NFE_CORRECT_MACADDR | NFE_MIB_V3;
507 case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
508 case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
509 case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
510 case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
511 /* XXX flow control */
512 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
513 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
515 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
516 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
517 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
518 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
519 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
520 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
526 /* Check for a byte-reversed Ethernet address. */
527 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
528 sc->nfe_flags |= NFE_CORRECT_MACADDR;
529 nfe_get_macaddr(sc, sc->eaddr);
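/*
 * Note: older chips store the station address in reversed byte order;
 * NFE_CORRECT_MACADDR marks parts that already keep it in the natural
 * order.  nfe_detach() performs the inverse swap before writing the
 * address back to the hardware.
 */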
531 * Allocate the parent bus DMA tag appropriate for PCI.
533 dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
534 if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
535 dma_addr_max = NFE_DMA_MAXADDR;
536 error = bus_dma_tag_create(
537 bus_get_dma_tag(sc->nfe_dev), /* parent */
538 1, 0, /* alignment, boundary */
539 dma_addr_max, /* lowaddr */
540 BUS_SPACE_MAXADDR, /* highaddr */
541 NULL, NULL, /* filter, filterarg */
542 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
543 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
545 NULL, NULL, /* lockfunc, lockarg */
546 &sc->nfe_parent_tag);
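/*
 * Ring and buffer DMA tags created below are derived from
 * nfe_parent_tag and therefore inherit its lowaddr restriction:
 * 32-bit addressing by default, or the 40-bit NFE_DMA_MAXADDR limit
 * on parts flagged NFE_40BIT_ADDR.
 */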
550 ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
552 device_printf(dev, "can not if_alloc()\n");
556 TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);
559 * Allocate Tx and Rx rings.
561 if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
564 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
567 nfe_alloc_jrx_ring(sc, &sc->jrxq);
568 /* Create sysctl node. */
572 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
573 ifp->if_mtu = ETHERMTU;
574 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
575 ifp->if_ioctl = nfe_ioctl;
576 ifp->if_start = nfe_start;
577 ifp->if_hwassist = 0;
578 ifp->if_capabilities = 0;
579 ifp->if_init = nfe_init;
580 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
581 ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
582 IFQ_SET_READY(&ifp->if_snd);
584 if (sc->nfe_flags & NFE_HW_CSUM) {
585 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
586 ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
588 ifp->if_capenable = ifp->if_capabilities;
590 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
591 /* VLAN capability setup. */
592 ifp->if_capabilities |= IFCAP_VLAN_MTU;
593 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
594 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
595 if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
596 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
599 if (pci_find_extcap(dev, PCIY_PMG, &reg) == 0)
600 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
601 ifp->if_capenable = ifp->if_capabilities;
604 * Tell the upper layer(s) we support long frames.
605 * Must appear after the call to ether_ifattach() because
606 * ether_ifattach() sets ifi_hdrlen to the default value.
608 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
610 #ifdef DEVICE_POLLING
611 ifp->if_capabilities |= IFCAP_POLLING;
615 error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
616 nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
617 MIIF_DOPAUSE | MIIF_FORCEPAUSE);
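/*
 * MIIF_DOPAUSE | MIIF_FORCEPAUSE make the PHY layer negotiate and
 * report pause capabilities, which nfe_mac_config() later translates
 * into the NFE_PFF_RX_PAUSE and NFE_MISC1_TX_PAUSE register settings.
 */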
619 device_printf(dev, "attaching PHYs failed\n");
622 ether_ifattach(ifp, sc->eaddr);
624 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
625 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
626 taskqueue_thread_enqueue, &sc->nfe_tq);
627 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
628 device_get_nameunit(sc->nfe_dev));
630 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
631 error = bus_setup_intr(dev, sc->nfe_irq[0],
632 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
633 &sc->nfe_intrhand[0]);
635 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
636 error = bus_setup_intr(dev, sc->nfe_irq[i],
637 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
638 &sc->nfe_intrhand[i]);
644 device_printf(dev, "couldn't set up irq\n");
645 taskqueue_free(sc->nfe_tq);
660 nfe_detach(device_t dev)
662 struct nfe_softc *sc;
664 uint8_t eaddr[ETHER_ADDR_LEN];
667 sc = device_get_softc(dev);
668 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
671 #ifdef DEVICE_POLLING
672 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
673 ether_poll_deregister(ifp);
675 if (device_is_attached(dev)) {
678 ifp->if_flags &= ~IFF_UP;
680 callout_drain(&sc->nfe_stat_ch);
681 taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
686 /* Restore the original Ethernet address. */
687 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
688 for (i = 0; i < ETHER_ADDR_LEN; i++) {
689 eaddr[i] = sc->eaddr[5 - i];
692 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
693 nfe_set_macaddr(sc, eaddr);
697 device_delete_child(dev, sc->nfe_miibus);
698 bus_generic_detach(dev);
699 if (sc->nfe_tq != NULL) {
700 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
701 taskqueue_free(sc->nfe_tq);
705 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
706 if (sc->nfe_intrhand[i] != NULL) {
707 bus_teardown_intr(dev, sc->nfe_irq[i],
708 sc->nfe_intrhand[i]);
709 sc->nfe_intrhand[i] = NULL;
713 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
714 if (sc->nfe_irq[0] != NULL)
715 bus_release_resource(dev, SYS_RES_IRQ, 0,
718 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
719 if (sc->nfe_irq[i] != NULL) {
720 bus_release_resource(dev, SYS_RES_IRQ, rid,
722 sc->nfe_irq[i] = NULL;
725 pci_release_msi(dev);
727 if (sc->nfe_msix_pba_res != NULL) {
728 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
729 sc->nfe_msix_pba_res);
730 sc->nfe_msix_pba_res = NULL;
732 if (sc->nfe_msix_res != NULL) {
733 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
735 sc->nfe_msix_res = NULL;
737 if (sc->nfe_res[0] != NULL) {
738 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
740 sc->nfe_res[0] = NULL;
743 nfe_free_tx_ring(sc, &sc->txq);
744 nfe_free_rx_ring(sc, &sc->rxq);
745 nfe_free_jrx_ring(sc, &sc->jrxq);
747 if (sc->nfe_parent_tag) {
748 bus_dma_tag_destroy(sc->nfe_parent_tag);
749 sc->nfe_parent_tag = NULL;
752 mtx_destroy(&sc->nfe_mtx);
759 nfe_suspend(device_t dev)
761 struct nfe_softc *sc;
763 sc = device_get_softc(dev);
766 nfe_stop(sc->nfe_ifp);
768 sc->nfe_suspended = 1;
776 nfe_resume(device_t dev)
778 struct nfe_softc *sc;
781 sc = device_get_softc(dev);
786 if (ifp->if_flags & IFF_UP)
788 sc->nfe_suspended = 0;
796 nfe_can_use_msix(struct nfe_softc *sc)
798 static struct msix_blacklist {
801 } msix_blacklists[] = {
802 { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
805 struct msix_blacklist *mblp;
806 char *maker, *product;
807 int count, n, use_msix;
810 * Search the baseboard manufacturer and product name table
811 * to see whether this system has a known MSI/MSI-X issue.
813 maker = getenv("smbios.planar.maker");
814 product = getenv("smbios.planar.product");
816 if (maker != NULL && product != NULL) {
817 count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
818 mblp = msix_blacklists;
819 for (n = 0; n < count; n++) {
820 if (strcmp(maker, mblp->maker) == 0 &&
821 strcmp(product, mblp->product) == 0) {
837 /* Take the PHY/NIC out of powerdown (sequence derived from Linux). */
839 nfe_power(struct nfe_softc *sc)
843 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
845 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
846 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
848 NFE_WRITE(sc, NFE_MAC_RESET, 0);
850 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
851 pwr = NFE_READ(sc, NFE_PWR2_CTL);
852 pwr &= ~NFE_PWR2_WAKEUP_MASK;
853 if (sc->nfe_revid >= 0xa3 &&
854 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
855 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
856 pwr |= NFE_PWR2_REVA3;
857 NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
862 nfe_miibus_statchg(device_t dev)
864 struct nfe_softc *sc;
865 struct mii_data *mii;
867 uint32_t rxctl, txctl;
869 sc = device_get_softc(dev);
871 mii = device_get_softc(sc->nfe_miibus);
875 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
876 (IFM_ACTIVE | IFM_AVALID)) {
877 switch (IFM_SUBTYPE(mii->mii_media_active)) {
888 nfe_mac_config(sc, mii);
889 txctl = NFE_READ(sc, NFE_TX_CTL);
890 rxctl = NFE_READ(sc, NFE_RX_CTL);
891 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
892 txctl |= NFE_TX_START;
893 rxctl |= NFE_RX_START;
895 txctl &= ~NFE_TX_START;
896 rxctl &= ~NFE_RX_START;
898 NFE_WRITE(sc, NFE_TX_CTL, txctl);
899 NFE_WRITE(sc, NFE_RX_CTL, rxctl);
904 nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
906 uint32_t link, misc, phy, seed;
911 phy = NFE_READ(sc, NFE_PHY_IFACE);
912 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
914 seed = NFE_READ(sc, NFE_RNDSEED);
915 seed &= ~NFE_SEED_MASK;
917 misc = NFE_MISC1_MAGIC;
918 link = NFE_MEDIA_SET;
920 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
921 phy |= NFE_PHY_HDX; /* half-duplex */
922 misc |= NFE_MISC1_HDX;
925 switch (IFM_SUBTYPE(mii->mii_media_active)) {
926 case IFM_1000_T: /* full-duplex only */
927 link |= NFE_MEDIA_1000T;
928 seed |= NFE_SEED_1000T;
929 phy |= NFE_PHY_1000T;
932 link |= NFE_MEDIA_100TX;
933 seed |= NFE_SEED_100TX;
934 phy |= NFE_PHY_100TX;
937 link |= NFE_MEDIA_10T;
938 seed |= NFE_SEED_10T;
942 if ((phy & 0x10000000) != 0) {
943 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
944 val = NFE_R1_MAGIC_1000;
946 val = NFE_R1_MAGIC_10_100;
948 val = NFE_R1_MAGIC_DEFAULT;
949 NFE_WRITE(sc, NFE_SETUP_R1, val);
951 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
953 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
954 NFE_WRITE(sc, NFE_MISC1, misc);
955 NFE_WRITE(sc, NFE_LINKSPEED, link);
957 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
958 /* All hardware revisions seem to support Rx pause frames. */
959 val = NFE_READ(sc, NFE_RXFILTER);
960 if ((IFM_OPTIONS(mii->mii_media_active) &
961 IFM_ETH_RXPAUSE) != 0)
962 val |= NFE_PFF_RX_PAUSE;
964 val &= ~NFE_PFF_RX_PAUSE;
965 NFE_WRITE(sc, NFE_RXFILTER, val);
966 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
967 val = NFE_READ(sc, NFE_MISC1);
968 if ((IFM_OPTIONS(mii->mii_media_active) &
969 IFM_ETH_TXPAUSE) != 0) {
970 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
971 NFE_TX_PAUSE_FRAME_ENABLE);
972 val |= NFE_MISC1_TX_PAUSE;
974 val &= ~NFE_MISC1_TX_PAUSE;
975 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
976 NFE_TX_PAUSE_FRAME_DISABLE);
978 NFE_WRITE(sc, NFE_MISC1, val);
981 /* disable rx/tx pause frames */
982 val = NFE_READ(sc, NFE_RXFILTER);
983 val &= ~NFE_PFF_RX_PAUSE;
984 NFE_WRITE(sc, NFE_RXFILTER, val);
985 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
986 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
987 NFE_TX_PAUSE_FRAME_DISABLE);
988 val = NFE_READ(sc, NFE_MISC1);
989 val &= ~NFE_MISC1_TX_PAUSE;
990 NFE_WRITE(sc, NFE_MISC1, val);
997 nfe_miibus_readreg(device_t dev, int phy, int reg)
999 struct nfe_softc *sc = device_get_softc(dev);
1003 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1005 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1006 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1010 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
1012 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1014 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1017 if (ntries == NFE_TIMEOUT) {
1018 DPRINTFN(sc, 2, "timeout waiting for PHY\n");
1022 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
1023 DPRINTFN(sc, 2, "could not read PHY\n");
1027 val = NFE_READ(sc, NFE_PHY_DATA);
1028 if (val != 0xffffffff && val != 0)
1029 sc->mii_phyaddr = phy;
1031 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
1038 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
1040 struct nfe_softc *sc = device_get_softc(dev);
1044 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1046 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1047 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1051 NFE_WRITE(sc, NFE_PHY_DATA, val);
1052 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1053 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1055 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1057 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1061 if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1062 device_printf(sc->nfe_dev, "could not write to PHY\n");
1067 struct nfe_dmamap_arg {
1068 bus_addr_t nfe_busaddr;
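/*
 * nfe_dma_map_segs() is the bus_dmamap_load(9) callback used when
 * loading ring memory; it records the bus address of the (single)
 * segment in the caller-supplied nfe_dmamap_arg.  A minimal sketch of
 * the conventional callback, assuming rings are always loaded with
 * nsegments == 1 as the ring tags request:
 *
 *	static void
 *	nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		struct nfe_dmamap_arg *ctx;
 *
 *		if (error != 0)
 *			return;
 *		ctx = (struct nfe_dmamap_arg *)arg;
 *		ctx->nfe_busaddr = segs[0].ds_addr;
 *	}
 */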
1072 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1074 struct nfe_dmamap_arg ctx;
1075 struct nfe_rx_data *data;
1077 int i, error, descsize;
1079 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1080 desc = ring->desc64;
1081 descsize = sizeof (struct nfe_desc64);
1083 desc = ring->desc32;
1084 descsize = sizeof (struct nfe_desc32);
1087 ring->cur = ring->next = 0;
1089 error = bus_dma_tag_create(sc->nfe_parent_tag,
1090 NFE_RING_ALIGN, 0, /* alignment, boundary */
1091 BUS_SPACE_MAXADDR, /* lowaddr */
1092 BUS_SPACE_MAXADDR, /* highaddr */
1093 NULL, NULL, /* filter, filterarg */
1094 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1095 NFE_RX_RING_COUNT * descsize, /* maxsegsize */
1097 NULL, NULL, /* lockfunc, lockarg */
1098 &ring->rx_desc_tag);
1100 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1104 /* Allocate DMA'able memory for the descriptor ring. */
1105 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1106 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1108 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1111 if (sc->nfe_flags & NFE_40BIT_ADDR)
1112 ring->desc64 = desc;
1114 ring->desc32 = desc;
1116 /* Map the descriptors into device-visible address space. */
1117 ctx.nfe_busaddr = 0;
1118 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1119 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1121 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1124 ring->physaddr = ctx.nfe_busaddr;
1126 error = bus_dma_tag_create(sc->nfe_parent_tag,
1127 1, 0, /* alignment, boundary */
1128 BUS_SPACE_MAXADDR, /* lowaddr */
1129 BUS_SPACE_MAXADDR, /* highaddr */
1130 NULL, NULL, /* filter, filterarg */
1131 MCLBYTES, 1, /* maxsize, nsegments */
1132 MCLBYTES, /* maxsegsize */
1134 NULL, NULL, /* lockfunc, lockarg */
1135 &ring->rx_data_tag);
1137 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1141 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1143 device_printf(sc->nfe_dev,
1144 "could not create Rx DMA spare map\n");
1149 * Pre-allocate Rx buffers and populate Rx ring.
1151 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1152 data = &sc->rxq.data[i];
1153 data->rx_data_map = NULL;
1155 error = bus_dmamap_create(ring->rx_data_tag, 0,
1156 &data->rx_data_map);
1158 device_printf(sc->nfe_dev,
1159 "could not create Rx DMA map\n");
1170 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1172 struct nfe_dmamap_arg ctx;
1173 struct nfe_rx_data *data;
1175 int i, error, descsize;
1177 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1179 if (jumbo_disable != 0) {
1180 device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1181 sc->nfe_jumbo_disable = 1;
1185 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1186 desc = ring->jdesc64;
1187 descsize = sizeof (struct nfe_desc64);
1189 desc = ring->jdesc32;
1190 descsize = sizeof (struct nfe_desc32);
1193 ring->jcur = ring->jnext = 0;
1195 /* Create DMA tag for jumbo Rx ring. */
1196 error = bus_dma_tag_create(sc->nfe_parent_tag,
1197 NFE_RING_ALIGN, 0, /* alignment, boundary */
1198 BUS_SPACE_MAXADDR, /* lowaddr */
1199 BUS_SPACE_MAXADDR, /* highaddr */
1200 NULL, NULL, /* filter, filterarg */
1201 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
1203 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
1205 NULL, NULL, /* lockfunc, lockarg */
1206 &ring->jrx_desc_tag);
1208 device_printf(sc->nfe_dev,
1209 "could not create jumbo ring DMA tag\n");
1213 /* Create DMA tag for jumbo Rx buffers. */
1214 error = bus_dma_tag_create(sc->nfe_parent_tag,
1215 1, 0, /* alignment, boundary */
1216 BUS_SPACE_MAXADDR, /* lowaddr */
1217 BUS_SPACE_MAXADDR, /* highaddr */
1218 NULL, NULL, /* filter, filterarg */
1219 MJUM9BYTES, /* maxsize */
1221 MJUM9BYTES, /* maxsegsize */
1223 NULL, NULL, /* lockfunc, lockarg */
1224 &ring->jrx_data_tag);
1226 device_printf(sc->nfe_dev,
1227 "could not create jumbo Rx buffer DMA tag\n");
1231 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1232 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1233 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1235 device_printf(sc->nfe_dev,
1236 "could not allocate DMA'able memory for jumbo Rx ring\n");
1239 if (sc->nfe_flags & NFE_40BIT_ADDR)
1240 ring->jdesc64 = desc;
1242 ring->jdesc32 = desc;
1244 ctx.nfe_busaddr = 0;
1245 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1246 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1248 device_printf(sc->nfe_dev,
1249 "could not load DMA'able memory for jumbo Rx ring\n");
1252 ring->jphysaddr = ctx.nfe_busaddr;
1254 /* Create DMA maps for jumbo Rx buffers. */
1255 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1257 device_printf(sc->nfe_dev,
1258 "could not create jumbo Rx DMA spare map\n");
1262 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1263 data = &sc->jrxq.jdata[i];
1264 data->rx_data_map = NULL;
1266 error = bus_dmamap_create(ring->jrx_data_tag, 0,
1267 &data->rx_data_map);
1269 device_printf(sc->nfe_dev,
1270 "could not create jumbo Rx DMA map\n");
1279 * Running without jumbo frame support is OK for most cases,
1280 * so don't fail the attach if the jumbo DMA tag/map cannot be created.
1282 nfe_free_jrx_ring(sc, ring);
1283 device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1284 "resource shortage\n");
1285 sc->nfe_jumbo_disable = 1;
1290 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1296 ring->cur = ring->next = 0;
1297 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1298 desc = ring->desc64;
1299 descsize = sizeof (struct nfe_desc64);
1301 desc = ring->desc32;
1302 descsize = sizeof (struct nfe_desc32);
1304 bzero(desc, descsize * NFE_RX_RING_COUNT);
1305 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1306 if (nfe_newbuf(sc, i) != 0)
1310 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1311 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1318 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1324 ring->jcur = ring->jnext = 0;
1325 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1326 desc = ring->jdesc64;
1327 descsize = sizeof (struct nfe_desc64);
1329 desc = ring->jdesc32;
1330 descsize = sizeof (struct nfe_desc32);
1332 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1333 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1334 if (nfe_jnewbuf(sc, i) != 0)
1338 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1339 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1346 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1348 struct nfe_rx_data *data;
1352 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1353 desc = ring->desc64;
1354 descsize = sizeof (struct nfe_desc64);
1356 desc = ring->desc32;
1357 descsize = sizeof (struct nfe_desc32);
1360 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1361 data = &ring->data[i];
1362 if (data->rx_data_map != NULL) {
1363 bus_dmamap_destroy(ring->rx_data_tag,
1365 data->rx_data_map = NULL;
1367 if (data->m != NULL) {
1372 if (ring->rx_data_tag != NULL) {
1373 if (ring->rx_spare_map != NULL) {
1374 bus_dmamap_destroy(ring->rx_data_tag,
1375 ring->rx_spare_map);
1376 ring->rx_spare_map = NULL;
1378 bus_dma_tag_destroy(ring->rx_data_tag);
1379 ring->rx_data_tag = NULL;
1383 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1384 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1385 ring->desc64 = NULL;
1386 ring->desc32 = NULL;
1387 ring->rx_desc_map = NULL;
1389 if (ring->rx_desc_tag != NULL) {
1390 bus_dma_tag_destroy(ring->rx_desc_tag);
1391 ring->rx_desc_tag = NULL;
1397 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1399 struct nfe_rx_data *data;
1403 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1406 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1407 desc = ring->jdesc64;
1408 descsize = sizeof (struct nfe_desc64);
1410 desc = ring->jdesc32;
1411 descsize = sizeof (struct nfe_desc32);
1414 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1415 data = &ring->jdata[i];
1416 if (data->rx_data_map != NULL) {
1417 bus_dmamap_destroy(ring->jrx_data_tag,
1419 data->rx_data_map = NULL;
1421 if (data->m != NULL) {
1426 if (ring->jrx_data_tag != NULL) {
1427 if (ring->jrx_spare_map != NULL) {
1428 bus_dmamap_destroy(ring->jrx_data_tag,
1429 ring->jrx_spare_map);
1430 ring->jrx_spare_map = NULL;
1432 bus_dma_tag_destroy(ring->jrx_data_tag);
1433 ring->jrx_data_tag = NULL;
1437 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1438 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1439 ring->jdesc64 = NULL;
1440 ring->jdesc32 = NULL;
1441 ring->jrx_desc_map = NULL;
1444 if (ring->jrx_desc_tag != NULL) {
1445 bus_dma_tag_destroy(ring->jrx_desc_tag);
1446 ring->jrx_desc_tag = NULL;
1452 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1454 struct nfe_dmamap_arg ctx;
1459 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1460 desc = ring->desc64;
1461 descsize = sizeof (struct nfe_desc64);
1463 desc = ring->desc32;
1464 descsize = sizeof (struct nfe_desc32);
1468 ring->cur = ring->next = 0;
1470 error = bus_dma_tag_create(sc->nfe_parent_tag,
1471 NFE_RING_ALIGN, 0, /* alignment, boundary */
1472 BUS_SPACE_MAXADDR, /* lowaddr */
1473 BUS_SPACE_MAXADDR, /* highaddr */
1474 NULL, NULL, /* filter, filterarg */
1475 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1476 NFE_TX_RING_COUNT * descsize, /* maxsegsize */
1478 NULL, NULL, /* lockfunc, lockarg */
1479 &ring->tx_desc_tag);
1481 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1485 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1486 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1488 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1491 if (sc->nfe_flags & NFE_40BIT_ADDR)
1492 ring->desc64 = desc;
1494 ring->desc32 = desc;
1496 ctx.nfe_busaddr = 0;
1497 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1498 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1500 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1503 ring->physaddr = ctx.nfe_busaddr;
1505 error = bus_dma_tag_create(sc->nfe_parent_tag,
1515 &ring->tx_data_tag);
1517 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1521 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1522 error = bus_dmamap_create(ring->tx_data_tag, 0,
1523 &ring->data[i].tx_data_map);
1525 device_printf(sc->nfe_dev,
1526 "could not create Tx DMA map\n");
1537 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1542 sc->nfe_force_tx = 0;
1544 ring->cur = ring->next = 0;
1545 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1546 desc = ring->desc64;
1547 descsize = sizeof (struct nfe_desc64);
1549 desc = ring->desc32;
1550 descsize = sizeof (struct nfe_desc32);
1552 bzero(desc, descsize * NFE_TX_RING_COUNT);
1554 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1555 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1560 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1562 struct nfe_tx_data *data;
1566 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1567 desc = ring->desc64;
1568 descsize = sizeof (struct nfe_desc64);
1570 desc = ring->desc32;
1571 descsize = sizeof (struct nfe_desc32);
1574 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1575 data = &ring->data[i];
1577 if (data->m != NULL) {
1578 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1579 BUS_DMASYNC_POSTWRITE);
1580 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1584 if (data->tx_data_map != NULL) {
1585 bus_dmamap_destroy(ring->tx_data_tag,
1587 data->tx_data_map = NULL;
1591 if (ring->tx_data_tag != NULL) {
1592 bus_dma_tag_destroy(ring->tx_data_tag);
1593 ring->tx_data_tag = NULL;
1597 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1598 BUS_DMASYNC_POSTWRITE);
1599 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1600 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1601 ring->desc64 = NULL;
1602 ring->desc32 = NULL;
1603 ring->tx_desc_map = NULL;
1604 bus_dma_tag_destroy(ring->tx_desc_tag);
1605 ring->tx_desc_tag = NULL;
1609 #ifdef DEVICE_POLLING
1610 static poll_handler_t nfe_poll;
1614 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1616 struct nfe_softc *sc = ifp->if_softc;
1622 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1627 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1628 nfe_jrxeof(sc, count, &rx_npkts);
1630 nfe_rxeof(sc, count, &rx_npkts);
1632 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1633 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1635 if (cmd == POLL_AND_CHECK_STATUS) {
1636 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1640 NFE_WRITE(sc, sc->nfe_irq_status, r);
1642 if (r & NFE_IRQ_LINK) {
1643 NFE_READ(sc, NFE_PHY_STATUS);
1644 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1645 DPRINTF(sc, "link state changed\n");
1651 #endif /* DEVICE_POLLING */
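/*
 * With DEVICE_POLLING compiled in and IFCAP_POLLING enabled via
 * ioctl, nfe_poll() services the Rx and Tx rings from the polling
 * loop with device interrupts masked; link and error status is only
 * examined on POLL_AND_CHECK_STATUS commands.
 */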
1654 nfe_set_intr(struct nfe_softc *sc)
1657 if (sc->nfe_msi != 0)
1658 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1662 /* In MSI-X mode, a write to the mask register behaves as an XOR. */
1663 static __inline void
1664 nfe_enable_intr(struct nfe_softc *sc)
1667 if (sc->nfe_msix != 0) {
1668 /* XXX Should have a better way to enable interrupts! */
1669 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1670 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1672 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1676 static __inline void
1677 nfe_disable_intr(struct nfe_softc *sc)
1680 if (sc->nfe_msix != 0) {
1681 /* XXX Should have a better way to disable interrupts! */
1682 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1683 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1685 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
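/*
 * Since an MSI-X mask register write toggles (XORs) the current
 * state, the reads above guard against writing the same value twice,
 * which would flip the interrupt state straight back.
 */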
1690 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1692 struct nfe_softc *sc;
1694 struct mii_data *mii;
1695 int error, init, mask;
1698 ifr = (struct ifreq *) data;
1703 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1705 else if (ifp->if_mtu != ifr->ifr_mtu) {
1706 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1707 (sc->nfe_jumbo_disable != 0)) &&
1708 ifr->ifr_mtu > ETHERMTU)
1712 ifp->if_mtu = ifr->ifr_mtu;
1713 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1714 nfe_init_locked(sc);
1721 if (ifp->if_flags & IFF_UP) {
1723 * If only the PROMISC or ALLMULTI flag changes, then
1724 * don't do a full re-init of the chip, just update
1727 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1728 ((ifp->if_flags ^ sc->nfe_if_flags) &
1729 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1732 nfe_init_locked(sc);
1734 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1737 sc->nfe_if_flags = ifp->if_flags;
1743 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1752 mii = device_get_softc(sc->nfe_miibus);
1753 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1756 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1757 #ifdef DEVICE_POLLING
1758 if ((mask & IFCAP_POLLING) != 0) {
1759 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1760 error = ether_poll_register(nfe_poll, ifp);
1764 nfe_disable_intr(sc);
1765 ifp->if_capenable |= IFCAP_POLLING;
1768 error = ether_poll_deregister(ifp);
1769 /* Re-enable interrupts even in the error case. */
1771 nfe_enable_intr(sc);
1772 ifp->if_capenable &= ~IFCAP_POLLING;
1776 #endif /* DEVICE_POLLING */
1777 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1778 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1779 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1781 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1782 (mask & IFCAP_HWCSUM) != 0) {
1783 ifp->if_capenable ^= IFCAP_HWCSUM;
1784 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
1785 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
1786 ifp->if_hwassist |= NFE_CSUM_FEATURES;
1788 ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1791 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
1792 (mask & IFCAP_VLAN_HWTAGGING) != 0) {
1793 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1798 * It seems that VLAN stripping requires Rx checksum offload.
1799 * Unfortunately FreeBSD has no way to disable only the Rx side
1800 * of VLAN stripping, so when we know Rx checksum offload is
1801 * disabled we turn the entire hardware VLAN assist off.
1803 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
1804 (NFE_HW_CSUM | NFE_HW_VLAN)) {
1805 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
1806 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
1809 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1810 (mask & IFCAP_TSO4) != 0) {
1811 ifp->if_capenable ^= IFCAP_TSO4;
1812 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
1813 (IFCAP_TSO4 & ifp->if_capabilities) != 0)
1814 ifp->if_hwassist |= CSUM_TSO;
1816 ifp->if_hwassist &= ~CSUM_TSO;
1819 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1820 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1823 if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
1824 VLAN_CAPABILITIES(ifp);
1827 error = ether_ioctl(ifp, cmd, data);
1838 struct nfe_softc *sc;
1841 sc = (struct nfe_softc *)arg;
1843 status = NFE_READ(sc, sc->nfe_irq_status);
1844 if (status == 0 || status == 0xffffffff)
1845 return (FILTER_STRAY);
1846 nfe_disable_intr(sc);
1847 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1849 return (FILTER_HANDLED);
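/*
 * nfe_intr() runs as a fast interrupt filter: it merely checks that
 * the interrupt is ours, masks further interrupts and defers the real
 * work to nfe_int_task() on the driver taskqueue, which re-enables
 * interrupts once the rings have been serviced.
 */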
1854 nfe_int_task(void *arg, int pending)
1856 struct nfe_softc *sc = arg;
1857 struct ifnet *ifp = sc->nfe_ifp;
1863 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1864 nfe_enable_intr(sc);
1866 return; /* not for us */
1868 NFE_WRITE(sc, sc->nfe_irq_status, r);
1870 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1872 #ifdef DEVICE_POLLING
1873 if (ifp->if_capenable & IFCAP_POLLING) {
1879 if (r & NFE_IRQ_LINK) {
1880 NFE_READ(sc, NFE_PHY_STATUS);
1881 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1882 DPRINTF(sc, "link state changed\n");
1885 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1887 nfe_enable_intr(sc);
1893 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1894 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1896 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1900 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1901 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1905 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1906 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1910 /* Reenable interrupts. */
1911 nfe_enable_intr(sc);
1915 static __inline void
1916 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1918 struct nfe_desc32 *desc32;
1919 struct nfe_desc64 *desc64;
1920 struct nfe_rx_data *data;
1923 data = &sc->rxq.data[idx];
1926 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1927 desc64 = &sc->rxq.desc64[idx];
1928 /* VLAN packet may have overwritten it. */
1929 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1930 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1931 desc64->length = htole16(m->m_len);
1932 desc64->flags = htole16(NFE_RX_READY);
1934 desc32 = &sc->rxq.desc32[idx];
1935 desc32->length = htole16(m->m_len);
1936 desc32->flags = htole16(NFE_RX_READY);
1941 static __inline void
1942 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1944 struct nfe_desc32 *desc32;
1945 struct nfe_desc64 *desc64;
1946 struct nfe_rx_data *data;
1949 data = &sc->jrxq.jdata[idx];
1952 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1953 desc64 = &sc->jrxq.jdesc64[idx];
1954 /* VLAN packet may have overwritten it. */
1955 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1956 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1957 desc64->length = htole16(m->m_len);
1958 desc64->flags = htole16(NFE_RX_READY);
1960 desc32 = &sc->jrxq.jdesc32[idx];
1961 desc32->length = htole16(m->m_len);
1962 desc32->flags = htole16(NFE_RX_READY);
1968 nfe_newbuf(struct nfe_softc *sc, int idx)
1970 struct nfe_rx_data *data;
1971 struct nfe_desc32 *desc32;
1972 struct nfe_desc64 *desc64;
1974 bus_dma_segment_t segs[1];
1978 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1982 m->m_len = m->m_pkthdr.len = MCLBYTES;
1983 m_adj(m, ETHER_ALIGN);
1985 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1986 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1990 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1992 data = &sc->rxq.data[idx];
1993 if (data->m != NULL) {
1994 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1995 BUS_DMASYNC_POSTREAD);
1996 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1998 map = data->rx_data_map;
1999 data->rx_data_map = sc->rxq.rx_spare_map;
2000 sc->rxq.rx_spare_map = map;
2001 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2002 BUS_DMASYNC_PREREAD);
2003 data->paddr = segs[0].ds_addr;
2005 /* update mapping address in h/w descriptor */
2006 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2007 desc64 = &sc->rxq.desc64[idx];
2008 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2009 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2010 desc64->length = htole16(segs[0].ds_len);
2011 desc64->flags = htole16(NFE_RX_READY);
2013 desc32 = &sc->rxq.desc32[idx];
2014 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2015 desc32->length = htole16(segs[0].ds_len);
2016 desc32->flags = htole16(NFE_RX_READY);
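/*
 * The spare-map scheme above guarantees a receive slot is never left
 * without a buffer: the new mbuf is loaded into rx_spare_map first,
 * and only on success are the maps swapped and the old mbuf unloaded.
 * On failure the caller re-arms the old buffer with
 * nfe_discard_rxbuf().
 */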
2024 nfe_jnewbuf(struct nfe_softc *sc, int idx)
2026 struct nfe_rx_data *data;
2027 struct nfe_desc32 *desc32;
2028 struct nfe_desc64 *desc64;
2030 bus_dma_segment_t segs[1];
2034 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2037 if ((m->m_flags & M_EXT) == 0) {
2041 m->m_pkthdr.len = m->m_len = MJUM9BYTES;
2042 m_adj(m, ETHER_ALIGN);
2044 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2045 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2049 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2051 data = &sc->jrxq.jdata[idx];
2052 if (data->m != NULL) {
2053 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2054 BUS_DMASYNC_POSTREAD);
2055 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2057 map = data->rx_data_map;
2058 data->rx_data_map = sc->jrxq.jrx_spare_map;
2059 sc->jrxq.jrx_spare_map = map;
2060 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2061 BUS_DMASYNC_PREREAD);
2062 data->paddr = segs[0].ds_addr;
2064 /* update mapping address in h/w descriptor */
2065 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2066 desc64 = &sc->jrxq.jdesc64[idx];
2067 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2068 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2069 desc64->length = htole16(segs[0].ds_len);
2070 desc64->flags = htole16(NFE_RX_READY);
2072 desc32 = &sc->jrxq.jdesc32[idx];
2073 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2074 desc32->length = htole16(segs[0].ds_len);
2075 desc32->flags = htole16(NFE_RX_READY);
2083 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2085 struct ifnet *ifp = sc->nfe_ifp;
2086 struct nfe_desc32 *desc32;
2087 struct nfe_desc64 *desc64;
2088 struct nfe_rx_data *data;
2091 int len, prog, rx_npkts;
2095 NFE_LOCK_ASSERT(sc);
2097 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2098 BUS_DMASYNC_POSTREAD);
2100 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2105 data = &sc->rxq.data[sc->rxq.cur];
2107 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2108 desc64 = &sc->rxq.desc64[sc->rxq.cur];
2109 vtag = le32toh(desc64->physaddr[1]);
2110 flags = le16toh(desc64->flags);
2111 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2113 desc32 = &sc->rxq.desc32[sc->rxq.cur];
2114 flags = le16toh(desc32->flags);
2115 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2118 if (flags & NFE_RX_READY)
2121 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2122 if (!(flags & NFE_RX_VALID_V1)) {
2124 nfe_discard_rxbuf(sc, sc->rxq.cur);
2127 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2128 flags &= ~NFE_RX_ERROR;
2129 len--; /* fix buffer length */
2132 if (!(flags & NFE_RX_VALID_V2)) {
2134 nfe_discard_rxbuf(sc, sc->rxq.cur);
2138 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2139 flags &= ~NFE_RX_ERROR;
2140 len--; /* fix buffer length */
2144 if (flags & NFE_RX_ERROR) {
2146 nfe_discard_rxbuf(sc, sc->rxq.cur);
2151 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2153 nfe_discard_rxbuf(sc, sc->rxq.cur);
2157 if ((vtag & NFE_RX_VTAG) != 0 &&
2158 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2159 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2160 m->m_flags |= M_VLANTAG;
2163 m->m_pkthdr.len = m->m_len = len;
2164 m->m_pkthdr.rcvif = ifp;
2166 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2167 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2168 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2169 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2170 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2171 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2172 m->m_pkthdr.csum_flags |=
2173 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2174 m->m_pkthdr.csum_data = 0xffff;
2182 (*ifp->if_input)(ifp, m);
2188 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2189 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2191 if (rx_npktsp != NULL)
2192 *rx_npktsp = rx_npkts;
2193 return (count > 0 ? 0 : EAGAIN);
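/*
 * Returning EAGAIN once the budget is exhausted tells nfe_int_task()
 * that more packets may be pending, so it re-queues itself instead of
 * re-enabling interrupts right away.
 */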
2198 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2200 struct ifnet *ifp = sc->nfe_ifp;
2201 struct nfe_desc32 *desc32;
2202 struct nfe_desc64 *desc64;
2203 struct nfe_rx_data *data;
2206 int len, prog, rx_npkts;
2210 NFE_LOCK_ASSERT(sc);
2212 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2213 BUS_DMASYNC_POSTREAD);
2215 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2221 data = &sc->jrxq.jdata[sc->jrxq.jcur];
2223 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2224 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2225 vtag = le32toh(desc64->physaddr[1]);
2226 flags = le16toh(desc64->flags);
2227 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2229 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2230 flags = le16toh(desc32->flags);
2231 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2234 if (flags & NFE_RX_READY)
2237 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2238 if (!(flags & NFE_RX_VALID_V1)) {
2240 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2243 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2244 flags &= ~NFE_RX_ERROR;
2245 len--; /* fix buffer length */
2248 if (!(flags & NFE_RX_VALID_V2)) {
2250 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2254 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2255 flags &= ~NFE_RX_ERROR;
2256 len--; /* fix buffer length */
2260 if (flags & NFE_RX_ERROR) {
2262 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2267 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2269 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2273 if ((vtag & NFE_RX_VTAG) != 0 &&
2274 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2275 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2276 m->m_flags |= M_VLANTAG;
2279 m->m_pkthdr.len = m->m_len = len;
2280 m->m_pkthdr.rcvif = ifp;
2282 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2283 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2284 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2285 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2286 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2287 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2288 m->m_pkthdr.csum_flags |=
2289 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2290 m->m_pkthdr.csum_data = 0xffff;
2298 (*ifp->if_input)(ifp, m);
2304 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2305 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2307 if (rx_npktsp != NULL)
2308 *rx_npktsp = rx_npkts;
2309 return (count > 0 ? 0 : EAGAIN);
2314 nfe_txeof(struct nfe_softc *sc)
2316 struct ifnet *ifp = sc->nfe_ifp;
2317 struct nfe_desc32 *desc32;
2318 struct nfe_desc64 *desc64;
2319 struct nfe_tx_data *data = NULL;
2323 NFE_LOCK_ASSERT(sc);
2325 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2326 BUS_DMASYNC_POSTREAD);
2329 for (cons = sc->txq.next; cons != sc->txq.cur;
2330 NFE_INC(cons, NFE_TX_RING_COUNT)) {
2331 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2332 desc64 = &sc->txq.desc64[cons];
2333 flags = le16toh(desc64->flags);
2335 desc32 = &sc->txq.desc32[cons];
2336 flags = le16toh(desc32->flags);
2339 if (flags & NFE_TX_VALID)
2344 data = &sc->txq.data[cons];
2346 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2347 if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2349 if ((flags & NFE_TX_ERROR_V1) != 0) {
2350 device_printf(sc->nfe_dev,
2351 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2357 if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2359 if ((flags & NFE_TX_ERROR_V2) != 0) {
2360 device_printf(sc->nfe_dev,
2361 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2367 /* last fragment of the mbuf chain transmitted */
2368 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2369 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2370 BUS_DMASYNC_POSTWRITE);
2371 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2377 sc->nfe_force_tx = 0;
2378 sc->txq.next = cons;
2379 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2380 if (sc->txq.queued == 0)
2381 sc->nfe_watchdog_timer = 0;
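/*
 * Tx reclamation above walks the ring from txq.next to txq.cur and
 * stops at the first descriptor the hardware still owns (NFE_TX_VALID
 * set).  A chain's dmamap is unloaded and its mbuf freed only at the
 * LASTFRAG descriptor, and the watchdog is disarmed once the queue is
 * empty.
 */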
2386 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2388 struct nfe_desc32 *desc32 = NULL;
2389 struct nfe_desc64 *desc64 = NULL;
2391 bus_dma_segment_t segs[NFE_MAX_SCATTER];
2392 int error, i, nsegs, prod, si;
2394 uint16_t cflags, flags;
2397 prod = si = sc->txq.cur;
2398 map = sc->txq.data[prod].tx_data_map;
2400 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2401 &nsegs, BUS_DMA_NOWAIT);
2402 if (error == EFBIG) {
2403 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2410 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2411 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2417 } else if (error != 0)
2425 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2426 bus_dmamap_unload(sc->txq.tx_data_tag, map);
2433 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2434 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2436 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2437 cflags |= NFE_TX_TSO;
2438 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2439 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2440 cflags |= NFE_TX_IP_CSUM;
2441 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2442 cflags |= NFE_TX_TCP_UDP_CSUM;
2443 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2444 cflags |= NFE_TX_TCP_UDP_CSUM;
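/*
 * The controller has a single combined TCP/UDP checksum request bit,
 * so CSUM_TCP and CSUM_UDP both map to NFE_TX_TCP_UDP_CSUM above; IP
 * header checksumming is requested separately with NFE_TX_IP_CSUM,
 * and TSO supersedes both.
 */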
2447 for (i = 0; i < nsegs; i++) {
2448 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2449 desc64 = &sc->txq.desc64[prod];
2450 desc64->physaddr[0] =
2451 htole32(NFE_ADDR_HI(segs[i].ds_addr));
2452 desc64->physaddr[1] =
2453 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2455 desc64->length = htole16(segs[i].ds_len - 1);
2456 desc64->flags = htole16(flags);
2458 desc32 = &sc->txq.desc32[prod];
2460 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2461 desc32->length = htole16(segs[i].ds_len - 1);
2462 desc32->flags = htole16(flags);
2466 * Setting of the valid bit in the first descriptor is
2467 * deferred until the whole chain is fully set up.
2469 flags |= NFE_TX_VALID;
2472 NFE_INC(prod, NFE_TX_RING_COUNT);
2476 * The whole mbuf chain has been DMA mapped; fix the last/first descriptor.
2477 * Csum flags, vtag and TSO belong to the first fragment only.
2479 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2480 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2481 desc64 = &sc->txq.desc64[si];
2482 if ((m->m_flags & M_VLANTAG) != 0)
2483 desc64->vtag = htole32(NFE_TX_VTAG |
2484 m->m_pkthdr.ether_vtag);
2485 if (tso_segsz != 0) {
2488 * The TSO segment size is split across the 16-bit length and flags
2489 * fields below, so the descriptor element is really a 32-bit quantity.
2491 desc64->length |= htole16((uint16_t)tso_segsz);
2492 desc64->flags |= htole16(tso_segsz >> 16);
2495 * finally, set the valid/checksum/TSO bit in the first
2498 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2500 if (sc->nfe_flags & NFE_JUMBO_SUP)
2501 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2503 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2504 desc32 = &sc->txq.desc32[si];
2505 if (tso_segsz != 0) {
2508 * The TSO segment size is split across the 16-bit length and flags
2509 * fields below, so the descriptor element is really a 32-bit quantity.
2511 desc32->length |= htole16((uint16_t)tso_segsz);
2512 desc32->flags |= htole16(tso_segsz >> 16);
2515 * finally, set the valid/checksum/TSO bit in the first
2518 desc32->flags |= htole16(NFE_TX_VALID | cflags);
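/*
 * The dmamap used for this chain was borrowed from the first slot
 * (si), but the mbuf pointer is recorded at the last slot used.  Swap
 * the two maps below so map and mbuf stay together and nfe_txeof()
 * can unload and free both from the same slot when the final fragment
 * completes.
 */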
2522 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2523 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2524 sc->txq.data[prod].tx_data_map = map;
2525 sc->txq.data[prod].m = m;
2527 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2534 nfe_setmulti(struct nfe_softc *sc)
2536 struct ifnet *ifp = sc->nfe_ifp;
2537 struct ifmultiaddr *ifma;
2540 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2541 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2542 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2545 NFE_LOCK_ASSERT(sc);
2547 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2548 bzero(addr, ETHER_ADDR_LEN);
2549 bzero(mask, ETHER_ADDR_LEN);
2553 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2554 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2556 if_maddr_rlock(ifp);
2557 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2560 if (ifma->ifma_addr->sa_family != AF_LINK)
2563 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2564 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2565 u_int8_t mcaddr = addrp[i];
2570 if_maddr_runlock(ifp);
2572 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2577 addr[0] |= 0x01; /* make sure multicast bit is set */
2579 NFE_WRITE(sc, NFE_MULTIADDR_HI,
2580 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2581 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2582 addr[5] << 8 | addr[4]);
2583 NFE_WRITE(sc, NFE_MULTIMASK_HI,
2584 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2585 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2586 mask[5] << 8 | mask[4]);
2588 filter = NFE_READ(sc, NFE_RXFILTER);
2589 filter &= NFE_PFF_RX_PAUSE;
2590 filter |= NFE_RXFILTER_MAGIC;
2591 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2592 NFE_WRITE(sc, NFE_RXFILTER, filter);
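/*
 * The addr/mask pair programmed above is an imperfect hardware
 * multicast filter: a frame is accepted when its destination matches
 * "addr" on every bit set in "mask".  The per-address loop in effect
 * keeps in addr only the bits all subscribed addresses share and
 * masks out the bits on which they disagree, so the filter admits at
 * least the subscribed groups, and possibly more; exact filtering is
 * left to the stack.
 */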
2597 nfe_tx_task(void *arg, int pending)
2601 ifp = (struct ifnet *)arg;
2607 nfe_start(struct ifnet *ifp)
2609 struct nfe_softc *sc = ifp->if_softc;
2615 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2616 IFF_DRV_RUNNING || sc->nfe_link == 0) {
2621 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2622 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2626 if (nfe_encap(sc, &m0) != 0) {
2629 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2630 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2634 ETHER_BPF_MTAP(ifp, m0);
2638 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2639 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2642 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2645 * Set a timeout in case the chip goes out to lunch.
2647 sc->nfe_watchdog_timer = 5;
2655 nfe_watchdog(struct ifnet *ifp)
2657 struct nfe_softc *sc = ifp->if_softc;
2659 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2662 /* Check if we've lost a Tx completion interrupt. */
2664 if (sc->txq.queued == 0) {
2665 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2667 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2668 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
2671 /* Check if we've lost the Tx start command. */
2673 if (sc->nfe_force_tx <= 3) {
2675 * If lost Tx start commands turn out to be the cause of these
2676 * watchdog timeouts, this recovery code should move to nfe_txeof().
2678 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2681 sc->nfe_force_tx = 0;
2683 if_printf(ifp, "watchdog timeout\n");
2685 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2687 nfe_init_locked(sc);
2694 struct nfe_softc *sc = xsc;
2697 nfe_init_locked(sc);
2703 nfe_init_locked(void *xsc)
2705 struct nfe_softc *sc = xsc;
2706 struct ifnet *ifp = sc->nfe_ifp;
2707 struct mii_data *mii;
2711 NFE_LOCK_ASSERT(sc);
2713 mii = device_get_softc(sc->nfe_miibus);
2715 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2720 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2722 nfe_init_tx_ring(sc, &sc->txq);
2723 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2724 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2726 error = nfe_init_rx_ring(sc, &sc->rxq);
2728 device_printf(sc->nfe_dev,
2729 "initialization failed: no memory for rx buffers\n");
2735 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2736 val |= NFE_MAC_ADDR_INORDER;
2737 NFE_WRITE(sc, NFE_TX_UNK, val);
2738 NFE_WRITE(sc, NFE_STATUS, 0);
2740 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2741 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2743 sc->rxtxctl = NFE_RXTX_BIT2;
2744 if (sc->nfe_flags & NFE_40BIT_ADDR)
2745 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2746 else if (sc->nfe_flags & NFE_JUMBO_SUP)
2747 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2749 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2750 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2751 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2752 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2754 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2756 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2758 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2759 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2761 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2763 NFE_WRITE(sc, NFE_SETUP_R6, 0);
2765 /* set MAC address */
2766 nfe_set_macaddr(sc, IF_LLADDR(ifp));
2768 /* tell MAC where rings are in memory */
2769 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2770 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2771 NFE_ADDR_HI(sc->jrxq.jphysaddr));
2772 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2773 NFE_ADDR_LO(sc->jrxq.jphysaddr));
2775 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2776 NFE_ADDR_HI(sc->rxq.physaddr));
2777 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2778 NFE_ADDR_LO(sc->rxq.physaddr));
2780 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2781 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2783 NFE_WRITE(sc, NFE_RING_SIZE,
2784 (NFE_RX_RING_COUNT - 1) << 16 |
2785 (NFE_TX_RING_COUNT - 1));
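/*
 * NFE_RING_SIZE packs both ring sizes, each minus one, into a single
 * register: Rx count in the upper 16 bits, Tx count in the lower 16.
 * With hypothetical counts of 256 apiece the value written would be
 * (255 << 16) | 255 == 0x00ff00ff.
 */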
2787 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2789 /* force MAC to wake up */
2790 val = NFE_READ(sc, NFE_PWR_STATE);
2791 if ((val & NFE_PWR_WAKEUP) == 0)
2792 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2794 val = NFE_READ(sc, NFE_PWR_STATE);
2795 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2798 /* configure interrupt coalescing/mitigation */
2799 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2801 /* no interrupt mitigation: one interrupt per packet */
2802 NFE_WRITE(sc, NFE_IMTIMER, 970);
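/*
 * NFE_IMTIMER holds the interrupt moderation interval.  NFE_IM_DEFAULT
 * batches several completions per interrupt, while the magic value 970
 * effectively disables moderation so that every packet raises its own
 * interrupt; the two writes above are build-time alternatives, not a
 * sequence.
 */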
2805 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2806 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2807 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2809 /* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
2810 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2812 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2814 NFE_WRITE(sc, NFE_WOL_CTL, 0);
2816 sc->rxtxctl &= ~NFE_RXTX_BIT2;
2817 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2819 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2825 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2828 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2830 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2832 /* Clear hardware stats. */
2833 nfe_stats_clear(sc);
2835 #ifdef DEVICE_POLLING
2836 if (ifp->if_capenable & IFCAP_POLLING)
2837 nfe_disable_intr(sc);
2841 nfe_enable_intr(sc); /* enable interrupts */
2843 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2844 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2849 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2854 nfe_stop(struct ifnet *ifp)
2856 struct nfe_softc *sc = ifp->if_softc;
2857 struct nfe_rx_ring *rx_ring;
2858 struct nfe_jrx_ring *jrx_ring;
2859 struct nfe_tx_ring *tx_ring;
2860 struct nfe_rx_data *rdata;
2861 struct nfe_tx_data *tdata;
2864 NFE_LOCK_ASSERT(sc);
2866 sc->nfe_watchdog_timer = 0;
2867 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2869 callout_stop(&sc->nfe_stat_ch);
2872 NFE_WRITE(sc, NFE_TX_CTL, 0);
2875 NFE_WRITE(sc, NFE_RX_CTL, 0);
2877 /* disable interrupts */
2878 nfe_disable_intr(sc);
2882 /* free Rx and Tx mbufs still in the queues. */
2884 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2885 rdata = &rx_ring->data[i];
2886 if (rdata->m != NULL) {
2887 bus_dmamap_sync(rx_ring->rx_data_tag,
2888 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2889 bus_dmamap_unload(rx_ring->rx_data_tag,
2890 rdata->rx_data_map);
2896 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2897 jrx_ring = &sc->jrxq;
2898 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2899 rdata = &jrx_ring->jdata[i];
2900 if (rdata->m != NULL) {
2901 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2902 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2903 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2904 rdata->rx_data_map);
2912 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2913 tdata = &tx_ring->data[i];
2914 if (tdata->m != NULL) {
2915 bus_dmamap_sync(tx_ring->tx_data_tag,
2916 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2917 bus_dmamap_unload(tx_ring->tx_data_tag,
2918 tdata->tx_data_map);
2923 /* Update hardware stats. */
2924 nfe_stats_update(sc);
2929 nfe_ifmedia_upd(struct ifnet *ifp)
2931 struct nfe_softc *sc = ifp->if_softc;
2932 struct mii_data *mii;
2935 mii = device_get_softc(sc->nfe_miibus);
2944 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2946 struct nfe_softc *sc;
2947 struct mii_data *mii;
2952 mii = device_get_softc(sc->nfe_miibus);
2956 ifmr->ifm_active = mii->mii_media_active;
2957 ifmr->ifm_status = mii->mii_media_status;
2964 struct nfe_softc *sc;
2965 struct mii_data *mii;
2968 sc = (struct nfe_softc *)xsc;
2970 NFE_LOCK_ASSERT(sc);
2974 mii = device_get_softc(sc->nfe_miibus);
2976 nfe_stats_update(sc);
2978 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2983 nfe_shutdown(device_t dev)
2986 return (nfe_suspend(dev));
2991 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2995 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2996 val = NFE_READ(sc, NFE_MACADDR_LO);
2997 addr[0] = (val >> 8) & 0xff;
2998 addr[1] = (val & 0xff);
3000 val = NFE_READ(sc, NFE_MACADDR_HI);
3001 addr[2] = (val >> 24) & 0xff;
3002 addr[3] = (val >> 16) & 0xff;
3003 addr[4] = (val >> 8) & 0xff;
3004 addr[5] = (val & 0xff);
3006 val = NFE_READ(sc, NFE_MACADDR_LO);
3007 addr[5] = (val >> 8) & 0xff;
3008 addr[4] = (val & 0xff);
3010 val = NFE_READ(sc, NFE_MACADDR_HI);
3011 addr[3] = (val >> 24) & 0xff;
3012 addr[2] = (val >> 16) & 0xff;
3013 addr[1] = (val >> 8) & 0xff;
3014 addr[0] = (val & 0xff);
3020 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
3023 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
3024 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
3025 addr[1] << 8 | addr[0]);
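/*
 * Byte-order quirk: the chips disagree on the order in which the
 * station address is stored in NFE_MACADDR_HI/LO, hence the two
 * decoding branches in nfe_get_macaddr() keyed on NFE_CORRECT_MACADDR,
 * the NFE_MAC_ADDR_INORDER hint written in nfe_init_locked(), and the
 * single reversed layout nfe_set_macaddr() always programs.
 */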
3030 * Map a single buffer address.
3034 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3036 struct nfe_dmamap_arg *ctx;
3041 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3043 ctx = (struct nfe_dmamap_arg *)arg;
3044 ctx->nfe_busaddr = segs[0].ds_addr;
3049 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3055 value = *(int *)arg1;
3056 error = sysctl_handle_int(oidp, &value, 0, req);
3057 if (error || !req->newptr)
3059 if (value < low || value > high)
3061 *(int *)arg1 = value;
3068 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3071 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
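/*
 * sysctl_int_range() is a range-checked wrapper around
 * sysctl_handle_int(): it hands the current value to the request and
 * stores the result back only if it lies within [low, high].
 * sysctl_hw_nfe_proc_limit() above instantiates it for the Rx
 * processing budget, bounded by NFE_PROC_MIN and NFE_PROC_MAX.
 */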
3076 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
3077 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3078 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
3079 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3082 nfe_sysctl_node(struct nfe_softc *sc)
3084 struct sysctl_ctx_list *ctx;
3085 struct sysctl_oid_list *child, *parent;
3086 struct sysctl_oid *tree;
3087 struct nfe_hw_stats *stats;
3090 stats = &sc->nfe_stats;
3091 ctx = device_get_sysctl_ctx(sc->nfe_dev);
3092 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3093 SYSCTL_ADD_PROC(ctx, child,
3094 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3095 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3096 "max number of Rx events to process");
3098 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3099 error = resource_int_value(device_get_name(sc->nfe_dev),
3100 device_get_unit(sc->nfe_dev), "process_limit",
3101 &sc->nfe_process_limit);
3103 if (sc->nfe_process_limit < NFE_PROC_MIN ||
3104 sc->nfe_process_limit > NFE_PROC_MAX) {
3105 device_printf(sc->nfe_dev,
3106 "process_limit value out of range; "
3107 "using default: %d\n", NFE_PROC_DEFAULT);
3108 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3112 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3115 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3116 NULL, "NFE statistics");
3117 parent = SYSCTL_CHILDREN(tree);
3119 /* Rx statistics. */
3120 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3121 NULL, "Rx MAC statistics");
3122 child = SYSCTL_CHILDREN(tree);
3124 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3125 &stats->rx_frame_errors, "Framing Errors");
3126 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3127 &stats->rx_extra_bytes, "Extra Bytes");
3128 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3129 &stats->rx_late_cols, "Late Collisions");
3130 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3131 &stats->rx_runts, "Runts");
3132 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3133 &stats->rx_jumbos, "Jumbos");
3134 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3135 &stats->rx_fifo_overuns, "FIFO Overruns");
3136 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3137 &stats->rx_crc_errors, "CRC Errors");
3138 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3139 &stats->rx_fae, "Frame Alignment Errors");
3140 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3141 &stats->rx_len_errors, "Length Errors");
3142 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3143 &stats->rx_unicast, "Unicast Frames");
3144 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3145 &stats->rx_multicast, "Multicast Frames");
3146 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3147 &stats->rx_broadcast, "Broadcast Frames");
3148 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3149 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3150 &stats->rx_octets, "Octets");
3151 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3152 &stats->rx_pause, "Pause frames");
3153 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3154 &stats->rx_drops, "Drop frames");
3157 /* Tx statistics. */
3158 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3159 NULL, "Tx MAC statistics");
3160 child = SYSCTL_CHILDREN(tree);
3161 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3162 &stats->tx_octets, "Octets");
3163 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3164 &stats->tx_zero_rexmits, "Zero Retransmits");
3165 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3166 &stats->tx_one_rexmits, "One Retransmits");
3167 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3168 &stats->tx_multi_rexmits, "Multiple Retransmits");
3169 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3170 &stats->tx_late_cols, "Late Collisions");
3171 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3172 &stats->tx_fifo_underuns, "FIFO Underruns");
3173 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3174 &stats->tx_carrier_losts, "Lost Carriers");
3175 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3176 &stats->tx_excess_deferals, "Excess Deferrals");
3177 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3178 &stats->tx_retry_errors, "Retry Errors");
3179 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3180 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3181 &stats->tx_deferals, "Deferrals");
3182 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3183 &stats->tx_frames, "Frames");
3184 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3185 &stats->tx_pause, "Pause Frames");
3187 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3188 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3189 &stats->tx_deferals, "Unicast Frames");
3190 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3191 &stats->tx_frames, "Multicast Frames");
3192 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3193 &stats->tx_pause, "Broadcast Frames");
3197 #undef NFE_SYSCTL_STAT_ADD32
3198 #undef NFE_SYSCTL_STAT_ADD64
3201 nfe_stats_clear(struct nfe_softc *sc)
3205 if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3206 mib_cnt = NFE_NUM_MIB_STATV1;
3207 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3208 mib_cnt = NFE_NUM_MIB_STATV2;
3212 for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
3213 NFE_READ(sc, NFE_TX_OCTET + i);
3215 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3216 NFE_READ(sc, NFE_TX_UNICAST);
3217 NFE_READ(sc, NFE_TX_MULTICAST);
3218 NFE_READ(sc, NFE_TX_BROADCAST);
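/*
 * The MIB counters appear to be clear-on-read: nfe_stats_clear() just
 * reads each counter register and discards the value, resetting the
 * hardware counts, while nfe_stats_update() folds the same reads into
 * the software copies in sc->nfe_stats.
 */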
3223 nfe_stats_update(struct nfe_softc *sc)
3225 struct nfe_hw_stats *stats;
3227 NFE_LOCK_ASSERT(sc);
3229 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3232 stats = &sc->nfe_stats;
3233 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3234 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3235 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3236 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3237 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3238 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3239 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3240 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3241 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3242 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3243 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3244 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3245 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3246 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3247 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3248 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3249 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3250 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3251 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3252 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3253 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3255 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3256 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3257 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3258 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3259 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3260 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3261 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3264 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3265 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3266 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3267 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3273 nfe_set_linkspeed(struct nfe_softc *sc)
3275 struct mii_softc *miisc;
3276 struct mii_data *mii;
3279 NFE_LOCK_ASSERT(sc);
3281 mii = device_get_softc(sc->nfe_miibus);
3284 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3285 (IFM_ACTIVE | IFM_AVALID)) {
3286 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3298 if (mii->mii_instance) {
3299 miisc = LIST_FIRST(&mii->mii_phys);
3300 phyno = miisc->mii_phy;
3301 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3302 mii_phy_reset(miisc);
3305 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3306 nfe_miibus_writereg(sc->nfe_dev, phyno,
3307 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3308 nfe_miibus_writereg(sc->nfe_dev, phyno,
3309 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3313 * Poll the link state until nfe(4) gets a 10/100Mbps link.
3315 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3317 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3318 == (IFM_ACTIVE | IFM_AVALID)) {
3319 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3322 nfe_mac_config(sc, mii);
3329 pause("nfelnk", hz);
3332 if (i == MII_ANEGTICKS_GIGE)
3333 device_printf(sc->nfe_dev,
3334 "establishing a link failed, WOL may not work!");
3337 * No link; force the MAC to a 100Mbps, full-duplex link.
3338 * This is the last resort and may or may not work.
3340 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3341 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3342 nfe_mac_config(sc, mii);
3347 nfe_set_wol(struct nfe_softc *sc)
3354 NFE_LOCK_ASSERT(sc);
3356 if (pci_find_extcap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3359 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3360 wolctl = NFE_WOL_MAGIC;
3363 NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3364 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
3365 nfe_set_linkspeed(sc);
3366 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3367 NFE_WRITE(sc, NFE_PWR2_CTL,
3368 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3370 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3371 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3372 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3375 /* Request PME if WOL is requested. */
3376 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3377 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3378 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3379 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3380 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
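/*
 * Arming WOL is thus a four-step affair: program the wake condition in
 * NFE_WOL_CTL, drop to a link speed the sleeping PHY can keep via
 * nfe_set_linkspeed(), leave the Rx engine listening, and finally set
 * PCIM_PSTAT_PMEENABLE in the PCI power management status register so
 * the chip may assert PME# from a low-power state.
 */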